diff --git a/.env b/.env
index 991422c8..4d4524df 100644
--- a/.env
+++ b/.env
@@ -12,6 +12,7 @@ AKASH_TS_ROOT=${AKASH_ROOT}/ts
 AKASH_TS_PACKAGE_FILE=${AKASH_TS_ROOT}/package.json
 AKASH_TS_NODE_MODULES=${AKASH_TS_ROOT}/node_modules
 AKASH_TS_NODE_BIN=${AKASH_TS_NODE_MODULES}/.bin
-AKASH_DEVCACHE_TS_TMP=${AKASH_DEVCACHE_BASE}/tmp/ts
-AKASH_DEVCACHE_TS_TMP_GRPC_JS=${AKASH_DEVCACHE_TS_TMP}/generated-grpc-js
-AKASH_DEVCACHE_TS_TMP_PATCHES=${AKASH_DEVCACHE_TS_TMP}/patches
\ No newline at end of file
+AKASH_DEVCACHE_TMP=${AKASH_DEVCACHE_BASE}/tmp
+AKASH_DEVCACHE_TMP_TS=${AKASH_DEVCACHE_TMP}/ts
+AKASH_DEVCACHE_TMP_TS_GRPC_JS=${AKASH_DEVCACHE_TMP_TS}/generated-grpc-js
+AKASH_DEVCACHE_TMP_TS_PATCHES=${AKASH_DEVCACHE_TMP_TS}/patches
diff --git a/.envrc b/.envrc
index 481c4e38..0783ed12 100644
--- a/.envrc
+++ b/.envrc
@@ -2,7 +2,6 @@ AKASH_ROOT=$(pwd)
 export AKASH_ROOT
 
 dotenv
-dotenv_if_exists dev.env
 
 TOOLS=${AKASH_ROOT}/script/tools.sh
 SEMVER=${AKASH_ROOT}/script/semver.sh
@@ -19,6 +18,15 @@ fi
 
 AKASH_DIRENV_SET=1
 
+dotenv_if_exists dev.env
+
+if [[ ${GOWORK} != "off" ]] && [[ -f go.work ]]; then
+    GOWORK=${AKASH_ROOT}/go.work
+else
+    GOWORK=off
+fi
+
+export TOOLS
 export SEMVER
 export GOTOOLCHAIN
 export GOTOOLCHAIN_SEMVER
diff --git a/.gitignore b/.gitignore
index bb7af02f..34fd80f2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -13,7 +13,7 @@ vendor/
 
 # Go workspace file
-go.work
+go.work*
 
 # coverage
 coverage.txt
diff --git a/.golangci.yaml b/.golangci.yaml
index 4ac032c1..cf9e769d 100644
--- a/.golangci.yaml
+++ b/.golangci.yaml
@@ -6,11 +6,8 @@ issues:
 
 # Skip generated k8s code
 run:
-  skip-dirs:
-    - "^go/node/types/v1beta1"
-    - "^go/node/types/v1beta2"
-    - "^go/node/market/v1beta3"
-  skip-files:
+  exclude-dirs:
+  exclude-files:
     - "\\.pb\\.go$"
     - "\\.pb\\.gw\\.go$"
   # Skip vendor/ etc
@@ -29,7 +26,7 @@ linters:
     - staticcheck
    - revive
     - gosec
-    - exportloopref
+    - copyloopvar
     - prealloc
 linters-settings:
   gocritic:
diff --git a/Makefile b/Makefile
index d4b0a541..65fc7e70 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,5 @@
 UNAME_OS := $(shell uname -s)
 UNAME_ARCH := $(shell uname -m)
-PROTO_LEGACY ?= true
 
 ifeq (0, $(shell id -u))
 $(warning "make was started with superuser privileges. it may cause issues with direnv")
@@ -24,23 +23,52 @@ ifeq (, $(GOTOOLCHAIN))
 $(error "GOTOOLCHAIN is not set")
 endif
 
+ifeq ($(GO111MODULE),off)
+else
+    GOMOD=readonly
+endif
+
+ifneq ($(GOWORK),off)
+    ifeq ($(shell test -e ${AKASH_ROOT}/go/go.work && echo -n yes),yes)
+        GOWORK=${AKASH_ROOT}/go/go.work
+    else
+        GOWORK=off
+    endif
+endif
+
+ifneq ($(GOWORK),off)
+    ifeq ($(GOMOD),$(filter $(GOMOD),mod ""))
+$(error '-mod may only be set to readonly or vendor when in workspace mode, but it is set to ""')
+    endif
+endif
+
+ifeq ($(GOMOD),vendor)
+    ifneq ($(wildcard ./vendor/.),)
+$(error "go -mod is in vendor mode but vendor dir has not been found. consider to run go mod vendor")
+    endif
+endif
+
+GO_ROOT := go
+TS_ROOT := $(AKASH_TS_ROOT)
+
+BUMP_MOD ?=
+
 GO := GO111MODULE=$(GO111MODULE) go
-GO_MOD_NAME := $(shell go list -m 2>/dev/null)
-
-BUF_VERSION ?= 1.28.1
-PROTOC_VERSION ?= 21.12
-GOGOPROTO_VERSION ?= $(shell $(GO) list -mod=readonly -m -f '{{ .Version }}' github.com/cosmos/gogoproto)
-# TODO https://github.com/akash-network/support/issues/77
-PROTOC_GEN_GOCOSMOS_VERSION ?= $(shell $(GO) list -mod=readonly -m -f '{{ .Version }}' github.com/regen-network/cosmos-proto)
-PROTOC_GEN_GO_PULSAR_VERSION ?= $(shell $(GO) list -mod=readonly -m -f '{{ .Version }}' github.com/cosmos/cosmos-proto)
-PROTOC_GEN_GO_VERSION ?= $(shell $(GO) list -mod=readonly -m -f '{{ .Version }}' google.golang.org/protobuf)
-PROTOC_GEN_GRPC_GATEWAY_VERSION := $(shell $(GO) list -mod=readonly -m -f '{{ .Version }}' github.com/grpc-ecosystem/grpc-gateway)
-PROTOC_GEN_DOC_VERSION := $(shell $(GO) list -mod=readonly -m -f '{{ .Version }}' github.com/pseudomuto/protoc-gen-doc)
+GO_MOD_NAME := $(shell cd $(GO_ROOT); GOWORK=off go list -m 2>/dev/null)
+
+BUF_VERSION ?= 1.38.0
+PROTOC_VERSION ?= 26.1
+GOGOPROTO_VERSION ?= $(shell cd $(GO_ROOT); $(GO) list -mod=readonly -m -f '{{ .Version }}' github.com/cosmos/gogoproto)
+PROTOC_GEN_GOCOSMOS_VERSION ?= $(GOGOPROTO_VERSION)
+PROTOC_GEN_GO_PULSAR_VERSION ?= $(shell cd $(GO_ROOT); $(GO) list -mod=readonly -m -f '{{ .Version }}' github.com/cosmos/cosmos-proto)
+PROTOC_GEN_GO_VERSION ?= $(shell cd $(GO_ROOT); $(GO) list -mod=readonly -m -f '{{ .Version }}' google.golang.org/protobuf)
+PROTOC_GEN_GRPC_GATEWAY_VERSION := $(shell cd $(GO_ROOT); $(GO) list -mod=readonly -m -f '{{ .Version }}' github.com/grpc-ecosystem/grpc-gateway)
+PROTOC_GEN_DOC_VERSION := $(shell cd $(GO_ROOT); $(GO) list -mod=readonly -m -f '{{ .Version }}' github.com/pseudomuto/protoc-gen-doc)
 PROTOC_GEN_SWAGGER_VERSION := $(PROTOC_GEN_GRPC_GATEWAY_VERSION)
 MODVENDOR_VERSION ?= v0.5.0
-MOCKERY_VERSION ?= 2.42.0
-GOLANGCI_LINT_VERSION ?= v1.56.1
+MOCKERY_VERSION ?= 2.45.0
+GOLANGCI_LINT_VERSION ?= v1.60.3
 
 BUF_VERSION_FILE := $(AKASH_DEVCACHE_VERSIONS)/buf/$(BUF_VERSION)
 PROTOC_VERSION_FILE := $(AKASH_DEVCACHE_VERSIONS)/protoc/$(PROTOC_VERSION)
@@ -58,8 +86,8 @@ GOLANGCI_LINT_VERSION_FILE := $(AKASH_DEVCACHE_VERSIONS)/golangci-lint
 
 BUF := $(AKASH_DEVCACHE_BIN)/buf
 PROTOC := $(AKASH_DEVCACHE_BIN)/protoc
-# TODO https://github.com/akash-network/support/issues/77
-PROTOC_GEN_GOCOSMOS := $(AKASH_DEVCACHE_BIN)/legacy/protoc-gen-gocosmos
+PROTOC_GEN_GOCOSMOS := $(AKASH_DEVCACHE_BIN)/protoc-gen-gocosmos
+GOGOPROTO := $(AKASH_DEVCACHE_BIN)/gogoproto
 PROTOC_GEN_GO_PULSAR := $(AKASH_DEVCACHE_BIN)/protoc-gen-go-pulsar
 PROTOC_GEN_GO := $(AKASH_DEVCACHE_BIN)/protoc-gen-go
 PROTOC_GEN_GRPC_GATEWAY := $(AKASH_DEVCACHE_BIN)/protoc-gen-grpc-gateway
@@ -72,11 +100,14 @@ MOCKERY := $(AKASH_DEVCACHE_BIN)/mockery
 GOLANGCI_LINT := $(AKASH_DEVCACHE_BIN)/golangci-lint
 GOLANGCI_LINT_RUN := $(GOLANGCI_LINT) run
-GOLINT := $(GOLANGCI_LINT_RUN) ./... --disable-all --deadline=5m --enable
+GOLINT := $(GOLANGCI_LINT_RUN) ./... --disable-all --deadline=10m --enable
 
 DOCKER_RUN := docker run --rm -v $(shell pwd):/workspace -w /workspace
 DOCKER_BUF := $(DOCKER_RUN) bufbuild/buf:$(BUF_VERSION)
 
+GO_MODULES ?= $(shell find * -name go.mod -exec dirname {} \;)
+GO_TEST_DIRS ?= ./...
+
 include $(AKASH_ROOT)/make/setup-cache.mk
 include $(AKASH_ROOT)/make/mod.mk
 include $(AKASH_ROOT)/make/test.mk
@@ -85,6 +116,10 @@ include $(AKASH_ROOT)/make/lint.mk
 include $(AKASH_ROOT)/make/release-ts.mk
 include $(AKASH_ROOT)/make/code-style.mk
 
+.PHONY: bump-%
+bump-%:
+	@./script/tools.sh bump "$*" "$(BUMP_MOD)"
+
 .PHONY: clean
 clean:
 	rm -rf $(AKASH_DEVCACHE)
diff --git a/buf.gen.gogo.yaml b/buf.gen.go.yaml
similarity index 100%
rename from buf.gen.gogo.yaml
rename to buf.gen.go.yaml
diff --git a/buf.gen.ts.yaml b/buf.gen.ts.yaml
new file mode 100644
index 00000000..cd2f4c0f
--- /dev/null
+++ b/buf.gen.ts.yaml
@@ -0,0 +1,11 @@
+version: v1
+plugins:
+  - name: ts
+    strategy: all
+    path: ./ts/node_modules/.bin/protoc-gen-ts_proto
+    out: ./ts/src/generated
+    opt: "esModuleInterop=true,forceLong=long,outputTypeRegistry=true,useExactTypes=false,outputIndex=true"
+  - name: grpc-gateway-ts
+    path: ./ts/node_modules/.bin/protoc-gen-ts_proto
+    out: ./.cache/tmp/ts/generated-grpc-js
+    opt: "esModuleInterop=true,forceLong=long,outputTypeRegistry=true,useExactTypes=false,outputServices=grpc-js"
\ No newline at end of file
diff --git a/buf.work.yaml b/buf.work.yaml
index b72eb84b..513a0f47 100644
--- a/buf.work.yaml
+++ b/buf.work.yaml
@@ -3,5 +3,3 @@ directories:
   - proto/node
   - proto/provider
   - .cache/include
-  - vendor/github.com/cosmos/cosmos-sdk/proto
-  - vendor/github.com/cosmos/cosmos-sdk/third_party/proto
diff --git a/docs/config.yaml b/docs/config.yaml
index 36dd3f8d..2c75fea7 100644
--- a/docs/config.yaml
+++ b/docs/config.yaml
@@ -1,16 +1,28 @@
+---
 swagger: '2.0'
 info:
   title: "AKASH - gRPC Gateway docs"
   description: "A REST interface for state queries"
   version: "1.0.0"
 apis:
-  - url: "./.cache/tmp/swagger-gen/akash/audit/v1beta3/query.swagger.json"
-  - url: "./.cache/tmp/swagger-gen/akash/cert/v1beta3/query.swagger.json"
-  - url: "./.cache/tmp/swagger-gen/akash/deployment/v1beta3/query.swagger.json"
-  - url: "./.cache/tmp/swagger-gen/akash/deployment/v1beta3/service.swagger.json"
-  - url: "./.cache/tmp/swagger-gen/akash/market/v1beta3/query.swagger.json"
-  - url: "./.cache/tmp/swagger-gen/akash/market/v1beta3/service.swagger.json"
-  - url: "./.cache/tmp/swagger-gen/akash/provider/v1beta3/query.swagger.json"
-  - url: "./vendor/github.com/cosmos/cosmos-sdk/client/docs/swagger-ui/swagger.yaml"
+  - url: "./.cache/tmp/swagger-gen/akash/audit/v1/query.swagger.json"
+  - url: "./.cache/tmp/swagger-gen/akash/cert/v1/query.swagger.json"
+  - url: "./.cache/tmp/swagger-gen/akash/deployment/v1beta4/query.swagger.json"
+    operationIds:
+      rename:
+        Params: DeploymentParams
+  - url: "./.cache/tmp/swagger-gen/akash/deployment/v1beta4/service.swagger.json"
+  - url: "./.cache/tmp/swagger-gen/akash/market/v1beta5/query.swagger.json"
+    operationIds:
+      rename:
+        Params: MarketParams
+  - url: "./.cache/tmp/swagger-gen/akash/market/v1beta5/service.swagger.json"
+  - url: "./.cache/tmp/swagger-gen/akash/provider/v1beta4/query.swagger.json"
+  - url: "./.cache/tmp/swagger-gen/akash/take/v1/query.swagger.json"
+    operationIds:
+      rename:
+        Params: TakeParams
+  - url: "./.cache/tmp/swagger-gen/akash/take/v1/service.swagger.json"
+  - url: "./go/vendor/github.com/cosmos/cosmos-sdk/client/docs/swagger-ui/swagger.yaml"
 dereference:
   circular: "ignore"
diff --git a/docs/proto/node.md b/docs/proto/node.md
index 5614aea7..95773ac1 100644
--- a/docs/proto/node.md
+++ b/docs/proto/node.md
@@ -4,734 +4,434 @@ ## Table of Contents - - [akash/discovery/v1/client_info.proto](#akash/discovery/v1/client_info.proto) - - 
[ClientInfo](#akash.discovery.v1.ClientInfo) - - - [akash/discovery/v1/akash.proto](#akash/discovery/v1/akash.proto) - - [Akash](#akash.discovery.v1.Akash) - - - [akash/provider/v1beta3/query.proto](#akash/provider/v1beta3/query.proto) - - [QueryProviderRequest](#akash.provider.v1beta3.QueryProviderRequest) - - [QueryProviderResponse](#akash.provider.v1beta3.QueryProviderResponse) - - [QueryProvidersRequest](#akash.provider.v1beta3.QueryProvidersRequest) - - [QueryProvidersResponse](#akash.provider.v1beta3.QueryProvidersResponse) - - - [Query](#akash.provider.v1beta3.Query) - - - [akash/provider/v1beta3/genesis.proto](#akash/provider/v1beta3/genesis.proto) - - [GenesisState](#akash.provider.v1beta3.GenesisState) - - - [akash/provider/v1beta3/provider.proto](#akash/provider/v1beta3/provider.proto) - - [MsgCreateProvider](#akash.provider.v1beta3.MsgCreateProvider) - - [MsgCreateProviderResponse](#akash.provider.v1beta3.MsgCreateProviderResponse) - - [MsgDeleteProvider](#akash.provider.v1beta3.MsgDeleteProvider) - - [MsgDeleteProviderResponse](#akash.provider.v1beta3.MsgDeleteProviderResponse) - - [MsgUpdateProvider](#akash.provider.v1beta3.MsgUpdateProvider) - - [MsgUpdateProviderResponse](#akash.provider.v1beta3.MsgUpdateProviderResponse) - - [Provider](#akash.provider.v1beta3.Provider) - - [ProviderInfo](#akash.provider.v1beta3.ProviderInfo) - - - [Msg](#akash.provider.v1beta3.Msg) - - - [akash/provider/v1beta2/query.proto](#akash/provider/v1beta2/query.proto) - - [QueryProviderRequest](#akash.provider.v1beta2.QueryProviderRequest) - - [QueryProviderResponse](#akash.provider.v1beta2.QueryProviderResponse) - - [QueryProvidersRequest](#akash.provider.v1beta2.QueryProvidersRequest) - - [QueryProvidersResponse](#akash.provider.v1beta2.QueryProvidersResponse) - - - [Query](#akash.provider.v1beta2.Query) - - - [akash/provider/v1beta2/genesis.proto](#akash/provider/v1beta2/genesis.proto) - - [GenesisState](#akash.provider.v1beta2.GenesisState) - - - [akash/provider/v1beta2/provider.proto](#akash/provider/v1beta2/provider.proto) - - [MsgCreateProvider](#akash.provider.v1beta2.MsgCreateProvider) - - [MsgCreateProviderResponse](#akash.provider.v1beta2.MsgCreateProviderResponse) - - [MsgDeleteProvider](#akash.provider.v1beta2.MsgDeleteProvider) - - [MsgDeleteProviderResponse](#akash.provider.v1beta2.MsgDeleteProviderResponse) - - [MsgUpdateProvider](#akash.provider.v1beta2.MsgUpdateProvider) - - [MsgUpdateProviderResponse](#akash.provider.v1beta2.MsgUpdateProviderResponse) - - [Provider](#akash.provider.v1beta2.Provider) - - [ProviderInfo](#akash.provider.v1beta2.ProviderInfo) - - - [Msg](#akash.provider.v1beta2.Msg) - - - [akash/provider/v1beta1/provider.proto](#akash/provider/v1beta1/provider.proto) - - [MsgCreateProvider](#akash.provider.v1beta1.MsgCreateProvider) - - [MsgCreateProviderResponse](#akash.provider.v1beta1.MsgCreateProviderResponse) - - [MsgDeleteProvider](#akash.provider.v1beta1.MsgDeleteProvider) - - [MsgDeleteProviderResponse](#akash.provider.v1beta1.MsgDeleteProviderResponse) - - [MsgUpdateProvider](#akash.provider.v1beta1.MsgUpdateProvider) - - [MsgUpdateProviderResponse](#akash.provider.v1beta1.MsgUpdateProviderResponse) - - [Provider](#akash.provider.v1beta1.Provider) - - [ProviderInfo](#akash.provider.v1beta1.ProviderInfo) - - - [Msg](#akash.provider.v1beta1.Msg) - - - [akash/audit/v1beta3/audit.proto](#akash/audit/v1beta3/audit.proto) - - [AttributesFilters](#akash.audit.v1beta3.AttributesFilters) - - [AttributesResponse](#akash.audit.v1beta3.AttributesResponse) - - 
[AuditedAttributes](#akash.audit.v1beta3.AuditedAttributes) - - [MsgDeleteProviderAttributes](#akash.audit.v1beta3.MsgDeleteProviderAttributes) - - [MsgDeleteProviderAttributesResponse](#akash.audit.v1beta3.MsgDeleteProviderAttributesResponse) - - [MsgSignProviderAttributes](#akash.audit.v1beta3.MsgSignProviderAttributes) - - [MsgSignProviderAttributesResponse](#akash.audit.v1beta3.MsgSignProviderAttributesResponse) - - [Provider](#akash.audit.v1beta3.Provider) - - - [Msg](#akash.audit.v1beta3.Msg) - - - [akash/audit/v1beta3/query.proto](#akash/audit/v1beta3/query.proto) - - [QueryAllProvidersAttributesRequest](#akash.audit.v1beta3.QueryAllProvidersAttributesRequest) - - [QueryAuditorAttributesRequest](#akash.audit.v1beta3.QueryAuditorAttributesRequest) - - [QueryProviderAttributesRequest](#akash.audit.v1beta3.QueryProviderAttributesRequest) - - [QueryProviderAuditorRequest](#akash.audit.v1beta3.QueryProviderAuditorRequest) - - [QueryProviderRequest](#akash.audit.v1beta3.QueryProviderRequest) - - [QueryProvidersResponse](#akash.audit.v1beta3.QueryProvidersResponse) - - - [Query](#akash.audit.v1beta3.Query) - - - [akash/audit/v1beta3/genesis.proto](#akash/audit/v1beta3/genesis.proto) - - [GenesisState](#akash.audit.v1beta3.GenesisState) - - - [akash/audit/v1beta2/audit.proto](#akash/audit/v1beta2/audit.proto) - - [AttributesFilters](#akash.audit.v1beta2.AttributesFilters) - - [AttributesResponse](#akash.audit.v1beta2.AttributesResponse) - - [AuditedAttributes](#akash.audit.v1beta2.AuditedAttributes) - - [MsgDeleteProviderAttributes](#akash.audit.v1beta2.MsgDeleteProviderAttributes) - - [MsgDeleteProviderAttributesResponse](#akash.audit.v1beta2.MsgDeleteProviderAttributesResponse) - - [MsgSignProviderAttributes](#akash.audit.v1beta2.MsgSignProviderAttributes) - - [MsgSignProviderAttributesResponse](#akash.audit.v1beta2.MsgSignProviderAttributesResponse) - - [Provider](#akash.audit.v1beta2.Provider) - - - [Msg](#akash.audit.v1beta2.Msg) - - - [akash/audit/v1beta2/query.proto](#akash/audit/v1beta2/query.proto) - - [QueryAllProvidersAttributesRequest](#akash.audit.v1beta2.QueryAllProvidersAttributesRequest) - - [QueryAuditorAttributesRequest](#akash.audit.v1beta2.QueryAuditorAttributesRequest) - - [QueryProviderAttributesRequest](#akash.audit.v1beta2.QueryProviderAttributesRequest) - - [QueryProviderAuditorRequest](#akash.audit.v1beta2.QueryProviderAuditorRequest) - - [QueryProviderRequest](#akash.audit.v1beta2.QueryProviderRequest) - - [QueryProvidersResponse](#akash.audit.v1beta2.QueryProvidersResponse) - - - [Query](#akash.audit.v1beta2.Query) - - - [akash/audit/v1beta2/genesis.proto](#akash/audit/v1beta2/genesis.proto) - - [GenesisState](#akash.audit.v1beta2.GenesisState) - - - [akash/audit/v1beta1/audit.proto](#akash/audit/v1beta1/audit.proto) - - [AttributesFilters](#akash.audit.v1beta1.AttributesFilters) - - [AttributesResponse](#akash.audit.v1beta1.AttributesResponse) - - [AuditedAttributes](#akash.audit.v1beta1.AuditedAttributes) - - [MsgDeleteProviderAttributes](#akash.audit.v1beta1.MsgDeleteProviderAttributes) - - [MsgDeleteProviderAttributesResponse](#akash.audit.v1beta1.MsgDeleteProviderAttributesResponse) - - [MsgSignProviderAttributes](#akash.audit.v1beta1.MsgSignProviderAttributes) - - [MsgSignProviderAttributesResponse](#akash.audit.v1beta1.MsgSignProviderAttributesResponse) - - [Provider](#akash.audit.v1beta1.Provider) - - - [Msg](#akash.audit.v1beta1.Msg) - - - [akash/take/v1beta3/query.proto](#akash/take/v1beta3/query.proto) - - [Query](#akash.take.v1beta3.Query) - - - 
[akash/take/v1beta3/genesis.proto](#akash/take/v1beta3/genesis.proto) - - [GenesisState](#akash.take.v1beta3.GenesisState) - - - [akash/take/v1beta3/params.proto](#akash/take/v1beta3/params.proto) - - [DenomTakeRate](#akash.take.v1beta3.DenomTakeRate) - - [Params](#akash.take.v1beta3.Params) - - - [akash/deployment/v1beta3/groupmsg.proto](#akash/deployment/v1beta3/groupmsg.proto) - - [MsgCloseGroup](#akash.deployment.v1beta3.MsgCloseGroup) - - [MsgCloseGroupResponse](#akash.deployment.v1beta3.MsgCloseGroupResponse) - - [MsgPauseGroup](#akash.deployment.v1beta3.MsgPauseGroup) - - [MsgPauseGroupResponse](#akash.deployment.v1beta3.MsgPauseGroupResponse) - - [MsgStartGroup](#akash.deployment.v1beta3.MsgStartGroup) - - [MsgStartGroupResponse](#akash.deployment.v1beta3.MsgStartGroupResponse) - - - [akash/deployment/v1beta3/resourceunit.proto](#akash/deployment/v1beta3/resourceunit.proto) - - [ResourceUnit](#akash.deployment.v1beta3.ResourceUnit) - - - [akash/deployment/v1beta3/group.proto](#akash/deployment/v1beta3/group.proto) - - [Group](#akash.deployment.v1beta3.Group) - - - [Group.State](#akash.deployment.v1beta3.Group.State) - - - [akash/deployment/v1beta3/groupid.proto](#akash/deployment/v1beta3/groupid.proto) - - [GroupID](#akash.deployment.v1beta3.GroupID) - - - [akash/deployment/v1beta3/deployment.proto](#akash/deployment/v1beta3/deployment.proto) - - [Deployment](#akash.deployment.v1beta3.Deployment) - - [DeploymentFilters](#akash.deployment.v1beta3.DeploymentFilters) - - [DeploymentID](#akash.deployment.v1beta3.DeploymentID) - - - [Deployment.State](#akash.deployment.v1beta3.Deployment.State) - - - [akash/deployment/v1beta3/query.proto](#akash/deployment/v1beta3/query.proto) - - [QueryDeploymentRequest](#akash.deployment.v1beta3.QueryDeploymentRequest) - - [QueryDeploymentResponse](#akash.deployment.v1beta3.QueryDeploymentResponse) - - [QueryDeploymentsRequest](#akash.deployment.v1beta3.QueryDeploymentsRequest) - - [QueryDeploymentsResponse](#akash.deployment.v1beta3.QueryDeploymentsResponse) - - [QueryGroupRequest](#akash.deployment.v1beta3.QueryGroupRequest) - - [QueryGroupResponse](#akash.deployment.v1beta3.QueryGroupResponse) - - - [Query](#akash.deployment.v1beta3.Query) - - - [akash/deployment/v1beta3/deploymentmsg.proto](#akash/deployment/v1beta3/deploymentmsg.proto) - - [MsgCloseDeployment](#akash.deployment.v1beta3.MsgCloseDeployment) - - [MsgCloseDeploymentResponse](#akash.deployment.v1beta3.MsgCloseDeploymentResponse) - - [MsgCreateDeployment](#akash.deployment.v1beta3.MsgCreateDeployment) - - [MsgCreateDeploymentResponse](#akash.deployment.v1beta3.MsgCreateDeploymentResponse) - - [MsgDepositDeployment](#akash.deployment.v1beta3.MsgDepositDeployment) - - [MsgDepositDeploymentResponse](#akash.deployment.v1beta3.MsgDepositDeploymentResponse) - - [MsgUpdateDeployment](#akash.deployment.v1beta3.MsgUpdateDeployment) - - [MsgUpdateDeploymentResponse](#akash.deployment.v1beta3.MsgUpdateDeploymentResponse) - - - [akash/deployment/v1beta3/service.proto](#akash/deployment/v1beta3/service.proto) - - [Msg](#akash.deployment.v1beta3.Msg) - - - [akash/deployment/v1beta3/authz.proto](#akash/deployment/v1beta3/authz.proto) - - [DepositDeploymentAuthorization](#akash.deployment.v1beta3.DepositDeploymentAuthorization) - - - [akash/deployment/v1beta3/genesis.proto](#akash/deployment/v1beta3/genesis.proto) - - [GenesisDeployment](#akash.deployment.v1beta3.GenesisDeployment) - - [GenesisState](#akash.deployment.v1beta3.GenesisState) - - - 
[akash/deployment/v1beta3/groupspec.proto](#akash/deployment/v1beta3/groupspec.proto) - - [GroupSpec](#akash.deployment.v1beta3.GroupSpec) - - - [akash/deployment/v1beta3/params.proto](#akash/deployment/v1beta3/params.proto) - - [Params](#akash.deployment.v1beta3.Params) - - - [akash/deployment/v1beta2/groupmsg.proto](#akash/deployment/v1beta2/groupmsg.proto) - - [MsgCloseGroup](#akash.deployment.v1beta2.MsgCloseGroup) - - [MsgCloseGroupResponse](#akash.deployment.v1beta2.MsgCloseGroupResponse) - - [MsgPauseGroup](#akash.deployment.v1beta2.MsgPauseGroup) - - [MsgPauseGroupResponse](#akash.deployment.v1beta2.MsgPauseGroupResponse) - - [MsgStartGroup](#akash.deployment.v1beta2.MsgStartGroup) - - [MsgStartGroupResponse](#akash.deployment.v1beta2.MsgStartGroupResponse) - - - [akash/deployment/v1beta2/group.proto](#akash/deployment/v1beta2/group.proto) - - [Group](#akash.deployment.v1beta2.Group) - - - [Group.State](#akash.deployment.v1beta2.Group.State) - - - [akash/deployment/v1beta2/groupid.proto](#akash/deployment/v1beta2/groupid.proto) - - [GroupID](#akash.deployment.v1beta2.GroupID) - - - [akash/deployment/v1beta2/deployment.proto](#akash/deployment/v1beta2/deployment.proto) - - [Deployment](#akash.deployment.v1beta2.Deployment) - - [DeploymentFilters](#akash.deployment.v1beta2.DeploymentFilters) - - [DeploymentID](#akash.deployment.v1beta2.DeploymentID) - - - [Deployment.State](#akash.deployment.v1beta2.Deployment.State) - - - [akash/deployment/v1beta2/query.proto](#akash/deployment/v1beta2/query.proto) - - [QueryDeploymentRequest](#akash.deployment.v1beta2.QueryDeploymentRequest) - - [QueryDeploymentResponse](#akash.deployment.v1beta2.QueryDeploymentResponse) - - [QueryDeploymentsRequest](#akash.deployment.v1beta2.QueryDeploymentsRequest) - - [QueryDeploymentsResponse](#akash.deployment.v1beta2.QueryDeploymentsResponse) - - [QueryGroupRequest](#akash.deployment.v1beta2.QueryGroupRequest) - - [QueryGroupResponse](#akash.deployment.v1beta2.QueryGroupResponse) - - - [Query](#akash.deployment.v1beta2.Query) - - - [akash/deployment/v1beta2/deploymentmsg.proto](#akash/deployment/v1beta2/deploymentmsg.proto) - - [MsgCloseDeployment](#akash.deployment.v1beta2.MsgCloseDeployment) - - [MsgCloseDeploymentResponse](#akash.deployment.v1beta2.MsgCloseDeploymentResponse) - - [MsgCreateDeployment](#akash.deployment.v1beta2.MsgCreateDeployment) - - [MsgCreateDeploymentResponse](#akash.deployment.v1beta2.MsgCreateDeploymentResponse) - - [MsgDepositDeployment](#akash.deployment.v1beta2.MsgDepositDeployment) - - [MsgDepositDeploymentResponse](#akash.deployment.v1beta2.MsgDepositDeploymentResponse) - - [MsgUpdateDeployment](#akash.deployment.v1beta2.MsgUpdateDeployment) - - [MsgUpdateDeploymentResponse](#akash.deployment.v1beta2.MsgUpdateDeploymentResponse) - - - [akash/deployment/v1beta2/service.proto](#akash/deployment/v1beta2/service.proto) - - [Msg](#akash.deployment.v1beta2.Msg) - - - [akash/deployment/v1beta2/authz.proto](#akash/deployment/v1beta2/authz.proto) - - [DepositDeploymentAuthorization](#akash.deployment.v1beta2.DepositDeploymentAuthorization) - - - [akash/deployment/v1beta2/genesis.proto](#akash/deployment/v1beta2/genesis.proto) - - [GenesisDeployment](#akash.deployment.v1beta2.GenesisDeployment) - - [GenesisState](#akash.deployment.v1beta2.GenesisState) - - - [akash/deployment/v1beta2/groupspec.proto](#akash/deployment/v1beta2/groupspec.proto) - - [GroupSpec](#akash.deployment.v1beta2.GroupSpec) - - - [akash/deployment/v1beta2/resource.proto](#akash/deployment/v1beta2/resource.proto) - - 
[Resource](#akash.deployment.v1beta2.Resource) - - - [akash/deployment/v1beta2/params.proto](#akash/deployment/v1beta2/params.proto) - - [Params](#akash.deployment.v1beta2.Params) - - - [akash/deployment/v1beta1/group.proto](#akash/deployment/v1beta1/group.proto) - - [Group](#akash.deployment.v1beta1.Group) - - [GroupID](#akash.deployment.v1beta1.GroupID) - - [GroupSpec](#akash.deployment.v1beta1.GroupSpec) - - [MsgCloseGroup](#akash.deployment.v1beta1.MsgCloseGroup) - - [MsgCloseGroupResponse](#akash.deployment.v1beta1.MsgCloseGroupResponse) - - [MsgPauseGroup](#akash.deployment.v1beta1.MsgPauseGroup) - - [MsgPauseGroupResponse](#akash.deployment.v1beta1.MsgPauseGroupResponse) - - [MsgStartGroup](#akash.deployment.v1beta1.MsgStartGroup) - - [MsgStartGroupResponse](#akash.deployment.v1beta1.MsgStartGroupResponse) - - [Resource](#akash.deployment.v1beta1.Resource) - - - [Group.State](#akash.deployment.v1beta1.Group.State) + - [akash/base/attributes/v1/attribute.proto](#akash/base/attributes/v1/attribute.proto) + - [Attribute](#akash.base.attributes.v1.Attribute) + - [PlacementRequirements](#akash.base.attributes.v1.PlacementRequirements) + - [SignedBy](#akash.base.attributes.v1.SignedBy) - - [akash/deployment/v1beta1/deployment.proto](#akash/deployment/v1beta1/deployment.proto) - - [Deployment](#akash.deployment.v1beta1.Deployment) - - [DeploymentFilters](#akash.deployment.v1beta1.DeploymentFilters) - - [DeploymentID](#akash.deployment.v1beta1.DeploymentID) - - [MsgCloseDeployment](#akash.deployment.v1beta1.MsgCloseDeployment) - - [MsgCloseDeploymentResponse](#akash.deployment.v1beta1.MsgCloseDeploymentResponse) - - [MsgCreateDeployment](#akash.deployment.v1beta1.MsgCreateDeployment) - - [MsgCreateDeploymentResponse](#akash.deployment.v1beta1.MsgCreateDeploymentResponse) - - [MsgDepositDeployment](#akash.deployment.v1beta1.MsgDepositDeployment) - - [MsgDepositDeploymentResponse](#akash.deployment.v1beta1.MsgDepositDeploymentResponse) - - [MsgUpdateDeployment](#akash.deployment.v1beta1.MsgUpdateDeployment) - - [MsgUpdateDeploymentResponse](#akash.deployment.v1beta1.MsgUpdateDeploymentResponse) + - [akash/audit/v1/audit.proto](#akash/audit/v1/audit.proto) + - [AttributesFilters](#akash.audit.v1.AttributesFilters) + - [AuditedAttributesStore](#akash.audit.v1.AuditedAttributesStore) + - [AuditedProvider](#akash.audit.v1.AuditedProvider) - - [Deployment.State](#akash.deployment.v1beta1.Deployment.State) + - [akash/audit/v1/event.proto](#akash/audit/v1/event.proto) + - [EventTrustedAuditorCreated](#akash.audit.v1.EventTrustedAuditorCreated) + - [EventTrustedAuditorDeleted](#akash.audit.v1.EventTrustedAuditorDeleted) - - [Msg](#akash.deployment.v1beta1.Msg) + - [akash/audit/v1/genesis.proto](#akash/audit/v1/genesis.proto) + - [GenesisState](#akash.audit.v1.GenesisState) - - [akash/deployment/v1beta1/query.proto](#akash/deployment/v1beta1/query.proto) - - [QueryDeploymentRequest](#akash.deployment.v1beta1.QueryDeploymentRequest) - - [QueryDeploymentResponse](#akash.deployment.v1beta1.QueryDeploymentResponse) - - [QueryDeploymentsRequest](#akash.deployment.v1beta1.QueryDeploymentsRequest) - - [QueryDeploymentsResponse](#akash.deployment.v1beta1.QueryDeploymentsResponse) - - [QueryGroupRequest](#akash.deployment.v1beta1.QueryGroupRequest) - - [QueryGroupResponse](#akash.deployment.v1beta1.QueryGroupResponse) + - [akash/audit/v1/msg.proto](#akash/audit/v1/msg.proto) + - [MsgDeleteProviderAttributes](#akash.audit.v1.MsgDeleteProviderAttributes) + - 
[MsgDeleteProviderAttributesResponse](#akash.audit.v1.MsgDeleteProviderAttributesResponse) + - [MsgSignProviderAttributes](#akash.audit.v1.MsgSignProviderAttributes) + - [MsgSignProviderAttributesResponse](#akash.audit.v1.MsgSignProviderAttributesResponse) - - [Query](#akash.deployment.v1beta1.Query) + - [akash/audit/v1/query.proto](#akash/audit/v1/query.proto) + - [QueryAllProvidersAttributesRequest](#akash.audit.v1.QueryAllProvidersAttributesRequest) + - [QueryAuditorAttributesRequest](#akash.audit.v1.QueryAuditorAttributesRequest) + - [QueryProviderAttributesRequest](#akash.audit.v1.QueryProviderAttributesRequest) + - [QueryProviderAuditorRequest](#akash.audit.v1.QueryProviderAuditorRequest) + - [QueryProviderRequest](#akash.audit.v1.QueryProviderRequest) + - [QueryProvidersResponse](#akash.audit.v1.QueryProvidersResponse) - - [akash/deployment/v1beta1/authz.proto](#akash/deployment/v1beta1/authz.proto) - - [DepositDeploymentAuthorization](#akash.deployment.v1beta1.DepositDeploymentAuthorization) + - [Query](#akash.audit.v1.Query) - - [akash/deployment/v1beta1/genesis.proto](#akash/deployment/v1beta1/genesis.proto) - - [GenesisDeployment](#akash.deployment.v1beta1.GenesisDeployment) - - [GenesisState](#akash.deployment.v1beta1.GenesisState) + - [akash/audit/v1/service.proto](#akash/audit/v1/service.proto) + - [Msg](#akash.audit.v1.Msg) - - [akash/deployment/v1beta1/params.proto](#akash/deployment/v1beta1/params.proto) - - [Params](#akash.deployment.v1beta1.Params) + - [akash/base/resources/v1beta4/resourcevalue.proto](#akash/base/resources/v1beta4/resourcevalue.proto) + - [ResourceValue](#akash.base.resources.v1beta4.ResourceValue) - - [akash/staking/v1beta3/genesis.proto](#akash/staking/v1beta3/genesis.proto) - - [GenesisState](#akash.staking.v1beta3.GenesisState) - - - [akash/staking/v1beta3/params.proto](#akash/staking/v1beta3/params.proto) - - [Params](#akash.staking.v1beta3.Params) - - - [akash/cert/v1beta3/query.proto](#akash/cert/v1beta3/query.proto) - - [CertificateResponse](#akash.cert.v1beta3.CertificateResponse) - - [QueryCertificatesRequest](#akash.cert.v1beta3.QueryCertificatesRequest) - - [QueryCertificatesResponse](#akash.cert.v1beta3.QueryCertificatesResponse) - - - [Query](#akash.cert.v1beta3.Query) - - - [akash/cert/v1beta3/cert.proto](#akash/cert/v1beta3/cert.proto) - - [Certificate](#akash.cert.v1beta3.Certificate) - - [CertificateFilter](#akash.cert.v1beta3.CertificateFilter) - - [CertificateID](#akash.cert.v1beta3.CertificateID) - - [MsgCreateCertificate](#akash.cert.v1beta3.MsgCreateCertificate) - - [MsgCreateCertificateResponse](#akash.cert.v1beta3.MsgCreateCertificateResponse) - - [MsgRevokeCertificate](#akash.cert.v1beta3.MsgRevokeCertificate) - - [MsgRevokeCertificateResponse](#akash.cert.v1beta3.MsgRevokeCertificateResponse) - - - [Certificate.State](#akash.cert.v1beta3.Certificate.State) - - - [Msg](#akash.cert.v1beta3.Msg) - - - [akash/cert/v1beta3/genesis.proto](#akash/cert/v1beta3/genesis.proto) - - [GenesisCertificate](#akash.cert.v1beta3.GenesisCertificate) - - [GenesisState](#akash.cert.v1beta3.GenesisState) - - - [akash/cert/v1beta2/query.proto](#akash/cert/v1beta2/query.proto) - - [CertificateResponse](#akash.cert.v1beta2.CertificateResponse) - - [QueryCertificatesRequest](#akash.cert.v1beta2.QueryCertificatesRequest) - - [QueryCertificatesResponse](#akash.cert.v1beta2.QueryCertificatesResponse) - - - [Query](#akash.cert.v1beta2.Query) - - - [akash/cert/v1beta2/cert.proto](#akash/cert/v1beta2/cert.proto) - - 
[Certificate](#akash.cert.v1beta2.Certificate) - - [CertificateFilter](#akash.cert.v1beta2.CertificateFilter) - - [CertificateID](#akash.cert.v1beta2.CertificateID) - - [MsgCreateCertificate](#akash.cert.v1beta2.MsgCreateCertificate) - - [MsgCreateCertificateResponse](#akash.cert.v1beta2.MsgCreateCertificateResponse) - - [MsgRevokeCertificate](#akash.cert.v1beta2.MsgRevokeCertificate) - - [MsgRevokeCertificateResponse](#akash.cert.v1beta2.MsgRevokeCertificateResponse) - - - [Certificate.State](#akash.cert.v1beta2.Certificate.State) - - - [Msg](#akash.cert.v1beta2.Msg) + - [akash/base/resources/v1beta4/cpu.proto](#akash/base/resources/v1beta4/cpu.proto) + - [CPU](#akash.base.resources.v1beta4.CPU) - - [akash/cert/v1beta2/genesis.proto](#akash/cert/v1beta2/genesis.proto) - - [GenesisCertificate](#akash.cert.v1beta2.GenesisCertificate) - - [GenesisState](#akash.cert.v1beta2.GenesisState) + - [akash/base/resources/v1beta4/endpoint.proto](#akash/base/resources/v1beta4/endpoint.proto) + - [Endpoint](#akash.base.resources.v1beta4.Endpoint) - - [akash/escrow/v1beta3/types.proto](#akash/escrow/v1beta3/types.proto) - - [Account](#akash.escrow.v1beta3.Account) - - [AccountID](#akash.escrow.v1beta3.AccountID) - - [FractionalPayment](#akash.escrow.v1beta3.FractionalPayment) + - [Endpoint.Kind](#akash.base.resources.v1beta4.Endpoint.Kind) - - [Account.State](#akash.escrow.v1beta3.Account.State) - - [FractionalPayment.State](#akash.escrow.v1beta3.FractionalPayment.State) + - [akash/base/resources/v1beta4/gpu.proto](#akash/base/resources/v1beta4/gpu.proto) + - [GPU](#akash.base.resources.v1beta4.GPU) - - [akash/escrow/v1beta3/query.proto](#akash/escrow/v1beta3/query.proto) - - [QueryAccountsRequest](#akash.escrow.v1beta3.QueryAccountsRequest) - - [QueryAccountsResponse](#akash.escrow.v1beta3.QueryAccountsResponse) - - [QueryPaymentsRequest](#akash.escrow.v1beta3.QueryPaymentsRequest) - - [QueryPaymentsResponse](#akash.escrow.v1beta3.QueryPaymentsResponse) + - [akash/base/resources/v1beta4/memory.proto](#akash/base/resources/v1beta4/memory.proto) + - [Memory](#akash.base.resources.v1beta4.Memory) - - [Query](#akash.escrow.v1beta3.Query) + - [akash/base/resources/v1beta4/storage.proto](#akash/base/resources/v1beta4/storage.proto) + - [Storage](#akash.base.resources.v1beta4.Storage) - - [akash/escrow/v1beta3/genesis.proto](#akash/escrow/v1beta3/genesis.proto) - - [GenesisState](#akash.escrow.v1beta3.GenesisState) + - [akash/base/resources/v1beta4/resources.proto](#akash/base/resources/v1beta4/resources.proto) + - [Resources](#akash.base.resources.v1beta4.Resources) - - [akash/escrow/v1beta2/types.proto](#akash/escrow/v1beta2/types.proto) - - [Account](#akash.escrow.v1beta2.Account) - - [AccountID](#akash.escrow.v1beta2.AccountID) - - [FractionalPayment](#akash.escrow.v1beta2.FractionalPayment) + - [akash/cert/v1/cert.proto](#akash/cert/v1/cert.proto) + - [Certificate](#akash.cert.v1.Certificate) + - [ID](#akash.cert.v1.ID) - - [Account.State](#akash.escrow.v1beta2.Account.State) - - [FractionalPayment.State](#akash.escrow.v1beta2.FractionalPayment.State) + - [State](#akash.cert.v1.State) - - [akash/escrow/v1beta2/query.proto](#akash/escrow/v1beta2/query.proto) - - [QueryAccountsRequest](#akash.escrow.v1beta2.QueryAccountsRequest) - - [QueryAccountsResponse](#akash.escrow.v1beta2.QueryAccountsResponse) - - [QueryPaymentsRequest](#akash.escrow.v1beta2.QueryPaymentsRequest) - - [QueryPaymentsResponse](#akash.escrow.v1beta2.QueryPaymentsResponse) + - [akash/cert/v1/filters.proto](#akash/cert/v1/filters.proto) + 
- [CertificateFilter](#akash.cert.v1.CertificateFilter) - - [Query](#akash.escrow.v1beta2.Query) + - [akash/cert/v1/genesis.proto](#akash/cert/v1/genesis.proto) + - [GenesisCertificate](#akash.cert.v1.GenesisCertificate) + - [GenesisState](#akash.cert.v1.GenesisState) - - [akash/escrow/v1beta2/genesis.proto](#akash/escrow/v1beta2/genesis.proto) - - [GenesisState](#akash.escrow.v1beta2.GenesisState) + - [akash/cert/v1/msg.proto](#akash/cert/v1/msg.proto) + - [MsgCreateCertificate](#akash.cert.v1.MsgCreateCertificate) + - [MsgCreateCertificateResponse](#akash.cert.v1.MsgCreateCertificateResponse) + - [MsgRevokeCertificate](#akash.cert.v1.MsgRevokeCertificate) + - [MsgRevokeCertificateResponse](#akash.cert.v1.MsgRevokeCertificateResponse) - - [akash/escrow/v1beta1/types.proto](#akash/escrow/v1beta1/types.proto) - - [Account](#akash.escrow.v1beta1.Account) - - [AccountID](#akash.escrow.v1beta1.AccountID) - - [Payment](#akash.escrow.v1beta1.Payment) + - [akash/cert/v1/query.proto](#akash/cert/v1/query.proto) + - [CertificateResponse](#akash.cert.v1.CertificateResponse) + - [QueryCertificatesRequest](#akash.cert.v1.QueryCertificatesRequest) + - [QueryCertificatesResponse](#akash.cert.v1.QueryCertificatesResponse) - - [Account.State](#akash.escrow.v1beta1.Account.State) - - [Payment.State](#akash.escrow.v1beta1.Payment.State) + - [Query](#akash.cert.v1.Query) - - [akash/escrow/v1beta1/query.proto](#akash/escrow/v1beta1/query.proto) - - [QueryAccountsRequest](#akash.escrow.v1beta1.QueryAccountsRequest) - - [QueryAccountsResponse](#akash.escrow.v1beta1.QueryAccountsResponse) - - [QueryPaymentsRequest](#akash.escrow.v1beta1.QueryPaymentsRequest) - - [QueryPaymentsResponse](#akash.escrow.v1beta1.QueryPaymentsResponse) + - [akash/cert/v1/service.proto](#akash/cert/v1/service.proto) + - [Msg](#akash.cert.v1.Msg) - - [Query](#akash.escrow.v1beta1.Query) + - [akash/deployment/v1/authz.proto](#akash/deployment/v1/authz.proto) + - [DepositAuthorization](#akash.deployment.v1.DepositAuthorization) - - [akash/escrow/v1beta1/genesis.proto](#akash/escrow/v1beta1/genesis.proto) - - [GenesisState](#akash.escrow.v1beta1.GenesisState) + - [akash/deployment/v1/deployment.proto](#akash/deployment/v1/deployment.proto) + - [Deployment](#akash.deployment.v1.Deployment) + - [DeploymentID](#akash.deployment.v1.DeploymentID) - - [akash/market/v1beta4/bid.proto](#akash/market/v1beta4/bid.proto) - - [Bid](#akash.market.v1beta4.Bid) - - [BidFilters](#akash.market.v1beta4.BidFilters) - - [BidID](#akash.market.v1beta4.BidID) - - [MsgCloseBid](#akash.market.v1beta4.MsgCloseBid) - - [MsgCloseBidResponse](#akash.market.v1beta4.MsgCloseBidResponse) - - [MsgCreateBid](#akash.market.v1beta4.MsgCreateBid) - - [MsgCreateBidResponse](#akash.market.v1beta4.MsgCreateBidResponse) - - [ResourceOffer](#akash.market.v1beta4.ResourceOffer) + - [Deployment.State](#akash.deployment.v1.Deployment.State) - - [Bid.State](#akash.market.v1beta4.Bid.State) + - [akash/deployment/v1/group.proto](#akash/deployment/v1/group.proto) + - [GroupID](#akash.deployment.v1.GroupID) - - [akash/market/v1beta4/query.proto](#akash/market/v1beta4/query.proto) - - [QueryBidRequest](#akash.market.v1beta4.QueryBidRequest) - - [QueryBidResponse](#akash.market.v1beta4.QueryBidResponse) - - [QueryBidsRequest](#akash.market.v1beta4.QueryBidsRequest) - - [QueryBidsResponse](#akash.market.v1beta4.QueryBidsResponse) - - [QueryLeaseRequest](#akash.market.v1beta4.QueryLeaseRequest) - - [QueryLeaseResponse](#akash.market.v1beta4.QueryLeaseResponse) - - 
[QueryLeasesRequest](#akash.market.v1beta4.QueryLeasesRequest) - - [QueryLeasesResponse](#akash.market.v1beta4.QueryLeasesResponse) - - [QueryOrderRequest](#akash.market.v1beta4.QueryOrderRequest) - - [QueryOrderResponse](#akash.market.v1beta4.QueryOrderResponse) - - [QueryOrdersRequest](#akash.market.v1beta4.QueryOrdersRequest) - - [QueryOrdersResponse](#akash.market.v1beta4.QueryOrdersResponse) + - [akash/deployment/v1/event.proto](#akash/deployment/v1/event.proto) + - [EventDeploymentClosed](#akash.deployment.v1.EventDeploymentClosed) + - [EventDeploymentCreated](#akash.deployment.v1.EventDeploymentCreated) + - [EventDeploymentUpdated](#akash.deployment.v1.EventDeploymentUpdated) + - [EventGroupClosed](#akash.deployment.v1.EventGroupClosed) + - [EventGroupPaused](#akash.deployment.v1.EventGroupPaused) + - [EventGroupStarted](#akash.deployment.v1.EventGroupStarted) - - [Query](#akash.market.v1beta4.Query) + - [akash/deployment/v1/msg.proto](#akash/deployment/v1/msg.proto) + - [MsgDepositDeployment](#akash.deployment.v1.MsgDepositDeployment) + - [MsgDepositDeploymentResponse](#akash.deployment.v1.MsgDepositDeploymentResponse) - - [akash/market/v1beta4/service.proto](#akash/market/v1beta4/service.proto) - - [Msg](#akash.market.v1beta4.Msg) + - [akash/deployment/v1beta4/resourceunit.proto](#akash/deployment/v1beta4/resourceunit.proto) + - [ResourceUnit](#akash.deployment.v1beta4.ResourceUnit) - - [akash/market/v1beta4/lease.proto](#akash/market/v1beta4/lease.proto) - - [Lease](#akash.market.v1beta4.Lease) - - [LeaseFilters](#akash.market.v1beta4.LeaseFilters) - - [LeaseID](#akash.market.v1beta4.LeaseID) - - [MsgCloseLease](#akash.market.v1beta4.MsgCloseLease) - - [MsgCloseLeaseResponse](#akash.market.v1beta4.MsgCloseLeaseResponse) - - [MsgCreateLease](#akash.market.v1beta4.MsgCreateLease) - - [MsgCreateLeaseResponse](#akash.market.v1beta4.MsgCreateLeaseResponse) - - [MsgWithdrawLease](#akash.market.v1beta4.MsgWithdrawLease) - - [MsgWithdrawLeaseResponse](#akash.market.v1beta4.MsgWithdrawLeaseResponse) + - [akash/deployment/v1beta4/groupspec.proto](#akash/deployment/v1beta4/groupspec.proto) + - [GroupSpec](#akash.deployment.v1beta4.GroupSpec) - - [Lease.State](#akash.market.v1beta4.Lease.State) + - [akash/deployment/v1beta4/deploymentmsg.proto](#akash/deployment/v1beta4/deploymentmsg.proto) + - [MsgCloseDeployment](#akash.deployment.v1beta4.MsgCloseDeployment) + - [MsgCloseDeploymentResponse](#akash.deployment.v1beta4.MsgCloseDeploymentResponse) + - [MsgCreateDeployment](#akash.deployment.v1beta4.MsgCreateDeployment) + - [MsgCreateDeploymentResponse](#akash.deployment.v1beta4.MsgCreateDeploymentResponse) + - [MsgUpdateDeployment](#akash.deployment.v1beta4.MsgUpdateDeployment) + - [MsgUpdateDeploymentResponse](#akash.deployment.v1beta4.MsgUpdateDeploymentResponse) - - [akash/market/v1beta4/genesis.proto](#akash/market/v1beta4/genesis.proto) - - [GenesisState](#akash.market.v1beta4.GenesisState) + - [akash/deployment/v1beta4/filters.proto](#akash/deployment/v1beta4/filters.proto) + - [DeploymentFilters](#akash.deployment.v1beta4.DeploymentFilters) + - [GroupFilters](#akash.deployment.v1beta4.GroupFilters) - - [akash/market/v1beta4/order.proto](#akash/market/v1beta4/order.proto) - - [Order](#akash.market.v1beta4.Order) - - [OrderFilters](#akash.market.v1beta4.OrderFilters) - - [OrderID](#akash.market.v1beta4.OrderID) + - [akash/deployment/v1beta4/group.proto](#akash/deployment/v1beta4/group.proto) + - [Group](#akash.deployment.v1beta4.Group) - - [Order.State](#akash.market.v1beta4.Order.State) 
+ - [Group.State](#akash.deployment.v1beta4.Group.State) - - [akash/market/v1beta4/params.proto](#akash/market/v1beta4/params.proto) - - [Params](#akash.market.v1beta4.Params) + - [akash/deployment/v1beta4/params.proto](#akash/deployment/v1beta4/params.proto) + - [Params](#akash.deployment.v1beta4.Params) - - [akash/market/v1beta3/bid.proto](#akash/market/v1beta3/bid.proto) - - [Bid](#akash.market.v1beta3.Bid) - - [BidFilters](#akash.market.v1beta3.BidFilters) - - [BidID](#akash.market.v1beta3.BidID) - - [MsgCloseBid](#akash.market.v1beta3.MsgCloseBid) - - [MsgCloseBidResponse](#akash.market.v1beta3.MsgCloseBidResponse) - - [MsgCreateBid](#akash.market.v1beta3.MsgCreateBid) - - [MsgCreateBidResponse](#akash.market.v1beta3.MsgCreateBidResponse) + - [akash/deployment/v1beta4/genesis.proto](#akash/deployment/v1beta4/genesis.proto) + - [GenesisDeployment](#akash.deployment.v1beta4.GenesisDeployment) + - [GenesisState](#akash.deployment.v1beta4.GenesisState) - - [Bid.State](#akash.market.v1beta3.Bid.State) + - [akash/deployment/v1beta4/groupmsg.proto](#akash/deployment/v1beta4/groupmsg.proto) + - [MsgCloseGroup](#akash.deployment.v1beta4.MsgCloseGroup) + - [MsgCloseGroupResponse](#akash.deployment.v1beta4.MsgCloseGroupResponse) + - [MsgPauseGroup](#akash.deployment.v1beta4.MsgPauseGroup) + - [MsgPauseGroupResponse](#akash.deployment.v1beta4.MsgPauseGroupResponse) + - [MsgStartGroup](#akash.deployment.v1beta4.MsgStartGroup) + - [MsgStartGroupResponse](#akash.deployment.v1beta4.MsgStartGroupResponse) - - [akash/market/v1beta3/query.proto](#akash/market/v1beta3/query.proto) - - [QueryBidRequest](#akash.market.v1beta3.QueryBidRequest) - - [QueryBidResponse](#akash.market.v1beta3.QueryBidResponse) - - [QueryBidsRequest](#akash.market.v1beta3.QueryBidsRequest) - - [QueryBidsResponse](#akash.market.v1beta3.QueryBidsResponse) - - [QueryLeaseRequest](#akash.market.v1beta3.QueryLeaseRequest) - - [QueryLeaseResponse](#akash.market.v1beta3.QueryLeaseResponse) - - [QueryLeasesRequest](#akash.market.v1beta3.QueryLeasesRequest) - - [QueryLeasesResponse](#akash.market.v1beta3.QueryLeasesResponse) - - [QueryOrderRequest](#akash.market.v1beta3.QueryOrderRequest) - - [QueryOrderResponse](#akash.market.v1beta3.QueryOrderResponse) - - [QueryOrdersRequest](#akash.market.v1beta3.QueryOrdersRequest) - - [QueryOrdersResponse](#akash.market.v1beta3.QueryOrdersResponse) + - [akash/deployment/v1beta4/paramsmsg.proto](#akash/deployment/v1beta4/paramsmsg.proto) + - [MsgUpdateParams](#akash.deployment.v1beta4.MsgUpdateParams) + - [MsgUpdateParamsResponse](#akash.deployment.v1beta4.MsgUpdateParamsResponse) - - [Query](#akash.market.v1beta3.Query) + - [akash/escrow/v1/accountid.proto](#akash/escrow/v1/accountid.proto) + - [AccountID](#akash.escrow.v1.AccountID) - - [akash/market/v1beta3/service.proto](#akash/market/v1beta3/service.proto) - - [Msg](#akash.market.v1beta3.Msg) + - [akash/escrow/v1/account.proto](#akash/escrow/v1/account.proto) + - [Account](#akash.escrow.v1.Account) - - [akash/market/v1beta3/lease.proto](#akash/market/v1beta3/lease.proto) - - [Lease](#akash.market.v1beta3.Lease) - - [LeaseFilters](#akash.market.v1beta3.LeaseFilters) - - [LeaseID](#akash.market.v1beta3.LeaseID) - - [MsgCloseLease](#akash.market.v1beta3.MsgCloseLease) - - [MsgCloseLeaseResponse](#akash.market.v1beta3.MsgCloseLeaseResponse) - - [MsgCreateLease](#akash.market.v1beta3.MsgCreateLease) - - [MsgCreateLeaseResponse](#akash.market.v1beta3.MsgCreateLeaseResponse) - - [MsgWithdrawLease](#akash.market.v1beta3.MsgWithdrawLease) - - 
[MsgWithdrawLeaseResponse](#akash.market.v1beta3.MsgWithdrawLeaseResponse) + - [Account.State](#akash.escrow.v1.Account.State) - - [Lease.State](#akash.market.v1beta3.Lease.State) + - [akash/deployment/v1beta4/query.proto](#akash/deployment/v1beta4/query.proto) + - [QueryDeploymentRequest](#akash.deployment.v1beta4.QueryDeploymentRequest) + - [QueryDeploymentResponse](#akash.deployment.v1beta4.QueryDeploymentResponse) + - [QueryDeploymentsRequest](#akash.deployment.v1beta4.QueryDeploymentsRequest) + - [QueryDeploymentsResponse](#akash.deployment.v1beta4.QueryDeploymentsResponse) + - [QueryGroupRequest](#akash.deployment.v1beta4.QueryGroupRequest) + - [QueryGroupResponse](#akash.deployment.v1beta4.QueryGroupResponse) + - [QueryParamsRequest](#akash.deployment.v1beta4.QueryParamsRequest) + - [QueryParamsResponse](#akash.deployment.v1beta4.QueryParamsResponse) - - [akash/market/v1beta3/genesis.proto](#akash/market/v1beta3/genesis.proto) - - [GenesisState](#akash.market.v1beta3.GenesisState) + - [Query](#akash.deployment.v1beta4.Query) - - [akash/market/v1beta3/order.proto](#akash/market/v1beta3/order.proto) - - [Order](#akash.market.v1beta3.Order) - - [OrderFilters](#akash.market.v1beta3.OrderFilters) - - [OrderID](#akash.market.v1beta3.OrderID) + - [akash/deployment/v1beta4/service.proto](#akash/deployment/v1beta4/service.proto) + - [Msg](#akash.deployment.v1beta4.Msg) - - [Order.State](#akash.market.v1beta3.Order.State) - - - [akash/market/v1beta3/params.proto](#akash/market/v1beta3/params.proto) - - [Params](#akash.market.v1beta3.Params) - - - [akash/market/v1beta2/bid.proto](#akash/market/v1beta2/bid.proto) - - [Bid](#akash.market.v1beta2.Bid) - - [BidFilters](#akash.market.v1beta2.BidFilters) - - [BidID](#akash.market.v1beta2.BidID) - - [MsgCloseBid](#akash.market.v1beta2.MsgCloseBid) - - [MsgCloseBidResponse](#akash.market.v1beta2.MsgCloseBidResponse) - - [MsgCreateBid](#akash.market.v1beta2.MsgCreateBid) - - [MsgCreateBidResponse](#akash.market.v1beta2.MsgCreateBidResponse) - - - [Bid.State](#akash.market.v1beta2.Bid.State) - - - [akash/market/v1beta2/query.proto](#akash/market/v1beta2/query.proto) - - [QueryBidRequest](#akash.market.v1beta2.QueryBidRequest) - - [QueryBidResponse](#akash.market.v1beta2.QueryBidResponse) - - [QueryBidsRequest](#akash.market.v1beta2.QueryBidsRequest) - - [QueryBidsResponse](#akash.market.v1beta2.QueryBidsResponse) - - [QueryLeaseRequest](#akash.market.v1beta2.QueryLeaseRequest) - - [QueryLeaseResponse](#akash.market.v1beta2.QueryLeaseResponse) - - [QueryLeasesRequest](#akash.market.v1beta2.QueryLeasesRequest) - - [QueryLeasesResponse](#akash.market.v1beta2.QueryLeasesResponse) - - [QueryOrderRequest](#akash.market.v1beta2.QueryOrderRequest) - - [QueryOrderResponse](#akash.market.v1beta2.QueryOrderResponse) - - [QueryOrdersRequest](#akash.market.v1beta2.QueryOrdersRequest) - - [QueryOrdersResponse](#akash.market.v1beta2.QueryOrdersResponse) - - - [Query](#akash.market.v1beta2.Query) - - - [akash/market/v1beta2/service.proto](#akash/market/v1beta2/service.proto) - - [Msg](#akash.market.v1beta2.Msg) - - - [akash/market/v1beta2/lease.proto](#akash/market/v1beta2/lease.proto) - - [Lease](#akash.market.v1beta2.Lease) - - [LeaseFilters](#akash.market.v1beta2.LeaseFilters) - - [LeaseID](#akash.market.v1beta2.LeaseID) - - [MsgCloseLease](#akash.market.v1beta2.MsgCloseLease) - - [MsgCloseLeaseResponse](#akash.market.v1beta2.MsgCloseLeaseResponse) - - [MsgCreateLease](#akash.market.v1beta2.MsgCreateLease) - - 
[MsgCreateLeaseResponse](#akash.market.v1beta2.MsgCreateLeaseResponse) - - [MsgWithdrawLease](#akash.market.v1beta2.MsgWithdrawLease) - - [MsgWithdrawLeaseResponse](#akash.market.v1beta2.MsgWithdrawLeaseResponse) + - [akash/discovery/v1/client_info.proto](#akash/discovery/v1/client_info.proto) + - [ClientInfo](#akash.discovery.v1.ClientInfo) - - [Lease.State](#akash.market.v1beta2.Lease.State) + - [akash/discovery/v1/akash.proto](#akash/discovery/v1/akash.proto) + - [Akash](#akash.discovery.v1.Akash) - - [akash/market/v1beta2/genesis.proto](#akash/market/v1beta2/genesis.proto) - - [GenesisState](#akash.market.v1beta2.GenesisState) + - [akash/escrow/v1/fractional_payment.proto](#akash/escrow/v1/fractional_payment.proto) + - [FractionalPayment](#akash.escrow.v1.FractionalPayment) - - [akash/market/v1beta2/order.proto](#akash/market/v1beta2/order.proto) - - [Order](#akash.market.v1beta2.Order) - - [OrderFilters](#akash.market.v1beta2.OrderFilters) - - [OrderID](#akash.market.v1beta2.OrderID) + - [FractionalPayment.State](#akash.escrow.v1.FractionalPayment.State) - - [Order.State](#akash.market.v1beta2.Order.State) + - [akash/escrow/v1/genesis.proto](#akash/escrow/v1/genesis.proto) + - [GenesisState](#akash.escrow.v1.GenesisState) - - [akash/market/v1beta2/params.proto](#akash/market/v1beta2/params.proto) - - [Params](#akash.market.v1beta2.Params) + - [akash/escrow/v1/query.proto](#akash/escrow/v1/query.proto) + - [QueryAccountsRequest](#akash.escrow.v1.QueryAccountsRequest) + - [QueryAccountsResponse](#akash.escrow.v1.QueryAccountsResponse) + - [QueryPaymentsRequest](#akash.escrow.v1.QueryPaymentsRequest) + - [QueryPaymentsResponse](#akash.escrow.v1.QueryPaymentsResponse) - - [akash/inflation/v1beta3/genesis.proto](#akash/inflation/v1beta3/genesis.proto) - - [GenesisState](#akash.inflation.v1beta3.GenesisState) + - [Query](#akash.escrow.v1.Query) - - [akash/inflation/v1beta3/params.proto](#akash/inflation/v1beta3/params.proto) - - [Params](#akash.inflation.v1beta3.Params) + - [akash/gov/v1beta3/params.proto](#akash/gov/v1beta3/params.proto) + - [DepositParams](#akash.gov.v1beta3.DepositParams) - - [akash/inflation/v1beta2/genesis.proto](#akash/inflation/v1beta2/genesis.proto) - - [GenesisState](#akash.inflation.v1beta2.GenesisState) + - [akash/gov/v1beta3/genesis.proto](#akash/gov/v1beta3/genesis.proto) + - [GenesisState](#akash.gov.v1beta3.GenesisState) - [akash/inflation/v1beta2/params.proto](#akash/inflation/v1beta2/params.proto) - [Params](#akash.inflation.v1beta2.Params) - - [akash/base/v1beta3/memory.proto](#akash/base/v1beta3/memory.proto) - - [Memory](#akash.base.v1beta3.Memory) - - - [akash/base/v1beta3/cpu.proto](#akash/base/v1beta3/cpu.proto) - - [CPU](#akash.base.v1beta3.CPU) - - - [akash/base/v1beta3/resources.proto](#akash/base/v1beta3/resources.proto) - - [Resources](#akash.base.v1beta3.Resources) - - - [akash/base/v1beta3/attribute.proto](#akash/base/v1beta3/attribute.proto) - - [Attribute](#akash.base.v1beta3.Attribute) - - [PlacementRequirements](#akash.base.v1beta3.PlacementRequirements) - - [SignedBy](#akash.base.v1beta3.SignedBy) - - - [akash/base/v1beta3/endpoint.proto](#akash/base/v1beta3/endpoint.proto) - - [Endpoint](#akash.base.v1beta3.Endpoint) - - - [Endpoint.Kind](#akash.base.v1beta3.Endpoint.Kind) - - - [akash/base/v1beta3/gpu.proto](#akash/base/v1beta3/gpu.proto) - - [GPU](#akash.base.v1beta3.GPU) + - [akash/inflation/v1beta2/genesis.proto](#akash/inflation/v1beta2/genesis.proto) + - [GenesisState](#akash.inflation.v1beta2.GenesisState) - - 
[akash/base/v1beta3/storage.proto](#akash/base/v1beta3/storage.proto) - - [Storage](#akash.base.v1beta3.Storage) + - [akash/inflation/v1beta3/params.proto](#akash/inflation/v1beta3/params.proto) + - [Params](#akash.inflation.v1beta3.Params) - - [akash/base/v1beta3/resourcevalue.proto](#akash/base/v1beta3/resourcevalue.proto) - - [ResourceValue](#akash.base.v1beta3.ResourceValue) + - [akash/inflation/v1beta3/genesis.proto](#akash/inflation/v1beta3/genesis.proto) + - [GenesisState](#akash.inflation.v1beta3.GenesisState) - - [akash/base/v1beta2/resourceunits.proto](#akash/base/v1beta2/resourceunits.proto) - - [ResourceUnits](#akash.base.v1beta2.ResourceUnits) + - [akash/market/v1/bid.proto](#akash/market/v1/bid.proto) + - [BidID](#akash.market.v1.BidID) + + - [akash/market/v1/order.proto](#akash/market/v1/order.proto) + - [OrderID](#akash.market.v1.OrderID) + + - [akash/market/v1/lease.proto](#akash/market/v1/lease.proto) + - [Lease](#akash.market.v1.Lease) + - [LeaseID](#akash.market.v1.LeaseID) + + - [Lease.State](#akash.market.v1.Lease.State) + + - [akash/market/v1/event.proto](#akash/market/v1/event.proto) + - [EventBidClosed](#akash.market.v1.EventBidClosed) + - [EventBidCreated](#akash.market.v1.EventBidCreated) + - [EventLeaseClosed](#akash.market.v1.EventLeaseClosed) + - [EventLeaseCreated](#akash.market.v1.EventLeaseCreated) + - [EventOrderClosed](#akash.market.v1.EventOrderClosed) + - [EventOrderCreated](#akash.market.v1.EventOrderCreated) + + - [akash/market/v1/filters.proto](#akash/market/v1/filters.proto) + - [LeaseFilters](#akash.market.v1.LeaseFilters) + + - [akash/market/v1beta5/resourcesoffer.proto](#akash/market/v1beta5/resourcesoffer.proto) + - [ResourceOffer](#akash.market.v1beta5.ResourceOffer) + + - [akash/market/v1beta5/bid.proto](#akash/market/v1beta5/bid.proto) + - [Bid](#akash.market.v1beta5.Bid) + + - [Bid.State](#akash.market.v1beta5.Bid.State) + + - [akash/market/v1beta5/bidmsg.proto](#akash/market/v1beta5/bidmsg.proto) + - [MsgCloseBid](#akash.market.v1beta5.MsgCloseBid) + - [MsgCloseBidResponse](#akash.market.v1beta5.MsgCloseBidResponse) + - [MsgCreateBid](#akash.market.v1beta5.MsgCreateBid) + - [MsgCreateBidResponse](#akash.market.v1beta5.MsgCreateBidResponse) + + - [akash/market/v1beta5/filters.proto](#akash/market/v1beta5/filters.proto) + - [BidFilters](#akash.market.v1beta5.BidFilters) + - [OrderFilters](#akash.market.v1beta5.OrderFilters) + + - [akash/market/v1beta5/params.proto](#akash/market/v1beta5/params.proto) + - [Params](#akash.market.v1beta5.Params) + + - [akash/market/v1beta5/order.proto](#akash/market/v1beta5/order.proto) + - [Order](#akash.market.v1beta5.Order) + + - [Order.State](#akash.market.v1beta5.Order.State) + + - [akash/market/v1beta5/genesis.proto](#akash/market/v1beta5/genesis.proto) + - [GenesisState](#akash.market.v1beta5.GenesisState) + + - [akash/market/v1beta5/leasemsg.proto](#akash/market/v1beta5/leasemsg.proto) + - [MsgCloseLease](#akash.market.v1beta5.MsgCloseLease) + - [MsgCloseLeaseResponse](#akash.market.v1beta5.MsgCloseLeaseResponse) + - [MsgCreateLease](#akash.market.v1beta5.MsgCreateLease) + - [MsgCreateLeaseResponse](#akash.market.v1beta5.MsgCreateLeaseResponse) + - [MsgWithdrawLease](#akash.market.v1beta5.MsgWithdrawLease) + - [MsgWithdrawLeaseResponse](#akash.market.v1beta5.MsgWithdrawLeaseResponse) + + - [akash/market/v1beta5/paramsmsg.proto](#akash/market/v1beta5/paramsmsg.proto) + - [MsgUpdateParams](#akash.market.v1beta5.MsgUpdateParams) + - [MsgUpdateParamsResponse](#akash.market.v1beta5.MsgUpdateParamsResponse) + 
+ - [akash/market/v1beta5/query.proto](#akash/market/v1beta5/query.proto) + - [QueryBidRequest](#akash.market.v1beta5.QueryBidRequest) + - [QueryBidResponse](#akash.market.v1beta5.QueryBidResponse) + - [QueryBidsRequest](#akash.market.v1beta5.QueryBidsRequest) + - [QueryBidsResponse](#akash.market.v1beta5.QueryBidsResponse) + - [QueryLeaseRequest](#akash.market.v1beta5.QueryLeaseRequest) + - [QueryLeaseResponse](#akash.market.v1beta5.QueryLeaseResponse) + - [QueryLeasesRequest](#akash.market.v1beta5.QueryLeasesRequest) + - [QueryLeasesResponse](#akash.market.v1beta5.QueryLeasesResponse) + - [QueryOrderRequest](#akash.market.v1beta5.QueryOrderRequest) + - [QueryOrderResponse](#akash.market.v1beta5.QueryOrderResponse) + - [QueryOrdersRequest](#akash.market.v1beta5.QueryOrdersRequest) + - [QueryOrdersResponse](#akash.market.v1beta5.QueryOrdersResponse) + - [QueryParamsRequest](#akash.market.v1beta5.QueryParamsRequest) + - [QueryParamsResponse](#akash.market.v1beta5.QueryParamsResponse) + + - [Query](#akash.market.v1beta5.Query) + + - [akash/market/v1beta5/service.proto](#akash/market/v1beta5/service.proto) + - [Msg](#akash.market.v1beta5.Msg) + + - [akash/provider/v1beta4/event.proto](#akash/provider/v1beta4/event.proto) + - [EventProviderCreated](#akash.provider.v1beta4.EventProviderCreated) + - [EventProviderDeleted](#akash.provider.v1beta4.EventProviderDeleted) + - [EventProviderUpdated](#akash.provider.v1beta4.EventProviderUpdated) + + - [akash/provider/v1beta4/provider.proto](#akash/provider/v1beta4/provider.proto) + - [Info](#akash.provider.v1beta4.Info) + - [Provider](#akash.provider.v1beta4.Provider) + + - [akash/provider/v1beta4/genesis.proto](#akash/provider/v1beta4/genesis.proto) + - [GenesisState](#akash.provider.v1beta4.GenesisState) + + - [akash/provider/v1beta4/msg.proto](#akash/provider/v1beta4/msg.proto) + - [MsgCreateProvider](#akash.provider.v1beta4.MsgCreateProvider) + - [MsgCreateProviderResponse](#akash.provider.v1beta4.MsgCreateProviderResponse) + - [MsgDeleteProvider](#akash.provider.v1beta4.MsgDeleteProvider) + - [MsgDeleteProviderResponse](#akash.provider.v1beta4.MsgDeleteProviderResponse) + - [MsgUpdateProvider](#akash.provider.v1beta4.MsgUpdateProvider) + - [MsgUpdateProviderResponse](#akash.provider.v1beta4.MsgUpdateProviderResponse) + + - [akash/provider/v1beta4/query.proto](#akash/provider/v1beta4/query.proto) + - [QueryProviderRequest](#akash.provider.v1beta4.QueryProviderRequest) + - [QueryProviderResponse](#akash.provider.v1beta4.QueryProviderResponse) + - [QueryProvidersRequest](#akash.provider.v1beta4.QueryProvidersRequest) + - [QueryProvidersResponse](#akash.provider.v1beta4.QueryProvidersResponse) + + - [Query](#akash.provider.v1beta4.Query) + + - [akash/provider/v1beta4/service.proto](#akash/provider/v1beta4/service.proto) + - [Msg](#akash.provider.v1beta4.Msg) - - [akash/base/v1beta2/attribute.proto](#akash/base/v1beta2/attribute.proto) - - [Attribute](#akash.base.v1beta2.Attribute) - - [PlacementRequirements](#akash.base.v1beta2.PlacementRequirements) - - [SignedBy](#akash.base.v1beta2.SignedBy) + - [akash/staking/v1beta3/params.proto](#akash/staking/v1beta3/params.proto) + - [Params](#akash.staking.v1beta3.Params) - - [akash/base/v1beta2/endpoint.proto](#akash/base/v1beta2/endpoint.proto) - - [Endpoint](#akash.base.v1beta2.Endpoint) + - [akash/staking/v1beta3/genesis.proto](#akash/staking/v1beta3/genesis.proto) + - [GenesisState](#akash.staking.v1beta3.GenesisState) - - [Endpoint.Kind](#akash.base.v1beta2.Endpoint.Kind) + - 
[akash/staking/v1beta3/paramsmsg.proto](#akash/staking/v1beta3/paramsmsg.proto) + - [MsgUpdateParams](#akash.staking.v1beta3.MsgUpdateParams) + - [MsgUpdateParamsResponse](#akash.staking.v1beta3.MsgUpdateParamsResponse) - - [akash/base/v1beta2/resource.proto](#akash/base/v1beta2/resource.proto) - - [CPU](#akash.base.v1beta2.CPU) - - [Memory](#akash.base.v1beta2.Memory) - - [Storage](#akash.base.v1beta2.Storage) + - [akash/staking/v1beta3/query.proto](#akash/staking/v1beta3/query.proto) + - [QueryParamsRequest](#akash.staking.v1beta3.QueryParamsRequest) + - [QueryParamsResponse](#akash.staking.v1beta3.QueryParamsResponse) - - [akash/base/v1beta2/resourcevalue.proto](#akash/base/v1beta2/resourcevalue.proto) - - [ResourceValue](#akash.base.v1beta2.ResourceValue) + - [Query](#akash.staking.v1beta3.Query) - - [akash/base/v1beta1/attribute.proto](#akash/base/v1beta1/attribute.proto) - - [Attribute](#akash.base.v1beta1.Attribute) - - [PlacementRequirements](#akash.base.v1beta1.PlacementRequirements) - - [SignedBy](#akash.base.v1beta1.SignedBy) + - [akash/staking/v1beta3/service.proto](#akash/staking/v1beta3/service.proto) + - [Msg](#akash.staking.v1beta3.Msg) - - [akash/base/v1beta1/endpoint.proto](#akash/base/v1beta1/endpoint.proto) - - [Endpoint](#akash.base.v1beta1.Endpoint) + - [akash/take/v1/params.proto](#akash/take/v1/params.proto) + - [DenomTakeRate](#akash.take.v1.DenomTakeRate) + - [Params](#akash.take.v1.Params) - - [Endpoint.Kind](#akash.base.v1beta1.Endpoint.Kind) + - [akash/take/v1/genesis.proto](#akash/take/v1/genesis.proto) + - [GenesisState](#akash.take.v1.GenesisState) - - [akash/base/v1beta1/resource.proto](#akash/base/v1beta1/resource.proto) - - [CPU](#akash.base.v1beta1.CPU) - - [Memory](#akash.base.v1beta1.Memory) - - [ResourceUnits](#akash.base.v1beta1.ResourceUnits) - - [Storage](#akash.base.v1beta1.Storage) + - [akash/take/v1/paramsmsg.proto](#akash/take/v1/paramsmsg.proto) + - [MsgUpdateParams](#akash.take.v1.MsgUpdateParams) + - [MsgUpdateParamsResponse](#akash.take.v1.MsgUpdateParamsResponse) - - [akash/base/v1beta1/resourcevalue.proto](#akash/base/v1beta1/resourcevalue.proto) - - [ResourceValue](#akash.base.v1beta1.ResourceValue) + - [akash/take/v1/query.proto](#akash/take/v1/query.proto) + - [QueryParamsRequest](#akash.take.v1.QueryParamsRequest) + - [QueryParamsResponse](#akash.take.v1.QueryParamsResponse) - - [akash/gov/v1beta3/genesis.proto](#akash/gov/v1beta3/genesis.proto) - - [GenesisState](#akash.gov.v1beta3.GenesisState) + - [Query](#akash.take.v1.Query) - - [akash/gov/v1beta3/params.proto](#akash/gov/v1beta3/params.proto) - - [DepositParams](#akash.gov.v1beta3.DepositParams) + - [akash/take/v1/service.proto](#akash/take/v1/service.proto) + - [Msg](#akash.take.v1.Msg) - [Scalar Value Types](#scalar-value-types) - +

Top

- ## akash/discovery/v1/client_info.proto + ## akash/base/attributes/v1/attribute.proto - + - ### ClientInfo - ClientInfo akash specific client info + ### Attribute + Attribute represents key value pair | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `api_version` | [string](#string) | | | + | `key` | [string](#string) | | | + | `value` | [string](#string) | | | - - - - - + + - + ### PlacementRequirements + PlacementRequirements + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `signed_by` | [SignedBy](#akash.base.attributes.v1.SignedBy) | | SignedBy list of keys that tenants expect to have signatures from | + | `attributes` | [Attribute](#akash.base.attributes.v1.Attribute) | repeated | Attribute list of attributes tenant expects from the provider | + - -

Top

- ## akash/discovery/v1/akash.proto - + - ### Akash - Akash akash specific RPC parameters + ### SignedBy + SignedBy represents validation accounts that the tenant expects signatures from for provider attributes +AllOf has precedence i.e. if there is at least one entry AnyOf is ignored regardless of how many +entries there are; +this behaviour is to be discussed | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `client_info` | [ClientInfo](#akash.discovery.v1.ClientInfo) | | | + | `all_of` | [string](#string) | repeated | all_of all keys in this list must have signed attributes | + | `any_of` | [string](#string) | repeated | any_of at least one of the keys from the list must have signed attributes | @@ -747,110 +447,134 @@ - +
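Read together, the attribute tables above describe three small messages. A minimal proto3 sketch may help; only the names, types and labels come from the tables, while the field numbers are illustrative assumptions:

```proto
// Sketch of akash/base/attributes/v1/attribute.proto; field numbers are assumptions.
syntax = "proto3";
package akash.base.attributes.v1;

// Attribute represents a key/value pair.
message Attribute {
  string key   = 1;
  string value = 2;
}

// SignedBy lists accounts whose signed attributes the tenant requires.
// AllOf takes precedence: if it has at least one entry, AnyOf is ignored.
message SignedBy {
  repeated string all_of = 1; // all keys in this list must have signed attributes
  repeated string any_of = 2; // at least one of these keys must have signed attributes
}

// PlacementRequirements couples required signatures with required attributes.
message PlacementRequirements {
  SignedBy signed_by = 1;
  repeated Attribute attributes = 2;
}
```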

Top

- ## akash/provider/v1beta3/query.proto + ## akash/audit/v1/audit.proto - + - ### QueryProviderRequest - QueryProviderRequest is request type for the Query/Provider RPC method + ### AttributesFilters + AttributesFilters defines filters used to filter deployments | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | + | `auditors` | [string](#string) | repeated | | + | `owners` | [string](#string) | repeated | | - + - ### QueryProviderResponse - QueryProviderResponse is response type for the Query/Provider RPC method + ### AuditedAttributesStore + Attributes | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `provider` | [Provider](#akash.provider.v1beta3.Provider) | | | + | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | | - + - ### QueryProvidersRequest - QueryProvidersRequest is request type for the Query/Providers RPC method + ### AuditedProvider + Provider stores owner auditor and attributes details | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + | `owner` | [string](#string) | | | + | `auditor` | [string](#string) | | | + | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | | + + + + + + + + + + + + + + +
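The audit records above build on the attribute types from the previous section. A rough sketch of how the three messages fit together, again with assumed field numbers:

```proto
// Sketch of akash/audit/v1/audit.proto; field numbers are assumptions.
syntax = "proto3";
package akash.audit.v1;

import "akash/base/attributes/v1/attribute.proto";

// AuditedProvider stores owner, auditor and attribute details.
message AuditedProvider {
  string owner   = 1;
  string auditor = 2;
  repeated akash.base.attributes.v1.Attribute attributes = 3;
}

// AuditedAttributesStore holds the attributes signed for a provider.
message AuditedAttributesStore {
  repeated akash.base.attributes.v1.Attribute attributes = 1;
}

// AttributesFilters filters audited providers by auditor and/or owner.
message AttributesFilters {
  repeated string auditors = 1;
  repeated string owners   = 2;
}
```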

Top

+ ## akash/audit/v1/event.proto - + - ### QueryProvidersResponse - QueryProvidersResponse is response type for the Query/Providers RPC method + ### EventTrustedAuditorCreated + EventTrustedAuditorCreated is triggered when a trusted auditor is created | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `providers` | [Provider](#akash.provider.v1beta3.Provider) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + | `owner` | [string](#string) | | | + | `auditor` | [string](#string) | | | - + + - + ### EventTrustedAuditorDeleted + EventTrustedAuditorDeleted is triggered when a trusted auditor is deleted - + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `auditor` | [string](#string) | | | + + - - ### Query - Query defines the gRPC querier service + + + + + - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `Providers` | [QueryProvidersRequest](#akash.provider.v1beta3.QueryProvidersRequest) | [QueryProvidersResponse](#akash.provider.v1beta3.QueryProvidersResponse) | Providers queries providers | GET|/akash/provider/v1beta3/providers| - | `Provider` | [QueryProviderRequest](#akash.provider.v1beta3.QueryProviderRequest) | [QueryProviderResponse](#akash.provider.v1beta3.QueryProviderResponse) | Provider queries provider details | GET|/akash/provider/v1beta3/providers/{owner}| - - +

Top

- ## akash/provider/v1beta3/genesis.proto + ## akash/audit/v1/genesis.proto - + ### GenesisState - GenesisState defines the basic genesis state used by provider module + GenesisState defines the basic genesis state used by audit module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `providers` | [Provider](#akash.provider.v1beta3.Provider) | repeated | | + | `providers` | [AuditedProvider](#akash.audit.v1.AuditedProvider) | repeated | | @@ -866,157 +590,147 @@ - +

Top

- ## akash/provider/v1beta3/provider.proto + ## akash/audit/v1/msg.proto - + - ### MsgCreateProvider - MsgCreateProvider defines an SDK message for creating a provider + ### MsgDeleteProviderAttributes + MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | - | `host_uri` | [string](#string) | | | - | `attributes` | [akash.base.v1beta3.Attribute](#akash.base.v1beta3.Attribute) | repeated | | - | `info` | [ProviderInfo](#akash.provider.v1beta3.ProviderInfo) | | | + | `auditor` | [string](#string) | | | + | `keys` | [string](#string) | repeated | | - + - ### MsgCreateProviderResponse - MsgCreateProviderResponse defines the Msg/CreateProvider response type. + ### MsgDeleteProviderAttributesResponse + MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. - + - ### MsgDeleteProvider - MsgDeleteProvider defines an SDK message for deleting a provider + ### MsgSignProviderAttributes + MsgSignProviderAttributes defines an SDK message for signing a provider attributes | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | + | `auditor` | [string](#string) | | | + | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | | - + - ### MsgDeleteProviderResponse - MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. + ### MsgSignProviderAttributesResponse + MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. + + + + + + + + + + + + + + +
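The two audit messages and their empty responses could look roughly like this in proto3, with assumed field numbers:

```proto
// Sketch of akash/audit/v1/msg.proto; field numbers are assumptions.
syntax = "proto3";
package akash.audit.v1;

import "akash/base/attributes/v1/attribute.proto";

// MsgSignProviderAttributes signs a set of provider attributes.
message MsgSignProviderAttributes {
  string owner   = 1;
  string auditor = 2;
  repeated akash.base.attributes.v1.Attribute attributes = 3;
}

message MsgSignProviderAttributesResponse {}

// MsgDeleteProviderAttributes deletes previously signed attribute keys.
message MsgDeleteProviderAttributes {
  string owner   = 1;
  string auditor = 2;
  repeated string keys = 3;
}

message MsgDeleteProviderAttributesResponse {}
```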

Top

+ ## akash/audit/v1/query.proto - + - ### MsgUpdateProvider - MsgUpdateProvider defines an SDK message for updating a provider + ### QueryAllProvidersAttributesRequest + QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `host_uri` | [string](#string) | | | - | `attributes` | [akash.base.v1beta3.Attribute](#akash.base.v1beta3.Attribute) | repeated | | - | `info` | [ProviderInfo](#akash.provider.v1beta3.ProviderInfo) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | - + - ### MsgUpdateProviderResponse - MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. - - - - - - - - - ### Provider - Provider stores owner and host details + ### QueryAuditorAttributesRequest + QueryAuditorAttributesRequest is request type for the Query/Providers RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `host_uri` | [string](#string) | | | - | `attributes` | [akash.base.v1beta3.Attribute](#akash.base.v1beta3.Attribute) | repeated | | - | `info` | [ProviderInfo](#akash.provider.v1beta3.ProviderInfo) | | | + | `auditor` | [string](#string) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | - + - ### ProviderInfo - ProviderInfo + ### QueryProviderAttributesRequest + QueryProviderAttributesRequest is request type for the Query/Provider RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `email` | [string](#string) | | | - | `website` | [string](#string) | | | + | `owner` | [string](#string) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | - - - - - - - + - ### Msg - Msg defines the provider Msg service + ### QueryProviderAuditorRequest + QueryProviderAuditorRequest is request type for the Query/Providers RPC method - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `CreateProvider` | [MsgCreateProvider](#akash.provider.v1beta3.MsgCreateProvider) | [MsgCreateProviderResponse](#akash.provider.v1beta3.MsgCreateProviderResponse) | CreateProvider defines a method that creates a provider given the proper inputs | | - | `UpdateProvider` | [MsgUpdateProvider](#akash.provider.v1beta3.MsgUpdateProvider) | [MsgUpdateProviderResponse](#akash.provider.v1beta3.MsgUpdateProviderResponse) | UpdateProvider defines a method that updates a provider given the proper inputs | | - | `DeleteProvider` | [MsgDeleteProvider](#akash.provider.v1beta3.MsgDeleteProvider) | [MsgDeleteProviderResponse](#akash.provider.v1beta3.MsgDeleteProviderResponse) | DeleteProvider defines a method that deletes a provider given the proper inputs | | - - + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `auditor` | [string](#string) | | | + | `owner` | [string](#string) | | | - -

Top

- ## akash/provider/v1beta2/query.proto - + ### QueryProviderRequest QueryProviderRequest is request type for the Query/Provider RPC method @@ -1024,6 +738,7 @@ | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | + | `auditor` | [string](#string) | | | | `owner` | [string](#string) | | | @@ -1031,49 +746,48 @@ - + - ### QueryProviderResponse - QueryProviderResponse is response type for the Query/Provider RPC method + ### QueryProvidersResponse + QueryProvidersResponse is response type for the Query/Providers RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `provider` | [Provider](#akash.provider.v1beta2.Provider) | | | - + | `providers` | [AuditedProvider](#akash.audit.v1.AuditedProvider) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | - - - ### QueryProvidersRequest - QueryProvidersRequest is request type for the Query/Providers RPC method + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | - - + - + - + - ### QueryProvidersResponse - QueryProvidersResponse is response type for the Query/Providers RPC method + ### Query + Query defines the gRPC querier service + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `AllProvidersAttributes` | [QueryAllProvidersAttributesRequest](#akash.audit.v1.QueryAllProvidersAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1.QueryProvidersResponse) | AllProvidersAttributes queries all providers buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1/audit/attributes/list| + | `ProviderAttributes` | [QueryProviderAttributesRequest](#akash.audit.v1.QueryProviderAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1.QueryProvidersResponse) | ProviderAttributes queries all provider signed attributes buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1/audit/attributes/{owner}/list| + | `ProviderAuditorAttributes` | [QueryProviderAuditorRequest](#akash.audit.v1.QueryProviderAuditorRequest) | [QueryProvidersResponse](#akash.audit.v1.QueryProvidersResponse) | ProviderAuditorAttributes queries provider signed attributes by specific auditor buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1/audit/attributes/{auditor}/{owner}| + | `AuditorAttributes` | [QueryAuditorAttributesRequest](#akash.audit.v1.QueryAuditorAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1.QueryProvidersResponse) | AuditorAttributes queries all providers signed by this auditor buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/provider/v1/auditor/{auditor}/list| - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `providers` | [Provider](#akash.provider.v1beta2.Provider) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + + + +

Top

+ ## akash/audit/v1/service.proto @@ -1083,36 +797,36 @@ - + - ### Query - Query defines the gRPC querier service + ### Msg + Msg defines the provider Msg service | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `Providers` | [QueryProvidersRequest](#akash.provider.v1beta2.QueryProvidersRequest) | [QueryProvidersResponse](#akash.provider.v1beta2.QueryProvidersResponse) | Providers queries providers | GET|/akash/provider/v1beta2/providers| - | `Provider` | [QueryProviderRequest](#akash.provider.v1beta2.QueryProviderRequest) | [QueryProviderResponse](#akash.provider.v1beta2.QueryProviderResponse) | Provider queries provider details | GET|/akash/provider/v1beta2/providers/{owner}| + | `SignProviderAttributes` | [MsgSignProviderAttributes](#akash.audit.v1.MsgSignProviderAttributes) | [MsgSignProviderAttributesResponse](#akash.audit.v1.MsgSignProviderAttributesResponse) | SignProviderAttributes defines a method that signs provider attributes | | + | `DeleteProviderAttributes` | [MsgDeleteProviderAttributes](#akash.audit.v1.MsgDeleteProviderAttributes) | [MsgDeleteProviderAttributesResponse](#akash.audit.v1.MsgDeleteProviderAttributesResponse) | DeleteProviderAttributes defines a method that deletes provider attributes | | - +

Top

- ## akash/provider/v1beta2/genesis.proto + ## akash/base/resources/v1beta4/resourcevalue.proto - + - ### GenesisState - GenesisState defines the basic genesis state used by provider module + ### ResourceValue + Unit stores cpu, memory and storage metrics | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `providers` | [Provider](#akash.provider.v1beta2.Provider) | repeated | | + | `val` | [bytes](#bytes) | | | @@ -1128,122 +842,100 @@ - +

Top

- ## akash/provider/v1beta2/provider.proto + ## akash/base/resources/v1beta4/cpu.proto - + - ### MsgCreateProvider - MsgCreateProvider defines an SDK message for creating a provider + ### CPU + CPU stores resource units and cpu config attributes | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `host_uri` | [string](#string) | | | - | `attributes` | [akash.base.v1beta2.Attribute](#akash.base.v1beta2.Attribute) | repeated | | - | `info` | [ProviderInfo](#akash.provider.v1beta2.ProviderInfo) | | | - - - + | `units` | [ResourceValue](#akash.base.resources.v1beta4.ResourceValue) | | | + | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | | - - - - - ### MsgCreateProviderResponse - MsgCreateProviderResponse defines the Msg/CreateProvider response type. - - - + - ### MsgDeleteProvider - MsgDeleteProvider defines an SDK message for deleting a provider + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - - + - + - - - ### MsgDeleteProviderResponse - MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. - + +

Top

+ ## akash/base/resources/v1beta4/endpoint.proto - + - ### MsgUpdateProvider - MsgUpdateProvider defines an SDK message for updating a provider + ### Endpoint + Endpoint describes a publicly accessible IP service | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `host_uri` | [string](#string) | | | - | `attributes` | [akash.base.v1beta2.Attribute](#akash.base.v1beta2.Attribute) | repeated | | - | `info` | [ProviderInfo](#akash.provider.v1beta2.ProviderInfo) | | | + | `kind` | [Endpoint.Kind](#akash.base.resources.v1beta4.Endpoint.Kind) | | | + | `sequence_number` | [uint32](#uint32) | | | + + - + - ### MsgUpdateProviderResponse - MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. + ### Endpoint.Kind + This describes how the endpoint is implemented when the lease is deployed + | Name | Number | Description | + | ---- | ------ | ----------- | + | SHARED_HTTP | 0 | Describes an endpoint that becomes a Kubernetes Ingress | + | RANDOM_PORT | 1 | Describes an endpoint that becomes a Kubernetes NodePort | + | LEASED_IP | 2 | Describes an endpoint that becomes a leased IP | - + - - + - ### Provider - Provider stores owner and host details + - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `host_uri` | [string](#string) | | | - | `attributes` | [akash.base.v1beta2.Attribute](#akash.base.v1beta2.Attribute) | repeated | | - | `info` | [ProviderInfo](#akash.provider.v1beta2.ProviderInfo) | | | - + +
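Since the Kind enum decides how a lease exposes the endpoint, a sketch of the message and enum together makes the mapping explicit; enum numbers come from the table, field numbers are assumed:

```proto
// Sketch of akash/base/resources/v1beta4/endpoint.proto; field numbers are assumptions.
syntax = "proto3";
package akash.base.resources.v1beta4;

// Endpoint describes a publicly accessible IP service.
message Endpoint {
  // Kind describes how the endpoint is implemented when the lease is deployed.
  enum Kind {
    SHARED_HTTP = 0; // becomes a Kubernetes Ingress
    RANDOM_PORT = 1; // becomes a Kubernetes NodePort
    LEASED_IP   = 2; // becomes a leased IP
  }

  Kind   kind            = 1;
  uint32 sequence_number = 2;
}
```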

Top

+ ## akash/base/resources/v1beta4/gpu.proto - + - ### ProviderInfo - ProviderInfo + ### GPU + GPU stores resource units and gpu config attributes | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `email` | [string](#string) | | | - | `website` | [string](#string) | | | + | `units` | [ResourceValue](#akash.base.resources.v1beta4.ResourceValue) | | | + | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | | @@ -1255,138 +947,146 @@ - - - - ### Msg - Msg defines the provider Msg service - - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `CreateProvider` | [MsgCreateProvider](#akash.provider.v1beta2.MsgCreateProvider) | [MsgCreateProviderResponse](#akash.provider.v1beta2.MsgCreateProviderResponse) | CreateProvider defines a method that creates a provider given the proper inputs | | - | `UpdateProvider` | [MsgUpdateProvider](#akash.provider.v1beta2.MsgUpdateProvider) | [MsgUpdateProviderResponse](#akash.provider.v1beta2.MsgUpdateProviderResponse) | UpdateProvider defines a method that updates a provider given the proper inputs | | - | `DeleteProvider` | [MsgDeleteProvider](#akash.provider.v1beta2.MsgDeleteProvider) | [MsgDeleteProviderResponse](#akash.provider.v1beta2.MsgDeleteProviderResponse) | DeleteProvider defines a method that deletes a provider given the proper inputs | | - - +

Top

- ## akash/provider/v1beta1/provider.proto + ## akash/base/resources/v1beta4/memory.proto - + - ### MsgCreateProvider - MsgCreateProvider defines an SDK message for creating a provider + ### Memory + Memory stores resource quantity and memory attributes | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `host_uri` | [string](#string) | | | - | `attributes` | [akash.base.v1beta1.Attribute](#akash.base.v1beta1.Attribute) | repeated | | - | `info` | [ProviderInfo](#akash.provider.v1beta1.ProviderInfo) | | | + | `quantity` | [ResourceValue](#akash.base.resources.v1beta4.ResourceValue) | | | + | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | | - - + - ### MsgCreateProviderResponse - MsgCreateProviderResponse defines the Msg/CreateProvider response type. + + + + + + + +

Top

+ ## akash/base/resources/v1beta4/storage.proto - + - ### MsgDeleteProvider - MsgDeleteProvider defines an SDK message for deleting a provider + ### Storage + Storage stores resource quantity and storage attributes | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | + | `name` | [string](#string) | | | + | `quantity` | [ResourceValue](#akash.base.resources.v1beta4.ResourceValue) | | | + | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | | - - + - ### MsgDeleteProviderResponse - MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. + + + + + + + +

Top

+ ## akash/base/resources/v1beta4/resources.proto - + - ### MsgUpdateProvider - MsgUpdateProvider defines an SDK message for updating a provider + ### Resources + Resources describes all available resources types for deployment/node etc +if field is nil resource is not present in the given data-structure | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `host_uri` | [string](#string) | | | - | `attributes` | [akash.base.v1beta1.Attribute](#akash.base.v1beta1.Attribute) | repeated | | - | `info` | [ProviderInfo](#akash.provider.v1beta1.ProviderInfo) | | | + | `id` | [uint32](#uint32) | | | + | `cpu` | [CPU](#akash.base.resources.v1beta4.CPU) | | | + | `memory` | [Memory](#akash.base.resources.v1beta4.Memory) | | | + | `storage` | [Storage](#akash.base.resources.v1beta4.Storage) | repeated | | + | `gpu` | [GPU](#akash.base.resources.v1beta4.GPU) | | | + | `endpoints` | [Endpoint](#akash.base.resources.v1beta4.Endpoint) | repeated | | - - + - ### MsgUpdateProviderResponse - MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. + + + + + + + +
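Resources is the aggregate that the other v1beta4 files feed into. A sketch with the documented file names as imports; field numbers are assumed:

```proto
// Sketch of akash/base/resources/v1beta4/resources.proto; field numbers are assumptions.
syntax = "proto3";
package akash.base.resources.v1beta4;

import "akash/base/resources/v1beta4/cpu.proto";
import "akash/base/resources/v1beta4/gpu.proto";
import "akash/base/resources/v1beta4/memory.proto";
import "akash/base/resources/v1beta4/storage.proto";
import "akash/base/resources/v1beta4/endpoint.proto";

// Resources aggregates every resource type for a deployment or node.
// A nil field means the resource is not present in the given data structure.
message Resources {
  uint32 id     = 1;
  CPU    cpu    = 2;
  Memory memory = 3;
  repeated Storage storage = 4;
  GPU    gpu    = 5;
  repeated Endpoint endpoints = 6;
}
```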

Top

+ ## akash/cert/v1/cert.proto - + - ### Provider - Provider stores owner and host details + ### Certificate + Certificate stores state, certificate and its public key | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `host_uri` | [string](#string) | | | - | `attributes` | [akash.base.v1beta1.Attribute](#akash.base.v1beta1.Attribute) | repeated | | - | `info` | [ProviderInfo](#akash.provider.v1beta1.ProviderInfo) | | | + | `state` | [State](#akash.cert.v1.State) | | | + | `cert` | [bytes](#bytes) | | | + | `pubkey` | [bytes](#bytes) | | | - + - ### ProviderInfo - ProviderInfo + ### ID + ID stores owner and sequence number | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `email` | [string](#string) | | | - | `website` | [string](#string) | | | + | `owner` | [string](#string) | | | + | `serial` | [string](#string) | | | @@ -1394,219 +1094,207 @@ - + + - + ### State + State is an enum which refers to state of certificate + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | + | valid | 1 | CertificateValid denotes state for certificate valid | + | revoked | 2 | CertificateRevoked denotes state for certificate revoked | - - ### Msg - Msg defines the provider Msg service + + + - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `CreateProvider` | [MsgCreateProvider](#akash.provider.v1beta1.MsgCreateProvider) | [MsgCreateProviderResponse](#akash.provider.v1beta1.MsgCreateProviderResponse) | CreateProvider defines a method that creates a provider given the proper inputs | | - | `UpdateProvider` | [MsgUpdateProvider](#akash.provider.v1beta1.MsgUpdateProvider) | [MsgUpdateProviderResponse](#akash.provider.v1beta1.MsgUpdateProviderResponse) | UpdateProvider defines a method that updates a provider given the proper inputs | | - | `DeleteProvider` | [MsgDeleteProvider](#akash.provider.v1beta1.MsgDeleteProvider) | [MsgDeleteProviderResponse](#akash.provider.v1beta1.MsgDeleteProviderResponse) | DeleteProvider defines a method that deletes a provider given the proper inputs | | - - +
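A sketch of the certificate types described above, with assumed field numbers:

```proto
// Sketch of akash/cert/v1/cert.proto; field numbers are assumptions.
syntax = "proto3";
package akash.cert.v1;

// State enumerates the lifecycle of a certificate.
enum State {
  invalid = 0; // dummy state, proto3 enums must start at 0
  valid   = 1; // certificate is valid
  revoked = 2; // certificate has been revoked
}

// ID stores the owner address and certificate serial number.
message ID {
  string owner  = 1;
  string serial = 2;
}

// Certificate stores the state, the certificate bytes and its public key.
message Certificate {
  State state  = 1;
  bytes cert   = 2;
  bytes pubkey = 3;
}
```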

Top

- ## akash/audit/v1beta3/audit.proto + ## akash/cert/v1/filters.proto - + - ### AttributesFilters - AttributesFilters defines filters used to filter deployments + ### CertificateFilter + CertificateFilter defines filters used to filter certificates | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `auditors` | [string](#string) | repeated | | - | `owners` | [string](#string) | repeated | | + | `owner` | [string](#string) | | | + | `serial` | [string](#string) | | | + | `state` | [string](#string) | | | - - + - ### AttributesResponse - AttributesResponse represents details of deployment along with group details + + + + + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `attributes` | [AuditedAttributes](#akash.audit.v1beta3.AuditedAttributes) | repeated | | + +

Top

+ ## akash/cert/v1/genesis.proto - + - ### AuditedAttributes - Attributes + ### GenesisCertificate + GenesisCertificate defines certificate entry at genesis | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | - | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta3.Attribute](#akash.base.v1beta3.Attribute) | repeated | | + | `certificate` | [Certificate](#akash.cert.v1.Certificate) | | | - + - ### MsgDeleteProviderAttributes - MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes + ### GenesisState + GenesisState defines the basic genesis state used by cert module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `auditor` | [string](#string) | | | - | `keys` | [string](#string) | repeated | | + | `certificates` | [GenesisCertificate](#akash.cert.v1.GenesisCertificate) | repeated | | - - + - ### MsgDeleteProviderAttributesResponse - MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. + + + + + + + +

Top

+ ## akash/cert/v1/msg.proto - + - ### MsgSignProviderAttributes - MsgSignProviderAttributes defines an SDK message for signing a provider attributes + ### MsgCreateCertificate + MsgCreateCertificate defines an SDK message for creating certificate | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | - | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta3.Attribute](#akash.base.v1beta3.Attribute) | repeated | | + | `cert` | [bytes](#bytes) | | | + | `pubkey` | [bytes](#bytes) | | | - + - ### MsgSignProviderAttributesResponse - MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. + ### MsgCreateCertificateResponse + MsgCreateCertificateResponse defines the Msg/CreateCertificate response type. - + - ### Provider - Provider stores owner auditor and attributes details + ### MsgRevokeCertificate + MsgRevokeCertificate defines an SDK message for revoking certificate | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta3.Attribute](#akash.base.v1beta3.Attribute) | repeated | | + | `id` | [ID](#akash.cert.v1.ID) | | | - - - - - - - + - ### Msg - Msg defines the provider Msg service + ### MsgRevokeCertificateResponse + MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type. - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `SignProviderAttributes` | [MsgSignProviderAttributes](#akash.audit.v1beta3.MsgSignProviderAttributes) | [MsgSignProviderAttributesResponse](#akash.audit.v1beta3.MsgSignProviderAttributesResponse) | SignProviderAttributes defines a method that signs provider attributes | | - | `DeleteProviderAttributes` | [MsgDeleteProviderAttributes](#akash.audit.v1beta3.MsgDeleteProviderAttributes) | [MsgDeleteProviderAttributesResponse](#akash.audit.v1beta3.MsgDeleteProviderAttributesResponse) | DeleteProviderAttributes defines a method that deletes provider attributes | | - - - -
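The certificate messages mirror the types above; a sketch with assumed field numbers:

```proto
// Sketch of akash/cert/v1/msg.proto; field numbers are assumptions.
syntax = "proto3";
package akash.cert.v1;

import "akash/cert/v1/cert.proto";

// MsgCreateCertificate submits a new certificate and its public key.
message MsgCreateCertificate {
  string owner  = 1;
  bytes  cert   = 2;
  bytes  pubkey = 3;
}

message MsgCreateCertificateResponse {}

// MsgRevokeCertificate revokes the certificate identified by ID.
message MsgRevokeCertificate {
  ID id = 1;
}

message MsgRevokeCertificateResponse {}
```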

Top

- ## akash/audit/v1beta3/query.proto - + - - + - ### QueryAllProvidersAttributesRequest - QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method + + + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + +

Top

+ ## akash/cert/v1/query.proto - + - ### QueryAuditorAttributesRequest - QueryAuditorAttributesRequest is request type for the Query/Providers RPC method + ### CertificateResponse + CertificateResponse contains a single X509 certificate and its serial number | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `auditor` | [string](#string) | | | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + | `certificate` | [Certificate](#akash.cert.v1.Certificate) | | | + | `serial` | [string](#string) | | | - + - ### QueryProviderAttributesRequest - QueryProviderAttributesRequest is request type for the Query/Provider RPC method + ### QueryCertificatesRequest + QueryDeploymentsRequest is request type for the Query/Deployments RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | + | `filter` | [CertificateFilter](#akash.cert.v1.CertificateFilter) | | | | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | @@ -1614,51 +1302,45 @@ - + - ### QueryProviderAuditorRequest - QueryProviderAuditorRequest is request type for the Query/Providers RPC method + ### QueryCertificatesResponse + QueryCertificatesResponse is response type for the Query/Certificates RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `auditor` | [string](#string) | | | - | `owner` | [string](#string) | | | - + | `certificates` | [CertificateResponse](#akash.cert.v1.CertificateResponse) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | - - - ### QueryProviderRequest - QueryProviderRequest is request type for the Query/Provider RPC method + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `auditor` | [string](#string) | | | - | `owner` | [string](#string) | | | - - + - + - + - ### QueryProvidersResponse - QueryProvidersResponse is response type for the Query/Providers RPC method + ### Query + Query defines the gRPC querier service + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `Certificates` | [QueryCertificatesRequest](#akash.cert.v1.QueryCertificatesRequest) | [QueryCertificatesResponse](#akash.cert.v1.QueryCertificatesResponse) | Certificates queries certificates | GET|/akash/cert/v1/certificates/list| - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `providers` | [Provider](#akash.audit.v1beta3.Provider) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + + + +

Top

+ ## akash/cert/v1/service.proto @@ -1668,38 +1350,37 @@ - + - ### Query - Query defines the gRPC querier service + ### Msg + Msg defines the provider Msg service | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `AllProvidersAttributes` | [QueryAllProvidersAttributesRequest](#akash.audit.v1beta3.QueryAllProvidersAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta3.QueryProvidersResponse) | AllProvidersAttributes queries all providers buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta3/audit/attributes/list| - | `ProviderAttributes` | [QueryProviderAttributesRequest](#akash.audit.v1beta3.QueryProviderAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta3.QueryProvidersResponse) | ProviderAttributes queries all provider signed attributes buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta3/audit/attributes/{owner}/list| - | `ProviderAuditorAttributes` | [QueryProviderAuditorRequest](#akash.audit.v1beta3.QueryProviderAuditorRequest) | [QueryProvidersResponse](#akash.audit.v1beta3.QueryProvidersResponse) | ProviderAuditorAttributes queries provider signed attributes by specific auditor buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta3/audit/attributes/{auditor}/{owner}| - | `AuditorAttributes` | [QueryAuditorAttributesRequest](#akash.audit.v1beta3.QueryAuditorAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta3.QueryProvidersResponse) | AuditorAttributes queries all providers signed by this auditor buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/provider/v1beta3/auditor/{auditor}/list| + | `CreateCertificate` | [MsgCreateCertificate](#akash.cert.v1.MsgCreateCertificate) | [MsgCreateCertificateResponse](#akash.cert.v1.MsgCreateCertificateResponse) | CreateCertificate defines a method to create new certificate given proper inputs. | | + | `RevokeCertificate` | [MsgRevokeCertificate](#akash.cert.v1.MsgRevokeCertificate) | [MsgRevokeCertificateResponse](#akash.cert.v1.MsgRevokeCertificateResponse) | RevokeCertificate defines a method to revoke the certificate | | - +

Top

- ## akash/audit/v1beta3/genesis.proto + ## akash/deployment/v1/authz.proto - + - ### GenesisState - GenesisState defines the basic genesis state used by audit module + ### DepositAuthorization + DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from +the granter's account for a deployment. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `attributes` | [AuditedAttributes](#akash.audit.v1beta3.AuditedAttributes) | repeated | | + | `spend_limit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | SpendLimit is the amount the grantee is authorized to spend from the granter's account for the purpose of deployment. | @@ -1715,126 +1396,87 @@ - +
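A sketch of the authorization message, assuming the standard cosmos coin import and an illustrative field number:

```proto
// Sketch of akash/deployment/v1/authz.proto; field numbers are assumptions.
syntax = "proto3";
package akash.deployment.v1;

import "cosmos/base/v1beta1/coin.proto";

// DepositAuthorization allows the grantee to deposit up to spend_limit coins
// from the granter's account for a deployment.
message DepositAuthorization {
  // SpendLimit is the amount the grantee is authorized to spend from the
  // granter's account for the purpose of deployment.
  cosmos.base.v1beta1.Coin spend_limit = 1;
}
```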

Top

- ## akash/audit/v1beta2/audit.proto - - - - - - ### AttributesFilters - AttributesFilters defines filters used to filter deployments - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `auditors` | [string](#string) | repeated | | - | `owners` | [string](#string) | repeated | | - - - + ## akash/deployment/v1/deployment.proto - + - ### AttributesResponse - AttributesResponse represents details of deployment along with group details - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `attributes` | [AuditedAttributes](#akash.audit.v1beta2.AuditedAttributes) | repeated | | - - - - - - - - - ### AuditedAttributes - Attributes + ### Deployment + Deployment stores deploymentID, state and checksum details | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta2.Attribute](#akash.base.v1beta2.Attribute) | repeated | | + | `id` | [DeploymentID](#akash.deployment.v1.DeploymentID) | | | + | `state` | [Deployment.State](#akash.deployment.v1.Deployment.State) | | | + | `hash` | [bytes](#bytes) | | | + | `created_at` | [int64](#int64) | | | - + - ### MsgDeleteProviderAttributes - MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes + ### DeploymentID + DeploymentID stores owner and sequence number | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | - | `auditor` | [string](#string) | | | - | `keys` | [string](#string) | repeated | | - + | `dseq` | [uint64](#uint64) | | | - - - ### MsgDeleteProviderAttributesResponse - MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. + + - + ### Deployment.State + State is an enum which refers to state of deployment + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | + | active | 1 | DeploymentActive denotes state for deployment active | + | closed | 2 | DeploymentClosed denotes state for deployment closed | - - ### MsgSignProviderAttributes - MsgSignProviderAttributes defines an SDK message for signing a provider attributes + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta2.Attribute](#akash.base.v1beta2.Attribute) | repeated | | - - + - + - - - ### MsgSignProviderAttributesResponse - MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. - + +
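A sketch of the deployment identifier and record, with assumed field numbers and the state enum nested as the anchors above suggest:

```proto
// Sketch of akash/deployment/v1/deployment.proto; field numbers are assumptions.
syntax = "proto3";
package akash.deployment.v1;

// DeploymentID stores the owner address and deployment sequence number.
message DeploymentID {
  string owner = 1;
  uint64 dseq  = 2;
}

// Deployment stores the deployment ID, state and manifest hash.
message Deployment {
  // State is an enum which refers to state of deployment.
  enum State {
    invalid = 0; // dummy state
    active  = 1; // deployment is active
    closed  = 2; // deployment is closed
  }

  DeploymentID id         = 1;
  State        state      = 2;
  bytes        hash       = 3;
  int64        created_at = 4;
}
```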

Top

+ ## akash/deployment/v1/group.proto - + - ### Provider - Provider stores owner auditor and attributes details + ### GroupID + GroupID stores owner, deployment sequence number and group sequence number | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | - | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta2.Attribute](#akash.base.v1beta2.Attribute) | repeated | | + | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint32](#uint32) | | | @@ -1846,117 +1488,103 @@ - - - - ### Msg - Msg defines the provider Msg service - - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `SignProviderAttributes` | [MsgSignProviderAttributes](#akash.audit.v1beta2.MsgSignProviderAttributes) | [MsgSignProviderAttributesResponse](#akash.audit.v1beta2.MsgSignProviderAttributesResponse) | SignProviderAttributes defines a method that signs provider attributes | | - | `DeleteProviderAttributes` | [MsgDeleteProviderAttributes](#akash.audit.v1beta2.MsgDeleteProviderAttributes) | [MsgDeleteProviderAttributesResponse](#akash.audit.v1beta2.MsgDeleteProviderAttributesResponse) | DeleteProviderAttributes defines a method that deletes provider attributes | | - - +

Top

- ## akash/audit/v1beta2/query.proto + ## akash/deployment/v1/event.proto - + - ### QueryAllProvidersAttributesRequest - QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method + ### EventDeploymentClosed + EventDeploymentClosed is triggered when deployment is closed on chain | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + | `id` | [DeploymentID](#akash.deployment.v1.DeploymentID) | | | - + - ### QueryAuditorAttributesRequest - QueryAuditorAttributesRequest is request type for the Query/Providers RPC method + ### EventDeploymentCreated + EventDeploymentCreated event is triggered when deployment is created on chain | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `auditor` | [string](#string) | | | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + | `id` | [DeploymentID](#akash.deployment.v1.DeploymentID) | | | + | `hash` | [bytes](#bytes) | | | - + - ### QueryProviderAttributesRequest - QueryProviderAttributesRequest is request type for the Query/Provider RPC method + ### EventDeploymentUpdated + EventDeploymentUpdated is triggered when deployment is updated on chain | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + | `id` | [DeploymentID](#akash.deployment.v1.DeploymentID) | | | + | `hash` | [bytes](#bytes) | | | - + - ### QueryProviderAuditorRequest - QueryProviderAuditorRequest is request type for the Query/Providers RPC method + ### EventGroupClosed + EventGroupClosed is triggered when deployment group is closed | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `auditor` | [string](#string) | | | - | `owner` | [string](#string) | | | + | `id` | [GroupID](#akash.deployment.v1.GroupID) | | | - + - ### QueryProviderRequest - QueryProviderRequest is request type for the Query/Provider RPC method + ### EventGroupPaused + EventGroupPaused is triggered when deployment group is paused | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `auditor` | [string](#string) | | | - | `owner` | [string](#string) | | | + | `id` | [GroupID](#akash.deployment.v1.GroupID) | | | - + - ### QueryProvidersResponse - QueryProvidersResponse is response type for the Query/Providers RPC method + ### EventGroupStarted + EventGroupStarted is triggered when deployment group is started | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `providers` | [Provider](#akash.audit.v1beta2.Provider) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + | `id` | [GroupID](#akash.deployment.v1.GroupID) | | | @@ -1968,174 +1596,251 @@ - - - - ### Query - Query defines the gRPC querier service - - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `AllProvidersAttributes` | [QueryAllProvidersAttributesRequest](#akash.audit.v1beta2.QueryAllProvidersAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta2.QueryProvidersResponse) | AllProvidersAttributes queries all providers buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE 
buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta2/audit/attributes/list| - | `ProviderAttributes` | [QueryProviderAttributesRequest](#akash.audit.v1beta2.QueryProviderAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta2.QueryProvidersResponse) | ProviderAttributes queries all provider signed attributes buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta2/audit/attributes/{owner}/list| - | `ProviderAuditorAttributes` | [QueryProviderAuditorRequest](#akash.audit.v1beta2.QueryProviderAuditorRequest) | [QueryProvidersResponse](#akash.audit.v1beta2.QueryProvidersResponse) | ProviderAuditorAttributes queries provider signed attributes by specific auditor buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta2/audit/attributes/{auditor}/{owner}| - | `AuditorAttributes` | [QueryAuditorAttributesRequest](#akash.audit.v1beta2.QueryAuditorAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta2.QueryProvidersResponse) | AuditorAttributes queries all providers signed by this auditor buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/provider/v1beta2/auditor/{auditor}/list| - - +

Top

- ## akash/audit/v1beta2/genesis.proto + ## akash/deployment/v1/msg.proto - + - ### GenesisState - GenesisState defines the basic genesis state used by audit module + ### MsgDepositDeployment + MsgDepositDeployment deposits more funds into the deposit account | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `attributes` | [AuditedAttributes](#akash.audit.v1beta2.AuditedAttributes) | repeated | | + | `id` | [DeploymentID](#akash.deployment.v1.DeploymentID) | | | + | `amount` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `depositor` | [string](#string) | | Depositor pays for the deposit | - + + - + ### MsgDepositDeploymentResponse + MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. - + + + + + + + + + - +

Top

- ## akash/audit/v1beta1/audit.proto + ## akash/deployment/v1beta4/resourceunit.proto - + - ### AttributesFilters - AttributesFilters defines filters used to filter deployments + ### ResourceUnit + ResourceUnit extends Resources and adds Count along with the Price | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `auditors` | [string](#string) | repeated | | - | `owners` | [string](#string) | repeated | | + | `resource` | [akash.base.resources.v1beta4.Resources](#akash.base.resources.v1beta4.Resources) | | | + | `count` | [uint32](#uint32) | | | + | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | + + + + + + + + + + + +

Top

+ + ## akash/deployment/v1beta4/groupspec.proto - - ### AttributesResponse - AttributesResponse represents details of deployment along with group details + + + + ### GroupSpec + Spec stores group specifications | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `attributes` | [AuditedAttributes](#akash.audit.v1beta1.AuditedAttributes) | repeated | | + | `name` | [string](#string) | | | + | `requirements` | [akash.base.attributes.v1.PlacementRequirements](#akash.base.attributes.v1.PlacementRequirements) | | | + | `resources` | [ResourceUnit](#akash.deployment.v1beta4.ResourceUnit) | repeated | | + + + + + + + + - + + +
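GroupSpec and the ResourceUnit it embeds are shown in one sketch below for brevity (in the real tree they live in separate files); field numbers are assumed:

```proto
// Sketch of GroupSpec and ResourceUnit from akash/deployment/v1beta4;
// field numbers are assumptions.
syntax = "proto3";
package akash.deployment.v1beta4;

import "akash/base/attributes/v1/attribute.proto";
import "akash/base/resources/v1beta4/resources.proto";
import "cosmos/base/v1beta1/coin.proto";

// ResourceUnit extends Resources with a count and a price.
message ResourceUnit {
  akash.base.resources.v1beta4.Resources resource = 1;
  uint32 count = 2;
  cosmos.base.v1beta1.DecCoin price = 3;
}

// GroupSpec stores a group's name, placement requirements and resources.
message GroupSpec {
  string name = 1;
  akash.base.attributes.v1.PlacementRequirements requirements = 2;
  repeated ResourceUnit resources = 3;
}
```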

Top

- ### AuditedAttributes - Attributes + ## akash/deployment/v1beta4/deploymentmsg.proto + + + + + + ### MsgCloseDeployment + MsgCloseDeployment defines an SDK message for closing deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta1.Attribute](#akash.base.v1beta1.Attribute) | repeated | | + | `id` | [akash.deployment.v1.DeploymentID](#akash.deployment.v1.DeploymentID) | | | - + - ### MsgDeleteProviderAttributes - MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes + ### MsgCloseDeploymentResponse + MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. + + + + + + + + + ### MsgCreateDeployment + MsgCreateDeployment defines an SDK message for creating deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `auditor` | [string](#string) | | | - | `keys` | [string](#string) | repeated | | + | `id` | [akash.deployment.v1.DeploymentID](#akash.deployment.v1.DeploymentID) | | | + | `groups` | [GroupSpec](#akash.deployment.v1beta4.GroupSpec) | repeated | | + | `hash` | [bytes](#bytes) | | | + | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `depositor` | [string](#string) | | Depositor pays for the deposit | - + - ### MsgDeleteProviderAttributesResponse - MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. + ### MsgCreateDeploymentResponse + MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. - + - ### MsgSignProviderAttributes - MsgSignProviderAttributes defines an SDK message for signing a provider attributes + ### MsgUpdateDeployment + MsgUpdateDeployment defines an SDK message for updating deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta1.Attribute](#akash.base.v1beta1.Attribute) | repeated | | + | `id` | [akash.deployment.v1.DeploymentID](#akash.deployment.v1.DeploymentID) | | | + | `hash` | [bytes](#bytes) | | | - + - ### MsgSignProviderAttributesResponse - MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. + ### MsgUpdateDeploymentResponse + MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. + + + + + + + + + - + +
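A sketch of the create/close messages, with assumed field numbers; MsgUpdateDeployment follows the same pattern with only the id and hash fields:

```proto
// Sketch of akash/deployment/v1beta4/deploymentmsg.proto; field numbers are assumptions.
syntax = "proto3";
package akash.deployment.v1beta4;

import "akash/deployment/v1/deployment.proto";
import "akash/deployment/v1beta4/groupspec.proto";
import "cosmos/base/v1beta1/coin.proto";

// MsgCreateDeployment creates a deployment from its groups and manifest hash.
message MsgCreateDeployment {
  akash.deployment.v1.DeploymentID id = 1;
  repeated GroupSpec groups = 2;
  bytes hash = 3;
  cosmos.base.v1beta1.Coin deposit = 4;
  string depositor = 5; // depositor pays for the deposit
}

message MsgCreateDeploymentResponse {}

// MsgCloseDeployment closes an existing deployment by ID.
message MsgCloseDeployment {
  akash.deployment.v1.DeploymentID id = 1;
}

message MsgCloseDeploymentResponse {}
```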

Top

- ### Provider - Provider stores owner auditor and attributes details + ## akash/deployment/v1beta4/filters.proto + + + + + + ### DeploymentFilters + DeploymentFilters defines filters used to filter deployments | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | - | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta1.Attribute](#akash.base.v1beta1.Attribute) | repeated | | + | `dseq` | [uint64](#uint64) | | | + | `state` | [string](#string) | | | + + + + + + + + + ### GroupFilters + GroupFilters defines filters used to filter groups + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint64](#uint64) | | | + | `state` | [string](#string) | | | @@ -2147,62 +1852,75 @@ + + - + + +

Top

- ### Msg - Msg defines the provider Msg service + ## akash/deployment/v1beta4/group.proto + - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `SignProviderAttributes` | [MsgSignProviderAttributes](#akash.audit.v1beta1.MsgSignProviderAttributes) | [MsgSignProviderAttributesResponse](#akash.audit.v1beta1.MsgSignProviderAttributesResponse) | SignProviderAttributes defines a method that signs provider attributes | | - | `DeleteProviderAttributes` | [MsgDeleteProviderAttributes](#akash.audit.v1beta1.MsgDeleteProviderAttributes) | [MsgDeleteProviderAttributesResponse](#akash.audit.v1beta1.MsgDeleteProviderAttributesResponse) | DeleteProviderAttributes defines a method that deletes provider attributes | | - + + ### Group + Group stores group id, state and specifications of group + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [akash.deployment.v1.GroupID](#akash.deployment.v1.GroupID) | | | + | `state` | [Group.State](#akash.deployment.v1beta4.Group.State) | | | + | `group_spec` | [GroupSpec](#akash.deployment.v1beta4.GroupSpec) | | | + | `created_at` | [int64](#int64) | | | - -
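A sketch of the group record, with assumed field numbers and the state enum nested as the Group.State anchor indicates:

```proto
// Sketch of akash/deployment/v1beta4/group.proto; field numbers are assumptions.
syntax = "proto3";
package akash.deployment.v1beta4;

import "akash/deployment/v1/group.proto";
import "akash/deployment/v1beta4/groupspec.proto";

// Group stores the group ID, its state and its specification.
message Group {
  // State is an enum which refers to state of group.
  enum State {
    invalid            = 0; // dummy state
    open               = 1;
    paused             = 2;
    insufficient_funds = 3;
    closed             = 4;
  }

  akash.deployment.v1.GroupID id = 1;
  State     state      = 2;
  GroupSpec group_spec = 3;
  int64     created_at = 4;
}
```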

Top

- ## akash/take/v1beta3/query.proto - + + - + ### Group.State + State is an enum which refers to state of group + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | + | open | 1 | GroupOpen denotes state for group open | + | paused | 2 | GroupOrdered denotes state for group ordered | + | insufficient_funds | 3 | GroupInsufficientFunds denotes state for group insufficient_funds | + | closed | 4 | GroupClosed denotes state for group closed | - - ### Query - Query defines the gRPC querier service + + + - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - - +

Top

- ## akash/take/v1beta3/genesis.proto + ## akash/deployment/v1beta4/params.proto - + - ### GenesisState - GenesisState stores slice of genesis deployment instance + ### Params + Params defines the parameters for the x/deployment module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `params` | [Params](#akash.take.v1beta3.Params) | | | + | `min_deposits` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | repeated | | @@ -2218,39 +1936,39 @@ - +

Top

- ## akash/take/v1beta3/params.proto + ## akash/deployment/v1beta4/genesis.proto - + - ### DenomTakeRate - DenomTakeRate describes take rate for specified denom + ### GenesisDeployment + GenesisDeployment defines the basic genesis state used by deployment module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `denom` | [string](#string) | | | - | `rate` | [uint32](#uint32) | | | + | `deployment` | [akash.deployment.v1.Deployment](#akash.deployment.v1.Deployment) | | | + | `groups` | [Group](#akash.deployment.v1beta4.Group) | repeated | | - + - ### Params - Params defines the parameters for the x/take package + ### GenesisState + GenesisState stores slice of genesis deployment instance | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `denom_take_rates` | [DenomTakeRate](#akash.take.v1beta3.DenomTakeRate) | repeated | denom -> % take rate | - | `default_take_rate` | [uint32](#uint32) | | | + | `deployments` | [GenesisDeployment](#akash.deployment.v1beta4.GenesisDeployment) | repeated | | + | `params` | [Params](#akash.deployment.v1beta4.Params) | | | @@ -2266,14 +1984,14 @@ - +

Top

- ## akash/deployment/v1beta3/groupmsg.proto + ## akash/deployment/v1beta4/groupmsg.proto - + ### MsgCloseGroup MsgCloseGroup defines SDK message to close a single Group within a Deployment. @@ -2281,14 +1999,14 @@ | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta3.GroupID) | | | + | `id` | [akash.deployment.v1.GroupID](#akash.deployment.v1.GroupID) | | | - + ### MsgCloseGroupResponse MsgCloseGroupResponse defines the Msg/CloseGroup response type. @@ -2298,7 +2016,7 @@ - + ### MsgPauseGroup MsgPauseGroup defines SDK message to close a single Group within a Deployment. @@ -2306,14 +2024,14 @@ | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta3.GroupID) | | | + | `id` | [akash.deployment.v1.GroupID](#akash.deployment.v1.GroupID) | | | - + ### MsgPauseGroupResponse MsgPauseGroupResponse defines the Msg/PauseGroup response type. @@ -2323,7 +2041,7 @@ - + ### MsgStartGroup MsgStartGroup defines SDK message to close a single Group within a Deployment. @@ -2331,14 +2049,14 @@ | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta3.GroupID) | | | + | `id` | [akash.deployment.v1.GroupID](#akash.deployment.v1.GroupID) | | | - + ### MsgStartGroupResponse MsgStartGroupResponse defines the Msg/StartGroup response type. @@ -2357,25 +2075,41 @@ - +


- ## akash/deployment/v1beta3/resourceunit.proto + ## akash/deployment/v1beta4/paramsmsg.proto - + - ### ResourceUnit - ResourceUnit extends Resources and adds Count along with the Price + ### MsgUpdateParams + MsgUpdateParams is the Msg/UpdateParams request type. + +Since: akash v1.0.0 | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `resource` | [akash.base.v1beta3.Resources](#akash.base.v1beta3.Resources) | | | - | `count` | [uint32](#uint32) | | | - | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | + | `authority` | [string](#string) | | authority is the address of the governance account. | + | `params` | [Params](#akash.deployment.v1beta4.Params) | | params defines the x/deployment parameters to update. + +NOTE: All parameters must be supplied. | + + + + + + + + ### MsgUpdateParamsResponse + MsgUpdateParamsResponse defines the response structure for executing a +MsgUpdateParams message. + +Since: akash v1.0.0 + @@ -2390,25 +2124,23 @@ - +


- ## akash/deployment/v1beta3/group.proto + ## akash/escrow/v1/accountid.proto - + - ### Group - Group stores group id, state and specifications of group + ### AccountID + AccountID is the account identifier | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `group_id` | [GroupID](#akash.deployment.v1beta3.GroupID) | | | - | `state` | [Group.State](#akash.deployment.v1beta3.Group.State) | | | - | `group_spec` | [GroupSpec](#akash.deployment.v1beta3.GroupSpec) | | | - | `created_at` | [int64](#int64) | | | + | `scope` | [string](#string) | | | + | `xid` | [string](#string) | | | @@ -2416,21 +2148,6 @@ - - - - ### Group.State - State is an enum which refers to state of group - - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | - | open | 1 | GroupOpen denotes state for group open | - | paused | 2 | GroupOrdered denotes state for group ordered | - | insufficient_funds | 3 | GroupInsufficientFunds denotes state for group insufficient_funds | - | closed | 4 | GroupClosed denotes state for group closed | - - @@ -2439,24 +2156,29 @@ - +


- ## akash/deployment/v1beta3/groupid.proto + ## akash/escrow/v1/account.proto - + - ### GroupID - GroupID stores owner, deployment sequence number and group sequence number + ### Account + Account stores state for an escrow account | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `gseq` | [uint32](#uint32) | | | + | `id` | [AccountID](#akash.escrow.v1.AccountID) | | unique identifier for this escrow account | + | `owner` | [string](#string) | | bech32 encoded account address of the owner of this escrow account | + | `state` | [Account.State](#akash.escrow.v1.Account.State) | | current state of this escrow account | + | `balance` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | unspent coins received from the owner's wallet | + | `transferred` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | total coins spent by this account | + | `settled_at` | [int64](#int64) | | block height at which this account was last settled | + | `depositor` | [string](#string) | | bech32 encoded account address of the depositor. If depositor is same as the owner, then any incoming coins are added to the Balance. If depositor isn't same as the owner, then any incoming coins are added to the Funds. | + | `funds` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | Funds are unspent coins received from the (non-Owner) Depositor's wallet. If there are any funds, they should be spent before spending the Balance. | @@ -2464,6 +2186,20 @@ + + + + ### Account.State + State stores state for an escrow account + + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | AccountStateInvalid is an invalid state | + | open | 1 | AccountOpen is the state when an account is open | + | closed | 2 | AccountClosed is the state when an account is closed | + | overdrawn | 3 | AccountOverdrawn is the state when an account is overdrawn | + + @@ -2472,184 +2208,158 @@ - +
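The `depositor`, `funds`, and `balance` descriptions above imply a fixed spending order: drain `funds` first, then `balance`. A small sketch of that rule, using plain float64 arithmetic for illustration (the chain itself works with DecCoin values and its own settlement logic):

```go
package main

import "fmt"

// spend drains the documented "funds" bucket before "balance",
// returning the updated buckets and any uncovered remainder.
func spend(balance, funds, amount float64) (newBalance, newFunds, shortfall float64) {
	fromFunds := minF(funds, amount)
	funds -= fromFunds
	amount -= fromFunds

	fromBalance := minF(balance, amount)
	balance -= fromBalance
	amount -= fromBalance

	return balance, funds, amount
}

func minF(a, b float64) float64 {
	if a < b {
		return a
	}
	return b
}

func main() {
	// Hypothetical numbers: 10 uakt of owner balance, 4 uakt of depositor funds, 6 uakt owed.
	balance, funds, short := spend(10, 4, 6)
	fmt.Println(balance, funds, short) // 8 0 0: funds are exhausted before balance is touched
}
```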


- ## akash/deployment/v1beta3/deployment.proto + ## akash/deployment/v1beta4/query.proto - + - ### Deployment - Deployment stores deploymentID, state and version details + ### QueryDeploymentRequest + QueryDeploymentRequest is request type for the Query/Deployment RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployment_id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | - | `state` | [Deployment.State](#akash.deployment.v1beta3.Deployment.State) | | | - | `version` | [bytes](#bytes) | | | - | `created_at` | [int64](#int64) | | | + | `id` | [akash.deployment.v1.DeploymentID](#akash.deployment.v1.DeploymentID) | | | - + - ### DeploymentFilters - DeploymentFilters defines filters used to filter deployments + ### QueryDeploymentResponse + QueryDeploymentResponse is response type for the Query/Deployment RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `state` | [string](#string) | | | + | `deployment` | [akash.deployment.v1.Deployment](#akash.deployment.v1.Deployment) | | | + | `groups` | [Group](#akash.deployment.v1beta4.Group) | repeated | | + | `escrow_account` | [akash.escrow.v1.Account](#akash.escrow.v1.Account) | | | - + - ### DeploymentID - DeploymentID stores owner and sequence number + ### QueryDeploymentsRequest + QueryDeploymentsRequest is request type for the Query/Deployments RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | + | `filters` | [DeploymentFilters](#akash.deployment.v1beta4.DeploymentFilters) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | - - - + - ### Deployment.State - State is an enum which refers to state of deployment + ### QueryDeploymentsResponse + QueryDeploymentsResponse is response type for the Query/Deployments RPC method - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | - | active | 1 | DeploymentActive denotes state for deployment active | - | closed | 2 | DeploymentClosed denotes state for deployment closed | - - - - - - - + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `deployments` | [QueryDeploymentResponse](#akash.deployment.v1beta4.QueryDeploymentResponse) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | - -


- ## akash/deployment/v1beta3/query.proto - + - ### QueryDeploymentRequest - QueryDeploymentRequest is request type for the Query/Deployment RPC method + ### QueryGroupRequest + QueryGroupRequest is request type for the Query/Group RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | + | `id` | [akash.deployment.v1.GroupID](#akash.deployment.v1.GroupID) | | | - + - ### QueryDeploymentResponse - QueryDeploymentResponse is response type for the Query/Deployment RPC method + ### QueryGroupResponse + QueryGroupResponse is response type for the Query/Group RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployment` | [Deployment](#akash.deployment.v1beta3.Deployment) | | | - | `groups` | [Group](#akash.deployment.v1beta3.Group) | repeated | | - | `escrow_account` | [akash.escrow.v1beta3.Account](#akash.escrow.v1beta3.Account) | | | + | `group` | [Group](#akash.deployment.v1beta4.Group) | | | - + - ### QueryDeploymentsRequest - QueryDeploymentsRequest is request type for the Query/Deployments RPC method + ### QueryParamsRequest + QueryParamsRequest is the request type for the Query/Params RPC method. - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `filters` | [DeploymentFilters](#akash.deployment.v1beta3.DeploymentFilters) | | | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | - - - + - ### QueryDeploymentsResponse - QueryDeploymentsResponse is response type for the Query/Deployments RPC method + ### QueryParamsResponse + QueryParamsResponse is the response type for the Query/Params RPC method. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployments` | [QueryDeploymentResponse](#akash.deployment.v1beta3.QueryDeploymentResponse) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + | `params` | [Params](#akash.deployment.v1beta4.Params) | | params defines the parameters of the module. 
|

- ### QueryGroupRequest
- QueryGroupRequest is request type for the Query/Group RPC method
- | Field | Type | Label | Description |
- | ----- | ---- | ----- | ----------- |
- | `id` | [GroupID](#akash.deployment.v1beta3.GroupID) | | |

- ### QueryGroupResponse
- QueryGroupResponse is response type for the Query/Group RPC method
+ ### Query
+ Query defines the gRPC querier service
+ | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
+ | ----------- | ------------ | ------------- | ------------| ------- | -------- |
+ | `Deployments` | [QueryDeploymentsRequest](#akash.deployment.v1beta4.QueryDeploymentsRequest) | [QueryDeploymentsResponse](#akash.deployment.v1beta4.QueryDeploymentsResponse) | Deployments queries deployments | GET|/akash/deployment/v1beta4/deployments/list|
+ | `Deployment` | [QueryDeploymentRequest](#akash.deployment.v1beta4.QueryDeploymentRequest) | [QueryDeploymentResponse](#akash.deployment.v1beta4.QueryDeploymentResponse) | Deployment queries deployment details | GET|/akash/deployment/v1beta4/deployments/info|
+ | `Group` | [QueryGroupRequest](#akash.deployment.v1beta4.QueryGroupRequest) | [QueryGroupResponse](#akash.deployment.v1beta4.QueryGroupResponse) | Group queries group details | GET|/akash/deployment/v1beta4/groups/info|
+ | `Params` | [QueryParamsRequest](#akash.deployment.v1beta4.QueryParamsRequest) | [QueryParamsResponse](#akash.deployment.v1beta4.QueryParamsResponse) | Params returns the parameters of the x/deployment module. | GET|/akash/deployment/v1beta4/params|
- | Field | Type | Label | Description |
- | ----- | ---- | ----- | ----------- |
- | `group` | [Group](#akash.deployment.v1beta3.Group) | | |
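Since these queries are served through grpc-gateway, each GET endpoint in the table can be called with an ordinary HTTP client. A minimal sketch, assuming a hypothetical node REST address of `http://localhost:1317` and decoding the response generically instead of into the generated `QueryParamsResponse` type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical LCD/REST address; substitute a real node endpoint.
	const node = "http://localhost:1317"

	resp, err := http.Get(node + "/akash/deployment/v1beta4/params")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode into a generic map since the concrete response type
	// (QueryParamsResponse) is documented above but not imported here.
	var out map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out["params"])
}
```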


+ ## akash/deployment/v1beta4/service.proto
@@ -2659,131 +2369,160 @@

- ### Query
- Query defines the gRPC querier service
+ ### Msg
+ Msg defines the x/deployment Msg service.

| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
| ----------- | ------------ | ------------- | ------------| ------- | -------- |
- | `Deployments` | [QueryDeploymentsRequest](#akash.deployment.v1beta3.QueryDeploymentsRequest) | [QueryDeploymentsResponse](#akash.deployment.v1beta3.QueryDeploymentsResponse) | Deployments queries deployments | GET|/akash/deployment/v1beta3/deployments/list|
- | `Deployment` | [QueryDeploymentRequest](#akash.deployment.v1beta3.QueryDeploymentRequest) | [QueryDeploymentResponse](#akash.deployment.v1beta3.QueryDeploymentResponse) | Deployment queries deployment details | GET|/akash/deployment/v1beta3/deployments/info|
- | `Group` | [QueryGroupRequest](#akash.deployment.v1beta3.QueryGroupRequest) | [QueryGroupResponse](#akash.deployment.v1beta3.QueryGroupResponse) | Group queries group details | GET|/akash/deployment/v1beta3/groups/info|
+ | `CreateDeployment` | [MsgCreateDeployment](#akash.deployment.v1beta4.MsgCreateDeployment) | [MsgCreateDeploymentResponse](#akash.deployment.v1beta4.MsgCreateDeploymentResponse) | CreateDeployment defines a method to create new deployment given proper inputs. | |
+ | `DepositDeployment` | [.akash.deployment.v1.MsgDepositDeployment](#akash.deployment.v1.MsgDepositDeployment) | [.akash.deployment.v1.MsgDepositDeploymentResponse](#akash.deployment.v1.MsgDepositDeploymentResponse) | DepositDeployment deposits more funds into the deployment account | |
+ | `UpdateDeployment` | [MsgUpdateDeployment](#akash.deployment.v1beta4.MsgUpdateDeployment) | [MsgUpdateDeploymentResponse](#akash.deployment.v1beta4.MsgUpdateDeploymentResponse) | UpdateDeployment defines a method to update a deployment given proper inputs. | |
+ | `CloseDeployment` | [MsgCloseDeployment](#akash.deployment.v1beta4.MsgCloseDeployment) | [MsgCloseDeploymentResponse](#akash.deployment.v1beta4.MsgCloseDeploymentResponse) | CloseDeployment defines a method to close a deployment given proper inputs. | |
+ | `CloseGroup` | [MsgCloseGroup](#akash.deployment.v1beta4.MsgCloseGroup) | [MsgCloseGroupResponse](#akash.deployment.v1beta4.MsgCloseGroupResponse) | CloseGroup defines a method to close a group of a deployment given proper inputs. | |
+ | `PauseGroup` | [MsgPauseGroup](#akash.deployment.v1beta4.MsgPauseGroup) | [MsgPauseGroupResponse](#akash.deployment.v1beta4.MsgPauseGroupResponse) | PauseGroup defines a method to pause a group of a deployment given proper inputs. | |
+ | `StartGroup` | [MsgStartGroup](#akash.deployment.v1beta4.MsgStartGroup) | [MsgStartGroupResponse](#akash.deployment.v1beta4.MsgStartGroupResponse) | StartGroup defines a method to start a group of a deployment given proper inputs. | |
+ | `UpdateParams` | [MsgUpdateParams](#akash.deployment.v1beta4.MsgUpdateParams) | [MsgUpdateParamsResponse](#akash.deployment.v1beta4.MsgUpdateParamsResponse) | UpdateParams defines a governance operation for updating the x/deployment module parameters. The authority is hard-coded to the x/gov module account.
+ Since: akash v1.0.0 | |


- ## akash/deployment/v1beta3/deploymentmsg.proto + ## akash/discovery/v1/client_info.proto - + - ### MsgCloseDeployment - MsgCloseDeployment defines an SDK message for closing deployment + ### ClientInfo + ClientInfo akash specific client info | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | + | `api_version` | [string](#string) | | | - - + - ### MsgCloseDeploymentResponse - MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. + + + + + + + +


+ ## akash/discovery/v1/akash.proto - + - ### MsgCreateDeployment - MsgCreateDeployment defines an SDK message for creating deployment + ### Akash + Akash akash specific RPC parameters | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | - | `groups` | [GroupSpec](#akash.deployment.v1beta3.GroupSpec) | repeated | | - | `version` | [bytes](#bytes) | | | - | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - | `depositor` | [string](#string) | | Depositor pays for the deposit | + | `client_info` | [ClientInfo](#akash.discovery.v1.ClientInfo) | | | - - + - ### MsgCreateDeploymentResponse - MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. + + + + + + + +


+ ## akash/escrow/v1/fractional_payment.proto - + - ### MsgDepositDeployment - MsgDepositDeployment deposits more funds into the deposit account + ### FractionalPayment + Payment stores state for a payment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | - | `amount` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - | `depositor` | [string](#string) | | Depositor pays for the deposit | + | `account_id` | [AccountID](#akash.escrow.v1.AccountID) | | | + | `payment_id` | [string](#string) | | | + | `owner` | [string](#string) | | | + | `state` | [FractionalPayment.State](#akash.escrow.v1.FractionalPayment.State) | | | + | `rate` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | + | `balance` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | + | `withdrawn` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + + - + - ### MsgDepositDeploymentResponse - MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. + ### FractionalPayment.State + State defines payment state + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | PaymentStateInvalid is the state when the payment is invalid | + | open | 1 | PaymentStateOpen is the state when the payment is open | + | closed | 2 | PaymentStateClosed is the state when the payment is closed | + | overdrawn | 3 | PaymentStateOverdrawn is the state when the payment is overdrawn | - + - - + - ### MsgUpdateDeployment - MsgUpdateDeployment defines an SDK message for updating deployment + - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | - | `version` | [bytes](#bytes) | | | - + +
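The field set above suggests how a payment drains over time: `balance` is reduced at `rate` per block and accumulates in `withdrawn`, and a payment that can no longer cover its rate ends up `overdrawn`. The sketch below is only one plausible reading of those fields, not the module's actual settlement code:

```go
package main

import "fmt"

type paymentState string

const (
	paymentOpen      paymentState = "open"
	paymentOverdrawn paymentState = "overdrawn"
)

// settle advances an open payment by `blocks`, draining balance at
// `rate` per block into withdrawn, per the field descriptions above.
func settle(balance, withdrawn, rate float64, blocks int) (float64, float64, paymentState) {
	for i := 0; i < blocks; i++ {
		if balance < rate {
			return balance, withdrawn, paymentOverdrawn
		}
		balance -= rate
		withdrawn += rate
	}
	return balance, withdrawn, paymentOpen
}

func main() {
	// Hypothetical numbers: 100uakt escrowed, 7uakt per block lease rate.
	b, w, st := settle(100, 0, 7, 20)
	fmt.Println(b, w, st) // 2 98 overdrawn after 14 fully covered blocks
}
```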


+ ## akash/escrow/v1/genesis.proto - + - ### MsgUpdateDeploymentResponse - MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. + ### GenesisState + GenesisState defines the basic genesis state used by the escrow module + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `accounts` | [Account](#akash.escrow.v1.Account) | repeated | | + | `payments` | [FractionalPayment](#akash.escrow.v1.FractionalPayment) | repeated | | + + @@ -2797,55 +2536,78 @@ - +


- ## akash/deployment/v1beta3/service.proto
+ ## akash/escrow/v1/query.proto

+ ### QueryAccountsRequest
+ QueryAccountsRequest is request type for the Query/Accounts RPC method

+ | Field | Type | Label | Description |
+ | ----- | ---- | ----- | ----------- |
+ | `scope` | [string](#string) | | |
+ | `xid` | [string](#string) | | |
+ | `owner` | [string](#string) | | |
+ | `state` | [string](#string) | | |
+ | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

- ### Msg
- Msg defines the deployment Msg service.
+ ### QueryAccountsResponse
+ QueryAccountsResponse is response type for the Query/Accounts RPC method

- | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
- | ----------- | ------------ | ------------- | ------------| ------- | -------- |
- | `CreateDeployment` | [MsgCreateDeployment](#akash.deployment.v1beta3.MsgCreateDeployment) | [MsgCreateDeploymentResponse](#akash.deployment.v1beta3.MsgCreateDeploymentResponse) | CreateDeployment defines a method to create new deployment given proper inputs. | |
- | `DepositDeployment` | [MsgDepositDeployment](#akash.deployment.v1beta3.MsgDepositDeployment) | [MsgDepositDeploymentResponse](#akash.deployment.v1beta3.MsgDepositDeploymentResponse) | DepositDeployment deposits more funds into the deployment account | |
- | `UpdateDeployment` | [MsgUpdateDeployment](#akash.deployment.v1beta3.MsgUpdateDeployment) | [MsgUpdateDeploymentResponse](#akash.deployment.v1beta3.MsgUpdateDeploymentResponse) | UpdateDeployment defines a method to update a deployment given proper inputs. | |
- | `CloseDeployment` | [MsgCloseDeployment](#akash.deployment.v1beta3.MsgCloseDeployment) | [MsgCloseDeploymentResponse](#akash.deployment.v1beta3.MsgCloseDeploymentResponse) | CloseDeployment defines a method to close a deployment given proper inputs. | |
- | `CloseGroup` | [MsgCloseGroup](#akash.deployment.v1beta3.MsgCloseGroup) | [MsgCloseGroupResponse](#akash.deployment.v1beta3.MsgCloseGroupResponse) | CloseGroup defines a method to close a group of a deployment given proper inputs. | |
- | `PauseGroup` | [MsgPauseGroup](#akash.deployment.v1beta3.MsgPauseGroup) | [MsgPauseGroupResponse](#akash.deployment.v1beta3.MsgPauseGroupResponse) | PauseGroup defines a method to close a group of a deployment given proper inputs. | |
- | `StartGroup` | [MsgStartGroup](#akash.deployment.v1beta3.MsgStartGroup) | [MsgStartGroupResponse](#akash.deployment.v1beta3.MsgStartGroupResponse) | StartGroup defines a method to close a group of a deployment given proper inputs. | |

+ | Field | Type | Label | Description |
+ | ----- | ---- | ----- | ----------- |
+ | `accounts` | [Account](#akash.escrow.v1.Account) | repeated | |
+ | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |


+ ### QueryPaymentsRequest
+ QueryPaymentsRequest is request type for the Query/Payments RPC method

- ## akash/deployment/v1beta3/authz.proto

+ | Field | Type | Label | Description |
+ | ----- | ---- | ----- | ----------- |
+ | `scope` | [string](#string) | | |
+ | `xid` | [string](#string) | | |
+ | `id` | [string](#string) | | |
+ | `owner` | [string](#string) | | |
+ | `state` | [string](#string) | | |
+ | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

- ### DepositDeploymentAuthorization
- DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from
-the granter's account for a deployment.

+ ### QueryPaymentsResponse
+ QueryPaymentsResponse is response type for the Query/Payments RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
- | `spend_limit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | SpendLimit is the amount the grantee is authorized to spend from the granter's account for the purpose of deployment. |
+ | `payments` | [FractionalPayment](#akash.escrow.v1.FractionalPayment) | repeated | |
+ | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |


+ - ## akash/deployment/v1beta3/genesis.proto - + ### Query + Query defines the gRPC querier service + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `Accounts` | [QueryAccountsRequest](#akash.escrow.v1.QueryAccountsRequest) | [QueryAccountsResponse](#akash.escrow.v1.QueryAccountsResponse) | buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME Accounts queries all accounts | GET|/akash/escrow/v1/types/accounts/list| + | `Payments` | [QueryPaymentsRequest](#akash.escrow.v1.QueryPaymentsRequest) | [QueryPaymentsResponse](#akash.escrow.v1.QueryPaymentsResponse) | buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME Payments queries all payments | GET|/akash/escrow/v1/types/payments/list| - - - ### GenesisDeployment - GenesisDeployment defines the basic genesis state used by deployment module + - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `deployment` | [Deployment](#akash.deployment.v1beta3.Deployment) | | | - | `groups` | [Group](#akash.deployment.v1beta3.Group) | repeated | | - + +
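These two endpoints are likewise plain grpc-gateway GETs, with the request fields shown above (`scope`, `xid`, `owner`, `state`, pagination) passed as query parameters. A hedged sketch against a hypothetical local node, with example filter values:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Hypothetical REST endpoint; the filter values below are examples only.
	q := url.Values{}
	q.Set("scope", "deployment")
	q.Set("state", "open")

	resp, err := http.Get("http://localhost:1317/akash/escrow/v1/types/accounts/list?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out struct {
		Accounts []json.RawMessage `json:"accounts"` // documented as repeated Account
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println("accounts returned:", len(out.Accounts))
}
```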


+ ## akash/gov/v1beta3/params.proto - + - ### GenesisState - GenesisState stores slice of genesis deployment instance + ### DepositParams + DepositParams defines the parameters for the x/gov module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployments` | [GenesisDeployment](#akash.deployment.v1beta3.GenesisDeployment) | repeated | | - | `params` | [Params](#akash.deployment.v1beta3.Params) | | | + | `min_initial_deposit_rate` | [bytes](#bytes) | | min_initial_deposit_rate minimum % of TotalDeposit author of the proposal must put in order for proposal tx to be committed | @@ -2909,24 +2665,22 @@ the granter's account for a deployment. - +
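In other words, a proposal should be rejected at submission time unless the author's own deposit covers at least `min_initial_deposit_rate` of the total required deposit. A tiny sketch of that check with hypothetical numbers (on chain the rate is a fixed-point Dec, not a float):

```go
package main

import "fmt"

// meetsInitialDeposit applies the documented rule: the author must put up
// at least minInitialDepositRate of the total required deposit.
func meetsInitialDeposit(authorDeposit, totalDeposit, minInitialDepositRate float64) bool {
	return authorDeposit >= totalDeposit*minInitialDepositRate
}

func main() {
	// Hypothetical: 1000 AKT total deposit and a 25% initial deposit rate.
	fmt.Println(meetsInitialDeposit(200, 1000, 0.25)) // false
	fmt.Println(meetsInitialDeposit(250, 1000, 0.25)) // true
}
```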


- ## akash/deployment/v1beta3/groupspec.proto
+ ## akash/gov/v1beta3/genesis.proto

- ### GroupSpec
- GroupSpec stores group specifications
+ ### GenesisState
+ GenesisState stores the genesis state for the x/gov module

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
- | `name` | [string](#string) | | |
- | `requirements` | [akash.base.v1beta3.PlacementRequirements](#akash.base.v1beta3.PlacementRequirements) | | |
- | `resources` | [ResourceUnit](#akash.deployment.v1beta3.ResourceUnit) | repeated | |
+ | `deposit_params` | [DepositParams](#akash.gov.v1beta3.DepositParams) | | |


- ## akash/deployment/v1beta3/params.proto
+ ## akash/inflation/v1beta2/params.proto

### Params
Params defines the parameters for the x/inflation module

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
- | `min_deposits` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | repeated | |
+ | `inflation_decay_factor` | [string](#string) | | InflationDecayFactor is the number of years it takes inflation to halve. |
+ | `initial_inflation` | [string](#string) | | InitialInflation is the rate at which inflation starts at genesis. It is a decimal value in the range [0.0, 100.0]. |
+ | `variance` | [string](#string) | | Variance defines the fraction by which inflation can vary from ideal inflation in a block. It is a decimal value in the range [0.0, 1.0]. |
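Read together, the three parameters describe a halving curve with a tolerance band: ideal inflation after `t` years is `initial_inflation * 0.5^(t / inflation_decay_factor)`, and block inflation may deviate from that ideal by at most `variance`. The sketch below illustrates that reading with hypothetical values; it is not the module's exact implementation:

```go
package main

import (
	"fmt"
	"math"
)

// idealInflation returns the ideal inflation (in percent) after t years,
// given the documented halving parameters.
func idealInflation(initialInflation, decayFactorYears, t float64) float64 {
	return initialInflation * math.Pow(0.5, t/decayFactorYears)
}

func main() {
	// Hypothetical parameters: 8% initial inflation, halving every 2 years, 5% variance.
	const initial, decay, variance = 8.0, 2.0, 0.05

	ideal := idealInflation(initial, decay, 3) // three years after genesis
	lo, hi := ideal*(1-variance), ideal*(1+variance)
	fmt.Printf("ideal %.3f%%, allowed band [%.3f%%, %.3f%%]\n", ideal, lo, hi)
}
```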


- ## akash/deployment/v1beta2/groupmsg.proto
+ ## akash/inflation/v1beta2/genesis.proto

- ### MsgCloseGroup
- MsgCloseGroup defines SDK message to close a single Group within a Deployment.
+ ### GenesisState
+ GenesisState stores the genesis state for the x/inflation module

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
- | `id` | [GroupID](#akash.deployment.v1beta2.GroupID) | | |
+ | `params` | [Params](#akash.inflation.v1beta2.Params) | | |

- ### MsgCloseGroupResponse
- MsgCloseGroupResponse defines the Msg/CloseGroup response type.


+ ## akash/inflation/v1beta3/params.proto

- ### MsgPauseGroup
- MsgPauseGroup defines SDK message to close a single Group within a Deployment.
+ ### Params
+ Params defines the parameters for the x/inflation module

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
- | `id` | [GroupID](#akash.deployment.v1beta2.GroupID) | | |
+ | `inflation_decay_factor` | [string](#string) | | InflationDecayFactor is the number of years it takes inflation to halve. |
+ | `initial_inflation` | [string](#string) | | InitialInflation is the rate at which inflation starts at genesis. It is a decimal value in the range [0.0, 100.0]. |
+ | `variance` | [string](#string) | | Variance defines the fraction by which inflation can vary from ideal inflation in a block. It is a decimal value in the range [0.0, 1.0]. |

- ### MsgPauseGroupResponse
- MsgPauseGroupResponse defines the Msg/PauseGroup response type.


+ ## akash/inflation/v1beta3/genesis.proto

- ### MsgStartGroup
- MsgStartGroup defines SDK message to close a single Group within a Deployment.
+ ### GenesisState
+ GenesisState stores the genesis state for the x/inflation module

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
- | `id` | [GroupID](#akash.deployment.v1beta2.GroupID) | | |
+ | `params` | [Params](#akash.inflation.v1beta3.Params) | | |

- ### MsgStartGroupResponse
- MsgStartGroupResponse defines the Msg/StartGroup response type.


- ## akash/deployment/v1beta2/group.proto + ## akash/market/v1/bid.proto - + - ### Group - Group stores group id, state and specifications of group + ### BidID + BidID stores owner and all other seq numbers +A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `group_id` | [GroupID](#akash.deployment.v1beta2.GroupID) | | | - | `state` | [Group.State](#akash.deployment.v1beta2.Group.State) | | | - | `group_spec` | [GroupSpec](#akash.deployment.v1beta2.GroupSpec) | | | - | `created_at` | [int64](#int64) | | | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint32](#uint32) | | | + | `oseq` | [uint32](#uint32) | | | + | `provider` | [string](#string) | | | @@ -3090,21 +2852,6 @@ the granter's account for a deployment. - - - - ### Group.State - State is an enum which refers to state of group - - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | - | open | 1 | GroupOpen denotes state for group open | - | paused | 2 | GroupOrdered denotes state for group ordered | - | insufficient_funds | 3 | GroupInsufficientFunds denotes state for group insufficient_funds | - | closed | 4 | GroupClosed denotes state for group closed | - - @@ -3113,17 +2860,17 @@ the granter's account for a deployment. - +


- ## akash/deployment/v1beta2/groupid.proto + ## akash/market/v1/order.proto - + - ### GroupID - GroupID stores owner, deployment sequence number and group sequence number + ### OrderID + OrderID stores owner and all other seq numbers | Field | Type | Label | Description | @@ -3131,6 +2878,7 @@ the granter's account for a deployment. | `owner` | [string](#string) | | | | `dseq` | [uint64](#uint64) | | | | `gseq` | [uint32](#uint32) | | | + | `oseq` | [uint32](#uint32) | | | @@ -3146,58 +2894,45 @@ the granter's account for a deployment. - +


- ## akash/deployment/v1beta2/deployment.proto + ## akash/market/v1/lease.proto - + - ### Deployment - Deployment stores deploymentID, state and version details + ### Lease + Lease stores LeaseID, state of lease and price | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployment_id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | - | `state` | [Deployment.State](#akash.deployment.v1beta2.Deployment.State) | | | - | `version` | [bytes](#bytes) | | | + | `id` | [LeaseID](#akash.market.v1.LeaseID) | | | + | `state` | [Lease.State](#akash.market.v1.Lease.State) | | | + | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | | `created_at` | [int64](#int64) | | | + | `closed_on` | [int64](#int64) | | | - - - ### DeploymentFilters - DeploymentFilters defines filters used to filter deployments - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `state` | [string](#string) | | | - - - - - - - + - ### DeploymentID - DeploymentID stores owner and sequence number + ### LeaseID + LeaseID stores bid details of lease | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint32](#uint32) | | | + | `oseq` | [uint32](#uint32) | | | + | `provider` | [string](#string) | | | @@ -3206,16 +2941,17 @@ the granter's account for a deployment. - + - ### Deployment.State - State is an enum which refers to state of deployment + ### Lease.State + State is an enum which refers to state of lease | Name | Number | Description | | ---- | ------ | ----------- | | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | - | active | 1 | DeploymentActive denotes state for deployment active | - | closed | 2 | DeploymentClosed denotes state for deployment closed | + | active | 1 | LeaseActive denotes state for lease active | + | insufficient_funds | 2 | LeaseInsufficientFunds denotes state for lease insufficient_funds | + | closed | 3 | LeaseClosed denotes state for lease closed | @@ -3226,101 +2962,99 @@ the granter's account for a deployment. - +
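As noted under `BidID` above, a successful bid becomes a lease, and `LeaseID` carries the same five fields as `BidID`. A small illustrative mapping (mirror structs only, not the generated `akash.market.v1` types):

```go
package main

import "fmt"

// Mirrors of the documented identifier fields.
type BidID struct {
	Owner    string
	DSeq     uint64
	GSeq     uint32
	OSeq     uint32
	Provider string
}

// A successful bid becomes a lease with the same identifier fields.
type LeaseID BidID

func leaseIDFromBid(b BidID) LeaseID { return LeaseID(b) }

func main() {
	// Hypothetical identifier values.
	bid := BidID{Owner: "akash1exampletenant", DSeq: 100, GSeq: 1, OSeq: 1, Provider: "akash1exampleprovider"}
	fmt.Printf("%+v\n", leaseIDFromBid(bid))
}
```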


- ## akash/deployment/v1beta2/query.proto + ## akash/market/v1/event.proto - + - ### QueryDeploymentRequest - QueryDeploymentRequest is request type for the Query/Deployment RPC method + ### EventBidClosed + EventBidClosed | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | + | `id` | [BidID](#akash.market.v1.BidID) | | | - + - ### QueryDeploymentResponse - QueryDeploymentResponse is response type for the Query/Deployment RPC method + ### EventBidCreated + EventBidCreated | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployment` | [Deployment](#akash.deployment.v1beta2.Deployment) | | | - | `groups` | [Group](#akash.deployment.v1beta2.Group) | repeated | | - | `escrow_account` | [akash.escrow.v1beta2.Account](#akash.escrow.v1beta2.Account) | | | + | `id` | [BidID](#akash.market.v1.BidID) | | | + | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | - + - ### QueryDeploymentsRequest - QueryDeploymentsRequest is request type for the Query/Deployments RPC method + ### EventLeaseClosed + EventLeaseClosed | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `filters` | [DeploymentFilters](#akash.deployment.v1beta2.DeploymentFilters) | | | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + | `id` | [LeaseID](#akash.market.v1.LeaseID) | | | - + - ### QueryDeploymentsResponse - QueryDeploymentsResponse is response type for the Query/Deployments RPC method + ### EventLeaseCreated + EventLeaseCreated | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployments` | [QueryDeploymentResponse](#akash.deployment.v1beta2.QueryDeploymentResponse) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + | `id` | [LeaseID](#akash.market.v1.LeaseID) | | | + | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | - + - ### QueryGroupRequest - QueryGroupRequest is request type for the Query/Group RPC method + ### EventOrderClosed + EventOrderClosed | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta2.GroupID) | | | + | `id` | [OrderID](#akash.market.v1.OrderID) | | | - + - ### QueryGroupResponse - QueryGroupResponse is response type for the Query/Group RPC method + ### EventOrderCreated + EventOrderCreated | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `group` | [Group](#akash.deployment.v1beta2.Group) | | | + | `id` | [OrderID](#akash.market.v1.OrderID) | | | @@ -3332,130 +3066,184 @@ the granter's account for a deployment. + + - + + +


- ### Query - Query defines the gRPC querier service + ## akash/market/v1/filters.proto + + + + + + ### LeaseFilters + LeaseFilters defines flags for lease list filter + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint32](#uint32) | | | + | `oseq` | [uint32](#uint32) | | | + | `provider` | [string](#string) | | | + | `state` | [string](#string) | | | + + - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `Deployments` | [QueryDeploymentsRequest](#akash.deployment.v1beta2.QueryDeploymentsRequest) | [QueryDeploymentsResponse](#akash.deployment.v1beta2.QueryDeploymentsResponse) | Deployments queries deployments | GET|/akash/deployment/v1beta2/deployments/list| - | `Deployment` | [QueryDeploymentRequest](#akash.deployment.v1beta2.QueryDeploymentRequest) | [QueryDeploymentResponse](#akash.deployment.v1beta2.QueryDeploymentResponse) | Deployment queries deployment details | GET|/akash/deployment/v1beta2/deployments/info| - | `Group` | [QueryGroupRequest](#akash.deployment.v1beta2.QueryGroupRequest) | [QueryGroupResponse](#akash.deployment.v1beta2.QueryGroupResponse) | Group queries group details | GET|/akash/deployment/v1beta2/groups/info| + + + + + + + - +


- ## akash/deployment/v1beta2/deploymentmsg.proto + ## akash/market/v1beta5/resourcesoffer.proto - + - ### MsgCloseDeployment - MsgCloseDeployment defines an SDK message for closing deployment + ### ResourceOffer + ResourceOffer describes resources that provider is offering +for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | + | `resources` | [akash.base.resources.v1beta4.Resources](#akash.base.resources.v1beta4.Resources) | | | + | `count` | [uint32](#uint32) | | | - - + - ### MsgCloseDeploymentResponse - MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. + + + + + + + +


+ ## akash/market/v1beta5/bid.proto - + - ### MsgCreateDeployment - MsgCreateDeployment defines an SDK message for creating deployment + ### Bid + Bid stores BidID, state of bid and price | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | - | `groups` | [GroupSpec](#akash.deployment.v1beta2.GroupSpec) | repeated | | - | `version` | [bytes](#bytes) | | | - | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - | `depositor` | [string](#string) | | Depositor pays for the deposit | + | `id` | [akash.market.v1.BidID](#akash.market.v1.BidID) | | | + | `state` | [Bid.State](#akash.market.v1beta5.Bid.State) | | | + | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | + | `created_at` | [int64](#int64) | | | + | `resources_offer` | [ResourceOffer](#akash.market.v1beta5.ResourceOffer) | repeated | | + + - + - ### MsgCreateDeploymentResponse - MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. + ### Bid.State + BidState is an enum which refers to state of bid + + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | + | open | 1 | BidOpen denotes state for bid open | + | active | 2 | BidMatched denotes state for bid open | + | lost | 3 | BidLost denotes state for bid lost | + | closed | 4 | BidClosed denotes state for bid closed | + + + + + + + + + +


+ ## akash/market/v1beta5/bidmsg.proto - + - ### MsgDepositDeployment - MsgDepositDeployment deposits more funds into the deposit account + ### MsgCloseBid + MsgCloseBid defines an SDK message for closing bid | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | - | `amount` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - | `depositor` | [string](#string) | | Depositor pays for the deposit | + | `id` | [akash.market.v1.BidID](#akash.market.v1.BidID) | | | - + - ### MsgDepositDeploymentResponse - MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. + ### MsgCloseBidResponse + MsgCloseBidResponse defines the Msg/CloseBid response type. - + - ### MsgUpdateDeployment - MsgUpdateDeployment defines an SDK message for updating deployment + ### MsgCreateBid + MsgCreateBid defines an SDK message for creating Bid | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | - | `version` | [bytes](#bytes) | | | + | `order_id` | [akash.market.v1.OrderID](#akash.market.v1.OrderID) | | | + | `provider` | [string](#string) | | | + | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | + | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `resources_offer` | [ResourceOffer](#akash.market.v1beta5.ResourceOffer) | repeated | | - + - ### MsgUpdateDeploymentResponse - MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. + ### MsgCreateBidResponse + MsgCreateBidResponse defines the Msg/CreateBid response type. @@ -3471,55 +3259,46 @@ the granter's account for a deployment. - +
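A provider's bid therefore bundles the order it targets, the provider address, a price, a deposit, and the offered resources. The JSON sketch below only mirrors the documented field names with made-up values; the real message is signed and broadcast as a Cosmos SDK transaction, which is out of scope here:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Field names mirror the MsgCreateBid table above; all values are hypothetical.
	msg := map[string]any{
		"order_id": map[string]any{
			"owner": "akash1exampletenant", "dseq": 100, "gseq": 1, "oseq": 1,
		},
		"provider": "akash1exampleprovider",
		"price":    map[string]string{"denom": "uakt", "amount": "1.5"},
		"deposit":  map[string]string{"denom": "uakt", "amount": "500000"},
		"resources_offer": []map[string]any{
			{"count": 1}, // the `resources` sub-message is omitted for brevity
		},
	}
	out, _ := json.MarshalIndent(msg, "", "  ")
	fmt.Println(string(out))
}
```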


- ## akash/deployment/v1beta2/service.proto + ## akash/market/v1beta5/filters.proto - - - - - - - + - ### Msg - Msg defines the deployment Msg service. + ### BidFilters + BidFilters defines flags for bid list filter - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `CreateDeployment` | [MsgCreateDeployment](#akash.deployment.v1beta2.MsgCreateDeployment) | [MsgCreateDeploymentResponse](#akash.deployment.v1beta2.MsgCreateDeploymentResponse) | CreateDeployment defines a method to create new deployment given proper inputs. | | - | `DepositDeployment` | [MsgDepositDeployment](#akash.deployment.v1beta2.MsgDepositDeployment) | [MsgDepositDeploymentResponse](#akash.deployment.v1beta2.MsgDepositDeploymentResponse) | DepositDeployment deposits more funds into the deployment account | | - | `UpdateDeployment` | [MsgUpdateDeployment](#akash.deployment.v1beta2.MsgUpdateDeployment) | [MsgUpdateDeploymentResponse](#akash.deployment.v1beta2.MsgUpdateDeploymentResponse) | UpdateDeployment defines a method to update a deployment given proper inputs. | | - | `CloseDeployment` | [MsgCloseDeployment](#akash.deployment.v1beta2.MsgCloseDeployment) | [MsgCloseDeploymentResponse](#akash.deployment.v1beta2.MsgCloseDeploymentResponse) | CloseDeployment defines a method to close a deployment given proper inputs. | | - | `CloseGroup` | [MsgCloseGroup](#akash.deployment.v1beta2.MsgCloseGroup) | [MsgCloseGroupResponse](#akash.deployment.v1beta2.MsgCloseGroupResponse) | CloseGroup defines a method to close a group of a deployment given proper inputs. | | - | `PauseGroup` | [MsgPauseGroup](#akash.deployment.v1beta2.MsgPauseGroup) | [MsgPauseGroupResponse](#akash.deployment.v1beta2.MsgPauseGroupResponse) | PauseGroup defines a method to close a group of a deployment given proper inputs. | | - | `StartGroup` | [MsgStartGroup](#akash.deployment.v1beta2.MsgStartGroup) | [MsgStartGroupResponse](#akash.deployment.v1beta2.MsgStartGroupResponse) | StartGroup defines a method to close a group of a deployment given proper inputs. | | - - + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint32](#uint32) | | | + | `oseq` | [uint32](#uint32) | | | + | `provider` | [string](#string) | | | + | `state` | [string](#string) | | | - -


- ## akash/deployment/v1beta2/authz.proto - + - ### DepositDeploymentAuthorization - DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from -the granter's account for a deployment. + ### OrderFilters + OrderFilters defines flags for order list filter | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `spend_limit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | SpendLimit is the amount the grantee is authorized to spend from the granter's account for the purpose of deployment. | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint32](#uint32) | | | + | `oseq` | [uint32](#uint32) | | | + | `state` | [string](#string) | | | @@ -3535,3806 +3314,57 @@ the granter's account for a deployment. - +


- ## akash/deployment/v1beta2/genesis.proto + ## akash/market/v1beta5/params.proto - + - ### GenesisDeployment - GenesisDeployment defines the basic genesis state used by deployment module + ### Params + Params is the params for the x/market module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployment` | [Deployment](#akash.deployment.v1beta2.Deployment) | | | - | `groups` | [Group](#akash.deployment.v1beta2.Group) | repeated | | - - - - - - - - - ### GenesisState - GenesisState stores slice of genesis deployment instance - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `deployments` | [GenesisDeployment](#akash.deployment.v1beta2.GenesisDeployment) | repeated | | - | `params` | [Params](#akash.deployment.v1beta2.Params) | | | - - - - - - - - - - - - - - - - -


- - ## akash/deployment/v1beta2/groupspec.proto - - - - - - ### GroupSpec - GroupSpec stores group specifications - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `name` | [string](#string) | | | - | `requirements` | [akash.base.v1beta2.PlacementRequirements](#akash.base.v1beta2.PlacementRequirements) | | | - | `resources` | [Resource](#akash.deployment.v1beta2.Resource) | repeated | | - - - - - - - - - - - - - - - - -


- - ## akash/deployment/v1beta2/resource.proto - - - - - - ### Resource - Resource stores unit, total count and price of resource - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `resources` | [akash.base.v1beta2.ResourceUnits](#akash.base.v1beta2.ResourceUnits) | | | - | `count` | [uint32](#uint32) | | | - | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | - - - - - - - - - - - - - - - - -


- - ## akash/deployment/v1beta2/params.proto - - - - - - ### Params - Params defines the parameters for the x/deployment package - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `deployment_min_deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - - - - - - - - - - - - - - - - -


- - ## akash/deployment/v1beta1/group.proto - - - - - - ### Group - Group stores group id, state and specifications of group - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `group_id` | [GroupID](#akash.deployment.v1beta1.GroupID) | | | - | `state` | [Group.State](#akash.deployment.v1beta1.Group.State) | | | - | `group_spec` | [GroupSpec](#akash.deployment.v1beta1.GroupSpec) | | | - | `created_at` | [int64](#int64) | | | - - - - - - - - - ### GroupID - GroupID stores owner, deployment sequence number and group sequence number - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `gseq` | [uint32](#uint32) | | | - - - - - - - - - ### GroupSpec - GroupSpec stores group specifications - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `name` | [string](#string) | | | - | `requirements` | [akash.base.v1beta1.PlacementRequirements](#akash.base.v1beta1.PlacementRequirements) | | | - | `resources` | [Resource](#akash.deployment.v1beta1.Resource) | repeated | | - - - - - - - - - ### MsgCloseGroup - MsgCloseGroup defines SDK message to close a single Group within a Deployment. - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta1.GroupID) | | | - - - - - - - - - ### MsgCloseGroupResponse - MsgCloseGroupResponse defines the Msg/CloseGroup response type. - - - - - - - - - ### MsgPauseGroup - MsgPauseGroup defines SDK message to close a single Group within a Deployment. - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta1.GroupID) | | | - - - - - - - - - ### MsgPauseGroupResponse - MsgPauseGroupResponse defines the Msg/PauseGroup response type. - - - - - - - - - ### MsgStartGroup - MsgStartGroup defines SDK message to close a single Group within a Deployment. - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta1.GroupID) | | | - - - - - - - - - ### MsgStartGroupResponse - MsgStartGroupResponse defines the Msg/StartGroup response type. - - - - - - - - - ### Resource - Resource stores unit, total count and price of resource - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `resources` | [akash.base.v1beta1.ResourceUnits](#akash.base.v1beta1.ResourceUnits) | | | - | `count` | [uint32](#uint32) | | | - | `price` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - - - - - - - - - - - ### Group.State - State is an enum which refers to state of group - - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | - | open | 1 | GroupOpen denotes state for group open | - | paused | 2 | GroupOrdered denotes state for group ordered | - | insufficient_funds | 3 | GroupInsufficientFunds denotes state for group insufficient_funds | - | closed | 4 | GroupClosed denotes state for group closed | - - - - - - - - - - - -


- - ## akash/deployment/v1beta1/deployment.proto - - - - - - ### Deployment - Deployment stores deploymentID, state and version details - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `deployment_id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | - | `state` | [Deployment.State](#akash.deployment.v1beta1.Deployment.State) | | | - | `version` | [bytes](#bytes) | | | - | `created_at` | [int64](#int64) | | | - - - - - - - - - ### DeploymentFilters - DeploymentFilters defines filters used to filter deployments - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `state` | [string](#string) | | | - - - - - - - - - ### DeploymentID - DeploymentID stores owner and sequence number - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - - - - - - - - - ### MsgCloseDeployment - MsgCloseDeployment defines an SDK message for closing deployment - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | - - - - - - - - - ### MsgCloseDeploymentResponse - MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. - - - - - - - - - ### MsgCreateDeployment - MsgCreateDeployment defines an SDK message for creating deployment - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | - | `groups` | [GroupSpec](#akash.deployment.v1beta1.GroupSpec) | repeated | | - | `version` | [bytes](#bytes) | | | - | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - - - - - - - - - ### MsgCreateDeploymentResponse - MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. - - - - - - - - - ### MsgDepositDeployment - MsgDepositDeployment deposits more funds into the deposit account - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | - | `amount` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - - - - - - - - - ### MsgDepositDeploymentResponse - MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. - - - - - - - - - ### MsgUpdateDeployment - MsgUpdateDeployment defines an SDK message for updating deployment - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | - | `groups` | [GroupSpec](#akash.deployment.v1beta1.GroupSpec) | repeated | | - | `version` | [bytes](#bytes) | | | - - - - - - - - - ### MsgUpdateDeploymentResponse - MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. - - - - - - - - - - - ### Deployment.State - State is an enum which refers to state of deployment - - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | - | active | 1 | DeploymentActive denotes state for deployment active | - | closed | 2 | DeploymentClosed denotes state for deployment closed | - - - - - - - - - - ### Msg - Msg defines the deployment Msg service. 
- - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `CreateDeployment` | [MsgCreateDeployment](#akash.deployment.v1beta1.MsgCreateDeployment) | [MsgCreateDeploymentResponse](#akash.deployment.v1beta1.MsgCreateDeploymentResponse) | CreateDeployment defines a method to create new deployment given proper inputs. | | - | `DepositDeployment` | [MsgDepositDeployment](#akash.deployment.v1beta1.MsgDepositDeployment) | [MsgDepositDeploymentResponse](#akash.deployment.v1beta1.MsgDepositDeploymentResponse) | DepositDeployment deposits more funds into the deployment account | | - | `UpdateDeployment` | [MsgUpdateDeployment](#akash.deployment.v1beta1.MsgUpdateDeployment) | [MsgUpdateDeploymentResponse](#akash.deployment.v1beta1.MsgUpdateDeploymentResponse) | UpdateDeployment defines a method to update a deployment given proper inputs. | | - | `CloseDeployment` | [MsgCloseDeployment](#akash.deployment.v1beta1.MsgCloseDeployment) | [MsgCloseDeploymentResponse](#akash.deployment.v1beta1.MsgCloseDeploymentResponse) | CloseDeployment defines a method to close a deployment given proper inputs. | | - | `CloseGroup` | [MsgCloseGroup](#akash.deployment.v1beta1.MsgCloseGroup) | [MsgCloseGroupResponse](#akash.deployment.v1beta1.MsgCloseGroupResponse) | CloseGroup defines a method to close a group of a deployment given proper inputs. | | - | `PauseGroup` | [MsgPauseGroup](#akash.deployment.v1beta1.MsgPauseGroup) | [MsgPauseGroupResponse](#akash.deployment.v1beta1.MsgPauseGroupResponse) | PauseGroup defines a method to close a group of a deployment given proper inputs. | | - | `StartGroup` | [MsgStartGroup](#akash.deployment.v1beta1.MsgStartGroup) | [MsgStartGroupResponse](#akash.deployment.v1beta1.MsgStartGroupResponse) | StartGroup defines a method to close a group of a deployment given proper inputs. | | - - - - - - -


## akash/deployment/v1beta1/query.proto

### QueryDeploymentRequest
QueryDeploymentRequest is request type for the Query/Deployment RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | |

### QueryDeploymentResponse
QueryDeploymentResponse is response type for the Query/Deployment RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `deployment` | [Deployment](#akash.deployment.v1beta1.Deployment) | | |
| `groups` | [Group](#akash.deployment.v1beta1.Group) | repeated | |
| `escrow_account` | [akash.escrow.v1beta1.Account](#akash.escrow.v1beta1.Account) | | |

### QueryDeploymentsRequest
QueryDeploymentsRequest is request type for the Query/Deployments RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `filters` | [DeploymentFilters](#akash.deployment.v1beta1.DeploymentFilters) | | |
| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

### QueryDeploymentsResponse
QueryDeploymentsResponse is response type for the Query/Deployments RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `deployments` | [QueryDeploymentResponse](#akash.deployment.v1beta1.QueryDeploymentResponse) | repeated | |
| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |

### QueryGroupRequest
QueryGroupRequest is request type for the Query/Group RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `id` | [GroupID](#akash.deployment.v1beta1.GroupID) | | |

### QueryGroupResponse
QueryGroupResponse is response type for the Query/Group RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `group` | [Group](#akash.deployment.v1beta1.Group) | | |

### Query
Query defines the gRPC querier service

| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
| ----------- | ------------ | ------------- | ----------- | --------- | -------- |
| `Deployments` | [QueryDeploymentsRequest](#akash.deployment.v1beta1.QueryDeploymentsRequest) | [QueryDeploymentsResponse](#akash.deployment.v1beta1.QueryDeploymentsResponse) | Deployments queries deployments | GET | /akash/deployment/v1beta1/deployments/list |
| `Deployment` | [QueryDeploymentRequest](#akash.deployment.v1beta1.QueryDeploymentRequest) | [QueryDeploymentResponse](#akash.deployment.v1beta1.QueryDeploymentResponse) | Deployment queries deployment details | GET | /akash/deployment/v1beta1/deployments/info |
| `Group` | [QueryGroupRequest](#akash.deployment.v1beta1.QueryGroupRequest) | [QueryGroupResponse](#akash.deployment.v1beta1.QueryGroupResponse) | Group queries group details | GET | /akash/deployment/v1beta1/groups/info |
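Because the Query service above is exposed through the gRPC gateway, the list endpoints can be exercised with plain HTTP. A minimal sketch, assuming a public REST endpoint (placeholder host and owner address) and the usual grpc-gateway mapping of request fields to query parameters:

```sh
# List deployments owned by a given account; filters.* map to DeploymentFilters fields.
curl -s "https://rest.example.akash.network/akash/deployment/v1beta1/deployments/list?filters.owner=akash1exampleowner&pagination.limit=10" | jq '.deployments'
```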

## akash/deployment/v1beta1/authz.proto

### DepositDeploymentAuthorization
DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from the granter's account for a deployment.

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `spend_limit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | SpendLimit is the amount the grantee is authorized to spend from the granter's account for the purpose of deployment. |

## akash/deployment/v1beta1/genesis.proto

### GenesisDeployment
GenesisDeployment defines the basic genesis state used by deployment module

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `deployment` | [Deployment](#akash.deployment.v1beta1.Deployment) | | |
| `groups` | [Group](#akash.deployment.v1beta1.Group) | repeated | |

### GenesisState
GenesisState stores slice of genesis deployment instance

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `deployments` | [GenesisDeployment](#akash.deployment.v1beta1.GenesisDeployment) | repeated | |
| `params` | [Params](#akash.deployment.v1beta1.Params) | | |

## akash/deployment/v1beta1/params.proto

### Params
Params defines the parameters for the x/deployment package

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `deployment_min_deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | |

## akash/staking/v1beta3/genesis.proto

### GenesisState
GenesisState defines the basic genesis state used by the staking module

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `params` | [Params](#akash.staking.v1beta3.Params) | | |

## akash/staking/v1beta3/params.proto

### Params
Params extends the parameters for the x/staking module

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `min_commission_rate` | [string](#string) | | min_commission_rate is the chain-wide minimum commission rate that a validator can charge their delegators |

## akash/cert/v1beta3/query.proto

### CertificateResponse
CertificateResponse contains a single X509 certificate and its serial number

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `certificate` | [Certificate](#akash.cert.v1beta3.Certificate) | | |
| `serial` | [string](#string) | | |

### QueryCertificatesRequest
QueryCertificatesRequest is request type for the Query/Certificates RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `filter` | [CertificateFilter](#akash.cert.v1beta3.CertificateFilter) | | |
| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

### QueryCertificatesResponse
QueryCertificatesResponse is response type for the Query/Certificates RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `certificates` | [CertificateResponse](#akash.cert.v1beta3.CertificateResponse) | repeated | |
| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |

### Query
Query defines the gRPC querier service

| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
| ----------- | ------------ | ------------- | ----------- | --------- | -------- |
| `Certificates` | [QueryCertificatesRequest](#akash.cert.v1beta3.QueryCertificatesRequest) | [QueryCertificatesResponse](#akash.cert.v1beta3.QueryCertificatesResponse) | Certificates queries certificates | GET | /akash/cert/v1beta3/certificates/list |
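As with the other query services, the certificate list endpoint above is reachable over the REST gateway. A small sketch, assuming a placeholder host and the usual grpc-gateway mapping of the `filter` message onto `filter.*` query parameters:

```sh
# List the valid certificates registered by an owner
curl -s "https://rest.example.akash.network/akash/cert/v1beta3/certificates/list?filter.owner=akash1exampleowner&filter.state=valid" | jq '.certificates'
```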

## akash/cert/v1beta3/cert.proto

### Certificate
Certificate stores state, certificate and its public key

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `state` | [Certificate.State](#akash.cert.v1beta3.Certificate.State) | | |
| `cert` | [bytes](#bytes) | | |
| `pubkey` | [bytes](#bytes) | | |

### CertificateFilter
CertificateFilter defines filters used to filter certificates

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `serial` | [string](#string) | | |
| `state` | [string](#string) | | |

### CertificateID
CertificateID stores owner and sequence number

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `serial` | [string](#string) | | |

### MsgCreateCertificate
MsgCreateCertificate defines an SDK message for creating certificate

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `cert` | [bytes](#bytes) | | |
| `pubkey` | [bytes](#bytes) | | |

### MsgCreateCertificateResponse
MsgCreateCertificateResponse defines the Msg/CreateCertificate response type.

### MsgRevokeCertificate
MsgRevokeCertificate defines an SDK message for revoking certificate

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `id` | [CertificateID](#akash.cert.v1beta3.CertificateID) | | |

### MsgRevokeCertificateResponse
MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type.

### Certificate.State
State is an enum which refers to state of certificate

| Name | Number | Description |
| ---- | ------ | ----------- |
| invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state |
| valid | 1 | CertificateValid denotes a valid certificate |
| revoked | 2 | CertificateRevoked denotes a revoked certificate |

### Msg
Msg defines the cert Msg service

| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
| ----------- | ------------ | ------------- | ----------- | --------- | -------- |
| `CreateCertificate` | [MsgCreateCertificate](#akash.cert.v1beta3.MsgCreateCertificate) | [MsgCreateCertificateResponse](#akash.cert.v1beta3.MsgCreateCertificateResponse) | CreateCertificate defines a method to create new certificate given proper inputs. | | |
| `RevokeCertificate` | [MsgRevokeCertificate](#akash.cert.v1beta3.MsgRevokeCertificate) | [MsgRevokeCertificateResponse](#akash.cert.v1beta3.MsgRevokeCertificateResponse) | RevokeCertificate defines a method to revoke the certificate | | |

## akash/cert/v1beta3/genesis.proto

### GenesisCertificate
GenesisCertificate defines certificate entry at genesis

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `certificate` | [Certificate](#akash.cert.v1beta3.Certificate) | | |

### GenesisState
GenesisState defines the basic genesis state used by cert module

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `certificates` | [GenesisCertificate](#akash.cert.v1beta3.GenesisCertificate) | repeated | |

## akash/cert/v1beta2/query.proto

### CertificateResponse
CertificateResponse contains a single X509 certificate and its serial number

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `certificate` | [Certificate](#akash.cert.v1beta2.Certificate) | | |
| `serial` | [string](#string) | | |

### QueryCertificatesRequest
QueryCertificatesRequest is request type for the Query/Certificates RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `filter` | [CertificateFilter](#akash.cert.v1beta2.CertificateFilter) | | |
| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

### QueryCertificatesResponse
QueryCertificatesResponse is response type for the Query/Certificates RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `certificates` | [CertificateResponse](#akash.cert.v1beta2.CertificateResponse) | repeated | |
| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |

### Query
Query defines the gRPC querier service

| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
| ----------- | ------------ | ------------- | ----------- | --------- | -------- |
| `Certificates` | [QueryCertificatesRequest](#akash.cert.v1beta2.QueryCertificatesRequest) | [QueryCertificatesResponse](#akash.cert.v1beta2.QueryCertificatesResponse) | Certificates queries certificates | GET | /akash/cert/v1beta3/certificates/list |

## akash/cert/v1beta2/cert.proto

### Certificate
Certificate stores state, certificate and its public key

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `state` | [Certificate.State](#akash.cert.v1beta2.Certificate.State) | | |
| `cert` | [bytes](#bytes) | | |
| `pubkey` | [bytes](#bytes) | | |

### CertificateFilter
CertificateFilter defines filters used to filter certificates

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `serial` | [string](#string) | | |
| `state` | [string](#string) | | |

### CertificateID
CertificateID stores owner and sequence number

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `serial` | [string](#string) | | |

### MsgCreateCertificate
MsgCreateCertificate defines an SDK message for creating certificate

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `cert` | [bytes](#bytes) | | |
| `pubkey` | [bytes](#bytes) | | |

### MsgCreateCertificateResponse
MsgCreateCertificateResponse defines the Msg/CreateCertificate response type.

### MsgRevokeCertificate
MsgRevokeCertificate defines an SDK message for revoking certificate

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `id` | [CertificateID](#akash.cert.v1beta2.CertificateID) | | |

### MsgRevokeCertificateResponse
MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type.

### Certificate.State
State is an enum which refers to state of certificate

| Name | Number | Description |
| ---- | ------ | ----------- |
| invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state |
| valid | 1 | CertificateValid denotes a valid certificate |
| revoked | 2 | CertificateRevoked denotes a revoked certificate |

### Msg
Msg defines the cert Msg service

| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
| ----------- | ------------ | ------------- | ----------- | --------- | -------- |
| `CreateCertificate` | [MsgCreateCertificate](#akash.cert.v1beta2.MsgCreateCertificate) | [MsgCreateCertificateResponse](#akash.cert.v1beta2.MsgCreateCertificateResponse) | CreateCertificate defines a method to create new certificate given proper inputs. | | |
| `RevokeCertificate` | [MsgRevokeCertificate](#akash.cert.v1beta2.MsgRevokeCertificate) | [MsgRevokeCertificateResponse](#akash.cert.v1beta2.MsgRevokeCertificateResponse) | RevokeCertificate defines a method to revoke the certificate | | |

## akash/cert/v1beta2/genesis.proto

### GenesisCertificate
GenesisCertificate defines certificate entry at genesis

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `certificate` | [Certificate](#akash.cert.v1beta2.Certificate) | | |

### GenesisState
GenesisState defines the basic genesis state used by cert module

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `certificates` | [GenesisCertificate](#akash.cert.v1beta2.GenesisCertificate) | repeated | |

## akash/escrow/v1beta3/types.proto

### Account
Account stores state for an escrow account

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `id` | [AccountID](#akash.escrow.v1beta3.AccountID) | | unique identifier for this escrow account |
| `owner` | [string](#string) | | bech32 encoded account address of the owner of this escrow account |
| `state` | [Account.State](#akash.escrow.v1beta3.Account.State) | | current state of this escrow account |
| `balance` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | unspent coins received from the owner's wallet |
| `transferred` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | total coins spent by this account |
| `settled_at` | [int64](#int64) | | block height at which this account was last settled |
| `depositor` | [string](#string) | | bech32 encoded account address of the depositor. If depositor is same as the owner, then any incoming coins are added to the Balance. If depositor isn't same as the owner, then any incoming coins are added to the Funds. |
| `funds` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | Funds are unspent coins received from the (non-Owner) Depositor's wallet. If there are any funds, they should be spent before spending the Balance. |

### AccountID
AccountID is the account identifier

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `scope` | [string](#string) | | |
| `xid` | [string](#string) | | |

### FractionalPayment
Payment stores state for a payment

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `account_id` | [AccountID](#akash.escrow.v1beta3.AccountID) | | |
| `payment_id` | [string](#string) | | |
| `owner` | [string](#string) | | |
| `state` | [FractionalPayment.State](#akash.escrow.v1beta3.FractionalPayment.State) | | |
| `rate` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | |
| `balance` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | |
| `withdrawn` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | |

### Account.State
State stores state for an escrow account

| Name | Number | Description |
| ---- | ------ | ----------- |
| invalid | 0 | AccountStateInvalid is an invalid state |
| open | 1 | AccountOpen is the state when an account is open |
| closed | 2 | AccountClosed is the state when an account is closed |
| overdrawn | 3 | AccountOverdrawn is the state when an account is overdrawn |

### FractionalPayment.State
Payment State

| Name | Number | Description |
| ---- | ------ | ----------- |
| invalid | 0 | PaymentStateInvalid is the state when the payment is invalid |
| open | 1 | PaymentStateOpen is the state when the payment is open |
| closed | 2 | PaymentStateClosed is the state when the payment is closed |
| overdrawn | 3 | PaymentStateOverdrawn is the state when the payment is overdrawn |

## akash/escrow/v1beta3/query.proto

### QueryAccountsRequest
QueryAccountsRequest is request type for the Query/Accounts RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `scope` | [string](#string) | | |
| `xid` | [string](#string) | | |
| `owner` | [string](#string) | | |
| `state` | [string](#string) | | |
| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

### QueryAccountsResponse
QueryAccountsResponse is response type for the Query/Accounts RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `accounts` | [Account](#akash.escrow.v1beta3.Account) | repeated | |
| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |

### QueryPaymentsRequest
QueryPaymentsRequest is request type for the Query/Payments RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `scope` | [string](#string) | | |
| `xid` | [string](#string) | | |
| `id` | [string](#string) | | |
| `owner` | [string](#string) | | |
| `state` | [string](#string) | | |
| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

### QueryPaymentsResponse
QueryPaymentsResponse is response type for the Query/Payments RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `payments` | [FractionalPayment](#akash.escrow.v1beta3.FractionalPayment) | repeated | |
| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |

### Query
Query defines the gRPC querier service

| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
| ----------- | ------------ | ------------- | ----------- | --------- | -------- |
| `Accounts` | [QueryAccountsRequest](#akash.escrow.v1beta3.QueryAccountsRequest) | [QueryAccountsResponse](#akash.escrow.v1beta3.QueryAccountsResponse) | Accounts queries all accounts | GET | /akash/escrow/v1beta3/types/accounts/list |
| `Payments` | [QueryPaymentsRequest](#akash.escrow.v1beta3.QueryPaymentsRequest) | [QueryPaymentsResponse](#akash.escrow.v1beta3.QueryPaymentsResponse) | Payments queries all payments | GET | /akash/escrow/v1beta3/types/payments/list |
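Unlike the other modules, the escrow list endpoints take their filter fields at the top level rather than inside a `filters` message. A sketch of looking up the account backing a deployment, assuming a placeholder host and the deployment module's convention of scope `deployment` with `owner/dseq` as the external id:

```sh
# Escrow account (balance, transferred, settled_at) for one deployment
curl -s "https://rest.example.akash.network/akash/escrow/v1beta3/types/accounts/list?scope=deployment&xid=akash1exampleowner/123456" | jq '.accounts[0]'
```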

## akash/escrow/v1beta3/genesis.proto

### GenesisState
GenesisState defines the basic genesis state used by escrow module

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `accounts` | [Account](#akash.escrow.v1beta3.Account) | repeated | |
| `payments` | [FractionalPayment](#akash.escrow.v1beta3.FractionalPayment) | repeated | |

## akash/escrow/v1beta2/types.proto

### Account
Account stores state for an escrow account

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `id` | [AccountID](#akash.escrow.v1beta2.AccountID) | | unique identifier for this escrow account |
| `owner` | [string](#string) | | bech32 encoded account address of the owner of this escrow account |
| `state` | [Account.State](#akash.escrow.v1beta2.Account.State) | | current state of this escrow account |
| `balance` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | unspent coins received from the owner's wallet |
| `transferred` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | total coins spent by this account |
| `settled_at` | [int64](#int64) | | block height at which this account was last settled |
| `depositor` | [string](#string) | | bech32 encoded account address of the depositor. If depositor is same as the owner, then any incoming coins are added to the Balance. If depositor isn't same as the owner, then any incoming coins are added to the Funds. |
| `funds` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | Funds are unspent coins received from the (non-Owner) Depositor's wallet. If there are any funds, they should be spent before spending the Balance. |

### AccountID
AccountID is the account identifier

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `scope` | [string](#string) | | |
| `xid` | [string](#string) | | |

### FractionalPayment
Payment stores state for a payment

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `account_id` | [AccountID](#akash.escrow.v1beta2.AccountID) | | |
| `payment_id` | [string](#string) | | |
| `owner` | [string](#string) | | |
| `state` | [FractionalPayment.State](#akash.escrow.v1beta2.FractionalPayment.State) | | |
| `rate` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | |
| `balance` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | |
| `withdrawn` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | |

### Account.State
State stores state for an escrow account

| Name | Number | Description |
| ---- | ------ | ----------- |
| invalid | 0 | AccountStateInvalid is an invalid state |
| open | 1 | AccountOpen is the state when an account is open |
| closed | 2 | AccountClosed is the state when an account is closed |
| overdrawn | 3 | AccountOverdrawn is the state when an account is overdrawn |

### FractionalPayment.State
Payment State

| Name | Number | Description |
| ---- | ------ | ----------- |
| invalid | 0 | PaymentStateInvalid is the state when the payment is invalid |
| open | 1 | PaymentStateOpen is the state when the payment is open |
| closed | 2 | PaymentStateClosed is the state when the payment is closed |
| overdrawn | 3 | PaymentStateOverdrawn is the state when the payment is overdrawn |

## akash/escrow/v1beta2/query.proto

### QueryAccountsRequest
QueryAccountsRequest is request type for the Query/Accounts RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `scope` | [string](#string) | | |
| `xid` | [string](#string) | | |
| `owner` | [string](#string) | | |
| `state` | [string](#string) | | |
| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

### QueryAccountsResponse
QueryAccountsResponse is response type for the Query/Accounts RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `accounts` | [Account](#akash.escrow.v1beta2.Account) | repeated | |
| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |

### QueryPaymentsRequest
QueryPaymentsRequest is request type for the Query/Payments RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `scope` | [string](#string) | | |
| `xid` | [string](#string) | | |
| `id` | [string](#string) | | |
| `owner` | [string](#string) | | |
| `state` | [string](#string) | | |
| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

### QueryPaymentsResponse
QueryPaymentsResponse is response type for the Query/Payments RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `payments` | [FractionalPayment](#akash.escrow.v1beta2.FractionalPayment) | repeated | |
| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |

### Query
Query defines the gRPC querier service

| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
| ----------- | ------------ | ------------- | ----------- | --------- | -------- |
| `Accounts` | [QueryAccountsRequest](#akash.escrow.v1beta2.QueryAccountsRequest) | [QueryAccountsResponse](#akash.escrow.v1beta2.QueryAccountsResponse) | Accounts queries all accounts | GET | /akash/escrow/v1beta2/types/accounts/list |
| `Payments` | [QueryPaymentsRequest](#akash.escrow.v1beta2.QueryPaymentsRequest) | [QueryPaymentsResponse](#akash.escrow.v1beta2.QueryPaymentsResponse) | Payments queries all payments | GET | /akash/escrow/v1beta2/types/payments/list |

## akash/escrow/v1beta2/genesis.proto

### GenesisState
GenesisState defines the basic genesis state used by escrow module

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `accounts` | [Account](#akash.escrow.v1beta2.Account) | repeated | |
| `payments` | [FractionalPayment](#akash.escrow.v1beta2.FractionalPayment) | repeated | |

## akash/escrow/v1beta1/types.proto

### Account
Account stores state for an escrow account

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `id` | [AccountID](#akash.escrow.v1beta1.AccountID) | | unique identifier for this escrow account |
| `owner` | [string](#string) | | bech32 encoded account address of the owner of this escrow account |
| `state` | [Account.State](#akash.escrow.v1beta1.Account.State) | | current state of this escrow account |
| `balance` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | unspent coins received from the owner's wallet |
| `transferred` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | total coins spent by this account |
| `settled_at` | [int64](#int64) | | block height at which this account was last settled |

### AccountID
AccountID is the account identifier

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `scope` | [string](#string) | | |
| `xid` | [string](#string) | | |

### Payment
Payment stores state for a payment

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `account_id` | [AccountID](#akash.escrow.v1beta1.AccountID) | | |
| `payment_id` | [string](#string) | | |
| `owner` | [string](#string) | | |
| `state` | [Payment.State](#akash.escrow.v1beta1.Payment.State) | | |
| `rate` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | |
| `balance` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | |
| `withdrawn` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | |

### Account.State
State stores state for an escrow account

| Name | Number | Description |
| ---- | ------ | ----------- |
| invalid | 0 | AccountStateInvalid is an invalid state |
| open | 1 | AccountOpen is the state when an account is open |
| closed | 2 | AccountClosed is the state when an account is closed |
| overdrawn | 3 | AccountOverdrawn is the state when an account is overdrawn |

### Payment.State
Payment State

| Name | Number | Description |
| ---- | ------ | ----------- |
| invalid | 0 | PaymentStateInvalid is the state when the payment is invalid |
| open | 1 | PaymentStateOpen is the state when the payment is open |
| closed | 2 | PaymentStateClosed is the state when the payment is closed |
| overdrawn | 3 | PaymentStateOverdrawn is the state when the payment is overdrawn |

## akash/escrow/v1beta1/query.proto

### QueryAccountsRequest
QueryAccountsRequest is request type for the Query/Accounts RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `scope` | [string](#string) | | |
| `xid` | [string](#string) | | |
| `owner` | [string](#string) | | |
| `state` | [string](#string) | | |
| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

### QueryAccountsResponse
QueryAccountsResponse is response type for the Query/Accounts RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `accounts` | [Account](#akash.escrow.v1beta1.Account) | repeated | |
| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |

### QueryPaymentsRequest
QueryPaymentsRequest is request type for the Query/Payments RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `scope` | [string](#string) | | |
| `xid` | [string](#string) | | |
| `id` | [string](#string) | | |
| `owner` | [string](#string) | | |
| `state` | [string](#string) | | |
| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

### QueryPaymentsResponse
QueryPaymentsResponse is response type for the Query/Payments RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `payments` | [Payment](#akash.escrow.v1beta1.Payment) | repeated | |
| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |

### Query
Query defines the gRPC querier service

| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
| ----------- | ------------ | ------------- | ----------- | --------- | -------- |
| `Accounts` | [QueryAccountsRequest](#akash.escrow.v1beta1.QueryAccountsRequest) | [QueryAccountsResponse](#akash.escrow.v1beta1.QueryAccountsResponse) | Accounts queries all accounts | GET | /akash/escrow/v1beta1/types/accounts/list |
| `Payments` | [QueryPaymentsRequest](#akash.escrow.v1beta1.QueryPaymentsRequest) | [QueryPaymentsResponse](#akash.escrow.v1beta1.QueryPaymentsResponse) | Payments queries all payments | GET | /akash/escrow/v1beta1/types/payments/list |

## akash/escrow/v1beta1/genesis.proto

### GenesisState
GenesisState defines the basic genesis state used by escrow module

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `accounts` | [Account](#akash.escrow.v1beta1.Account) | repeated | |
| `payments` | [Payment](#akash.escrow.v1beta1.Payment) | repeated | |

## akash/market/v1beta4/bid.proto

### Bid
Bid stores BidID, state of bid and price

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `bid_id` | [BidID](#akash.market.v1beta4.BidID) | | |
| `state` | [Bid.State](#akash.market.v1beta4.Bid.State) | | |
| `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | |
| `created_at` | [int64](#int64) | | |
| `resources_offer` | [ResourceOffer](#akash.market.v1beta4.ResourceOffer) | repeated | |

### BidFilters
BidFilters defines flags for bid list filter

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `dseq` | [uint64](#uint64) | | |
| `gseq` | [uint32](#uint32) | | |
| `oseq` | [uint32](#uint32) | | |
| `provider` | [string](#string) | | |
| `state` | [string](#string) | | |

### BidID
BidID stores owner and all other seq numbers. A successful bid becomes a Lease(ID).

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `dseq` | [uint64](#uint64) | | |
| `gseq` | [uint32](#uint32) | | |
| `oseq` | [uint32](#uint32) | | |
| `provider` | [string](#string) | | |

### MsgCloseBid
MsgCloseBid defines an SDK message for closing bid

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `bid_id` | [BidID](#akash.market.v1beta4.BidID) | | |

### MsgCloseBidResponse
MsgCloseBidResponse defines the Msg/CloseBid response type.

### MsgCreateBid
MsgCreateBid defines an SDK message for creating Bid

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `order` | [OrderID](#akash.market.v1beta4.OrderID) | | |
| `provider` | [string](#string) | | |
| `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | |
| `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | |
| `resources_offer` | [ResourceOffer](#akash.market.v1beta4.ResourceOffer) | repeated | |

### MsgCreateBidResponse
MsgCreateBidResponse defines the Msg/CreateBid response type.

### ResourceOffer
ResourceOffer describes resources that provider is offering for deployment

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `resources` | [akash.base.v1beta3.Resources](#akash.base.v1beta3.Resources) | | |
| `count` | [uint32](#uint32) | | |

### Bid.State
State is an enum which refers to state of bid

| Name | Number | Description |
| ---- | ------ | ----------- |
| invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state |
| open | 1 | BidOpen denotes state for bid open |
| active | 2 | BidMatched denotes state for bid matched |
| lost | 3 | BidLost denotes state for bid lost |
| closed | 4 | BidClosed denotes state for bid closed |

## akash/market/v1beta4/query.proto

### QueryBidRequest
QueryBidRequest is request type for the Query/Bid RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `id` | [BidID](#akash.market.v1beta4.BidID) | | |

### QueryBidResponse
QueryBidResponse is response type for the Query/Bid RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `bid` | [Bid](#akash.market.v1beta4.Bid) | | |
| `escrow_account` | [akash.escrow.v1beta3.Account](#akash.escrow.v1beta3.Account) | | |

### QueryBidsRequest
QueryBidsRequest is request type for the Query/Bids RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `filters` | [BidFilters](#akash.market.v1beta4.BidFilters) | | |
| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

### QueryBidsResponse
QueryBidsResponse is response type for the Query/Bids RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `bids` | [QueryBidResponse](#akash.market.v1beta4.QueryBidResponse) | repeated | |
| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |

### QueryLeaseRequest
QueryLeaseRequest is request type for the Query/Lease RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `id` | [LeaseID](#akash.market.v1beta4.LeaseID) | | |

### QueryLeaseResponse
QueryLeaseResponse is response type for the Query/Lease RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `lease` | [Lease](#akash.market.v1beta4.Lease) | | |
| `escrow_payment` | [akash.escrow.v1beta3.FractionalPayment](#akash.escrow.v1beta3.FractionalPayment) | | |

### QueryLeasesRequest
QueryLeasesRequest is request type for the Query/Leases RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `filters` | [LeaseFilters](#akash.market.v1beta4.LeaseFilters) | | |
| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

### QueryLeasesResponse
QueryLeasesResponse is response type for the Query/Leases RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `leases` | [QueryLeaseResponse](#akash.market.v1beta4.QueryLeaseResponse) | repeated | |
| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |

### QueryOrderRequest
QueryOrderRequest is request type for the Query/Order RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `id` | [OrderID](#akash.market.v1beta4.OrderID) | | |

### QueryOrderResponse
QueryOrderResponse is response type for the Query/Order RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `order` | [Order](#akash.market.v1beta4.Order) | | |

### QueryOrdersRequest
QueryOrdersRequest is request type for the Query/Orders RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `filters` | [OrderFilters](#akash.market.v1beta4.OrderFilters) | | |
| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

### QueryOrdersResponse
QueryOrdersResponse is response type for the Query/Orders RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `orders` | [Order](#akash.market.v1beta4.Order) | repeated | |
| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |

### Query
Query defines the gRPC querier service

| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
| ----------- | ------------ | ------------- | ----------- | --------- | -------- |
| `Orders` | [QueryOrdersRequest](#akash.market.v1beta4.QueryOrdersRequest) | [QueryOrdersResponse](#akash.market.v1beta4.QueryOrdersResponse) | Orders queries orders with filters | GET | /akash/market/v1beta4/orders/list |
| `Order` | [QueryOrderRequest](#akash.market.v1beta4.QueryOrderRequest) | [QueryOrderResponse](#akash.market.v1beta4.QueryOrderResponse) | Order queries order details | GET | /akash/market/v1beta4/orders/info |
| `Bids` | [QueryBidsRequest](#akash.market.v1beta4.QueryBidsRequest) | [QueryBidsResponse](#akash.market.v1beta4.QueryBidsResponse) | Bids queries bids with filters | GET | /akash/market/v1beta4/bids/list |
| `Bid` | [QueryBidRequest](#akash.market.v1beta4.QueryBidRequest) | [QueryBidResponse](#akash.market.v1beta4.QueryBidResponse) | Bid queries bid details | GET | /akash/market/v1beta4/bids/info |
| `Leases` | [QueryLeasesRequest](#akash.market.v1beta4.QueryLeasesRequest) | [QueryLeasesResponse](#akash.market.v1beta4.QueryLeasesResponse) | Leases queries leases with filters | GET | /akash/market/v1beta4/leases/list |
| `Lease` | [QueryLeaseRequest](#akash.market.v1beta4.QueryLeaseRequest) | [QueryLeaseResponse](#akash.market.v1beta4.QueryLeaseResponse) | Lease queries lease details | GET | /akash/market/v1beta4/leases/info |
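The same REST-gateway pattern applies to the market queries. For example, listing a tenant's active leases, assuming a placeholder host and `filters.*` parameters mapped from LeaseFilters:

```sh
# Active leases for an owner, reduced to their id and price
curl -s "https://rest.example.akash.network/akash/market/v1beta4/leases/list?filters.owner=akash1exampleowner&filters.state=active" | jq '.leases[] | {lease_id: .lease.lease_id, price: .lease.price}'
```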

## akash/market/v1beta4/service.proto

### Msg
Msg defines the market Msg service

| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
| ----------- | ------------ | ------------- | ----------- | --------- | -------- |
| `CreateBid` | [MsgCreateBid](#akash.market.v1beta4.MsgCreateBid) | [MsgCreateBidResponse](#akash.market.v1beta4.MsgCreateBidResponse) | CreateBid defines a method to create a bid given proper inputs. | | |
| `CloseBid` | [MsgCloseBid](#akash.market.v1beta4.MsgCloseBid) | [MsgCloseBidResponse](#akash.market.v1beta4.MsgCloseBidResponse) | CloseBid defines a method to close a bid given proper inputs. | | |
| `WithdrawLease` | [MsgWithdrawLease](#akash.market.v1beta4.MsgWithdrawLease) | [MsgWithdrawLeaseResponse](#akash.market.v1beta4.MsgWithdrawLeaseResponse) | WithdrawLease withdraws accrued funds from the lease payment | | |
| `CreateLease` | [MsgCreateLease](#akash.market.v1beta4.MsgCreateLease) | [MsgCreateLeaseResponse](#akash.market.v1beta4.MsgCreateLeaseResponse) | CreateLease creates a new lease | | |
| `CloseLease` | [MsgCloseLease](#akash.market.v1beta4.MsgCloseLease) | [MsgCloseLeaseResponse](#akash.market.v1beta4.MsgCloseLeaseResponse) | CloseLease defines a method to close a lease given proper inputs. | | |

## akash/market/v1beta4/lease.proto

### Lease
Lease stores LeaseID, state of lease and price

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `lease_id` | [LeaseID](#akash.market.v1beta4.LeaseID) | | |
| `state` | [Lease.State](#akash.market.v1beta4.Lease.State) | | |
| `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | |
| `created_at` | [int64](#int64) | | |
| `closed_on` | [int64](#int64) | | |

### LeaseFilters
LeaseFilters defines flags for lease list filter

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `dseq` | [uint64](#uint64) | | |
| `gseq` | [uint32](#uint32) | | |
| `oseq` | [uint32](#uint32) | | |
| `provider` | [string](#string) | | |
| `state` | [string](#string) | | |

### LeaseID
LeaseID stores bid details of lease

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `dseq` | [uint64](#uint64) | | |
| `gseq` | [uint32](#uint32) | | |
| `oseq` | [uint32](#uint32) | | |
| `provider` | [string](#string) | | |

### MsgCloseLease
MsgCloseLease defines an SDK message for closing a lease

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `lease_id` | [LeaseID](#akash.market.v1beta4.LeaseID) | | |

### MsgCloseLeaseResponse
MsgCloseLeaseResponse defines the Msg/CloseLease response type.

### MsgCreateLease
MsgCreateLease is sent to create a lease

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `bid_id` | [BidID](#akash.market.v1beta4.BidID) | | |

### MsgCreateLeaseResponse
MsgCreateLeaseResponse is the response from creating a lease

### MsgWithdrawLease
MsgWithdrawLease defines an SDK message for withdrawing accrued funds from a lease

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `bid_id` | [LeaseID](#akash.market.v1beta4.LeaseID) | | |

### MsgWithdrawLeaseResponse
MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type.

### Lease.State
State is an enum which refers to state of lease

| Name | Number | Description |
| ---- | ------ | ----------- |
| invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state |
| active | 1 | LeaseActive denotes state for lease active |
| insufficient_funds | 2 | LeaseInsufficientFunds denotes state for lease insufficient_funds |
| closed | 3 | LeaseClosed denotes state for lease closed |

## akash/market/v1beta4/genesis.proto

### GenesisState
GenesisState defines the basic genesis state used by market module

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `params` | [Params](#akash.market.v1beta4.Params) | | |
| `orders` | [Order](#akash.market.v1beta4.Order) | repeated | |
| `leases` | [Lease](#akash.market.v1beta4.Lease) | repeated | |
| `bids` | [Bid](#akash.market.v1beta4.Bid) | repeated | |

## akash/market/v1beta4/order.proto

### Order
Order stores orderID, state of order and other details

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `order_id` | [OrderID](#akash.market.v1beta4.OrderID) | | |
| `state` | [Order.State](#akash.market.v1beta4.Order.State) | | |
| `spec` | [akash.deployment.v1beta3.GroupSpec](#akash.deployment.v1beta3.GroupSpec) | | |
| `created_at` | [int64](#int64) | | |

### OrderFilters
OrderFilters defines flags for order list filter

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `dseq` | [uint64](#uint64) | | |
| `gseq` | [uint32](#uint32) | | |
| `oseq` | [uint32](#uint32) | | |
| `state` | [string](#string) | | |

### OrderID
OrderID stores owner and all other seq numbers

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `dseq` | [uint64](#uint64) | | |
| `gseq` | [uint32](#uint32) | | |
| `oseq` | [uint32](#uint32) | | |

### Order.State
State is an enum which refers to state of order

| Name | Number | Description |
| ---- | ------ | ----------- |
| invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state |
| open | 1 | OrderOpen denotes state for order open |
| active | 2 | OrderMatched denotes state for order matched |
| closed | 3 | OrderClosed denotes state for order closed |

## akash/market/v1beta4/params.proto

### Params
Params is the params for the x/market module

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `bid_min_deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | |
| `order_max_bids` | [uint32](#uint32) | | |

## akash/market/v1beta3/bid.proto

### Bid
Bid stores BidID, state of bid and price

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `bid_id` | [BidID](#akash.market.v1beta3.BidID) | | |
| `state` | [Bid.State](#akash.market.v1beta3.Bid.State) | | |
| `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | |
| `created_at` | [int64](#int64) | | |

### BidFilters
BidFilters defines flags for bid list filter

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `dseq` | [uint64](#uint64) | | |
| `gseq` | [uint32](#uint32) | | |
| `oseq` | [uint32](#uint32) | | |
| `provider` | [string](#string) | | |
| `state` | [string](#string) | | |

### BidID
BidID stores owner and all other seq numbers. A successful bid becomes a Lease(ID).

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `dseq` | [uint64](#uint64) | | |
| `gseq` | [uint32](#uint32) | | |
| `oseq` | [uint32](#uint32) | | |
| `provider` | [string](#string) | | |

### MsgCloseBid
MsgCloseBid defines an SDK message for closing bid

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `bid_id` | [BidID](#akash.market.v1beta3.BidID) | | |

### MsgCloseBidResponse
MsgCloseBidResponse defines the Msg/CloseBid response type.

### MsgCreateBid
MsgCreateBid defines an SDK message for creating Bid

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `order` | [OrderID](#akash.market.v1beta3.OrderID) | | |
| `provider` | [string](#string) | | |
| `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | |
| `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | |

### MsgCreateBidResponse
MsgCreateBidResponse defines the Msg/CreateBid response type.

### Bid.State
State is an enum which refers to state of bid

| Name | Number | Description |
| ---- | ------ | ----------- |
| invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state |
| open | 1 | BidOpen denotes state for bid open |
| active | 2 | BidMatched denotes state for bid matched |
| lost | 3 | BidLost denotes state for bid lost |
| closed | 4 | BidClosed denotes state for bid closed |

## akash/market/v1beta3/query.proto

### QueryBidRequest
QueryBidRequest is request type for the Query/Bid RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `id` | [BidID](#akash.market.v1beta3.BidID) | | |

### QueryBidResponse
QueryBidResponse is response type for the Query/Bid RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `bid` | [Bid](#akash.market.v1beta3.Bid) | | |
| `escrow_account` | [akash.escrow.v1beta3.Account](#akash.escrow.v1beta3.Account) | | |

### QueryBidsRequest
QueryBidsRequest is request type for the Query/Bids RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `filters` | [BidFilters](#akash.market.v1beta3.BidFilters) | | |
| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

### QueryBidsResponse
QueryBidsResponse is response type for the Query/Bids RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `bids` | [QueryBidResponse](#akash.market.v1beta3.QueryBidResponse) | repeated | |
| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |

### QueryLeaseRequest
QueryLeaseRequest is request type for the Query/Lease RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `id` | [LeaseID](#akash.market.v1beta3.LeaseID) | | |

### QueryLeaseResponse
QueryLeaseResponse is response type for the Query/Lease RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `lease` | [Lease](#akash.market.v1beta3.Lease) | | |
| `escrow_payment` | [akash.escrow.v1beta3.FractionalPayment](#akash.escrow.v1beta3.FractionalPayment) | | |

### QueryLeasesRequest
QueryLeasesRequest is request type for the Query/Leases RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `filters` | [LeaseFilters](#akash.market.v1beta3.LeaseFilters) | | |
| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

### QueryLeasesResponse
QueryLeasesResponse is response type for the Query/Leases RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `leases` | [QueryLeaseResponse](#akash.market.v1beta3.QueryLeaseResponse) | repeated | |
| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |

### QueryOrderRequest
QueryOrderRequest is request type for the Query/Order RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `id` | [OrderID](#akash.market.v1beta3.OrderID) | | |

### QueryOrderResponse
QueryOrderResponse is response type for the Query/Order RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `order` | [Order](#akash.market.v1beta3.Order) | | |

### QueryOrdersRequest
QueryOrdersRequest is request type for the Query/Orders RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `filters` | [OrderFilters](#akash.market.v1beta3.OrderFilters) | | |
| `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | |

### QueryOrdersResponse
QueryOrdersResponse is response type for the Query/Orders RPC method

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `orders` | [Order](#akash.market.v1beta3.Order) | repeated | |
| `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | |

### Query
Query defines the gRPC querier service

| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
| ----------- | ------------ | ------------- | ----------- | --------- | -------- |
| `Orders` | [QueryOrdersRequest](#akash.market.v1beta3.QueryOrdersRequest) | [QueryOrdersResponse](#akash.market.v1beta3.QueryOrdersResponse) | Orders queries orders with filters | GET | /akash/market/v1beta3/orders/list |
| `Order` | [QueryOrderRequest](#akash.market.v1beta3.QueryOrderRequest) | [QueryOrderResponse](#akash.market.v1beta3.QueryOrderResponse) | Order queries order details | GET | /akash/market/v1beta3/orders/info |
| `Bids` | [QueryBidsRequest](#akash.market.v1beta3.QueryBidsRequest) | [QueryBidsResponse](#akash.market.v1beta3.QueryBidsResponse) | Bids queries bids with filters | GET | /akash/market/v1beta3/bids/list |
| `Bid` | [QueryBidRequest](#akash.market.v1beta3.QueryBidRequest) | [QueryBidResponse](#akash.market.v1beta3.QueryBidResponse) | Bid queries bid details | GET | /akash/market/v1beta3/bids/info |
| `Leases` | [QueryLeasesRequest](#akash.market.v1beta3.QueryLeasesRequest) | [QueryLeasesResponse](#akash.market.v1beta3.QueryLeasesResponse) | Leases queries leases with filters | GET | /akash/market/v1beta3/leases/list |
| `Lease` | [QueryLeaseRequest](#akash.market.v1beta3.QueryLeaseRequest) | [QueryLeaseResponse](#akash.market.v1beta3.QueryLeaseResponse) | Lease queries lease details | GET | /akash/market/v1beta3/leases/info |

## akash/market/v1beta3/service.proto

### Msg
Msg defines the market Msg service

| Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint |
| ----------- | ------------ | ------------- | ----------- | --------- | -------- |
| `CreateBid` | [MsgCreateBid](#akash.market.v1beta3.MsgCreateBid) | [MsgCreateBidResponse](#akash.market.v1beta3.MsgCreateBidResponse) | CreateBid defines a method to create a bid given proper inputs. | | |
| `CloseBid` | [MsgCloseBid](#akash.market.v1beta3.MsgCloseBid) | [MsgCloseBidResponse](#akash.market.v1beta3.MsgCloseBidResponse) | CloseBid defines a method to close a bid given proper inputs. | | |
| `WithdrawLease` | [MsgWithdrawLease](#akash.market.v1beta3.MsgWithdrawLease) | [MsgWithdrawLeaseResponse](#akash.market.v1beta3.MsgWithdrawLeaseResponse) | WithdrawLease withdraws accrued funds from the lease payment | | |
| `CreateLease` | [MsgCreateLease](#akash.market.v1beta3.MsgCreateLease) | [MsgCreateLeaseResponse](#akash.market.v1beta3.MsgCreateLeaseResponse) | CreateLease creates a new lease | | |
| `CloseLease` | [MsgCloseLease](#akash.market.v1beta3.MsgCloseLease) | [MsgCloseLeaseResponse](#akash.market.v1beta3.MsgCloseLeaseResponse) | CloseLease defines a method to close a lease given proper inputs. | | |

## akash/market/v1beta3/lease.proto

### Lease
Lease stores LeaseID, state of lease and price

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `lease_id` | [LeaseID](#akash.market.v1beta3.LeaseID) | | |
| `state` | [Lease.State](#akash.market.v1beta3.Lease.State) | | |
| `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | |
| `created_at` | [int64](#int64) | | |
| `closed_on` | [int64](#int64) | | |

### LeaseFilters
LeaseFilters defines flags for lease list filter

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `dseq` | [uint64](#uint64) | | |
| `gseq` | [uint32](#uint32) | | |
| `oseq` | [uint32](#uint32) | | |
| `provider` | [string](#string) | | |
| `state` | [string](#string) | | |

### LeaseID
LeaseID stores bid details of lease

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `owner` | [string](#string) | | |
| `dseq` | [uint64](#uint64) | | |
| `gseq` | [uint32](#uint32) | | |
| `oseq` | [uint32](#uint32) | | |
| `provider` | [string](#string) | | |

### MsgCloseLease
MsgCloseLease defines an SDK message for closing a lease

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `lease_id` | [LeaseID](#akash.market.v1beta3.LeaseID) | | |

### MsgCloseLeaseResponse
MsgCloseLeaseResponse defines the Msg/CloseLease response type.

### MsgCreateLease
MsgCreateLease is sent to create a lease

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `bid_id` | [BidID](#akash.market.v1beta3.BidID) | | |

### MsgCreateLeaseResponse
MsgCreateLeaseResponse is the response from creating a lease

### MsgWithdrawLease
MsgWithdrawLease defines an SDK message for withdrawing accrued funds from a lease

| Field | Type | Label | Description |
| ----- | ---- | ----- | ----------- |
| `bid_id` | [LeaseID](#akash.market.v1beta3.LeaseID) | | |

### MsgWithdrawLeaseResponse
MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type.

### Lease.State
State is an enum which refers to state of lease

| Name | Number | Description |
| ---- | ------ | ----------- |
| invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state |
| active | 1 | LeaseActive denotes state for lease active |
| insufficient_funds | 2 | LeaseInsufficientFunds denotes state for lease insufficient_funds |
| closed | 3 | LeaseClosed denotes state for lease closed |

- - ## akash/market/v1beta3/genesis.proto - - - - - - ### GenesisState - GenesisState defines the basic genesis state used by market module - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `params` | [Params](#akash.market.v1beta3.Params) | | | - | `orders` | [Order](#akash.market.v1beta3.Order) | repeated | | - | `leases` | [Lease](#akash.market.v1beta3.Lease) | repeated | | - | `bids` | [Bid](#akash.market.v1beta3.Bid) | repeated | | - - - - - - - - - - - - - - - - -

Top

- - ## akash/market/v1beta3/order.proto - - - - - - ### Order - Order stores orderID, state of order and other details - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `order_id` | [OrderID](#akash.market.v1beta3.OrderID) | | | - | `state` | [Order.State](#akash.market.v1beta3.Order.State) | | | - | `spec` | [akash.deployment.v1beta3.GroupSpec](#akash.deployment.v1beta3.GroupSpec) | | | - | `created_at` | [int64](#int64) | | | - - - - - - - - - ### OrderFilters - OrderFilters defines flags for order list filter - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `gseq` | [uint32](#uint32) | | | - | `oseq` | [uint32](#uint32) | | | - | `state` | [string](#string) | | | - - - - - - - - - ### OrderID - OrderID stores owner and all other seq numbers - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `gseq` | [uint32](#uint32) | | | - | `oseq` | [uint32](#uint32) | | | - - - - - - - - - - - ### Order.State - State is an enum which refers to state of order - - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | - | open | 1 | OrderOpen denotes state for order open | - | active | 2 | OrderMatched denotes state for order matched | - | closed | 3 | OrderClosed denotes state for order lost | - - - - - - - - - - - -

Top

- - ## akash/market/v1beta3/params.proto - - - - - - ### Params - Params is the params for the x/market module - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `bid_min_deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - | `order_max_bids` | [uint32](#uint32) | | | - - - - - - - - - - - - - - - - -

Top

- - ## akash/market/v1beta2/bid.proto - - - - - - ### Bid - Bid stores BidID, state of bid and price - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `bid_id` | [BidID](#akash.market.v1beta2.BidID) | | | - | `state` | [Bid.State](#akash.market.v1beta2.Bid.State) | | | - | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | - | `created_at` | [int64](#int64) | | | - - - - - - - - - ### BidFilters - BidFilters defines flags for bid list filter - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `gseq` | [uint32](#uint32) | | | - | `oseq` | [uint32](#uint32) | | | - | `provider` | [string](#string) | | | - | `state` | [string](#string) | | | - - - - - - - - - ### BidID - BidID stores owner and all other seq numbers -A successful bid becomes a Lease(ID). - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `gseq` | [uint32](#uint32) | | | - | `oseq` | [uint32](#uint32) | | | - | `provider` | [string](#string) | | | - - - - - - - - - ### MsgCloseBid - MsgCloseBid defines an SDK message for closing bid - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `bid_id` | [BidID](#akash.market.v1beta2.BidID) | | | - - - - - - - - - ### MsgCloseBidResponse - MsgCloseBidResponse defines the Msg/CloseBid response type. - - - - - - - - - ### MsgCreateBid - MsgCreateBid defines an SDK message for creating Bid - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `order` | [OrderID](#akash.market.v1beta2.OrderID) | | | - | `provider` | [string](#string) | | | - | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | - | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - - - - - - - - - ### MsgCreateBidResponse - MsgCreateBidResponse defines the Msg/CreateBid response type. - - - - - - - - - - - ### Bid.State - State is an enum which refers to state of bid - - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | - | open | 1 | BidOpen denotes state for bid open | - | active | 2 | BidMatched denotes state for bid open | - | lost | 3 | BidLost denotes state for bid lost | - | closed | 4 | BidClosed denotes state for bid closed | - - - - - - - - - - - -

Top

- - ## akash/market/v1beta2/query.proto - - - - - - ### QueryBidRequest - QueryBidRequest is request type for the Query/Bid RPC method - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `id` | [BidID](#akash.market.v1beta2.BidID) | | | - - - - - - - - - ### QueryBidResponse - QueryBidResponse is response type for the Query/Bid RPC method - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `bid` | [Bid](#akash.market.v1beta2.Bid) | | | - | `escrow_account` | [akash.escrow.v1beta2.Account](#akash.escrow.v1beta2.Account) | | | - - - - - - - - - ### QueryBidsRequest - QueryBidsRequest is request type for the Query/Bids RPC method - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `filters` | [BidFilters](#akash.market.v1beta2.BidFilters) | | | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | - - - - - - - - - ### QueryBidsResponse - QueryBidsResponse is response type for the Query/Bids RPC method - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `bids` | [QueryBidResponse](#akash.market.v1beta2.QueryBidResponse) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | - - - - - - - - - ### QueryLeaseRequest - QueryLeaseRequest is request type for the Query/Lease RPC method - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `id` | [LeaseID](#akash.market.v1beta2.LeaseID) | | | - - - - - - - - - ### QueryLeaseResponse - QueryLeaseResponse is response type for the Query/Lease RPC method - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `lease` | [Lease](#akash.market.v1beta2.Lease) | | | - | `escrow_payment` | [akash.escrow.v1beta2.FractionalPayment](#akash.escrow.v1beta2.FractionalPayment) | | | - - - - - - - - - ### QueryLeasesRequest - QueryLeasesRequest is request type for the Query/Leases RPC method - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `filters` | [LeaseFilters](#akash.market.v1beta2.LeaseFilters) | | | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | - - - - - - - - - ### QueryLeasesResponse - QueryLeasesResponse is response type for the Query/Leases RPC method - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `leases` | [QueryLeaseResponse](#akash.market.v1beta2.QueryLeaseResponse) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | - - - - - - - - - ### QueryOrderRequest - QueryOrderRequest is request type for the Query/Order RPC method - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `id` | [OrderID](#akash.market.v1beta2.OrderID) | | | - + | `bid_min_deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `order_max_bids` | [uint32](#uint32) | | | - - - - ### QueryOrderResponse - QueryOrderResponse is response type for the Query/Order RPC method - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `order` | [Order](#akash.market.v1beta2.Order) | | | - - + - + - - + - ### QueryOrdersRequest - QueryOrdersRequest is request type for the Query/Orders RPC method + - | Field | Type | Label | Description | - | ----- | ---- | ----- | 
----------- | - | `filters` | [OrderFilters](#akash.market.v1beta2.OrderFilters) | | | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | - + +

Top

+ ## akash/market/v1beta5/order.proto - + - ### QueryOrdersResponse - QueryOrdersResponse is response type for the Query/Orders RPC method + ### Order + Order stores orderID, state of order and other details | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `orders` | [Order](#akash.market.v1beta2.Order) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + | `id` | [akash.market.v1.OrderID](#akash.market.v1.OrderID) | | | + | `state` | [Order.State](#akash.market.v1beta5.Order.State) | | | + | `spec` | [akash.deployment.v1beta4.GroupSpec](#akash.deployment.v1beta4.GroupSpec) | | | + | `created_at` | [int64](#int64) | | | @@ -7342,125 +3372,70 @@ A successful bid becomes a Lease(ID). - - - - - - - ### Query - Query defines the gRPC querier service - - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `Orders` | [QueryOrdersRequest](#akash.market.v1beta2.QueryOrdersRequest) | [QueryOrdersResponse](#akash.market.v1beta2.QueryOrdersResponse) | Orders queries orders with filters | GET|/akash/market/v1beta2/orders/list| - | `Order` | [QueryOrderRequest](#akash.market.v1beta2.QueryOrderRequest) | [QueryOrderResponse](#akash.market.v1beta2.QueryOrderResponse) | Order queries order details | GET|/akash/market/v1beta2/orders/info| - | `Bids` | [QueryBidsRequest](#akash.market.v1beta2.QueryBidsRequest) | [QueryBidsResponse](#akash.market.v1beta2.QueryBidsResponse) | Bids queries bids with filters | GET|/akash/market/v1beta2/bids/list| - | `Bid` | [QueryBidRequest](#akash.market.v1beta2.QueryBidRequest) | [QueryBidResponse](#akash.market.v1beta2.QueryBidResponse) | Bid queries bid details | GET|/akash/market/v1beta2/bids/info| - | `Leases` | [QueryLeasesRequest](#akash.market.v1beta2.QueryLeasesRequest) | [QueryLeasesResponse](#akash.market.v1beta2.QueryLeasesResponse) | Leases queries leases with filters | GET|/akash/market/v1beta2/leases/list| - | `Lease` | [QueryLeaseRequest](#akash.market.v1beta2.QueryLeaseRequest) | [QueryLeaseResponse](#akash.market.v1beta2.QueryLeaseResponse) | Lease queries lease details | GET|/akash/market/v1beta2/leases/info| - - + - - - -

Top

+ ### Order.State + State is an enum which refers to state of order - ## akash/market/v1beta2/service.proto + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | + | open | 1 | OrderOpen denotes state for order open | + | active | 2 | OrderMatched denotes state for order matched | + | closed | 3 | OrderClosed denotes state for order lost | - - - - - - ### Msg - Msg defines the market Msg service - - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `CreateBid` | [MsgCreateBid](#akash.market.v1beta2.MsgCreateBid) | [MsgCreateBidResponse](#akash.market.v1beta2.MsgCreateBidResponse) | CreateBid defines a method to create a bid given proper inputs. | | - | `CloseBid` | [MsgCloseBid](#akash.market.v1beta2.MsgCloseBid) | [MsgCloseBidResponse](#akash.market.v1beta2.MsgCloseBidResponse) | CloseBid defines a method to close a bid given proper inputs. | | - | `WithdrawLease` | [MsgWithdrawLease](#akash.market.v1beta2.MsgWithdrawLease) | [MsgWithdrawLeaseResponse](#akash.market.v1beta2.MsgWithdrawLeaseResponse) | WithdrawLease withdraws accrued funds from the lease payment | | - | `CreateLease` | [MsgCreateLease](#akash.market.v1beta2.MsgCreateLease) | [MsgCreateLeaseResponse](#akash.market.v1beta2.MsgCreateLeaseResponse) | CreateLease creates a new lease | | - | `CloseLease` | [MsgCloseLease](#akash.market.v1beta2.MsgCloseLease) | [MsgCloseLeaseResponse](#akash.market.v1beta2.MsgCloseLeaseResponse) | CloseLease defines a method to close an order given proper inputs. | | - - +

Top

- ## akash/market/v1beta2/lease.proto + ## akash/market/v1beta5/genesis.proto - + - ### Lease - Lease stores LeaseID, state of lease and price + ### GenesisState + GenesisState defines the basic genesis state used by market module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `lease_id` | [LeaseID](#akash.market.v1beta2.LeaseID) | | | - | `state` | [Lease.State](#akash.market.v1beta2.Lease.State) | | | - | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | - | `created_at` | [int64](#int64) | | | - | `closed_on` | [int64](#int64) | | | + | `params` | [Params](#akash.market.v1beta5.Params) | | | + | `orders` | [Order](#akash.market.v1beta5.Order) | repeated | | + | `leases` | [akash.market.v1.Lease](#akash.market.v1.Lease) | repeated | | + | `bids` | [Bid](#akash.market.v1beta5.Bid) | repeated | | - - - - ### LeaseFilters - LeaseFilters defines flags for lease list filter - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `gseq` | [uint32](#uint32) | | | - | `oseq` | [uint32](#uint32) | | | - | `provider` | [string](#string) | | | - | `state` | [string](#string) | | | - - + - + - - + - ### LeaseID - LeaseID stores bid details of lease + - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `gseq` | [uint32](#uint32) | | | - | `oseq` | [uint32](#uint32) | | | - | `provider` | [string](#string) | | | - + +

Top

+ ## akash/market/v1beta5/leasemsg.proto - + ### MsgCloseLease MsgCloseLease defines an SDK message for closing order @@ -7468,14 +3443,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `lease_id` | [LeaseID](#akash.market.v1beta2.LeaseID) | | | + | `lease_id` | [akash.market.v1.LeaseID](#akash.market.v1.LeaseID) | | | - + ### MsgCloseLeaseResponse MsgCloseLeaseResponse defines the Msg/CloseLease response type. @@ -7485,7 +3460,7 @@ A successful bid becomes a Lease(ID). - + ### MsgCreateLease MsgCreateLease is sent to create a lease @@ -7493,14 +3468,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid_id` | [BidID](#akash.market.v1beta2.BidID) | | | + | `bid_id` | [akash.market.v1.BidID](#akash.market.v1.BidID) | | | - + ### MsgCreateLeaseResponse MsgCreateLeaseResponse is the response from creating a lease @@ -7510,22 +3485,22 @@ A successful bid becomes a Lease(ID). - + ### MsgWithdrawLease - MsgWithdrawLease defines an SDK message for closing bid + MsgWithdrawLease defines an SDK message for withdrawing lease funds | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid_id` | [LeaseID](#akash.market.v1beta2.LeaseID) | | | + | `bid_id` | [akash.market.v1.LeaseID](#akash.market.v1.LeaseID) | | | - + ### MsgWithdrawLeaseResponse MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. @@ -7536,20 +3511,6 @@ A successful bid becomes a Lease(ID). - - - - ### Lease.State - State is an enum which refers to state of lease - - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | - | active | 1 | LeaseActive denotes state for lease active | - | insufficient_funds | 2 | LeaseInsufficientFunds denotes state for lease insufficient_funds | - | closed | 3 | LeaseClosed denotes state for lease closed | - - @@ -7558,116 +3519,47 @@ A successful bid becomes a Lease(ID). - +
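To make the lease message shapes above concrete, here is a minimal, illustrative Go sketch that assembles a `MsgCreateLease` and prints it as JSON. The structs are hand-written stand-ins that mirror the documented fields; they are not the generated Akash API types, the addresses are placeholders, and it is assumed that `akash.market.v1.BidID` carries the same owner/dseq/gseq/oseq/provider fields as the earlier BidID versions documented in this file.

```go
// Illustrative only: local structs mirroring the documented MsgCreateLease
// shape; NOT the generated types from the Akash API module.
package main

import (
	"encoding/json"
	"fmt"
)

// BidID is assumed to carry the same fields as the earlier BidID versions
// shown in this document (owner, dseq, gseq, oseq, provider).
type BidID struct {
	Owner    string `json:"owner"`
	DSeq     uint64 `json:"dseq"`
	GSeq     uint32 `json:"gseq"`
	OSeq     uint32 `json:"oseq"`
	Provider string `json:"provider"`
}

// MsgCreateLease mirrors the single documented field, bid_id.
type MsgCreateLease struct {
	BidID BidID `json:"bid_id"`
}

func main() {
	msg := MsgCreateLease{BidID: BidID{
		Owner:    "akash1example...",  // placeholder tenant address
		DSeq:     1234,
		GSeq:     1,
		OSeq:     1,
		Provider: "akash1provider...", // placeholder provider address
	}}
	out, _ := json.MarshalIndent(msg, "", "  ")
	fmt.Println(string(out))
}
```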

Top

- ## akash/market/v1beta2/genesis.proto - - - - - - ### GenesisState - GenesisState defines the basic genesis state used by market module - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `orders` | [Order](#akash.market.v1beta2.Order) | repeated | | - | `leases` | [Lease](#akash.market.v1beta2.Lease) | repeated | | - | `params` | [Params](#akash.market.v1beta2.Params) | | | - - - - - - - - - - - - - - + ## akash/market/v1beta5/paramsmsg.proto - -

Top

- ## akash/market/v1beta2/order.proto + - - + ### MsgUpdateParams + MsgUpdateParams is the Msg/UpdateParams request type. - ### Order - Order stores orderID, state of order and other details +Since: akash v1.0.0 | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `order_id` | [OrderID](#akash.market.v1beta2.OrderID) | | | - | `state` | [Order.State](#akash.market.v1beta2.Order.State) | | | - | `spec` | [akash.deployment.v1beta2.GroupSpec](#akash.deployment.v1beta2.GroupSpec) | | | - | `created_at` | [int64](#int64) | | | - - - - - - - - - ### OrderFilters - OrderFilters defines flags for order list filter + | `authority` | [string](#string) | | authority is the address of the governance account. | + | `params` | [Params](#akash.market.v1beta5.Params) | | params defines the x/deployment parameters to update. - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `gseq` | [uint32](#uint32) | | | - | `oseq` | [uint32](#uint32) | | | - | `state` | [string](#string) | | | +NOTE: All parameters must be supplied. | - - - ### OrderID - OrderID stores owner and all other seq numbers - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `gseq` | [uint32](#uint32) | | | - | `oseq` | [uint32](#uint32) | | | - - + - + ### MsgUpdateParamsResponse + MsgUpdateParamsResponse defines the response structure for executing a +MsgUpdateParams message. - +Since: akash v1.0.0 - - - ### Order.State - State is an enum which refers to state of order - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | - | open | 1 | OrderOpen denotes state for order open | - | active | 2 | OrderMatched denotes state for order matched | - | closed | 3 | OrderClosed denotes state for order lost | + + @@ -7676,215 +3568,220 @@ A successful bid becomes a Lease(ID). - +
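As a rough illustration of the governance flow implied by `MsgUpdateParams`, the snippet below assembles the message body as it might appear inside a gov proposal. The `@type` URL follows the usual Cosmos SDK `/<package>.<Message>` convention, the authority string is a placeholder for the x/gov module account, and the parameter values (and the assumption that the v1beta5 `Params` still carries `bid_min_deposit` and `order_max_bids`) are illustrative only.

```go
// Illustrative only: a hand-assembled MsgUpdateParams body for the market
// module. Field names follow the tables above; all values are placeholders.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	msg := map[string]any{
		"@type":     "/akash.market.v1beta5.MsgUpdateParams", // conventional type URL (assumption)
		"authority": "akash1govmoduleaccount...",             // placeholder for the x/gov module account
		"params": map[string]any{
			// NOTE from the docs: all parameters must be supplied.
			"bid_min_deposit": map[string]string{"denom": "uakt", "amount": "500000"},
			"order_max_bids":  20,
		},
	}
	out, _ := json.MarshalIndent(msg, "", "  ")
	fmt.Println(string(out))
}
```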

Top

- ## akash/market/v1beta2/params.proto + ## akash/market/v1beta5/query.proto - + - ### Params - Params is the params for the x/market module + ### QueryBidRequest + QueryBidRequest is request type for the Query/Bid RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid_min_deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - | `order_max_bids` | [uint32](#uint32) | | | + | `id` | [akash.market.v1.BidID](#akash.market.v1.BidID) | | | - - - - - + + - + ### QueryBidResponse + QueryBidResponse is response type for the Query/Bid RPC method + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `bid` | [Bid](#akash.market.v1beta5.Bid) | | | + | `escrow_account` | [akash.escrow.v1.Account](#akash.escrow.v1.Account) | | | + - -

Top

- ## akash/inflation/v1beta3/genesis.proto - + - ### GenesisState - GenesisState stores slice of genesis deployment instance + ### QueryBidsRequest + QueryBidsRequest is request type for the Query/Bids RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `params` | [Params](#akash.inflation.v1beta3.Params) | | | + | `filters` | [BidFilters](#akash.market.v1beta5.BidFilters) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | - - - - - + + - + ### QueryBidsResponse + QueryBidsResponse is response type for the Query/Bids RPC method + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `bids` | [QueryBidResponse](#akash.market.v1beta5.QueryBidResponse) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + - -

Top

- ## akash/inflation/v1beta3/params.proto - + - ### Params - Params defines the parameters for the x/deployment package + ### QueryLeaseRequest + QueryLeaseRequest is request type for the Query/Lease RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `inflation_decay_factor` | [string](#string) | | InflationDecayFactor is the number of years it takes inflation to halve. | - | `initial_inflation` | [string](#string) | | InitialInflation is the rate at which inflation starts at genesis. It is a decimal value in the range [0.0, 100.0]. | - | `variance` | [string](#string) | | Variance defines the fraction by which inflation can vary from ideal inflation in a block. It is a decimal value in the range [0.0, 1.0]. | + | `id` | [akash.market.v1.LeaseID](#akash.market.v1.LeaseID) | | | - - - - - + + - + ### QueryLeaseResponse + QueryLeaseResponse is response type for the Query/Lease RPC method + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `lease` | [akash.market.v1.Lease](#akash.market.v1.Lease) | | | + | `escrow_payment` | [akash.escrow.v1.FractionalPayment](#akash.escrow.v1.FractionalPayment) | | | + - -

Top

- ## akash/inflation/v1beta2/genesis.proto - + - ### GenesisState - GenesisState stores slice of genesis deployment instance + ### QueryLeasesRequest + QueryLeasesRequest is request type for the Query/Leases RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `params` | [Params](#akash.inflation.v1beta2.Params) | | | + | `filters` | [akash.market.v1.LeaseFilters](#akash.market.v1.LeaseFilters) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | - - - - - + + - + ### QueryLeasesResponse + QueryLeasesResponse is response type for the Query/Leases RPC method + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `leases` | [QueryLeaseResponse](#akash.market.v1beta5.QueryLeaseResponse) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + - -

Top

- ## akash/inflation/v1beta2/params.proto - + - ### Params - Params defines the parameters for the x/deployment package + ### QueryOrderRequest + QueryOrderRequest is request type for the Query/Order RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `inflation_decay_factor` | [string](#string) | | InflationDecayFactor is the number of years it takes inflation to halve. | - | `initial_inflation` | [string](#string) | | InitialInflation is the rate at which inflation starts at genesis. It is a decimal value in the range [0.0, 100.0]. | - | `variance` | [string](#string) | | Variance defines the fraction by which inflation can vary from ideal inflation in a block. It is a decimal value in the range [0.0, 1.0]. | + | `id` | [akash.market.v1.OrderID](#akash.market.v1.OrderID) | | | - - - - - + + - + ### QueryOrderResponse + QueryOrderResponse is response type for the Query/Order RPC method + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `order` | [Order](#akash.market.v1beta5.Order) | | | + - -

Top

- ## akash/base/v1beta3/memory.proto - + - ### Memory - Memory stores resource quantity and memory attributes + ### QueryOrdersRequest + QueryOrdersRequest is request type for the Query/Orders RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `quantity` | [ResourceValue](#akash.base.v1beta3.ResourceValue) | | | - | `attributes` | [Attribute](#akash.base.v1beta3.Attribute) | repeated | | + | `filters` | [OrderFilters](#akash.market.v1beta5.OrderFilters) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | - + + - + ### QueryOrdersResponse + QueryOrdersResponse is response type for the Query/Orders RPC method - + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `orders` | [Order](#akash.market.v1beta5.Order) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + + - + + + + ### QueryParamsRequest + QueryParamsRequest is the request type for the Query/Params RPC method. + - -

Top

- ## akash/base/v1beta3/cpu.proto - + - ### CPU - CPU stores resource units and cpu config attributes + ### QueryParamsResponse + QueryParamsResponse is the response type for the Query/Params RPC method. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `units` | [ResourceValue](#akash.base.v1beta3.ResourceValue) | | | - | `attributes` | [Attribute](#akash.base.v1beta3.Attribute) | repeated | | + | `params` | [Params](#akash.market.v1beta5.Params) | | params defines the parameters of the module. | @@ -7896,35 +3793,30 @@ A successful bid becomes a Lease(ID). - - - - -

Top

+ - ## akash/base/v1beta3/resources.proto - + ### Query + Query defines the gRPC querier service + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `Orders` | [QueryOrdersRequest](#akash.market.v1beta5.QueryOrdersRequest) | [QueryOrdersResponse](#akash.market.v1beta5.QueryOrdersResponse) | Orders queries orders with filters | GET|/akash/market/v1beta5/orders/list| + | `Order` | [QueryOrderRequest](#akash.market.v1beta5.QueryOrderRequest) | [QueryOrderResponse](#akash.market.v1beta5.QueryOrderResponse) | Order queries order details | GET|/akash/market/v1beta5/orders/info| + | `Bids` | [QueryBidsRequest](#akash.market.v1beta5.QueryBidsRequest) | [QueryBidsResponse](#akash.market.v1beta5.QueryBidsResponse) | Bids queries bids with filters | GET|/akash/market/v1beta5/bids/list| + | `Bid` | [QueryBidRequest](#akash.market.v1beta5.QueryBidRequest) | [QueryBidResponse](#akash.market.v1beta5.QueryBidResponse) | Bid queries bid details | GET|/akash/market/v1beta5/bids/info| + | `Leases` | [QueryLeasesRequest](#akash.market.v1beta5.QueryLeasesRequest) | [QueryLeasesResponse](#akash.market.v1beta5.QueryLeasesResponse) | Leases queries leases with filters | GET|/akash/market/v1beta5/leases/list| + | `Lease` | [QueryLeaseRequest](#akash.market.v1beta5.QueryLeaseRequest) | [QueryLeaseResponse](#akash.market.v1beta5.QueryLeaseResponse) | Lease queries lease details | GET|/akash/market/v1beta5/leases/info| + | `Params` | [QueryParamsRequest](#akash.market.v1beta5.QueryParamsRequest) | [QueryParamsResponse](#akash.market.v1beta5.QueryParamsResponse) | Params returns the total set of minting parameters. | GET|/akash/market/v1beta5/params| - - - ### Resources - Resources describes all available resources types for deployment/node etc -if field is nil resource is not present in the given data-structure + - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `id` | [uint32](#uint32) | | | - | `cpu` | [CPU](#akash.base.v1beta3.CPU) | | | - | `memory` | [Memory](#akash.base.v1beta3.Memory) | | | - | `storage` | [Storage](#akash.base.v1beta3.Storage) | repeated | | - | `gpu` | [GPU](#akash.base.v1beta3.GPU) | | | - | `endpoints` | [Endpoint](#akash.base.v1beta3.Endpoint) | repeated | | - + +
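Since the query service above is also exposed over REST through grpc-gateway, a small sketch of calling the documented `GET /akash/market/v1beta5/orders/list` route may help. The node URL, the dotted query-parameter names (`filters.state`, `pagination.limit`), and the decoded response fields are assumptions based on the tables above and on grpc-gateway's usual flattening of nested request fields; this is not a verified client.

```go
// Illustrative only: fetch the documented orders/list REST route and count
// the returned orders. Endpoint and query-parameter names are assumptions.
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	base := "http://localhost:1317" // hypothetical node REST endpoint
	q := url.Values{}
	q.Set("filters.state", "open")  // assumed mapping of OrderFilters.state
	q.Set("pagination.limit", "10") // assumed mapping of PageRequest.limit

	resp, err := http.Get(base + "/akash/market/v1beta5/orders/list?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)

	// Decode only the top-level field documented for QueryOrdersResponse.
	var out struct {
		Orders []json.RawMessage `json:"orders"`
	}
	if err := json.Unmarshal(body, &out); err != nil {
		panic(err)
	}
	fmt.Printf("fetched %d orders\n", len(out.Orders))
}
```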

Top

+ ## akash/market/v1beta5/service.proto @@ -7933,62 +3825,73 @@ if field is nil resource is not present in the given data-structure + + + + ### Msg + Msg defines the market Msg service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `CreateBid` | [MsgCreateBid](#akash.market.v1beta5.MsgCreateBid) | [MsgCreateBidResponse](#akash.market.v1beta5.MsgCreateBidResponse) | CreateBid defines a method to create a bid given proper inputs. | | + | `CloseBid` | [MsgCloseBid](#akash.market.v1beta5.MsgCloseBid) | [MsgCloseBidResponse](#akash.market.v1beta5.MsgCloseBidResponse) | CloseBid defines a method to close a bid given proper inputs. | | + | `WithdrawLease` | [MsgWithdrawLease](#akash.market.v1beta5.MsgWithdrawLease) | [MsgWithdrawLeaseResponse](#akash.market.v1beta5.MsgWithdrawLeaseResponse) | WithdrawLease withdraws accrued funds from the lease payment | | + | `CreateLease` | [MsgCreateLease](#akash.market.v1beta5.MsgCreateLease) | [MsgCreateLeaseResponse](#akash.market.v1beta5.MsgCreateLeaseResponse) | CreateLease creates a new lease | | + | `CloseLease` | [MsgCloseLease](#akash.market.v1beta5.MsgCloseLease) | [MsgCloseLeaseResponse](#akash.market.v1beta5.MsgCloseLeaseResponse) | CloseLease defines a method to close an order given proper inputs. | | + | `UpdateParams` | [MsgUpdateParams](#akash.market.v1beta5.MsgUpdateParams) | [MsgUpdateParamsResponse](#akash.market.v1beta5.MsgUpdateParamsResponse) | UpdateParams defines a governance operation for updating the x/market module parameters. The authority is hard-coded to the x/gov module account. + +Since: akash v1.0.0 | | + - +

Top

- ## akash/base/v1beta3/attribute.proto + ## akash/provider/v1beta4/event.proto - + - ### Attribute - Attribute represents key value pair + ### EventProviderCreated + EventProviderCreated defines an SDK message for provider created event | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `key` | [string](#string) | | | - | `value` | [string](#string) | | | + | `owner` | [string](#string) | | | - + - ### PlacementRequirements - PlacementRequirements + ### EventProviderDeleted + EventProviderDeleted defines an SDK message for provider deleted event | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `signed_by` | [SignedBy](#akash.base.v1beta3.SignedBy) | | SignedBy list of keys that tenants expect to have signatures from | - | `attributes` | [Attribute](#akash.base.v1beta3.Attribute) | repeated | Attribute list of attributes tenant expects from the provider | + | `owner` | [string](#string) | | | - + - ### SignedBy - SignedBy represents validation accounts that tenant expects signatures for provider attributes -AllOf has precedence i.e. if there is at least one entry AnyOf is ignored regardless to how many -entries there -this behaviour to be discussed + ### EventProviderUpdated + EventProviderUpdated defines an SDK message for provider updated event | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `all_of` | [string](#string) | repeated | all_of all keys in this list must have signed attributes | - | `any_of` | [string](#string) | repeated | any_of at least of of the keys from the list must have signed attributes | + | `owner` | [string](#string) | | | @@ -8004,43 +3907,48 @@ this behaviour to be discussed - +

Top

- ## akash/base/v1beta3/endpoint.proto + ## akash/provider/v1beta4/provider.proto - + - ### Endpoint - Endpoint describes a publicly accessible IP service + ### Info + Info | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `kind` | [Endpoint.Kind](#akash.base.v1beta3.Endpoint.Kind) | | | - | `sequence_number` | [uint32](#uint32) | | | + | `email` | [string](#string) | | | + | `website` | [string](#string) | | | - - - + - ### Endpoint.Kind - This describes how the endpoint is implemented when the lease is deployed + ### Provider + Provider stores owner and host details + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `host_uri` | [string](#string) | | | + | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | | + | `info` | [Info](#akash.provider.v1beta4.Info) | | | + + - | Name | Number | Description | - | ---- | ------ | ----------- | - | SHARED_HTTP | 0 | Describes an endpoint that becomes a Kubernetes Ingress | - | RANDOM_PORT | 1 | Describes an endpoint that becomes a Kubernetes NodePort | - | LEASED_IP | 2 | Describes an endpoint that becomes a leased IP | + + @@ -8049,23 +3957,22 @@ this behaviour to be discussed - +

Top

- ## akash/base/v1beta3/gpu.proto + ## akash/provider/v1beta4/genesis.proto - + - ### GPU - GPU stores resource units and cpu config attributes + ### GenesisState + GenesisState defines the basic genesis state used by provider module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `units` | [ResourceValue](#akash.base.v1beta3.ResourceValue) | | | - | `attributes` | [Attribute](#akash.base.v1beta3.Attribute) | repeated | | + | `providers` | [Provider](#akash.provider.v1beta4.Provider) | repeated | | @@ -8081,91 +3988,89 @@ this behaviour to be discussed - +

Top

- ## akash/base/v1beta3/storage.proto + ## akash/provider/v1beta4/msg.proto - + - ### Storage - Storage stores resource quantity and storage attributes + ### MsgCreateProvider + MsgCreateProvider defines an SDK message for creating a provider | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `name` | [string](#string) | | | - | `quantity` | [ResourceValue](#akash.base.v1beta3.ResourceValue) | | | - | `attributes` | [Attribute](#akash.base.v1beta3.Attribute) | repeated | | + | `owner` | [string](#string) | | | + | `host_uri` | [string](#string) | | | + | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | | + | `info` | [Info](#akash.provider.v1beta4.Info) | | | - - - - - + + - + ### MsgCreateProviderResponse + MsgCreateProviderResponse defines the Msg/CreateProvider response type. - - -
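Below is a short, hypothetical sketch of populating the `MsgCreateProvider` fields listed above (owner, host_uri, attributes, info), including a basic sanity check that `host_uri` parses as a URI. The struct definitions are local stand-ins for illustration rather than the generated provider types, and the addresses, URIs, and attribute values are placeholders.

```go
// Illustrative only: hand-rolled structs mirroring the documented
// MsgCreateProvider fields; NOT the generated provider types.
package main

import (
	"encoding/json"
	"fmt"
	"net/url"
)

type Attribute struct {
	Key   string `json:"key"`
	Value string `json:"value"`
}

type Info struct {
	Email   string `json:"email"`
	Website string `json:"website"`
}

type MsgCreateProvider struct {
	Owner      string      `json:"owner"`
	HostURI    string      `json:"host_uri"`
	Attributes []Attribute `json:"attributes"`
	Info       Info        `json:"info"`
}

func main() {
	msg := MsgCreateProvider{
		Owner:   "akash1provider...", // placeholder bech32 address
		HostURI: "https://provider.example.com:8443",
		Attributes: []Attribute{
			{Key: "region", Value: "us-west"}, // example attribute pair
		},
		Info: Info{Email: "ops@example.com", Website: "https://example.com"},
	}

	// host_uri should be a well-formed URI before broadcasting the message.
	if _, err := url.ParseRequestURI(msg.HostURI); err != nil {
		panic(fmt.Errorf("invalid host_uri: %w", err))
	}

	out, _ := json.MarshalIndent(msg, "", "  ")
	fmt.Println(string(out))
}
```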

Top

- ## akash/base/v1beta3/resourcevalue.proto - + - ### ResourceValue - Unit stores cpu, memory and storage metrics + ### MsgDeleteProvider + MsgDeleteProvider defines an SDK message for deleting a provider | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `val` | [bytes](#bytes) | | | + | `owner` | [string](#string) | | | - - - - - + + - + ### MsgDeleteProviderResponse + MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. - - -

Top

- ## akash/base/v1beta2/resourceunits.proto - + - ### ResourceUnits - ResourceUnits describes all available resources types for deployment/node etc -if field is nil resource is not present in the given data-structure + ### MsgUpdateProvider + MsgUpdateProvider defines an SDK message for updating a provider | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `cpu` | [CPU](#akash.base.v1beta2.CPU) | | | - | `memory` | [Memory](#akash.base.v1beta2.Memory) | | | - | `storage` | [Storage](#akash.base.v1beta2.Storage) | repeated | | - | `endpoints` | [Endpoint](#akash.base.v1beta2.Endpoint) | repeated | | + | `owner` | [string](#string) | | | + | `host_uri` | [string](#string) | | | + | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | | + | `info` | [Info](#akash.provider.v1beta4.Info) | | | + + + + + + + + ### MsgUpdateProviderResponse + MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. + @@ -8180,90 +4085,68 @@ if field is nil resource is not present in the given data-structure - +

Top

- ## akash/base/v1beta2/attribute.proto + ## akash/provider/v1beta4/query.proto - + - ### Attribute - Attribute represents key value pair + ### QueryProviderRequest + QueryProviderRequest is request type for the Query/Provider RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `key` | [string](#string) | | | - | `value` | [string](#string) | | | + | `owner` | [string](#string) | | | - + - ### PlacementRequirements - PlacementRequirements + ### QueryProviderResponse + QueryProviderResponse is response type for the Query/Provider RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `signed_by` | [SignedBy](#akash.base.v1beta2.SignedBy) | | SignedBy list of keys that tenants expect to have signatures from | - | `attributes` | [Attribute](#akash.base.v1beta2.Attribute) | repeated | Attribute list of attributes tenant expects from the provider | + | `provider` | [Provider](#akash.provider.v1beta4.Provider) | | | - + - ### SignedBy - SignedBy represents validation accounts that tenant expects signatures for provider attributes -AllOf has precedence i.e. if there is at least one entry AnyOf is ignored regardless to how many -entries there -this behaviour to be discussed + ### QueryProvidersRequest + QueryProvidersRequest is request type for the Query/Providers RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `all_of` | [string](#string) | repeated | all_of all keys in this list must have signed attributes | - | `any_of` | [string](#string) | repeated | any_of at least of of the keys from the list must have signed attributes | - - - - - - - - - - - - - + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | - -

Top

- ## akash/base/v1beta2/endpoint.proto - + - ### Endpoint - Endpoint describes a publicly accessible IP service + ### QueryProvidersResponse + QueryProvidersResponse is response type for the Query/Providers RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `kind` | [Endpoint.Kind](#akash.base.v1beta2.Endpoint.Kind) | | | - | `sequence_number` | [uint32](#uint32) | | | + | `providers` | [Provider](#akash.provider.v1beta4.Provider) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | @@ -8271,77 +4154,69 @@ this behaviour to be discussed - - + - ### Endpoint.Kind - This describes how the endpoint is implemented when the lease is deployed + - | Name | Number | Description | - | ---- | ------ | ----------- | - | SHARED_HTTP | 0 | Describes an endpoint that becomes a Kubernetes Ingress | - | RANDOM_PORT | 1 | Describes an endpoint that becomes a Kubernetes NodePort | - | LEASED_IP | 2 | Describes an endpoint that becomes a leased IP | + - - - + ### Query + Query defines the gRPC querier service + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `Providers` | [QueryProvidersRequest](#akash.provider.v1beta4.QueryProvidersRequest) | [QueryProvidersResponse](#akash.provider.v1beta4.QueryProvidersResponse) | Providers queries providers | GET|/akash/provider/v1beta4/providers| + | `Provider` | [QueryProviderRequest](#akash.provider.v1beta4.QueryProviderRequest) | [QueryProviderResponse](#akash.provider.v1beta4.QueryProviderResponse) | Provider queries provider details | GET|/akash/provider/v1beta4/providers/{owner}| + - +
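For the provider query service above, the `Provider` RPC is exposed at `GET /akash/provider/v1beta4/providers/{owner}`. The minimal sketch below fetches a single provider and decodes only a couple of the fields documented in `QueryProviderResponse`; the node URL and owner address are placeholders, and the client is an assumed usage pattern rather than tested code.

```go
// Illustrative only: fetch one provider over the documented REST route and
// read two of the documented Provider fields.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	base := "http://localhost:1317" // hypothetical node REST endpoint
	owner := "akash1provider..."    // placeholder owner address

	resp, err := http.Get(base + "/akash/provider/v1beta4/providers/" + owner)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out struct {
		Provider struct {
			Owner   string `json:"owner"`
			HostURI string `json:"host_uri"`
		} `json:"provider"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Provider.Owner, out.Provider.HostURI)
}
```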

Top

- ## akash/base/v1beta2/resource.proto - - + ## akash/provider/v1beta4/service.proto - - ### CPU - CPU stores resource units and cpu config attributes + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `units` | [ResourceValue](#akash.base.v1beta2.ResourceValue) | | | - | `attributes` | [Attribute](#akash.base.v1beta2.Attribute) | repeated | | - - + - + - + - ### Memory - Memory stores resource quantity and memory attributes + ### Msg + Msg defines the provider Msg service + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `CreateProvider` | [MsgCreateProvider](#akash.provider.v1beta4.MsgCreateProvider) | [MsgCreateProviderResponse](#akash.provider.v1beta4.MsgCreateProviderResponse) | CreateProvider defines a method that creates a provider given the proper inputs | | + | `UpdateProvider` | [MsgUpdateProvider](#akash.provider.v1beta4.MsgUpdateProvider) | [MsgUpdateProviderResponse](#akash.provider.v1beta4.MsgUpdateProviderResponse) | UpdateProvider defines a method that updates a provider given the proper inputs | | + | `DeleteProvider` | [MsgDeleteProvider](#akash.provider.v1beta4.MsgDeleteProvider) | [MsgDeleteProviderResponse](#akash.provider.v1beta4.MsgDeleteProviderResponse) | DeleteProvider defines a method that deletes a provider given the proper inputs | | - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `quantity` | [ResourceValue](#akash.base.v1beta2.ResourceValue) | | | - | `attributes` | [Attribute](#akash.base.v1beta2.Attribute) | repeated | | + + + +

Top

+ ## akash/staking/v1beta3/params.proto - + - ### Storage - Storage stores resource quantity and storage attributes + ### Params + Params extends the parameters for the x/staking module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `name` | [string](#string) | | | - | `quantity` | [ResourceValue](#akash.base.v1beta2.ResourceValue) | | | - | `attributes` | [Attribute](#akash.base.v1beta2.Attribute) | repeated | | + | `min_commission_rate` | [string](#string) | | min_commission_rate is the chain-wide minimum commission rate that a validator can charge their delegators | @@ -8357,22 +4232,22 @@ this behaviour to be discussed - +

Top

- ## akash/base/v1beta2/resourcevalue.proto + ## akash/staking/v1beta3/genesis.proto - + - ### ResourceValue - Unit stores cpu, memory and storage metrics + ### GenesisState + GenesisState stores slice of genesis deployment instance | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `val` | [bytes](#bytes) | | | + | `params` | [Params](#akash.staking.v1beta3.Params) | | | @@ -8388,60 +4263,42 @@ this behaviour to be discussed - +

Top

- ## akash/base/v1beta1/attribute.proto + ## akash/staking/v1beta3/paramsmsg.proto - + - ### Attribute - Attribute represents key value pair + ### MsgUpdateParams + MsgUpdateParams is the Msg/UpdateParams request type. + +Since: akash v1.0.0 | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `key` | [string](#string) | | | - | `value` | [string](#string) | | | - - + | `authority` | [string](#string) | | authority is the address of the governance account. | + | `params` | [Params](#akash.staking.v1beta3.Params) | | params defines the x/deployment parameters to update. +NOTE: All parameters must be supplied. | - - - - ### PlacementRequirements - PlacementRequirements - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `signed_by` | [SignedBy](#akash.base.v1beta1.SignedBy) | | SignedBy list of keys that tenants expect to have signatures from | - | `attributes` | [Attribute](#akash.base.v1beta1.Attribute) | repeated | Attribute list of attributes tenant expects from the provider | - - + - - + ### MsgUpdateParamsResponse + MsgUpdateParamsResponse defines the response structure for executing a +MsgUpdateParams message. - ### SignedBy - SignedBy represents validation accounts that tenant expects signatures for provider attributes -AllOf has precedence i.e. if there is at least one entry AnyOf is ignored regardless to how many -entries there -this behaviour to be discussed +Since: akash v1.0.0 - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `all_of` | [string](#string) | repeated | all_of all keys in this list must have signed attributes | - | `any_of` | [string](#string) | repeated | any_of at least of of the keys from the list must have signed attributes | - - @@ -8455,22 +4312,32 @@ this behaviour to be discussed - +

Top

- ## akash/base/v1beta1/endpoint.proto + ## akash/staking/v1beta3/query.proto - + - ### Endpoint - Endpoint describes a publicly accessible IP service + ### QueryParamsRequest + QueryParamsRequest is the request type for the Query/Params RPC method. + + + + + + + + + ### QueryParamsResponse + QueryParamsResponse is the response type for the Query/Params RPC method. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `kind` | [Endpoint.Kind](#akash.base.v1beta1.Endpoint.Kind) | | | + | `params` | [Params](#akash.staking.v1beta3.Params) | | params defines the parameters of the module. | @@ -8478,94 +4345,85 @@ this behaviour to be discussed - - + - ### Endpoint.Kind - This describes how the endpoint is implemented when the lease is deployed + - | Name | Number | Description | - | ---- | ------ | ----------- | - | SHARED_HTTP | 0 | Describes an endpoint that becomes a Kubernetes Ingress | - | RANDOM_PORT | 1 | Describes an endpoint that becomes a Kubernetes NodePort | + - - - + ### Query + Query defines the gRPC querier service + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `Params` | [QueryParamsRequest](#akash.staking.v1beta3.QueryParamsRequest) | [QueryParamsResponse](#akash.staking.v1beta3.QueryParamsResponse) | Params returns the total set of minting parameters. | GET|/akash/staking/v1beta3/params| + - +

Top

- ## akash/base/v1beta1/resource.proto + ## akash/staking/v1beta3/service.proto - - + - ### CPU - CPU stores resource units and cpu config attributes + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `units` | [ResourceValue](#akash.base.v1beta1.ResourceValue) | | | - | `attributes` | [Attribute](#akash.base.v1beta1.Attribute) | repeated | | - - + + - - + ### Msg + Msg defines the market Msg service - ### Memory - Memory stores resource quantity and memory attributes + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `UpdateParams` | [MsgUpdateParams](#akash.staking.v1beta3.MsgUpdateParams) | [MsgUpdateParamsResponse](#akash.staking.v1beta3.MsgUpdateParamsResponse) | UpdateParams defines a governance operation for updating the x/market module parameters. The authority is hard-coded to the x/gov module account. +Since: akash v1.0.0 | | - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `quantity` | [ResourceValue](#akash.base.v1beta1.ResourceValue) | | | - | `attributes` | [Attribute](#akash.base.v1beta1.Attribute) | repeated | | + + + +

Top

+ ## akash/take/v1/params.proto - + - ### ResourceUnits - ResourceUnits describes all available resources types for deployment/node etc -if field is nil resource is not present in the given data-structure + ### DenomTakeRate + DenomTakeRate describes take rate for specified denom | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `cpu` | [CPU](#akash.base.v1beta1.CPU) | | | - | `memory` | [Memory](#akash.base.v1beta1.Memory) | | | - | `storage` | [Storage](#akash.base.v1beta1.Storage) | | | - | `endpoints` | [Endpoint](#akash.base.v1beta1.Endpoint) | repeated | | + | `denom` | [string](#string) | | | + | `rate` | [uint32](#uint32) | | | - + - ### Storage - Storage stores resource quantity and storage attributes + ### Params + Params defines the parameters for the x/take package | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `quantity` | [ResourceValue](#akash.base.v1beta1.ResourceValue) | | | - | `attributes` | [Attribute](#akash.base.v1beta1.Attribute) | repeated | | + | `denom_take_rates` | [DenomTakeRate](#akash.take.v1.DenomTakeRate) | repeated | denom -> % take rate | + | `default_take_rate` | [uint32](#uint32) | | | @@ -8581,22 +4439,22 @@ if field is nil resource is not present in the given data-structure - +
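The `denom -> % take rate` mapping above implies a per-denom lookup with `default_take_rate` as the fallback. A small hypothetical sketch of that lookup follows; the denoms and rate values are made up, and treating `rate` as whole percentage points is an assumption based on the field comment, not a confirmed unit.

```go
// Illustrative only: resolve the take rate for a denom from the documented
// x/take Params shape, falling back to default_take_rate when unlisted.
package main

import "fmt"

type DenomTakeRate struct {
	Denom string
	Rate  uint32 // assumed whole percentage points, per the "% take rate" comment
}

type Params struct {
	DenomTakeRates  []DenomTakeRate
	DefaultTakeRate uint32
}

// takeRateFor returns the configured rate for denom, or the default.
func takeRateFor(p Params, denom string) uint32 {
	for _, r := range p.DenomTakeRates {
		if r.Denom == denom {
			return r.Rate
		}
	}
	return p.DefaultTakeRate
}

func main() {
	p := Params{
		DenomTakeRates:  []DenomTakeRate{{Denom: "uakt", Rate: 2}}, // hypothetical values
		DefaultTakeRate: 20,
	}
	fmt.Println(takeRateFor(p, "uakt"))  // 2
	fmt.Println(takeRateFor(p, "uusdc")) // 20 (fallback to default_take_rate)
}
```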

Top

- ## akash/base/v1beta1/resourcevalue.proto + ## akash/take/v1/genesis.proto - + - ### ResourceValue - Unit stores cpu, memory and storage metrics + ### GenesisState + GenesisState stores slice of genesis deployment instance | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `val` | [bytes](#bytes) | | | + | `params` | [Params](#akash.take.v1.Params) | | | @@ -8612,23 +4470,41 @@ if field is nil resource is not present in the given data-structure - +

Top

- ## akash/gov/v1beta3/genesis.proto + ## akash/take/v1/paramsmsg.proto - + - ### GenesisState - GenesisState stores slice of genesis deployment instance + ### MsgUpdateParams + MsgUpdateParams is the Msg/UpdateParams request type. + +Since: akash v1.0.0 | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deposit_params` | [DepositParams](#akash.gov.v1beta3.DepositParams) | | | + | `authority` | [string](#string) | | authority is the address of the governance account. | + | `params` | [Params](#akash.take.v1.Params) | | params defines the x/deployment parameters to update. + +NOTE: All parameters must be supplied. | + + + + + + + + ### MsgUpdateParamsResponse + MsgUpdateParamsResponse defines the response structure for executing a +MsgUpdateParams message. + +Since: akash v1.0.0 + @@ -8643,25 +4519,61 @@ if field is nil resource is not present in the given data-structure - +

Top

- ## akash/gov/v1beta3/params.proto + ## akash/take/v1/query.proto - + - ### DepositParams - DepositParams defines the parameters for the x/gov module + ### QueryParamsRequest + QueryParamsRequest is the request type for the Query/Params RPC method. + + + + + + + + + ### QueryParamsResponse + QueryParamsResponse is the response type for the Query/Params RPC method. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `min_initial_deposit_rate` | [bytes](#bytes) | | min_initial_deposit_rate minimum % of TotalDeposit author of the proposal must put in order for proposal tx to be committed | + | `params` | [Params](#akash.take.v1.Params) | | params defines the parameters of the module. | + + + + + + + + + + + + + + + ### Query + Query defines the gRPC querier service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `Params` | [QueryParamsRequest](#akash.take.v1.QueryParamsRequest) | [QueryParamsResponse](#akash.take.v1.QueryParamsResponse) | Params returns the total set of minting parameters. | GET|/akash/take/v1/params| + + + + +

Top

+ ## akash/take/v1/service.proto @@ -8670,6 +4582,18 @@ if field is nil resource is not present in the given data-structure + + + + ### Msg + Msg defines the market Msg service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `UpdateParams` | [MsgUpdateParams](#akash.take.v1.MsgUpdateParams) | [MsgUpdateParamsResponse](#akash.take.v1.MsgUpdateParamsResponse) | UpdateParams defines a governance operation for updating the x/market module parameters. The authority is hard-coded to the x/gov module account. + +Since: akash v1.0.0 | | + diff --git a/docs/proto/provider.md b/docs/proto/provider.md index 853de01c..2af7eee7 100644 --- a/docs/proto/provider.md +++ b/docs/proto/provider.md @@ -4,34 +4,69 @@ ## Table of Contents - - [akash/manifest/v2beta1/group.proto](#akash/manifest/v2beta1/group.proto) - - [Group](#akash.manifest.v2beta1.Group) + - [akash/inventory/v1/resourcepair.proto](#akash/inventory/v1/resourcepair.proto) + - [ResourcePair](#akash.inventory.v1.ResourcePair) + + - [akash/inventory/v1/cpu.proto](#akash/inventory/v1/cpu.proto) + - [CPU](#akash.inventory.v1.CPU) + - [CPUInfo](#akash.inventory.v1.CPUInfo) + + - [akash/inventory/v1/gpu.proto](#akash/inventory/v1/gpu.proto) + - [GPU](#akash.inventory.v1.GPU) + - [GPUInfo](#akash.inventory.v1.GPUInfo) + + - [akash/inventory/v1/memory.proto](#akash/inventory/v1/memory.proto) + - [Memory](#akash.inventory.v1.Memory) + - [MemoryInfo](#akash.inventory.v1.MemoryInfo) + + - [akash/inventory/v1/resources.proto](#akash/inventory/v1/resources.proto) + - [NodeResources](#akash.inventory.v1.NodeResources) + + - [akash/inventory/v1/node.proto](#akash/inventory/v1/node.proto) + - [Node](#akash.inventory.v1.Node) + - [NodeCapabilities](#akash.inventory.v1.NodeCapabilities) - - [akash/manifest/v2beta1/httpoptions.proto](#akash/manifest/v2beta1/httpoptions.proto) - - [ServiceExposeHTTPOptions](#akash.manifest.v2beta1.ServiceExposeHTTPOptions) + - [akash/inventory/v1/storage.proto](#akash/inventory/v1/storage.proto) + - [Storage](#akash.inventory.v1.Storage) + - [StorageInfo](#akash.inventory.v1.StorageInfo) + + - [akash/inventory/v1/cluster.proto](#akash/inventory/v1/cluster.proto) + - [Cluster](#akash.inventory.v1.Cluster) + + - [akash/inventory/v1/service.proto](#akash/inventory/v1/service.proto) + - [ClusterRPC](#akash.inventory.v1.ClusterRPC) + - [NodeRPC](#akash.inventory.v1.NodeRPC) - - [akash/manifest/v2beta1/serviceexpose.proto](#akash/manifest/v2beta1/serviceexpose.proto) - - [ServiceExpose](#akash.manifest.v2beta1.ServiceExpose) + - [akash/manifest/v2beta3/httpoptions.proto](#akash/manifest/v2beta3/httpoptions.proto) + - [ServiceExposeHTTPOptions](#akash.manifest.v2beta3.ServiceExposeHTTPOptions) - - [akash/manifest/v2beta1/service.proto](#akash/manifest/v2beta1/service.proto) - - [Service](#akash.manifest.v2beta1.Service) - - [ServiceParams](#akash.manifest.v2beta1.ServiceParams) - - [StorageParams](#akash.manifest.v2beta1.StorageParams) + - [akash/manifest/v2beta3/serviceexpose.proto](#akash/manifest/v2beta3/serviceexpose.proto) + - [ServiceExpose](#akash.manifest.v2beta3.ServiceExpose) - - [akash/manifest/v2beta2/group.proto](#akash/manifest/v2beta2/group.proto) - - [Group](#akash.manifest.v2beta2.Group) + - [akash/manifest/v2beta3/service.proto](#akash/manifest/v2beta3/service.proto) + - [ImageCredentials](#akash.manifest.v2beta3.ImageCredentials) + - [Service](#akash.manifest.v2beta3.Service) + - 
[ServiceParams](#akash.manifest.v2beta3.ServiceParams) + - [StorageParams](#akash.manifest.v2beta3.StorageParams) - - [akash/manifest/v2beta2/httpoptions.proto](#akash/manifest/v2beta2/httpoptions.proto) - - [ServiceExposeHTTPOptions](#akash.manifest.v2beta2.ServiceExposeHTTPOptions) + - [akash/manifest/v2beta3/group.proto](#akash/manifest/v2beta3/group.proto) + - [Group](#akash.manifest.v2beta3.Group) - - [akash/manifest/v2beta2/serviceexpose.proto](#akash/manifest/v2beta2/serviceexpose.proto) - - [ServiceExpose](#akash.manifest.v2beta2.ServiceExpose) + - [akash/provider/lease/v1/service.proto](#akash/provider/lease/v1/service.proto) + - [ForwarderPortStatus](#akash.provider.lease.v1.ForwarderPortStatus) + - [LeaseIPStatus](#akash.provider.lease.v1.LeaseIPStatus) + - [LeaseServiceStatus](#akash.provider.lease.v1.LeaseServiceStatus) + - [SendManifestRequest](#akash.provider.lease.v1.SendManifestRequest) + - [SendManifestResponse](#akash.provider.lease.v1.SendManifestResponse) + - [ServiceLogs](#akash.provider.lease.v1.ServiceLogs) + - [ServiceLogsRequest](#akash.provider.lease.v1.ServiceLogsRequest) + - [ServiceLogsResponse](#akash.provider.lease.v1.ServiceLogsResponse) + - [ServiceStatus](#akash.provider.lease.v1.ServiceStatus) + - [ServiceStatusRequest](#akash.provider.lease.v1.ServiceStatusRequest) + - [ServiceStatusResponse](#akash.provider.lease.v1.ServiceStatusResponse) + - [ShellRequest](#akash.provider.lease.v1.ShellRequest) - - [akash/manifest/v2beta2/service.proto](#akash/manifest/v2beta2/service.proto) - - [Service](#akash.manifest.v2beta2.Service) - - [ServiceImageCredentials](#akash.manifest.v2beta2.ServiceImageCredentials) - - [ServiceParams](#akash.manifest.v2beta2.ServiceParams) - - [StorageParams](#akash.manifest.v2beta2.StorageParams) + - [LeaseRPC](#akash.provider.lease.v1.LeaseRPC) - [akash/provider/v1/status.proto](#akash/provider/v1/status.proto) - [BidEngineStatus](#akash.provider.v1.BidEngineStatus) @@ -48,60 +83,28 @@ - [akash/provider/v1/service.proto](#akash/provider/v1/service.proto) - [ProviderRPC](#akash.provider.v1.ProviderRPC) - - [akash/inventory/v1/memory.proto](#akash/inventory/v1/memory.proto) - - [Memory](#akash.inventory.v1.Memory) - - [MemoryInfo](#akash.inventory.v1.MemoryInfo) - - - [akash/inventory/v1/cpu.proto](#akash/inventory/v1/cpu.proto) - - [CPU](#akash.inventory.v1.CPU) - - [CPUInfo](#akash.inventory.v1.CPUInfo) - - - [akash/inventory/v1/cluster.proto](#akash/inventory/v1/cluster.proto) - - [Cluster](#akash.inventory.v1.Cluster) - - - [akash/inventory/v1/resources.proto](#akash/inventory/v1/resources.proto) - - [NodeResources](#akash.inventory.v1.NodeResources) - - - [akash/inventory/v1/node.proto](#akash/inventory/v1/node.proto) - - [Node](#akash.inventory.v1.Node) - - [NodeCapabilities](#akash.inventory.v1.NodeCapabilities) - - - [akash/inventory/v1/resourcepair.proto](#akash/inventory/v1/resourcepair.proto) - - [ResourcePair](#akash.inventory.v1.ResourcePair) - - - [akash/inventory/v1/gpu.proto](#akash/inventory/v1/gpu.proto) - - [GPU](#akash.inventory.v1.GPU) - - [GPUInfo](#akash.inventory.v1.GPUInfo) - - - [akash/inventory/v1/storage.proto](#akash/inventory/v1/storage.proto) - - [Storage](#akash.inventory.v1.Storage) - - [StorageInfo](#akash.inventory.v1.StorageInfo) - - - [akash/inventory/v1/service.proto](#akash/inventory/v1/service.proto) - - [ClusterRPC](#akash.inventory.v1.ClusterRPC) - - [NodeRPC](#akash.inventory.v1.NodeRPC) - - [Scalar Value Types](#scalar-value-types) - +

Top

- ## akash/manifest/v2beta1/group.proto
+ ## akash/inventory/v1/resourcepair.proto



-
+


- ### Group
- Group store name and list of services
+ ### ResourcePair
+ ResourcePair extends resource.Quantity to provide total and available units of the resource


 | Field | Type | Label | Description |
 | ----- | ---- | ----- | ----------- |
- | `name` | [string](#string) | | |
- | `services` | [Service](#akash.manifest.v2beta1.Service) | repeated | |
+ | `allocatable` | [k8s.io.apimachinery.pkg.api.resource.Quantity](#k8s.io.apimachinery.pkg.api.resource.Quantity) | | |
+ | `allocated` | [k8s.io.apimachinery.pkg.api.resource.Quantity](#k8s.io.apimachinery.pkg.api.resource.Quantity) | | |
+ | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | |




 @@ -117,66 +120,41 @@



-
+
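For orientation, the regenerated `ResourcePair` reference above boils down to a simple rule: the free amount of a resource is `allocatable` minus `allocated`, both expressed as Kubernetes `resource.Quantity` values. A minimal Go sketch of that arithmetic, using a hand-written stand-in struct rather than the generated type (the field names simply mirror the table above):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// resourcePair is a hand-written stand-in for akash.inventory.v1.ResourcePair
// as documented above; the real type is generated from the proto definition.
type resourcePair struct {
	Allocatable resource.Quantity
	Allocated   resource.Quantity
}

// available returns allocatable - allocated.
func (rp resourcePair) available() resource.Quantity {
	avail := rp.Allocatable.DeepCopy()
	avail.Sub(rp.Allocated)
	return avail
}

func main() {
	cpu := resourcePair{
		Allocatable: resource.MustParse("8"),     // node exposes 8 vCPU
		Allocated:   resource.MustParse("2500m"), // 2.5 vCPU already reserved
	}
	fmt.Println("available cpu:", cpu.available().String()) // 5500m
}
```

The same subtraction applies to every pair carried by the node tables that follow (CPU, GPU, memory, and storage quantities).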

Top

- ## akash/manifest/v2beta1/httpoptions.proto + ## akash/inventory/v1/cpu.proto - + - ### ServiceExposeHTTPOptions - ServiceExposeHTTPOptions + ### CPU + CPU reports CPU inventory details | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `max_body_size` | [uint32](#uint32) | | | - | `read_timeout` | [uint32](#uint32) | | | - | `send_timeout` | [uint32](#uint32) | | | - | `next_tries` | [uint32](#uint32) | | | - | `next_timeout` | [uint32](#uint32) | | | - | `next_cases` | [string](#string) | repeated | | - - - - - - - - - - - - - + | `quantity` | [ResourcePair](#akash.inventory.v1.ResourcePair) | | | + | `info` | [CPUInfo](#akash.inventory.v1.CPUInfo) | repeated | | - -

Top

- ## akash/manifest/v2beta1/serviceexpose.proto - + - ### ServiceExpose - ServiceExpose stores exposed ports and hosts details + ### CPUInfo + CPUInfo reports CPU details | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `port` | [uint32](#uint32) | | port on the container | - | `external_port` | [uint32](#uint32) | | port on the service definition | - | `proto` | [string](#string) | | | - | `service` | [string](#string) | | | - | `global` | [bool](#bool) | | | - | `hosts` | [string](#string) | repeated | | - | `http_options` | [ServiceExposeHTTPOptions](#akash.manifest.v2beta1.ServiceExposeHTTPOptions) | | | - | `ip` | [string](#string) | | The name of the IP address associated with this, if any | - | `endpoint_sequence_number` | [uint32](#uint32) | | The sequence number of the associated endpoint in the on-chain data | + | `id` | [string](#string) | | | + | `vendor` | [string](#string) | | | + | `model` | [string](#string) | | | + | `vcores` | [uint32](#uint32) | | | @@ -192,62 +170,43 @@ - +

Top

- ## akash/manifest/v2beta1/service.proto - - - - - - ### Service - Service stores name, image, args, env, unit, count and expose list of service - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `name` | [string](#string) | | | - | `image` | [string](#string) | | | - | `command` | [string](#string) | repeated | | - | `args` | [string](#string) | repeated | | - | `env` | [string](#string) | repeated | | - | `resources` | [akash.base.v1beta2.ResourceUnits](#akash.base.v1beta2.ResourceUnits) | | | - | `count` | [uint32](#uint32) | | | - | `expose` | [ServiceExpose](#akash.manifest.v2beta1.ServiceExpose) | repeated | | - | `params` | [ServiceParams](#akash.manifest.v2beta1.ServiceParams) | | | - - - + ## akash/inventory/v1/gpu.proto - + - ### ServiceParams - ServiceParams + ### GPU + GPUInfo reports GPU inventory details | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `storage` | [StorageParams](#akash.manifest.v2beta1.StorageParams) | repeated | | + | `quantity` | [ResourcePair](#akash.inventory.v1.ResourcePair) | | | + | `info` | [GPUInfo](#akash.inventory.v1.GPUInfo) | repeated | | - + - ### StorageParams - StorageParams + ### GPUInfo + GPUInfo reports GPU details | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | + | `vendor` | [string](#string) | | | + | `vendor_id` | [string](#string) | | | | `name` | [string](#string) | | | - | `mount` | [string](#string) | | | - | `read_only` | [bool](#bool) | | | + | `modelid` | [string](#string) | | | + | `interface` | [string](#string) | | | + | `memory_size` | [string](#string) | | | @@ -263,23 +222,41 @@ - +

Top

- ## akash/manifest/v2beta2/group.proto + ## akash/inventory/v1/memory.proto - + - ### Group - Group store name and list of services + ### Memory + Memory reports Memory inventory details | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `name` | [string](#string) | | | - | `services` | [Service](#akash.manifest.v2beta2.Service) | repeated | | + | `quantity` | [ResourcePair](#akash.inventory.v1.ResourcePair) | | | + | `info` | [MemoryInfo](#akash.inventory.v1.MemoryInfo) | repeated | | + + + + + + + + + ### MemoryInfo + MemoryInfo reports Memory details + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `vendor` | [string](#string) | | | + | `type` | [string](#string) | | | + | `total_size` | [string](#string) | | | + | `speed` | [string](#string) | | | @@ -295,27 +272,27 @@ - +

Top

- ## akash/manifest/v2beta2/httpoptions.proto + ## akash/inventory/v1/resources.proto - + - ### ServiceExposeHTTPOptions - ServiceExposeHTTPOptions + ### NodeResources + NodeResources reports node inventory details | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `max_body_size` | [uint32](#uint32) | | | - | `read_timeout` | [uint32](#uint32) | | | - | `send_timeout` | [uint32](#uint32) | | | - | `next_tries` | [uint32](#uint32) | | | - | `next_timeout` | [uint32](#uint32) | | | - | `next_cases` | [string](#string) | repeated | | + | `cpu` | [CPU](#akash.inventory.v1.CPU) | | | + | `memory` | [Memory](#akash.inventory.v1.Memory) | | | + | `gpu` | [GPU](#akash.inventory.v1.GPU) | | | + | `ephemeral_storage` | [ResourcePair](#akash.inventory.v1.ResourcePair) | | | + | `volumes_attached` | [ResourcePair](#akash.inventory.v1.ResourcePair) | | | + | `volumes_mounted` | [ResourcePair](#akash.inventory.v1.ResourcePair) | | | @@ -331,30 +308,39 @@ - +

Top

- ## akash/manifest/v2beta2/serviceexpose.proto + ## akash/inventory/v1/node.proto - + - ### ServiceExpose - ServiceExpose stores exposed ports and hosts details + ### Node + Node reports node inventory details | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `port` | [uint32](#uint32) | | port on the container | - | `external_port` | [uint32](#uint32) | | port on the service definition | - | `proto` | [string](#string) | | | - | `service` | [string](#string) | | | - | `global` | [bool](#bool) | | | - | `hosts` | [string](#string) | repeated | | - | `http_options` | [ServiceExposeHTTPOptions](#akash.manifest.v2beta2.ServiceExposeHTTPOptions) | | | - | `ip` | [string](#string) | | The name of the IP address associated with this, if any | - | `endpoint_sequence_number` | [uint32](#uint32) | | The sequence number of the associated endpoint in the on-chain data | + | `name` | [string](#string) | | | + | `resources` | [NodeResources](#akash.inventory.v1.NodeResources) | | | + | `capabilities` | [NodeCapabilities](#akash.inventory.v1.NodeCapabilities) | | | + + + + + + + + + ### NodeCapabilities + NodeCapabilities extended list of node capabilities + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `storage_classes` | [string](#string) | repeated | | @@ -370,81 +356,71 @@ - +

Top

- ## akash/manifest/v2beta2/service.proto + ## akash/inventory/v1/storage.proto - + - ### Service - Service stores name, image, args, env, unit, count and expose list of service + ### Storage + Storage reports Storage inventory details | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `name` | [string](#string) | | | - | `image` | [string](#string) | | | - | `command` | [string](#string) | repeated | | - | `args` | [string](#string) | repeated | | - | `env` | [string](#string) | repeated | | - | `resources` | [akash.base.v1beta3.Resources](#akash.base.v1beta3.Resources) | | | - | `count` | [uint32](#uint32) | | | - | `expose` | [ServiceExpose](#akash.manifest.v2beta2.ServiceExpose) | repeated | | - | `params` | [ServiceParams](#akash.manifest.v2beta2.ServiceParams) | | | - | `credentials` | [ServiceImageCredentials](#akash.manifest.v2beta2.ServiceImageCredentials) | | | + | `quantity` | [ResourcePair](#akash.inventory.v1.ResourcePair) | | | + | `info` | [StorageInfo](#akash.inventory.v1.StorageInfo) | | | - + - ### ServiceImageCredentials - Credentials to fetch image from registry + ### StorageInfo + StorageInfo reports Storage details | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `host` | [string](#string) | | | - | `email` | [string](#string) | | | - | `username` | [string](#string) | | | - | `password` | [string](#string) | | | + | `class` | [string](#string) | | | + | `iops` | [string](#string) | | | - - + - ### ServiceParams - ServiceParams + + + + + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `storage` | [StorageParams](#akash.manifest.v2beta2.StorageParams) | repeated | | + +

Top

+ ## akash/inventory/v1/cluster.proto - + - ### StorageParams - StorageParams + ### Cluster + Cluster reports inventory across entire cluster | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `name` | [string](#string) | | | - | `mount` | [string](#string) | | | - | `read_only` | [bool](#bool) | | | + | `nodes` | [Node](#akash.inventory.v1.Node) | repeated | | + | `storage` | [Storage](#akash.inventory.v1.Storage) | repeated | | @@ -460,171 +436,227 @@ - +

Top

- ## akash/provider/v1/status.proto + ## akash/inventory/v1/service.proto + + + + + + + + + + ### ClusterRPC + ClusterRPC defines the RPC server of cluster + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `QueryCluster` | [.google.protobuf.Empty](#google.protobuf.Empty) | [Cluster](#akash.inventory.v1.Cluster) | QueryCluster defines a method to query hardware state of the cluster buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/v1/inventory| + | `StreamCluster` | [.google.protobuf.Empty](#google.protobuf.Empty) | [Cluster](#akash.inventory.v1.Cluster) stream | StreamCluster defines a method to stream hardware state of the cluster buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | | - + + - ### BidEngineStatus - BidEngineStatus + ### NodeRPC + NodeRPC defines the RPC server of node + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `QueryNode` | [.google.protobuf.Empty](#google.protobuf.Empty) | [Node](#akash.inventory.v1.Node) | QueryNode defines a method to query hardware state of the node buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/v1/node| + | `StreamNode` | [.google.protobuf.Empty](#google.protobuf.Empty) | [Node](#akash.inventory.v1.Node) stream | StreamNode defines a method to stream hardware state of the node buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | | - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `orders` | [uint32](#uint32) | | | + + + +
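The `ClusterRPC` table above also documents an HTTP mapping for `QueryCluster` (`GET /v1/inventory`), so the cluster snapshot can be fetched without the generated gRPC stubs. A hedged standard-library sketch follows; the listen address is a placeholder, and the structs are hand-written projections of the documented `Cluster`/`Node` messages, not the generated types:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Minimal projections of the akash.inventory.v1.Cluster and Node messages
// documented above; only the fields needed here are declared.
type node struct {
	Name string `json:"name"`
}

type cluster struct {
	Nodes []node `json:"nodes"`
}

func main() {
	// Hypothetical inventory operator address; substitute a real endpoint.
	resp, err := http.Get("http://localhost:8444/v1/inventory")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var c cluster
	if err := json.NewDecoder(resp.Body).Decode(&c); err != nil {
		panic(err)
	}
	for _, n := range c.Nodes {
		fmt.Println("node:", n.Name)
	}
}
```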

Top

+ ## akash/manifest/v2beta3/httpoptions.proto - + - ### ClusterStatus - ClusterStatus + ### ServiceExposeHTTPOptions + ServiceExposeHTTPOptions | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `leases` | [Leases](#akash.provider.v1.Leases) | | | - | `inventory` | [Inventory](#akash.provider.v1.Inventory) | | | + | `max_body_size` | [uint32](#uint32) | | | + | `read_timeout` | [uint32](#uint32) | | | + | `send_timeout` | [uint32](#uint32) | | | + | `next_tries` | [uint32](#uint32) | | | + | `next_timeout` | [uint32](#uint32) | | | + | `next_cases` | [string](#string) | repeated | | - - + - ### Inventory - Inventory + + + + + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `cluster` | [akash.inventory.v1.Cluster](#akash.inventory.v1.Cluster) | | | - | `reservations` | [Reservations](#akash.provider.v1.Reservations) | | | + +

Top

+ ## akash/manifest/v2beta3/serviceexpose.proto - + - ### Leases - Leases + ### ServiceExpose + ServiceExpose stores exposed ports and hosts details | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `active` | [uint32](#uint32) | | | + | `port` | [uint32](#uint32) | | port on the container | + | `external_port` | [uint32](#uint32) | | port on the service definition | + | `proto` | [string](#string) | | | + | `service` | [string](#string) | | | + | `global` | [bool](#bool) | | | + | `hosts` | [string](#string) | repeated | | + | `http_options` | [ServiceExposeHTTPOptions](#akash.manifest.v2beta3.ServiceExposeHTTPOptions) | | | + | `ip` | [string](#string) | | The name of the IP address associated with this, if any | + | `endpoint_sequence_number` | [uint32](#uint32) | | The sequence number of the associated endpoint in the on-chain data | - - + - ### ManifestStatus - ManifestStatus + + + + + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `deployments` | [uint32](#uint32) | | | + +

Top

+ ## akash/manifest/v2beta3/service.proto - + - ### Reservations - Reservations + ### ImageCredentials + Credentials to fetch image from registry | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `pending` | [ReservationsMetric](#akash.provider.v1.ReservationsMetric) | | | - | `active` | [ReservationsMetric](#akash.provider.v1.ReservationsMetric) | | | + | `host` | [string](#string) | | | + | `email` | [string](#string) | | | + | `username` | [string](#string) | | | + | `password` | [string](#string) | | | - + - ### ReservationsMetric - ReservationsMetric + ### Service + Service stores name, image, args, env, unit, count and expose list of service | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | + | `name` | [string](#string) | | | + | `image` | [string](#string) | | | + | `command` | [string](#string) | repeated | | + | `args` | [string](#string) | repeated | | + | `env` | [string](#string) | repeated | | + | `resources` | [akash.base.resources.v1beta4.Resources](#akash.base.resources.v1beta4.Resources) | | | | `count` | [uint32](#uint32) | | | - | `resources` | [ResourcesMetric](#akash.provider.v1.ResourcesMetric) | | | + | `expose` | [ServiceExpose](#akash.manifest.v2beta3.ServiceExpose) | repeated | | + | `params` | [ServiceParams](#akash.manifest.v2beta3.ServiceParams) | | | + | `credentials` | [ImageCredentials](#akash.manifest.v2beta3.ImageCredentials) | | | - + - ### ResourcesMetric - ResourceMetrics + ### ServiceParams + ServiceParams | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `cpu` | [k8s.io.apimachinery.pkg.api.resource.Quantity](#k8s.io.apimachinery.pkg.api.resource.Quantity) | | | - | `memory` | [k8s.io.apimachinery.pkg.api.resource.Quantity](#k8s.io.apimachinery.pkg.api.resource.Quantity) | | | - | `gpu` | [k8s.io.apimachinery.pkg.api.resource.Quantity](#k8s.io.apimachinery.pkg.api.resource.Quantity) | | | - | `ephemeral_storage` | [k8s.io.apimachinery.pkg.api.resource.Quantity](#k8s.io.apimachinery.pkg.api.resource.Quantity) | | | - | `storage` | [ResourcesMetric.StorageEntry](#akash.provider.v1.ResourcesMetric.StorageEntry) | repeated | | + | `storage` | [StorageParams](#akash.manifest.v2beta3.StorageParams) | repeated | | + | `credentials` | [ImageCredentials](#akash.manifest.v2beta3.ImageCredentials) | | | - + - ### ResourcesMetric.StorageEntry - + ### StorageParams + StorageParams | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `key` | [string](#string) | | | - | `value` | [k8s.io.apimachinery.pkg.api.resource.Quantity](#k8s.io.apimachinery.pkg.api.resource.Quantity) | | | + | `name` | [string](#string) | | | + | `mount` | [string](#string) | | | + | `read_only` | [bool](#bool) | | | + + + + + + + + - + + +

Top

- ### Status
- Status
+ ## akash/manifest/v2beta3/group.proto
+
+
+
+
+
+ ### Group
+ Group stores the name and list of services


 | Field | Type | Label | Description |
 | ----- | ---- | ----- | ----------- |
- | `errors` | [string](#string) | repeated | |
- | `cluster` | [ClusterStatus](#akash.provider.v1.ClusterStatus) | | |
- | `bid_engine` | [BidEngineStatus](#akash.provider.v1.BidEngineStatus) | | |
- | `manifest` | [ManifestStatus](#akash.provider.v1.ManifestStatus) | | |
- | `public_hostnames` | [string](#string) | repeated | |
- | `timestamp` | [google.protobuf.Timestamp](#google.protobuf.Timestamp) | | |
+ | `name` | [string](#string) | | |
+ | `services` | [Service](#akash.manifest.v2beta3.Service) | repeated | |


 @@ -640,186 +672,203 @@

-
+
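Putting the v2beta3 manifest messages together: a `Group` is a named list of `Service` entries, and each `Service` can now carry `ImageCredentials` for pulling from a private registry. An illustrative Go sketch with hand-written stand-ins for the generated types (all names and values below are made up for the example):

```go
package main

import "fmt"

// Hand-written stand-ins for the generated akash.manifest.v2beta3 types,
// limited to the fields this example uses.
type ImageCredentials struct {
	Host     string
	Username string
	Password string
}

type Service struct {
	Name        string
	Image       string
	Count       uint32
	Credentials *ImageCredentials // nil when the image is public
}

type Group struct {
	Name     string
	Services []Service
}

func main() {
	g := Group{
		Name: "web",
		Services: []Service{{
			Name:  "api",
			Image: "registry.example.com/team/api:1.2.3", // example private image
			Count: 2,
			Credentials: &ImageCredentials{
				Host:     "registry.example.com",
				Username: "deploy-bot",
				Password: "<token>",
			},
		}},
	}
	fmt.Printf("group %q has %d service(s)\n", g.Name, len(g.Services))
}
```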

Top

- ## akash/provider/v1/service.proto + ## akash/provider/lease/v1/service.proto - - - + + - + ### ForwarderPortStatus + ForwarderPortStatus - + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `host` | [string](#string) | | | + | `port` | [uint32](#uint32) | | | + | `external_port` | [uint32](#uint32) | | | + | `proto` | [string](#string) | | | + | `name` | [string](#string) | | | + + - ### ProviderRPC - ProviderRPC defines the RPC server for provider + - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `GetStatus` | [.google.protobuf.Empty](#google.protobuf.Empty) | [Status](#akash.provider.v1.Status) | GetStatus defines a method to query provider state buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/v1/status| - | `StreamStatus` | [.google.protobuf.Empty](#google.protobuf.Empty) | [Status](#akash.provider.v1.Status) stream | Status defines a method to stream provider state buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | | - + + + ### LeaseIPStatus + LeaseIPStatus + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `port` | [uint32](#uint32) | | | + | `external_port` | [uint32](#uint32) | | | + | `protocol` | [string](#string) | | | + | `ip` | [string](#string) | | | + - -

Top

- ## akash/inventory/v1/memory.proto - + - ### Memory - Memory reports Memory inventory details + ### LeaseServiceStatus + LeaseServiceStatus | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `quantity` | [ResourcePair](#akash.inventory.v1.ResourcePair) | | | - | `info` | [MemoryInfo](#akash.inventory.v1.MemoryInfo) | repeated | | + | `available` | [int32](#int32) | | | + | `total` | [int32](#int32) | | | + | `uris` | [string](#string) | repeated | | + | `observed_generation` | [int64](#int64) | | | + | `replicas` | [int32](#int32) | | | + | `updated_replicas` | [int32](#int32) | | | + | `ready_replicas` | [int32](#int32) | | | + | `available_replicas` | [int32](#int32) | | | - + - ### MemoryInfo - MemoryInfo reports Memory details + ### SendManifestRequest + SendManifestRequest is request type for the SendManifest Providers RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `vendor` | [string](#string) | | | - | `type` | [string](#string) | | | - | `total_size` | [string](#string) | | | - | `speed` | [string](#string) | | | + | `lease_id` | [akash.market.v1.LeaseID](#akash.market.v1.LeaseID) | | | + | `manifest` | [akash.manifest.v2beta3.Group](#akash.manifest.v2beta3.Group) | repeated | | - + + - + ### SendManifestResponse + SendManifestResponse is response type for the SendManifest Providers RPC method - + - + + + + ### ServiceLogs + ServiceLogs + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `name` | [string](#string) | | | + | `logs` | [bytes](#bytes) | | | + - -

Top

- ## akash/inventory/v1/cpu.proto - + - ### CPU - CPU reports CPU inventory details + ### ServiceLogsRequest + ServiceLogsRequest | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `quantity` | [ResourcePair](#akash.inventory.v1.ResourcePair) | | | - | `info` | [CPUInfo](#akash.inventory.v1.CPUInfo) | repeated | | + | `lease_id` | [akash.market.v1.LeaseID](#akash.market.v1.LeaseID) | | | + | `services` | [string](#string) | repeated | | - + - ### CPUInfo - CPUInfo reports CPU details + ### ServiceLogsResponse + ServiceLogsResponse | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [string](#string) | | | - | `vendor` | [string](#string) | | | - | `model` | [string](#string) | | | - | `vcores` | [uint32](#uint32) | | | + | `services` | [ServiceLogs](#akash.provider.lease.v1.ServiceLogs) | repeated | | - - - - - + + - + ### ServiceStatus + ServiceStatus + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `name` | [string](#string) | | | + | `status` | [LeaseServiceStatus](#akash.provider.lease.v1.LeaseServiceStatus) | | | + | `ports` | [ForwarderPortStatus](#akash.provider.lease.v1.ForwarderPortStatus) | repeated | | + | `ips` | [LeaseIPStatus](#akash.provider.lease.v1.LeaseIPStatus) | repeated | | + - -

Top

- ## akash/inventory/v1/cluster.proto - + - ### Cluster - Cluster reports inventory across entire cluster + ### ServiceStatusRequest + ServiceStatusRequest | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `nodes` | [Node](#akash.inventory.v1.Node) | repeated | | - | `storage` | [Storage](#akash.inventory.v1.Storage) | repeated | | + | `lease_id` | [akash.market.v1.LeaseID](#akash.market.v1.LeaseID) | | | + | `services` | [string](#string) | repeated | | - - - - - + + - + ### ServiceStatusResponse + ServiceStatusResponse + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `services` | [ServiceStatus](#akash.provider.lease.v1.ServiceStatus) | repeated | | + - -

Top

- ## akash/inventory/v1/resources.proto - + - ### NodeResources - NodeResources reports node inventory details + ### ShellRequest + ShellRequest | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `cpu` | [CPU](#akash.inventory.v1.CPU) | | | - | `memory` | [Memory](#akash.inventory.v1.Memory) | | | - | `gpu` | [GPU](#akash.inventory.v1.GPU) | | | - | `ephemeral_storage` | [ResourcePair](#akash.inventory.v1.ResourcePair) | | | - | `volumes_attached` | [ResourcePair](#akash.inventory.v1.ResourcePair) | | | - | `volumes_mounted` | [ResourcePair](#akash.inventory.v1.ResourcePair) | | | + | `lease_id` | [akash.market.v1.LeaseID](#akash.market.v1.LeaseID) | | | @@ -831,176 +880,188 @@ + + + + ### LeaseRPC + LeaseRPC defines the RPC server for lease control + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `SendManifest` | [SendManifestRequest](#akash.provider.lease.v1.SendManifestRequest) | [SendManifestResponse](#akash.provider.lease.v1.SendManifestResponse) | SendManifest sends manifest to the provider | | + | `ServiceStatus` | [ServiceStatusRequest](#akash.provider.lease.v1.ServiceStatusRequest) | [ServiceStatusResponse](#akash.provider.lease.v1.ServiceStatusResponse) | ServiceStatus buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | | + | `StreamServiceStatus` | [ServiceStatusRequest](#akash.provider.lease.v1.ServiceStatusRequest) | [ServiceStatusResponse](#akash.provider.lease.v1.ServiceStatusResponse) stream | StreamServiceStatus buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | | + | `ServiceLogs` | [ServiceLogsRequest](#akash.provider.lease.v1.ServiceLogsRequest) | [ServiceLogsResponse](#akash.provider.lease.v1.ServiceLogsResponse) | ServiceLogs buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | | + | `StreamServiceLogs` | [ServiceLogsRequest](#akash.provider.lease.v1.ServiceLogsRequest) | [ServiceLogsResponse](#akash.provider.lease.v1.ServiceLogsResponse) stream | StreamServiceLogs buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | | + - +
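`LeaseRPC` has no HTTP mappings in the table above, so it is consumed through the generated gRPC client. As a shape-only illustration, the hand-written interface below mirrors the documented methods; the message types are empty placeholders, and the channel-based streaming signatures are a simplification of the real gRPC stream types:

```go
package lease

import "context"

// Placeholder structs standing in for the generated
// akash.provider.lease.v1 request/response messages listed above.
type (
	SendManifestRequest   struct{}
	SendManifestResponse  struct{}
	ServiceStatusRequest  struct{}
	ServiceStatusResponse struct{}
	ServiceLogsRequest    struct{}
	ServiceLogsResponse   struct{}
)

// Client mirrors the LeaseRPC methods from the table above. Streaming methods
// are modeled as receive-only channels purely for illustration.
type Client interface {
	SendManifest(ctx context.Context, req *SendManifestRequest) (*SendManifestResponse, error)
	ServiceStatus(ctx context.Context, req *ServiceStatusRequest) (*ServiceStatusResponse, error)
	StreamServiceStatus(ctx context.Context, req *ServiceStatusRequest) (<-chan *ServiceStatusResponse, error)
	ServiceLogs(ctx context.Context, req *ServiceLogsRequest) (*ServiceLogsResponse, error)
	StreamServiceLogs(ctx context.Context, req *ServiceLogsRequest) (<-chan *ServiceLogsResponse, error)
}
```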

Top

- ## akash/inventory/v1/node.proto + ## akash/provider/v1/status.proto - + - ### Node - Node reports node inventory details + ### BidEngineStatus + BidEngineStatus | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `name` | [string](#string) | | | - | `resources` | [NodeResources](#akash.inventory.v1.NodeResources) | | | - | `capabilities` | [NodeCapabilities](#akash.inventory.v1.NodeCapabilities) | | | + | `orders` | [uint32](#uint32) | | | - + - ### NodeCapabilities - NodeCapabilities extended list of node capabilities + ### ClusterStatus + ClusterStatus | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `storage_classes` | [string](#string) | repeated | | + | `leases` | [Leases](#akash.provider.v1.Leases) | | | + | `inventory` | [Inventory](#akash.provider.v1.Inventory) | | | - - - - - + + - + ### Inventory + Inventory + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `cluster` | [akash.inventory.v1.Cluster](#akash.inventory.v1.Cluster) | | | + | `reservations` | [Reservations](#akash.provider.v1.Reservations) | | | + - -

Top

- ## akash/inventory/v1/resourcepair.proto - + - ### ResourcePair - ResourcePair to extents resource.Quantity to provide total and available units of the resource + ### Leases + Leases | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `allocatable` | [k8s.io.apimachinery.pkg.api.resource.Quantity](#k8s.io.apimachinery.pkg.api.resource.Quantity) | | | - | `allocated` | [k8s.io.apimachinery.pkg.api.resource.Quantity](#k8s.io.apimachinery.pkg.api.resource.Quantity) | | | - | `attributes` | [akash.base.v1beta3.Attribute](#akash.base.v1beta3.Attribute) | repeated | | + | `active` | [uint32](#uint32) | | | - - - - - + + - + ### ManifestStatus + ManifestStatus + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `deployments` | [uint32](#uint32) | | | + - -

Top

- ## akash/inventory/v1/gpu.proto - + - ### GPU - GPUInfo reports GPU inventory details + ### Reservations + Reservations | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `quantity` | [ResourcePair](#akash.inventory.v1.ResourcePair) | | | - | `info` | [GPUInfo](#akash.inventory.v1.GPUInfo) | repeated | | + | `pending` | [ReservationsMetric](#akash.provider.v1.ReservationsMetric) | | | + | `active` | [ReservationsMetric](#akash.provider.v1.ReservationsMetric) | | | - + - ### GPUInfo - GPUInfo reports GPU details + ### ReservationsMetric + ReservationsMetric | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `vendor` | [string](#string) | | | - | `vendor_id` | [string](#string) | | | - | `name` | [string](#string) | | | - | `modelid` | [string](#string) | | | - | `interface` | [string](#string) | | | - | `memory_size` | [string](#string) | | | + | `count` | [uint32](#uint32) | | | + | `resources` | [ResourcesMetric](#akash.provider.v1.ResourcesMetric) | | | - - - - - + + - + ### ResourcesMetric + ResourceMetrics + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `cpu` | [k8s.io.apimachinery.pkg.api.resource.Quantity](#k8s.io.apimachinery.pkg.api.resource.Quantity) | | | + | `memory` | [k8s.io.apimachinery.pkg.api.resource.Quantity](#k8s.io.apimachinery.pkg.api.resource.Quantity) | | | + | `gpu` | [k8s.io.apimachinery.pkg.api.resource.Quantity](#k8s.io.apimachinery.pkg.api.resource.Quantity) | | | + | `ephemeral_storage` | [k8s.io.apimachinery.pkg.api.resource.Quantity](#k8s.io.apimachinery.pkg.api.resource.Quantity) | | | + | `storage` | [ResourcesMetric.StorageEntry](#akash.provider.v1.ResourcesMetric.StorageEntry) | repeated | | + - -

Top

- ## akash/inventory/v1/storage.proto - + - ### Storage - Storage reports Storage inventory details + ### ResourcesMetric.StorageEntry + | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `quantity` | [ResourcePair](#akash.inventory.v1.ResourcePair) | | | - | `info` | [StorageInfo](#akash.inventory.v1.StorageInfo) | | | + | `key` | [string](#string) | | | + | `value` | [k8s.io.apimachinery.pkg.api.resource.Quantity](#k8s.io.apimachinery.pkg.api.resource.Quantity) | | | - + - ### StorageInfo - StorageInfo reports Storage details + ### Status + Status | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `class` | [string](#string) | | | - | `iops` | [string](#string) | | | + | `errors` | [string](#string) | repeated | | + | `cluster` | [ClusterStatus](#akash.provider.v1.ClusterStatus) | | | + | `bid_engine` | [BidEngineStatus](#akash.provider.v1.BidEngineStatus) | | | + | `manifest` | [ManifestStatus](#akash.provider.v1.ManifestStatus) | | | + | `public_hostnames` | [string](#string) | repeated | | @@ -1016,10 +1077,10 @@ - +
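The `Status` message above is the aggregate the provider reports about itself; it is returned by the `GetStatus`/`StreamStatus` RPCs documented in the next section, with `GetStatus` also exposed over HTTP as `GET /v1/status`. A small standard-library sketch of reading it (the address is a placeholder, and only fields whose JSON keys are unambiguous are decoded):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// Partial view of akash.provider.v1.Status as documented above; only fields
// without underscores in their proto names are used, so the JSON keys are the
// same whichever naming convention the gateway emits.
type status struct {
	Errors  []string `json:"errors"`
	Cluster struct {
		Leases struct {
			Active uint32 `json:"active"`
		} `json:"leases"`
	} `json:"cluster"`
}

func main() {
	// Hypothetical provider gateway address; substitute a real one.
	resp, err := http.Get("https://provider.example.com:8443/v1/status")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var s status
	if err := json.NewDecoder(resp.Body).Decode(&s); err != nil {
		panic(err)
	}
	fmt.Println("active leases:", s.Cluster.Leases.Active)
	fmt.Println("errors:", s.Errors)
}
```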

Top

- ## akash/inventory/v1/service.proto + ## akash/provider/v1/service.proto @@ -1029,26 +1090,15 @@ - - - ### ClusterRPC - ClusterRPC defines the RPC server of cluster - - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `QueryCluster` | [.google.protobuf.Empty](#google.protobuf.Empty) | [Cluster](#akash.inventory.v1.Cluster) | QueryCluster defines a method to query hardware state of the cluster buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/v1/inventory| - | `StreamCluster` | [.google.protobuf.Empty](#google.protobuf.Empty) | [Cluster](#akash.inventory.v1.Cluster) stream | StreamCluster defines a method to stream hardware state of the cluster buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | | - - - + - ### NodeRPC - NodeRPC defines the RPC server of node + ### ProviderRPC + ProviderRPC defines the RPC server for provider | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `QueryNode` | [.google.protobuf.Empty](#google.protobuf.Empty) | [Node](#akash.inventory.v1.Node) | QueryNode defines a method to query hardware state of the node buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/v1/node| - | `StreamNode` | [.google.protobuf.Empty](#google.protobuf.Empty) | [Node](#akash.inventory.v1.Node) stream | StreamNode defines a method to stream hardware state of the node buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | | + | `GetStatus` | [.google.protobuf.Empty](#google.protobuf.Empty) | [Status](#akash.provider.v1.Status) | GetStatus defines a method to query provider state buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/v1/status| + | `StreamStatus` | [.google.protobuf.Empty](#google.protobuf.Empty) | [Status](#akash.provider.v1.Status) stream | Status defines a method to stream provider state buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | | diff --git a/docs/swagger-ui/swagger.yaml b/docs/swagger-ui/swagger.yaml index ab848a95..ddc9c451 100644 --- a/docs/swagger-ui/swagger.yaml +++ b/docs/swagger-ui/swagger.yaml @@ -4,7 +4,7 @@ info: description: A REST interface for state queries version: 1.0.0 paths: - /akash/audit/v1beta3/audit/attributes/list: + /akash/audit/v1/audit/attributes/list: get: summary: |- AllProvidersAttributes queries all providers @@ -43,9 +43,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -148,7 +149,7 @@ paths: type: boolean tags: - Query - /akash/audit/v1beta3/audit/attributes/{auditor}/{owner}: + /akash/audit/v1/audit/attributes/{auditor}/{owner}: get: summary: >- ProviderAuditorAttributes queries provider signed attributes by specific @@ -190,9 +191,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. 
total: type: string format: uint64 @@ -247,7 +249,7 @@ paths: type: string tags: - Query - /akash/audit/v1beta3/audit/attributes/{owner}/list: + /akash/audit/v1/audit/attributes/{owner}/list: get: summary: |- ProviderAttributes queries all provider signed attributes @@ -286,9 +288,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -395,7 +398,7 @@ paths: type: boolean tags: - Query - /akash/provider/v1beta3/auditor/{auditor}/list: + /akash/provider/v1/auditor/{auditor}/list: get: summary: |- AuditorAttributes queries all providers signed by this auditor @@ -434,9 +437,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -543,7 +547,7 @@ paths: type: boolean tags: - Query - /akash/cert/v1beta3/certificates/list: + /akash/cert/v1/certificates/list: get: summary: Certificates queries certificates operationId: Certificates @@ -594,9 +598,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -711,7 +716,7 @@ paths: type: boolean tags: - Query - /akash/deployment/v1beta3/deployments/info: + /akash/deployment/v1beta4/deployments/info: get: summary: Deployment queries deployment details operationId: Deployment @@ -724,7 +729,7 @@ paths: deployment: type: object properties: - deployment_id: + id: type: object properties: owner: @@ -746,19 +751,19 @@ paths: - active: DeploymentActive denotes state for deployment active - closed: DeploymentClosed denotes state for deployment closed title: State is an enum which refers to state of deployment - version: + hash: type: string format: byte created_at: type: string format: int64 - title: Deployment stores deploymentID, state and version details + title: Deployment stores deploymentID, state and checksum details groups: type: array items: type: object properties: - group_id: + id: type: object properties: owner: @@ -1000,7 +1005,7 @@ paths: title: >- ResourceUnit extends Resources and adds Count along with the Price - title: GroupSpec stores group specifications + title: Spec stores group specifications created_at: type: string format: int64 @@ -1137,7 +1142,7 @@ paths: format: uint64 tags: - Query - /akash/deployment/v1beta3/deployments/list: + /akash/deployment/v1beta4/deployments/list: get: summary: Deployments queries deployments operationId: Deployments @@ -1155,7 +1160,7 @@ paths: deployment: type: object properties: - deployment_id: + id: type: object properties: owner: @@ -1177,21 +1182,21 @@ paths: - active: DeploymentActive denotes state for deployment active - closed: DeploymentClosed denotes state for deployment closed title: State is an enum which refers to state of deployment - version: + hash: type: string format: byte created_at: type: string format: int64 title: >- - Deployment stores deploymentID, state and version + Deployment stores deploymentID, state and checksum details groups: type: array items: type: 
object properties: - group_id: + id: type: object properties: owner: @@ -1433,7 +1438,7 @@ paths: title: >- ResourceUnit extends Resources and adds Count along with the Price - title: GroupSpec stores group specifications + title: Spec stores group specifications created_at: type: string format: int64 @@ -1544,9 +1549,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -1662,7 +1668,7 @@ paths: type: boolean tags: - Query - /akash/deployment/v1beta3/groups/info: + /akash/deployment/v1beta4/groups/info: get: summary: Group queries group details operationId: Group @@ -1675,7 +1681,7 @@ paths: group: type: object properties: - group_id: + id: type: object properties: owner: @@ -1917,7 +1923,7 @@ paths: title: >- ResourceUnit extends Resources and adds Count along with the Price - title: GroupSpec stores group specifications + title: Spec stores group specifications created_at: type: string format: int64 @@ -1962,7 +1968,66 @@ paths: format: int64 tags: - Query - /akash/market/v1beta3/bids/info: + /akash/deployment/v1beta4/params: + get: + summary: Params returns the total set of minting parameters. + operationId: DeploymentParams + responses: + '200': + description: A successful response. + schema: + type: object + properties: + params: + description: params defines the parameters of the module. + type: object + properties: + min_deposits: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the + custom method + + signatures required by gogoproto. + title: Params defines the parameters for the x/deployment module + description: >- + QueryParamsResponse is the response type for the Query/Params RPC + method. + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + tags: + - Query + /akash/market/v1beta5/bids/info: get: summary: Bid queries bid details operationId: Bid @@ -1975,7 +2040,7 @@ paths: bid: type: object properties: - bid_id: + id: type: object properties: owner: @@ -2010,7 +2075,7 @@ paths: - active: BidMatched denotes state for bid open - lost: BidLost denotes state for bid lost - closed: BidClosed denotes state for bid closed - title: State is an enum which refers to state of bid + title: BidState is an enum which refers to state of bid price: type: object properties: @@ -2030,6 +2095,155 @@ paths: created_at: type: string format: int64 + resources_offer: + type: array + items: + type: object + properties: + resources: + type: object + properties: + id: + type: integer + format: int64 + cpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + CPU stores resource units and cpu config + attributes + memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Memory stores resource quantity and memory + attributes + storage: + type: array + items: + type: object + properties: + name: + type: string + quantity: + type: object + properties: + val: + type: string + format: byte + title: >- + Unit stores cpu, memory and storage + metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Storage stores resource quantity and storage + attributes + gpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + GPU stores resource units and cpu config + attributes + endpoints: + type: array + items: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: >- + - SHARED_HTTP: Describes an endpoint that + becomes a Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is + implemented when the lease is deployed + sequence_number: + type: integer + format: int64 + title: >- + Endpoint describes a publicly accessible IP + service + title: >- + Resources describes all available resources types + for deployment/node etc + + if field is nil resource is not present in the given + data-structure + count: + type: integer + format: int64 + title: >- + ResourceOffer describes resources that provider is + offering + + for deployment title: Bid 
stores BidID, state of bid and price escrow_account: type: object @@ -2175,7 +2389,7 @@ paths: type: string tags: - Query - /akash/market/v1beta3/bids/list: + /akash/market/v1beta5/bids/list: get: summary: Bids queries bids with filters operationId: Bids @@ -2193,7 +2407,7 @@ paths: bid: type: object properties: - bid_id: + id: type: object properties: owner: @@ -2228,7 +2442,7 @@ paths: - active: BidMatched denotes state for bid open - lost: BidLost denotes state for bid lost - closed: BidClosed denotes state for bid closed - title: State is an enum which refers to state of bid + title: BidState is an enum which refers to state of bid price: type: object properties: @@ -2248,6 +2462,161 @@ paths: created_at: type: string format: int64 + resources_offer: + type: array + items: + type: object + properties: + resources: + type: object + properties: + id: + type: integer + format: int64 + cpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: >- + Unit stores cpu, memory and storage + metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + CPU stores resource units and cpu config + attributes + memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: >- + Unit stores cpu, memory and storage + metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Memory stores resource quantity and memory + attributes + storage: + type: array + items: + type: object + properties: + name: + type: string + quantity: + type: object + properties: + val: + type: string + format: byte + title: >- + Unit stores cpu, memory and storage + metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Storage stores resource quantity and + storage attributes + gpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: >- + Unit stores cpu, memory and storage + metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + GPU stores resource units and cpu config + attributes + endpoints: + type: array + items: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: >- + - SHARED_HTTP: Describes an endpoint + that becomes a Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is + implemented when the lease is deployed + sequence_number: + type: integer + format: int64 + title: >- + Endpoint describes a publicly accessible + IP service + title: >- + Resources describes all available resources + types for deployment/node etc + + if field is nil resource is not present in the + given data-structure + count: + type: integer + format: int64 + title: >- + ResourceOffer describes resources that provider is + offering + + for deployment title: Bid stores BidID, state of bid and price escrow_account: type: object @@ -2353,9 +2722,10 @@ 
paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -2483,7 +2853,7 @@ paths: type: boolean tags: - Query - /akash/market/v1beta3/leases/info: + /akash/market/v1beta5/leases/info: get: summary: Lease queries lease details operationId: Lease @@ -2496,7 +2866,7 @@ paths: lease: type: object properties: - lease_id: + id: type: object properties: owner: @@ -2580,7 +2950,7 @@ paths: - open: PaymentStateOpen is the state when the payment is open - closed: PaymentStateClosed is the state when the payment is closed - overdrawn: PaymentStateOverdrawn is the state when the payment is overdrawn - title: Payment State + title: State defines payment state rate: type: object properties: @@ -2678,7 +3048,7 @@ paths: type: string tags: - Query - /akash/market/v1beta3/leases/list: + /akash/market/v1beta5/leases/list: get: summary: Leases queries leases with filters operationId: Leases @@ -2696,7 +3066,7 @@ paths: lease: type: object properties: - lease_id: + id: type: object properties: owner: @@ -2780,7 +3150,7 @@ paths: - open: PaymentStateOpen is the state when the payment is open - closed: PaymentStateClosed is the state when the payment is closed - overdrawn: PaymentStateOverdrawn is the state when the payment is overdrawn - title: Payment State + title: State defines payment state rate: type: object properties: @@ -2839,9 +3209,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -2971,7 +3342,7 @@ paths: type: boolean tags: - Query - /akash/market/v1beta3/orders/info: + /akash/market/v1beta5/orders/info: get: summary: Order queries order details operationId: Order @@ -2984,7 +3355,7 @@ paths: order: type: object properties: - order_id: + id: type: object properties: owner: @@ -3225,7 +3596,7 @@ paths: title: >- ResourceUnit extends Resources and adds Count along with the Price - title: GroupSpec stores group specifications + title: Spec stores group specifications created_at: type: string format: int64 @@ -3275,7 +3646,7 @@ paths: format: int64 tags: - Query - /akash/market/v1beta3/orders/list: + /akash/market/v1beta5/orders/list: get: summary: Orders queries orders with filters operationId: Orders @@ -3290,7 +3661,7 @@ paths: items: type: object properties: - order_id: + id: type: object properties: owner: @@ -3531,7 +3902,7 @@ paths: title: >- ResourceUnit extends Resources and adds Count along with the Price - title: GroupSpec stores group specifications + title: Spec stores group specifications created_at: type: string format: int64 @@ -3542,9 +3913,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -3670,7 +4042,67 @@ paths: type: boolean tags: - Query - /akash/provider/v1beta3/providers: + /akash/market/v1beta5/params: + get: + summary: Params returns the total set of minting parameters. 
+ operationId: MarketParams + responses: + '200': + description: A successful response. + schema: + type: object + properties: + params: + description: params defines the parameters of the module. + type: object + properties: + bid_min_deposit: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the + custom method + + signatures required by gogoproto. + order_max_bids: + type: integer + format: int64 + title: Params is the params for the x/market module + description: >- + QueryParamsResponse is the response type for the Query/Params RPC + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + tags: + - Query + /akash/provider/v1beta4/providers: get: summary: Providers queries providers operationId: Providers @@ -3706,7 +4138,7 @@ paths: type: string website: type: string - title: ProviderInfo + title: Info title: Provider stores owner and host details pagination: type: object @@ -3714,9 +4146,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -3819,7 +4252,7 @@ paths: type: boolean tags: - Query - /akash/provider/v1beta3/providers/{owner}: + /akash/provider/v1beta4/providers/{owner}: get: summary: Provider queries provider details operationId: Provider @@ -3853,7 +4286,7 @@ paths: type: string website: type: string - title: ProviderInfo + title: Info title: Provider stores owner and host details title: >- QueryProviderResponse is response type for the Query/Provider RPC @@ -3887,4638 +4320,3026 @@ paths: type: string tags: - Query - /node_info: - get: - description: Information about the connected node - summary: The properties of the connected node - tags: - - Gaia REST - produces: - - application/json - responses: - '200': - description: Node status - schema: - type: object - properties: - application_version: - properties: - build_tags: - type: string - client_name: - type: string - commit: - type: string - go: - type: string - name: - type: string - server_name: - type: string - version: - type: string - node_info: - properties: - id: - type: string - moniker: - type: string - example: validator-name - protocol_version: - properties: - p2p: - type: string - example: 7 - block: - type: string - example: 10 - app: - type: string - example: 0 - network: - type: string - example: gaia-2 - channels: - type: string - listen_addr: - type: string - example: 192.168.56.1:26656 - version: - description: Tendermint version - type: string - example: 0.15.0 - other: - description: more information on versions - type: object - properties: - tx_index: - type: string - example: 'on' - rpc_address: - type: string - example: tcp://0.0.0.0:26657 - '500': - description: Failed to query node status - /syncing: - get: - summary: Syncing state of node - tags: - - Tendermint RPC - description: Get if the node is currently syning with other nodes - produces: - - application/json - responses: - '200': - description: Node syncing status - schema: 
- type: object - properties: - syncing: - type: boolean - '500': - description: Server internal error - /blocks/latest: - get: - summary: Get the latest block - tags: - - Tendermint RPC - produces: - - application/json - responses: - '200': - description: The latest block - schema: - type: object - properties: - block_meta: - type: object - properties: - header: - type: object - properties: - chain_id: - type: string - example: cosmoshub-2 - height: - type: number - example: 1 - time: - type: string - example: '2017-12-30T05:53:09.287+01:00' - num_txs: - type: number - example: 0 - last_block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - total_txs: - type: number - example: 35 - last_commit_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - data_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - validators_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - next_validators_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - consensus_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - app_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - last_results_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - evidence_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - proposer_address: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - version: - type: object - properties: - block: - type: string - example: 10 - app: - type: string - example: 0 - block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - block: - type: object - properties: - header: - type: object - properties: - chain_id: - type: string - example: cosmoshub-2 - height: - type: number - example: 1 - time: - type: string - example: '2017-12-30T05:53:09.287+01:00' - num_txs: - type: number - example: 0 - last_block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - total_txs: - type: number - example: 35 - last_commit_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - data_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - validators_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - next_validators_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - consensus_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - app_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - last_results_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - evidence_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - proposer_address: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - version: - type: object - properties: - block: - type: string - 
example: 10 - app: - type: string - example: 0 - txs: - type: array - items: - type: string - evidence: - type: array - items: - type: string - last_commit: - type: object - properties: - block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - precommits: - type: array - items: - type: object - properties: - validator_address: - type: string - validator_index: - type: string - example: '0' - height: - type: string - example: '0' - round: - type: string - example: '0' - timestamp: - type: string - example: '2017-12-30T05:53:09.287+01:00' - type: - type: number - example: 2 - block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - signature: - type: string - example: >- - 7uTC74QlknqYWEwg7Vn6M8Om7FuZ0EO4bjvuj6rwH1mTUJrRuMMZvAAqT9VjNgP0RA/TDp6u/92AqrZfXJSpBQ== - '500': - description: Server internal error - /blocks/{height}: + /akash/take/v1/params: get: - summary: Get a block at a certain height - tags: - - Tendermint RPC - produces: - - application/json - parameters: - - in: path - name: height - description: Block height - required: true - type: number - x-example: 1 + summary: Params returns the total set of minting parameters. + operationId: TakeParams responses: '200': - description: The block at a specific height + description: A successful response. schema: type: object properties: - block_meta: - type: object - properties: - header: - type: object - properties: - chain_id: - type: string - example: cosmoshub-2 - height: - type: number - example: 1 - time: - type: string - example: '2017-12-30T05:53:09.287+01:00' - num_txs: - type: number - example: 0 - last_block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - total_txs: - type: number - example: 35 - last_commit_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - data_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - validators_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - next_validators_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - consensus_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - app_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - last_results_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - evidence_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - proposer_address: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - version: - type: object - properties: - block: - type: string - example: 10 - app: - type: string - example: 0 - block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - block: + params: + description: params defines 
the parameters of the module. type: object properties: - header: - type: object - properties: - chain_id: - type: string - example: cosmoshub-2 - height: - type: number - example: 1 - time: - type: string - example: '2017-12-30T05:53:09.287+01:00' - num_txs: - type: number - example: 0 - last_block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - total_txs: - type: number - example: 35 - last_commit_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - data_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - validators_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - next_validators_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - consensus_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - app_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - last_results_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - evidence_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - proposer_address: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - version: - type: object - properties: - block: - type: string - example: 10 - app: - type: string - example: 0 - txs: - type: array - items: - type: string - evidence: + denom_take_rates: type: array items: - type: string - last_commit: - type: object - properties: - block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - precommits: - type: array - items: - type: object - properties: - validator_address: - type: string - validator_index: - type: string - example: '0' - height: - type: string - example: '0' - round: - type: string - example: '0' - timestamp: - type: string - example: '2017-12-30T05:53:09.287+01:00' - type: - type: number - example: 2 - block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - signature: - type: string - example: >- - 7uTC74QlknqYWEwg7Vn6M8Om7FuZ0EO4bjvuj6rwH1mTUJrRuMMZvAAqT9VjNgP0RA/TDp6u/92AqrZfXJSpBQ== - '400': - description: Invalid height - '404': - description: Request block height doesn't - '500': - description: Server internal error - /validatorsets/latest: - get: - summary: Get the latest validator set - tags: - - Tendermint RPC - produces: - - application/json - responses: - '200': - description: The validator set at the latest block height + type: object + properties: + denom: + type: string + rate: + type: integer + format: int64 + title: DenomTakeRate describes take rate for specified denom + title: denom -> % take rate + default_take_rate: + type: integer + format: int64 + title: Params defines the parameters for the x/take package + description: >- + QueryParamsResponse is the response type for the Query/Params RPC + method. + default: + description: An unexpected error response. 
schema: type: object properties: - block_height: + error: type: string - validators: - type: array - items: - type: object - properties: - address: - type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - pub_key: - type: string - example: >- - cosmosvalconspub1zcjduepq0vu2zgkgk49efa0nqwzndanq5m4c7pa3u4apz4g2r9gspqg6g9cs3k9cuf - voting_power: - type: string - example: '1000' - proposer_priority: - type: string - example: '1000' - '500': - description: Server internal error - /validatorsets/{height}: - get: - summary: Get a validator set a certain height - tags: - - Tendermint RPC - produces: - - application/json - parameters: - - in: path - name: height - description: Block height - required: true - type: number - x-example: 1 - responses: - '200': - description: The validator set at a specific block height - schema: - type: object - properties: - block_height: + code: + type: integer + format: int32 + message: type: string - validators: + details: type: array items: type: object properties: - address: - type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - pub_key: - type: string - example: >- - cosmosvalconspub1zcjduepq0vu2zgkgk49efa0nqwzndanq5m4c7pa3u4apz4g2r9gspqg6g9cs3k9cuf - voting_power: + type_url: type: string - example: '1000' - proposer_priority: + value: type: string - example: '1000' - '400': - description: Invalid height - '404': - description: Block at height not available - '500': - description: Server internal error - /txs/{hash}: - get: - deprecated: true - summary: Get a Tx by hash + format: byte tags: - - Transactions - description: Retrieve a transaction using its hash. - produces: - - application/json - parameters: - - in: path - name: hash - description: Tx hash - required: true - type: string - x-example: BCBE20E8D46758B96AE5883B792858296AC06E51435490FBDCAE25A72B3CC76B + - Query + /cosmos/auth/v1beta1/account_info/{address}: + get: + summary: AccountInfo queries account info which is common to all account types. + description: 'Since: cosmos-sdk 0.47' + operationId: AccountInfo responses: '200': - description: Tx with the provided hash + description: A successful response. schema: type: object properties: - hash: - type: string - example: >- - D085138D913993919295FF4B0A9107F1F2CDE0D37A87CE0644E217CBF3B49656 - height: - type: number - example: 368 - tx: + info: + description: info is the account info which is represented by BaseAccount. type: object properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: + address: type: string - signature: + pub_key: type: object properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: + type_url: type: string - example: '0' - sequence: + description: >- + A URL/resource name that uniquely identifies the type + of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. 
The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: type: string - example: '0' - result: - type: object - properties: - log: - type: string - gas_wanted: + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + account_number: type: string - example: '200000' - gas_used: + format: uint64 + sequence: type: string - example: '26354' - tags: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - '500': - description: Internal Server Error - /txs: - get: - deprecated: true - tags: - - Transactions - summary: Search transactions - description: Search transactions by events. - produces: - - application/json - parameters: - - in: query - name: message.action - type: string - description: >- - transaction events such as 'message.action=send' which results in - the following endpoint: 'GET /txs?message.action=send'. note that - each module documents its own events. look for xx_events.md in the - corresponding cosmos-sdk/docs/spec directory - x-example: send - - in: query - name: message.sender - type: string - description: >- - transaction tags with sender: 'GET - /txs?message.action=send&message.sender=cosmos16xyempempp92x9hyzz9wrgf94r6j9h5f06pxxv' - x-example: cosmos16xyempempp92x9hyzz9wrgf94r6j9h5f06pxxv - - in: query - name: page - description: Page number - type: integer - x-example: 1 - - in: query - name: limit - description: Maximum number of items per page - type: integer - x-example: 1 - - in: query - name: tx.minheight - type: integer - description: transactions on blocks with height greater or equal this value - x-example: 25 - - in: query - name: tx.maxheight - type: integer - description: transactions on blocks with height less than or equal this value - x-example: 800000 - responses: - '200': - description: All txs matching the provided events - schema: - type: object - properties: - total_count: - type: number - example: 1 - count: - type: number - example: 1 - page_number: - type: number - example: 1 - page_total: - type: number - example: 1 - limit: - type: number - example: 30 - txs: + format: uint64 + description: |- + QueryAccountInfoResponse is the Query/AccountInfo response type. + + Since: cosmos-sdk 0.47 + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: type: array items: type: object properties: - hash: + type_url: type: string - example: >- - D085138D913993919295FF4B0A9107F1F2CDE0D37A87CE0644E217CBF3B49656 - height: - type: number - example: 368 - tx: - type: object - properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: - type: object - properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - result: - type: object - properties: - log: - type: string - gas_wanted: - type: string - example: '200000' - gas_used: - type: string - example: '26354' - tags: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - '400': - description: Invalid search events - '500': - description: Internal Server Error - post: - tags: - - Transactions - summary: Broadcast a signed tx - description: Broadcast a signed tx to a full node - consumes: - - application/json - produces: - - application/json + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. 
+ + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } parameters: - - in: body - name: txBroadcast - description: >- - The tx must be a signed StdTx. The supported broadcast modes include - `"block"`(return after tx commit), `"sync"`(return afer CheckTx) and - `"async"`(return right away). + - name: address + description: address is the account address string. + in: path required: true - schema: - type: object - properties: - tx: - type: object - properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: - type: object - properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - mode: - type: string - example: block + type: string + tags: + - Query + /cosmos/auth/v1beta1/accounts: + get: + summary: Accounts returns all the existing accounts. + description: >- + When called from another module, this query might consume a high amount + of + + gas if the pagination field is incorrectly set. + + + Since: cosmos-sdk 0.43 + operationId: Accounts responses: '200': - description: Tx broadcasting result + description: A successful response. 
schema: type: object properties: - check_tx: - type: object - properties: - code: - type: integer - data: - type: string - gas_used: - type: integer - gas_wanted: - type: integer - info: - type: string - log: - type: string - tags: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - example: - code: 0 - data: data - log: log - gas_used: 5000 - gas_wanted: 10000 - info: info - tags: - - '' - - '' - deliver_tx: - type: object - properties: - code: - type: integer - data: - type: string - gas_used: - type: integer - gas_wanted: - type: integer - info: - type: string - log: - type: string - tags: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - example: - code: 5 - data: data - log: log - gas_used: 5000 - gas_wanted: 10000 - info: info - tags: - - '' - - '' - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - height: - type: integer - '500': - description: Internal Server Error - /txs/encode: - post: - deprecated: true - tags: - - Transactions - summary: Encode a transaction to the Amino wire format - description: >- - Encode a transaction (signed or not) from JSON to base64-encoded Amino - serialized bytes - consumes: - - application/json - produces: - - application/json - parameters: - - in: body - name: tx - description: The tx to encode - required: true - schema: - type: object - properties: - tx: - type: object - properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: - type: object - properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - responses: - '200': - description: The tx was successfully decoded and re-encoded - schema: - type: object - properties: - tx: - type: string - example: The base64-encoded Amino-serialized bytes for the tx - '400': - description: The tx was malformated - '500': - description: Server internal error - /txs/decode: - post: - deprecated: true - tags: - - Transactions - summary: Decode a transaction from the Amino wire format - description: >- - Decode a transaction (signed or not) from base64-encoded Amino - serialized bytes to JSON - consumes: - - application/json - produces: - - application/json - parameters: - - in: body - name: tx - description: The tx to decode - required: true - schema: - type: object - properties: - tx: - type: string - example: >- - SvBiXe4KPqijYZoKFFHEzJ8c2HPAfv2EFUcIhx0yPagwEhTy0vPA+GGhCEslKXa4Af0uB+mfShoMCgVzdGFrZRIDMTAwEgQQwJoM - responses: - '200': - description: The tx was successfully decoded - schema: - type: object - properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: - type: object - properties: - signature: - type: string - example: >- - 
MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - '400': - description: The tx was malformated - '500': - description: Server internal error - /bank/balances/{address}: - get: - deprecated: true - summary: Get the account balances - tags: - - Bank - produces: - - application/json - parameters: - - in: path - name: address - description: Account address in bech32 format - required: true - type: string - x-example: cosmos16xyempempp92x9hyzz9wrgf94r6j9h5f06pxxv - responses: - '200': - description: Account balances - schema: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - '500': - description: Server internal error - /bank/accounts/{address}/transfers: - post: - deprecated: true - summary: Send coins from one account to another - tags: - - Bank - consumes: - - application/json - produces: - - application/json - parameters: - - in: path - name: address - description: Account address in bech32 format - required: true - type: string - x-example: cosmos16xyempempp92x9hyzz9wrgf94r6j9h5f06pxxv - - in: body - name: account - description: The sender and tx information - required: true - schema: - type: object - properties: - base_req: - type: object - properties: - from: - type: string - example: cosmos1g9ahr6xhht5rmqven628nklxluzyv8z9jqjcmc - description: Sender address or Keybase name to generate a transaction - memo: - type: string - example: Sent via Cosmos Voyager 🚀 - chain_id: - type: string - example: Cosmos-Hub - account_number: - type: string - example: '0' - sequence: - type: string - example: '1' - gas: - type: string - example: '200000' - gas_adjustment: - type: string - example: '1.2' - fees: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - simulate: - type: boolean - example: false - description: >- - Estimate gas for a transaction (cannot be used in - conjunction with generate_only) - amount: + accounts: type: array items: type: object properties: - denom: + type_url: type: string - example: stake - amount: + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. 
(Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: type: string - example: '50' - responses: - '202': - description: Tx was succesfully generated - schema: - type: object - properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + title: accounts are the existing accounts + pagination: + description: pagination defines the pagination in the response. 
type: object properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: + next_key: type: string - example: '0' - sequence: + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: type: string - example: '0' - '400': - description: Invalid request - '500': - description: Server internal error - /bank/total: - get: - deprecated: true - summary: Total supply of coins in the chain - tags: - - Bank - produces: - - application/json - responses: - '200': - description: OK + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryAccountsResponse is the response type for the Query/Accounts + RPC method. + + + Since: cosmos-sdk 0.43 + default: + description: An unexpected error response. schema: type: object properties: - total: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: type: array items: type: object properties: - denom: + type_url: type: string - example: stake - amount: + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. 
+ value: type: string - example: '50' - '500': - description: Internal Server Error - /bank/total/{denomination}: - parameters: - - in: path - name: denomination - description: Coin denomination - required: true - type: string - x-example: uatom - get: - deprecated: true - summary: Total supply of a single coin denomination - tags: - - Bank - produces: - - application/json - responses: - '200': - description: OK - schema: - type: string - '400': - description: Invalid coin denomination - '500': - description: Internal Server Error - /auth/accounts/{address}: - get: - deprecated: true - summary: Get the account information on blockchain - tags: - - Auth - produces: - - application/json - parameters: - - in: path - name: address - description: Account address - required: true - type: string - x-example: cosmos16xyempempp92x9hyzz9wrgf94r6j9h5f06pxxv - responses: - '200': - description: Account information on the blockchain - schema: - type: object - properties: - type: - type: string - value: - type: object - properties: - account_number: - type: string - address: - type: string - coins: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - public_key: - type: object - properties: - type: - type: string - value: - type: string - sequence: - type: string - '500': - description: Server internel error - /staking/delegators/{delegatorAddr}/delegations: - parameters: - - in: path - name: delegatorAddr - description: Bech32 AccAddress of Delegator - required: true - type: string - x-example: cosmos16xyempempp92x9hyzz9wrgf94r6j9h5f06pxxv - get: - deprecated: true - summary: Get all delegations from a delegator - tags: - - Staking - produces: - - application/json - responses: - '200': - description: OK - schema: - type: array - items: - type: object - properties: - delegator_address: - type: string - validator_address: - type: string - shares: - type: string - balance: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - '400': - description: Invalid delegator address - '500': - description: Internal Server Error - post: - summary: Submit delegation + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } parameters: - - in: body - name: delegation - description: Delegate an amount of liquid coins to a validator - schema: - type: object - properties: - base_req: - type: object - properties: - from: - type: string - example: cosmos1g9ahr6xhht5rmqven628nklxluzyv8z9jqjcmc - description: Sender address or Keybase name to generate a transaction - memo: - type: string - example: Sent via Cosmos Voyager 🚀 - chain_id: - type: string - example: Cosmos-Hub - account_number: - type: string - example: '0' - sequence: - type: string - example: '1' - gas: - type: string - example: '200000' - gas_adjustment: - type: string - example: '1.2' - fees: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - simulate: - type: boolean - example: false - description: >- - Estimate gas for a transaction (cannot be used in - conjunction with generate_only) - delegator_address: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - validator_address: - type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - amount: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. 
+ in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - - Staking - consumes: - - application/json - produces: - - application/json - responses: - '200': - description: OK - schema: - type: object - properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: - type: object - properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - '400': - description: Invalid delegator address or delegation request body - '401': - description: Key password is wrong - '500': - description: Internal Server Error - /staking/delegators/{delegatorAddr}/delegations/{validatorAddr}: - parameters: - - in: path - name: delegatorAddr - description: Bech32 AccAddress of Delegator - required: true - type: string - x-example: cosmos16xyempempp92x9hyzz9wrgf94r6j9h5f06pxxv - - in: path - name: validatorAddr - description: Bech32 OperatorAddress of validator - required: true - type: string - x-example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l + - Query + /cosmos/auth/v1beta1/accounts/{address}: get: - deprecated: true - summary: Query the current delegation between a delegator and a validator - tags: - - Staking - produces: - - application/json + summary: Account returns account details based on address. + operationId: Account responses: '200': - description: OK + description: A successful response. schema: type: object properties: - delegator_address: - type: string - validator_address: - type: string - shares: - type: string - balance: + account: type: object properties: - denom: + type_url: type: string - example: stake - amount: + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) 
+ + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: type: string - example: '50' - '400': - description: Invalid delegator address or validator address - '500': - description: Internal Server Error - /staking/delegators/{delegatorAddr}/unbonding_delegations: - parameters: - - in: path - name: delegatorAddr - description: Bech32 AccAddress of Delegator - required: true - type: string - x-example: cosmos16xyempempp92x9hyzz9wrgf94r6j9h5f06pxxv - get: - deprecated: true - summary: Get all unbonding delegations from a delegator - tags: - - Staking - produces: - - application/json - responses: - '200': - description: OK - schema: - type: array - items: - type: object - properties: - delegator_address: - type: string - validator_address: - type: string - initial_balance: - type: string - balance: - type: string - creation_height: - type: integer - min_time: - type: integer - '400': - description: Invalid delegator address - '500': - description: Internal Server Error - post: - summary: Submit an unbonding delegation - parameters: - - in: body - name: delegation - description: Unbond an amount of bonded shares from a validator + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + QueryAccountResponse is the response type for the Query/Account + RPC method. + default: + description: An unexpected error response. schema: type: object properties: - base_req: - type: object - properties: - from: - type: string - example: cosmos1g9ahr6xhht5rmqven628nklxluzyv8z9jqjcmc - description: Sender address or Keybase name to generate a transaction - memo: - type: string - example: Sent via Cosmos Voyager 🚀 - chain_id: - type: string - example: Cosmos-Hub - account_number: - type: string - example: '0' - sequence: - type: string - example: '1' - gas: - type: string - example: '200000' - gas_adjustment: - type: string - example: '1.2' - fees: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - simulate: - type: boolean - example: false - description: >- - Estimate gas for a transaction (cannot be used in - conjunction with generate_only) - delegator_address: + error: type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - validator_address: + code: + type: integer + format: int32 + message: type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - amount: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - tags: - - Staking - consumes: - - application/json - produces: - - application/json - responses: - '200': - description: OK - schema: - type: object - properties: - msg: + details: type: array items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: - type: object - properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - '400': - description: Invalid delegator address or unbonding delegation request body - '401': - description: Key password is wrong - '500': - description: Internal Server Error - /staking/delegators/{delegatorAddr}/unbonding_delegations/{validatorAddr}: - parameters: - - in: path - name: delegatorAddr - description: Bech32 AccAddress of Delegator - required: true - type: string - x-example: cosmos16xyempempp92x9hyzz9wrgf94r6j9h5f06pxxv - - in: path - name: validatorAddr - description: Bech32 OperatorAddress of validator - required: true - type: string - x-example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - get: - deprecated: true - summary: Query all unbonding delegations between a delegator and a validator + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). 
The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: address + description: address defines the address to query for. 
+ in: path + required: true + type: string tags: - - Staking - produces: - - application/json + - Query + /cosmos/auth/v1beta1/address_by_id/{id}: + get: + summary: AccountAddressByID returns account address based on account number. + description: 'Since: cosmos-sdk 0.46.2' + operationId: AccountAddressByID responses: '200': - description: OK + description: A successful response. schema: type: object properties: - delegator_address: + account_address: type: string - validator_address: + description: 'Since: cosmos-sdk 0.46.2' + title: >- + QueryAccountAddressByIDResponse is the response type for + AccountAddressByID rpc method + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: type: string - entries: + details: type: array items: type: object properties: - initial_balance: - type: string - balance: - type: string - creation_height: + type_url: type: string - min_time: + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: type: string - '400': - description: Invalid delegator address or validator address - '500': - description: Internal Server Error - /staking/redelegations: - parameters: - - in: query - name: delegator - description: Bech32 AccAddress of Delegator - required: false - type: string - - in: query - name: validator_from - description: Bech32 ValAddress of SrcValidator - required: false - type: string - - in: query - name: validator_to - description: Bech32 ValAddress of DstValidator - required: false - type: string - get: - deprecated: true - summary: Get all redelegations (filter by query params) + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. 
+ + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: id + description: |- + Deprecated, use account_id instead + + id is the account number of the address to be queried. This field + should have been an uint64 (like all account numbers), and will be + updated to uint64 in a future version of the auth query. + in: path + required: true + type: string + format: int64 + - name: account_id + description: |- + account_id is the account number of the address to be queried. + + Since: cosmos-sdk 0.47 + in: query + required: false + type: string + format: uint64 tags: - - Staking - produces: - - application/json + - Query + /cosmos/auth/v1beta1/bech32: + get: + summary: Bech32Prefix queries bech32Prefix + description: 'Since: cosmos-sdk 0.46' + operationId: Bech32Prefix responses: '200': - description: OK - schema: - type: array - items: - $ref: '#/definitions/Redelegation' - '500': - description: Internal Server Error - /staking/delegators/{delegatorAddr}/redelegations: - parameters: - - in: path - name: delegatorAddr - description: Bech32 AccAddress of Delegator - required: true - type: string - x-example: cosmos16xyempempp92x9hyzz9wrgf94r6j9h5f06pxxv - post: - deprecated: true - summary: Submit a redelegation - parameters: - - in: body - name: delegation - description: The sender and tx information + description: A successful response. 
schema: type: object properties: - base_req: - type: object - properties: - from: - type: string - example: cosmos1g9ahr6xhht5rmqven628nklxluzyv8z9jqjcmc - description: Sender address or Keybase name to generate a transaction - memo: - type: string - example: Sent via Cosmos Voyager 🚀 - chain_id: - type: string - example: Cosmos-Hub - account_number: - type: string - example: '0' - sequence: - type: string - example: '1' - gas: - type: string - example: '200000' - gas_adjustment: - type: string - example: '1.2' - fees: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - simulate: - type: boolean - example: false - description: >- - Estimate gas for a transaction (cannot be used in - conjunction with generate_only) - delegator_address: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - validator_src_addressess: + bech32_prefix: type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - validator_dst_address: - type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - shares: - type: string - example: '100' - tags: - - Staking - consumes: - - application/json - produces: - - application/json - responses: - '200': - description: Tx was succesfully generated + description: >- + Bech32PrefixResponse is the response type for Bech32Prefix rpc + method. + + + Since: cosmos-sdk 0.46 + default: + description: An unexpected error response. schema: type: object properties: - msg: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: type: array items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: - type: object - properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - '400': - description: Invalid delegator address or redelegation request body - '500': - description: Internal Server Error - /staking/delegators/{delegatorAddr}/validators: - parameters: - - in: path - name: delegatorAddr - description: Bech32 AccAddress of Delegator - required: true - type: string - x-example: cosmos16xyempempp92x9hyzz9wrgf94r6j9h5f06pxxv - get: - deprecated: true - summary: Query all validators that a delegator is bonded to - tags: - - Staking - produces: - - application/json - responses: - '200': - description: OK - schema: - type: array - items: - type: object - properties: - operator_address: - type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - consensus_pubkey: - type: string - example: >- - cosmosvalconspub1zcjduepq0vu2zgkgk49efa0nqwzndanq5m4c7pa3u4apz4g2r9gspqg6g9cs3k9cuf - jailed: - type: boolean - status: - type: integer - tokens: - type: string - delegator_shares: - type: string - description: type: object properties: - moniker: - type: string - identity: - type: string - 
website: - type: string - security_contact: - type: string - details: + type_url: type: string - bond_height: - type: string - example: '0' - bond_intra_tx_counter: - type: integer - example: 0 - unbonding_height: - type: string - example: '0' - unbonding_time: - type: string - example: '1970-01-01T00:00:00Z' - commission: - type: object - properties: - rate: - type: string - example: '0' - max_rate: - type: string - example: '0' - max_change_rate: - type: string - example: '0' - update_time: + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: type: string - example: '1970-01-01T00:00:00Z' - '400': - description: Invalid delegator address - '500': - description: Internal Server Error - /staking/delegators/{delegatorAddr}/validators/{validatorAddr}: - parameters: - - in: path - name: delegatorAddr - description: Bech32 AccAddress of Delegator - required: true - type: string - x-example: cosmos16xyempempp92x9hyzz9wrgf94r6j9h5f06pxxv - - in: path - name: validatorAddr - description: Bech32 ValAddress of Delegator - required: true - type: string - x-example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - get: - deprecated: true - summary: Query a validator that a delegator is bonded to + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... 
+ + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } tags: - - Staking - produces: - - application/json + - Query + /cosmos/auth/v1beta1/bech32/{address_bytes}: + get: + summary: AddressBytesToString converts Account Address bytes to string + description: 'Since: cosmos-sdk 0.46' + operationId: AddressBytesToString responses: '200': - description: OK + description: A successful response. schema: type: object properties: - operator_address: - type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - consensus_pubkey: - type: string - example: >- - cosmosvalconspub1zcjduepq0vu2zgkgk49efa0nqwzndanq5m4c7pa3u4apz4g2r9gspqg6g9cs3k9cuf - jailed: - type: boolean - status: - type: integer - tokens: + address_string: type: string - delegator_shares: - type: string - description: - type: object - properties: - moniker: - type: string - identity: - type: string - website: - type: string - security_contact: - type: string - details: - type: string - bond_height: + description: >- + AddressBytesToStringResponse is the response type for + AddressString rpc method. + + + Since: cosmos-sdk 0.46 + default: + description: An unexpected error response. + schema: + type: object + properties: + error: type: string - example: '0' - bond_intra_tx_counter: + code: type: integer - example: 0 - unbonding_height: - type: string - example: '0' - unbonding_time: + format: int32 + message: type: string - example: '1970-01-01T00:00:00Z' - commission: - type: object - properties: - rate: - type: string - example: '0' - max_rate: - type: string - example: '0' - max_change_rate: - type: string - example: '0' - update_time: - type: string - example: '1970-01-01T00:00:00Z' - '400': - description: Invalid delegator address or validator address - '500': - description: Internal Server Error - /staking/validators: - get: - deprecated: true - summary: >- - Get all validator candidates. By default it returns only the bonded - validators. - parameters: - - in: query - name: status - type: string - description: >- - The validator bond status. Must be either 'bonded', 'unbonded', or - 'unbonding'. - x-example: bonded - - in: query - name: page - description: The page number. 
- type: integer - x-example: 1 - - in: query - name: limit - description: The maximum number of items per page. - type: integer - x-example: 1 - tags: - - Staking - produces: - - application/json - responses: - '200': - description: OK - schema: - type: array - items: - type: object - properties: - operator_address: - type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - consensus_pubkey: - type: string - example: >- - cosmosvalconspub1zcjduepq0vu2zgkgk49efa0nqwzndanq5m4c7pa3u4apz4g2r9gspqg6g9cs3k9cuf - jailed: - type: boolean - status: - type: integer - tokens: - type: string - delegator_shares: - type: string - description: - type: object - properties: - moniker: - type: string - identity: - type: string - website: - type: string - security_contact: - type: string - details: - type: string - bond_height: - type: string - example: '0' - bond_intra_tx_counter: - type: integer - example: 0 - unbonding_height: - type: string - example: '0' - unbonding_time: - type: string - example: '1970-01-01T00:00:00Z' - commission: + details: + type: array + items: type: object properties: - rate: - type: string - example: '0' - max_rate: - type: string - example: '0' - max_change_rate: + type_url: type: string - example: '0' - update_time: + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: type: string - example: '1970-01-01T00:00:00Z' - '500': - description: Internal Server Error - /staking/validators/{validatorAddr}: - parameters: - - in: path - name: validatorAddr - description: Bech32 OperatorAddress of validator - required: true - type: string - x-example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - get: - deprecated: true - summary: Query the information from a single validator + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. 
+ + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: address_bytes + in: path + required: true + type: string + format: byte tags: - - Staking - produces: - - application/json + - Query + /cosmos/auth/v1beta1/bech32/{address_string}: + get: + summary: AddressStringToBytes converts Address string to bytes + description: 'Since: cosmos-sdk 0.46' + operationId: AddressStringToBytes responses: '200': - description: OK + description: A successful response. schema: type: object properties: - operator_address: - type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - consensus_pubkey: - type: string - example: >- - cosmosvalconspub1zcjduepq0vu2zgkgk49efa0nqwzndanq5m4c7pa3u4apz4g2r9gspqg6g9cs3k9cuf - jailed: - type: boolean - status: - type: integer - tokens: - type: string - delegator_shares: + address_bytes: type: string - description: - type: object - properties: - moniker: - type: string - identity: - type: string - website: - type: string - security_contact: - type: string - details: - type: string - bond_height: + format: byte + description: >- + AddressStringToBytesResponse is the response type for AddressBytes + rpc method. + + + Since: cosmos-sdk 0.46 + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + error: type: string - example: '0' - bond_intra_tx_counter: + code: type: integer - example: 0 - unbonding_height: - type: string - example: '0' - unbonding_time: + format: int32 + message: type: string - example: '1970-01-01T00:00:00Z' - commission: - type: object - properties: - rate: - type: string - example: '0' - max_rate: - type: string - example: '0' - max_change_rate: - type: string - example: '0' - update_time: - type: string - example: '1970-01-01T00:00:00Z' - '400': - description: Invalid validator address - '500': - description: Internal Server Error - /staking/validators/{validatorAddr}/delegations: - parameters: - - in: path - name: validatorAddr - description: Bech32 OperatorAddress of validator - required: true - type: string - x-example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - get: - deprecated: true - summary: Get all delegations from a validator - tags: - - Staking - produces: - - application/json - responses: - '200': - description: OK - schema: - type: array - items: - type: object - properties: - delegator_address: - type: string - validator_address: - type: string - shares: - type: string - balance: + details: + type: array + items: type: object properties: - denom: - type: string - example: stake - amount: + type_url: type: string - example: '50' - '400': - description: Invalid validator address - '500': - description: Internal Server Error - /staking/validators/{validatorAddr}/unbonding_delegations: - parameters: - - in: path - name: validatorAddr - description: Bech32 OperatorAddress of validator - required: true - type: string - x-example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - get: - deprecated: true - summary: Get all unbonding delegations from a validator - tags: - - Staking - produces: - - application/json - responses: - '200': - description: OK - schema: - type: array - items: - type: object - properties: - delegator_address: - type: string - validator_address: - type: string - initial_balance: - type: string - balance: - type: string - creation_height: - type: integer - min_time: - type: integer - '400': - description: Invalid validator address - '500': - description: Internal Server Error - /staking/pool: - get: - deprecated: true - summary: Get the current state of the staking pool - tags: - - Staking - produces: - - application/json - responses: - '200': - description: OK - schema: - type: object - properties: - loose_tokens: - type: string - bonded_tokens: - type: string - inflation_last_time: - type: string - inflation: - type: string - date_last_commission_reset: - type: string - prev_bonded_shares: - type: string - '500': - description: Internal Server Error - /staking/parameters: - get: - deprecated: true - summary: Get the current staking parameter values - tags: - - Staking - produces: - - application/json - responses: - '200': - description: OK - schema: - type: object - properties: - inflation_rate_change: - type: string - inflation_max: - type: string - inflation_min: - type: string - goal_bonded: - type: string - unbonding_time: - type: string - max_validators: - type: integer - bond_denom: - type: string - '500': - description: Internal Server Error - /slashing/signing_infos: - get: - deprecated: true - summary: Get sign info of given all validators - description: Get sign info of all validators - produces: - - application/json - tags: - - Slashing - parameters: - - in: query - name: page - description: Page number - type: integer - required: true - x-example: 1 - - in: 
query - name: limit - description: Maximum number of items per page - type: integer - required: true - x-example: 5 - responses: - '200': - description: OK - schema: - type: array - items: - type: object - properties: - start_height: - type: string - index_offset: - type: string - jailed_until: - type: string - missed_blocks_counter: - type: string - '400': - description: Invalid validator public key for one of the validators - '500': - description: Internal Server Error - /slashing/validators/{validatorAddr}/unjail: - post: - deprecated: true - summary: Unjail a jailed validator - description: Send transaction to unjail a jailed validator - consumes: - - application/json - produces: - - application/json - tags: - - Slashing + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } parameters: - - type: string - description: Bech32 validator address - name: validatorAddr - required: true + - name: address_string in: path - x-example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - - description: '' - name: UnjailBody - in: body required: true - schema: - type: object - properties: - base_req: - type: object - properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: - type: object - properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - responses: - '200': - description: Tx was succesfully generated - schema: - type: object - properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: - type: object - properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - '400': - description: Invalid validator address or base_req - '500': - description: Internal Server Error - /slashing/parameters: - get: - deprecated: true - summary: Get the current slashing parameters - tags: - - Slashing - produces: - - application/json - responses: - '200': - description: OK - schema: - type: object - properties: - max_evidence_age: - type: string - signed_blocks_window: - type: string - min_signed_per_window: - type: string - double_sign_unbond_duration: - type: string - downtime_unbond_duration: - type: string - 
slash_fraction_double_sign: - type: string - slash_fraction_downtime: - type: string - '500': - description: Internal Server Error - /gov/proposals: - post: - deprecated: true - summary: Submit a proposal - description: Send transaction to submit a proposal - consumes: - - application/json - produces: - - application/json + type: string tags: - - Governance - parameters: - - description: >- - valid value of `"proposal_type"` can be `"text"`, - `"parameter_change"`, `"software_upgrade"` - name: post_proposal_body - in: body - required: true - schema: - type: object - properties: - base_req: - type: object - properties: - from: - type: string - example: cosmos1g9ahr6xhht5rmqven628nklxluzyv8z9jqjcmc - description: Sender address or Keybase name to generate a transaction - memo: - type: string - example: Sent via Cosmos Voyager 🚀 - chain_id: - type: string - example: Cosmos-Hub - account_number: - type: string - example: '0' - sequence: - type: string - example: '1' - gas: - type: string - example: '200000' - gas_adjustment: - type: string - example: '1.2' - fees: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - simulate: - type: boolean - example: false - description: >- - Estimate gas for a transaction (cannot be used in - conjunction with generate_only) - title: - type: string - description: - type: string - proposal_type: - type: string - example: text - proposer: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - initial_deposit: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - responses: - '200': - description: Tx was succesfully generated - schema: - type: object - properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: - type: object - properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - '400': - description: Invalid proposal body - '500': - description: Internal Server Error + - Query + /cosmos/auth/v1beta1/module_accounts: get: - deprecated: true - summary: Query proposals - description: Query proposals information with parameters - produces: - - application/json - tags: - - Governance - parameters: - - in: query - name: voter - description: voter address - required: false - type: string - - in: query - name: depositor - description: depositor address - required: false - type: string - - in: query - name: status - description: >- - proposal status, valid values can be `"deposit_period"`, - `"voting_period"`, `"passed"`, `"rejected"` - required: false - type: string + summary: ModuleAccounts returns all the existing module accounts. 
+ description: 'Since: cosmos-sdk 0.46' + operationId: ModuleAccounts responses: '200': - description: OK - schema: - type: array - items: - type: object - properties: - proposal_id: - type: integer - title: - type: string - description: - type: string - proposal_type: - type: string - proposal_status: - type: string - final_tally_result: - type: object - properties: - 'yes': - type: string - example: '0.0000000000' - abstain: - type: string - example: '0.0000000000' - 'no': - type: string - example: '0.0000000000' - no_with_veto: - type: string - example: '0.0000000000' - submit_time: - type: string - total_deposit: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - voting_start_time: - type: string - '400': - description: Invalid query parameters - '500': - description: Internal Server Error - /gov/proposals/param_change: - post: - deprecated: true - summary: Generate a parameter change proposal transaction - description: Generate a parameter change proposal transaction - consumes: - - application/json - produces: - - application/json - tags: - - Governance - parameters: - - description: >- - The parameter change proposal body that contains all parameter - changes - name: post_proposal_body - in: body - required: true + description: A successful response. schema: type: object properties: - base_req: - type: object - properties: - from: - type: string - example: cosmos1g9ahr6xhht5rmqven628nklxluzyv8z9jqjcmc - description: Sender address or Keybase name to generate a transaction - memo: - type: string - example: Sent via Cosmos Voyager 🚀 - chain_id: - type: string - example: Cosmos-Hub - account_number: - type: string - example: '0' - sequence: - type: string - example: '1' - gas: - type: string - example: '200000' - gas_adjustment: - type: string - example: '1.2' - fees: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - simulate: - type: boolean - example: false - description: >- - Estimate gas for a transaction (cannot be used in - conjunction with generate_only) - title: - type: string - x-example: Param Change - description: - type: string - x-example: Update max validators - proposer: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - deposit: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - changes: + accounts: type: array items: type: object properties: - subspace: - type: string - example: staking - key: - type: string - example: MaxValidators - subkey: + type_url: type: string - example: '' - value: - type: object - responses: - '200': - description: The transaction was succesfully generated - schema: - type: object - properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: - type: object - properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - 
account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - '400': - description: Invalid proposal body - '500': - description: Internal Server Error - /gov/proposals/{proposalId}: - get: - deprecated: true - summary: Query a proposal - description: Query a proposal by id - produces: - - application/json - tags: - - Governance - parameters: - - type: string - name: proposalId - required: true - in: path - x-example: '2' - responses: - '200': - description: OK + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". 
+ + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + QueryModuleAccountsResponse is the response type for the + Query/ModuleAccounts RPC method. + + + Since: cosmos-sdk 0.46 + default: + description: An unexpected error response. schema: type: object properties: - proposal_id: - type: integer - title: - type: string - description: - type: string - proposal_type: - type: string - proposal_status: + error: type: string - final_tally_result: - type: object - properties: - 'yes': - type: string - example: '0.0000000000' - abstain: - type: string - example: '0.0000000000' - 'no': - type: string - example: '0.0000000000' - no_with_veto: - type: string - example: '0.0000000000' - submit_time: + code: + type: integer + format: int32 + message: type: string - total_deposit: + details: type: array items: type: object properties: - denom: + type_url: type: string - example: stake - amount: + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: type: string - example: '50' - voting_start_time: - type: string - '400': - description: Invalid proposal id - '500': - description: Internal Server Error - /gov/proposals/{proposalId}/proposer: - get: - deprecated: true - summary: Query proposer - description: Query for the proposer for a proposal - produces: - - application/json + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. 
+ description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } tags: - - Governance - parameters: - - type: string - name: proposalId - required: true - in: path - x-example: '2' - responses: - '200': - description: OK - schema: - type: object - properties: - proposal_id: - type: string - proposer: - type: string - '400': - description: Invalid proposal ID - '500': - description: Internal Server Error - /gov/proposals/{proposalId}/deposits: + - Query + /cosmos/auth/v1beta1/module_accounts/{name}: get: - deprecated: true - summary: Query deposits - description: Query deposits by proposalId - produces: - - application/json - tags: - - Governance - parameters: - - type: string - name: proposalId - required: true - in: path - x-example: '2' + summary: ModuleAccountByName returns the module account info by module name + operationId: ModuleAccountByName responses: '200': - description: OK - schema: - type: array - items: - type: object - properties: - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - proposal_id: - type: string - depositor: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - '400': - description: Invalid proposal id - '500': - description: Internal Server Error - post: - deprecated: true - summary: Deposit tokens to a proposal - description: Send transaction to deposit tokens to a proposal - consumes: - - application/json - produces: - - application/json - tags: - - Governance - parameters: - - type: string - description: proposal id - name: proposalId - required: true - in: path - x-example: '2' - - description: '' - name: post_deposit_body - in: body - required: true + description: A successful response. schema: type: object properties: - base_req: + account: type: object properties: - from: - type: string - example: cosmos1g9ahr6xhht5rmqven628nklxluzyv8z9jqjcmc - description: Sender address or Keybase name to generate a transaction - memo: - type: string - example: Sent via Cosmos Voyager 🚀 - chain_id: - type: string - example: Cosmos-Hub - account_number: - type: string - example: '0' - sequence: - type: string - example: '1' - gas: - type: string - example: '200000' - gas_adjustment: + type_url: type: string - example: '1.2' - fees: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - simulate: - type: boolean - example: false description: >- - Estimate gas for a transaction (cannot be used in - conjunction with generate_only) - depositor: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - responses: - '200': - description: OK - schema: - type: object - properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). 
+ + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: - type: object - properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - '400': - description: Invalid proposal id or deposit body - '401': - description: Key password is wrong - '500': - description: Internal Server Error - /gov/proposals/{proposalId}/deposits/{depositor}: - get: - deprecated: true - summary: Query deposit - description: Query deposit by proposalId and depositor address - produces: - - application/json - tags: - - Governance - parameters: - - type: string - description: proposal id - name: proposalId - required: true - in: path - x-example: '2' - - type: string - description: Bech32 depositor address - name: depositor - required: true - in: path - x-example: cosmos16xyempempp92x9hyzz9wrgf94r6j9h5f06pxxv - responses: - '200': - description: OK + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + QueryModuleAccountByNameResponse is the response type for the + Query/ModuleAccountByName RPC method. + default: + description: An unexpected error response. schema: type: object properties: - amount: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: type: array items: type: object properties: - denom: + type_url: type: string - example: stake - amount: + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. 
+ value: type: string - example: '50' - proposal_id: - type: string - depositor: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - '400': - description: Invalid proposal id or depositor address - '404': - description: Found no deposit - '500': - description: Internal Server Error - /gov/proposals/{proposalId}/votes: - get: - deprecated: true - summary: Query voters - description: Query voters information by proposalId - produces: - - application/json - tags: - - Governance - parameters: - - type: string - description: proposal id - name: proposalId - required: true - in: path - x-example: '2' - responses: - '200': - description: OK - schema: - type: array - items: - type: object - properties: - voter: - type: string - proposal_id: - type: string - option: - type: string - '400': - description: Invalid proposal id - '500': - description: Internal Server Error - post: - deprecated: true - summary: Vote a proposal - description: Send transaction to vote a proposal - consumes: - - application/json - produces: - - application/json - tags: - - Governance + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } parameters: - - type: string - description: proposal id - name: proposalId - required: true + - name: name in: path - x-example: '2' - - description: >- - valid value of `"option"` field can be `"yes"`, `"no"`, - `"no_with_veto"` and `"abstain"` - name: post_vote_body - in: body required: true - schema: - type: object - properties: - base_req: - type: object - properties: - from: - type: string - example: cosmos1g9ahr6xhht5rmqven628nklxluzyv8z9jqjcmc - description: Sender address or Keybase name to generate a transaction - memo: - type: string - example: Sent via Cosmos Voyager 🚀 - chain_id: - type: string - example: Cosmos-Hub - account_number: - type: string - example: '0' - sequence: - type: string - example: '1' - gas: - type: string - example: '200000' - gas_adjustment: - type: string - example: '1.2' - fees: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - simulate: - type: boolean - example: false - description: >- - Estimate gas for a transaction (cannot be used in - conjunction with generate_only) - voter: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - option: - type: string - example: 'yes' + type: string + tags: + - Query + /cosmos/auth/v1beta1/params: + get: + summary: Params queries all parameters. + operationId: AuthParams responses: '200': - description: OK + description: A successful response. schema: type: object properties: - msg: - type: array - items: - type: string - fee: + params: + description: params defines the parameters of the module. 
type: object properties: - gas: + max_memo_characters: type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: - type: object - properties: - signature: + format: uint64 + tx_sig_limit: type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: + format: uint64 + tx_size_cost_per_byte: type: string - example: '0' - sequence: + format: uint64 + sig_verify_cost_ed25519: type: string - example: '0' - '400': - description: Invalid proposal id or vote body - '401': - description: Key password is wrong - '500': - description: Internal Server Error - /gov/proposals/{proposalId}/votes/{voter}: - get: - deprecated: true - summary: Query vote - description: Query vote information by proposal Id and voter address - produces: - - application/json - tags: - - Governance - parameters: - - type: string - description: proposal id - name: proposalId - required: true - in: path - x-example: '2' - - type: string - description: Bech32 voter address - name: voter - required: true - in: path - x-example: cosmos16xyempempp92x9hyzz9wrgf94r6j9h5f06pxxv - responses: - '200': - description: OK - schema: - type: object - properties: - voter: - type: string - proposal_id: - type: string - option: - type: string - '400': - description: Invalid proposal id or voter address - '404': - description: Found no vote - '500': - description: Internal Server Error - /gov/proposals/{proposalId}/tally: - get: - deprecated: true - summary: Get a proposal's tally result at the current time - description: >- - Gets a proposal's tally result at the current time. If the proposal is - pending deposits (i.e status 'DepositPeriod') it returns an empty tally - result. - produces: - - application/json - tags: - - Governance - parameters: - - type: string - description: proposal id - name: proposalId - required: true - in: path - x-example: '2' - responses: - '200': - description: OK - schema: - type: object - properties: - 'yes': - type: string - example: '0.0000000000' - abstain: - type: string - example: '0.0000000000' - 'no': - type: string - example: '0.0000000000' - no_with_veto: - type: string - example: '0.0000000000' - '400': - description: Invalid proposal id - '500': - description: Internal Server Error - /gov/parameters/deposit: - get: - deprecated: true - summary: Query governance deposit parameters - description: >- - Query governance deposit parameters. The max_deposit_period units are in - nanoseconds. - produces: - - application/json - tags: - - Governance - responses: - '200': - description: OK + format: uint64 + sig_verify_cost_secp256k1: + type: string + format: uint64 + description: >- + QueryParamsResponse is the response type for the Query/Params RPC + method. + default: + description: An unexpected error response. 
schema: type: object properties: - min_deposit: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - max_deposit_period: - type: string - example: '86400000000000' - '400': - description: is not a valid query request path - '404': - description: Found no deposit parameters - '500': - description: Internal Server Error - /gov/parameters/tallying: - get: - deprecated: true - summary: Query governance tally parameters - description: Query governance tally parameters - produces: - - application/json - tags: - - Governance - responses: - '200': - description: OK - schema: - properties: - threshold: - type: string - example: '0.5000000000' - veto: + error: type: string - example: '0.3340000000' - governance_penalty: + code: + type: integer + format: int32 + message: type: string - example: '0.0100000000' - '400': - description: is not a valid query request path - '404': - description: Found no tally parameters - '500': - description: Internal Server Error - /gov/parameters/voting: - get: - deprecated: true - summary: Query governance voting parameters - description: >- - Query governance voting parameters. The voting_period units are in - nanoseconds. - produces: - - application/json - tags: - - Governance - responses: - '200': - description: OK - schema: - properties: - voting_period: - type: string - example: '86400000000000' - '400': - description: is not a valid query request path - '404': - description: Found no voting parameters - '500': - description: Internal Server Error - /distribution/delegators/{delegatorAddr}/rewards: - parameters: - - in: path - name: delegatorAddr - description: Bech32 AccAddress of Delegator - required: true - type: string - x-example: cosmos167w96tdvmazakdwkw2u57227eduula2cy572lf - get: - deprecated: true - summary: Get the total rewards balance from all delegations - description: >- - Get the sum of all the rewards earned by delegations by a single - delegator - produces: - - application/json - tags: - - Distribution - responses: - '200': - description: OK - schema: - type: object - properties: - rewards: - type: array - items: - type: object - properties: - validator_address: - type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - reward: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - total: + details: type: array items: type: object properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - '400': - description: Invalid delegator address - '500': - description: Internal Server Error - post: - deprecated: true - summary: Withdraw all the delegator's delegation rewards - description: Withdraw all the delegator's delegation rewards - tags: - - Distribution - consumes: - - application/json - produces: - - application/json - parameters: - - in: body - name: Withdraw request body - schema: - properties: - base_req: - type: object - properties: - from: - type: string - example: cosmos1g9ahr6xhht5rmqven628nklxluzyv8z9jqjcmc - description: Sender address or Keybase name to generate a transaction - memo: - type: string - example: Sent via Cosmos Voyager 🚀 - chain_id: - type: string - example: Cosmos-Hub - account_number: - type: string - example: '0' - sequence: - type: string - example: '1' - gas: - type: string - example: '200000' - gas_adjustment: - type: string - example: '1.2' - fees: - type: 
array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - simulate: - type: boolean - example: false - description: >- - Estimate gas for a transaction (cannot be used in - conjunction with generate_only) - responses: - '200': - description: OK - schema: - type: object - properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: - type: object - properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - '400': - description: Invalid delegator address - '401': - description: Key password is wrong - '500': - description: Internal Server Error - /distribution/delegators/{delegatorAddr}/rewards/{validatorAddr}: - parameters: - - in: path - name: delegatorAddr - description: Bech32 AccAddress of Delegator - required: true - type: string - x-example: cosmos16xyempempp92x9hyzz9wrgf94r6j9h5f06pxxv - - in: path - name: validatorAddr - description: Bech32 OperatorAddress of validator - required: true - type: string - x-example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - get: - deprecated: true - summary: Query a delegation reward - description: Query a single delegation reward by a delegator - tags: - - Distribution - produces: - - application/json - responses: - '200': - description: OK - schema: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - '400': - description: Invalid delegator address - '500': - description: Internal Server Error - post: - deprecated: true - summary: Withdraw a delegation reward - description: Withdraw a delegator's delegation reward from a single validator - tags: - - Distribution - consumes: - - application/json - produces: - - application/json - parameters: - - in: body - name: Withdraw request body - schema: - properties: - base_req: - type: object - properties: - from: - type: string - example: cosmos1g9ahr6xhht5rmqven628nklxluzyv8z9jqjcmc - description: Sender address or Keybase name to generate a transaction - memo: - type: string - example: Sent via Cosmos Voyager 🚀 - chain_id: - type: string - example: Cosmos-Hub - account_number: - type: string - example: '0' - sequence: - type: string - example: '1' - gas: - type: string - example: '200000' - gas_adjustment: - type: string - example: '1.2' - fees: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - simulate: - type: boolean - example: false - description: >- - Estimate gas for a transaction (cannot be used in - conjunction with generate_only) - responses: - '200': - description: OK - schema: - type: object - properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - 
type: string - signature: - type: object - properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - '400': - description: Invalid delegator address or delegation body - '401': - description: Key password is wrong - '500': - description: Internal Server Error - /distribution/delegators/{delegatorAddr}/withdraw_address: - parameters: - - in: path - name: delegatorAddr - description: Bech32 AccAddress of Delegator - required: true - type: string - x-example: cosmos167w96tdvmazakdwkw2u57227eduula2cy572lf - get: - deprecated: true - summary: Get the rewards withdrawal address - description: >- - Get the delegations' rewards withdrawal address. This is the address in - which the user will receive the reward funds - tags: - - Distribution - produces: - - application/json - responses: - '200': - description: OK - schema: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - '400': - description: Invalid delegator address - '500': - description: Internal Server Error - post: - deprecated: true - summary: Replace the rewards withdrawal address - description: Replace the delegations' rewards withdrawal address for a new one. - tags: - - Distribution - consumes: - - application/json - produces: - - application/json - parameters: - - in: body - name: Withdraw request body - schema: - properties: - base_req: - type: object - properties: - from: - type: string - example: cosmos1g9ahr6xhht5rmqven628nklxluzyv8z9jqjcmc - description: Sender address or Keybase name to generate a transaction - memo: - type: string - example: Sent via Cosmos Voyager 🚀 - chain_id: - type: string - example: Cosmos-Hub - account_number: - type: string - example: '0' - sequence: - type: string - example: '1' - gas: - type: string - example: '200000' - gas_adjustment: - type: string - example: '1.2' - fees: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - simulate: - type: boolean - example: false - description: >- - Estimate gas for a transaction (cannot be used in - conjunction with generate_only) - withdraw_address: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - responses: - '200': - description: OK - schema: - type: object - properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: - type: object - properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - '400': - description: Invalid delegator or withdraw address - '401': - description: Key password is wrong - 
'500': - description: Internal Server Error - /distribution/validators/{validatorAddr}: - parameters: - - in: path - name: validatorAddr - description: Bech32 OperatorAddress of validator - required: true - type: string - x-example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - get: - deprecated: true - summary: Validator distribution information - description: Query the distribution information of a single validator - tags: - - Distribution - produces: - - application/json - responses: - '200': - description: OK - schema: - type: object - properties: - operator_address: - type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - self_bond_rewards: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - val_commission: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - '400': - description: Invalid validator address - '500': - description: Internal Server Error - /distribution/validators/{validatorAddr}/outstanding_rewards: - parameters: - - in: path - name: validatorAddr - description: Bech32 OperatorAddress of validator - required: true - type: string - x-example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - get: - deprecated: true - summary: Fee distribution outstanding rewards of a single validator - tags: - - Distribution - produces: - - application/json - responses: - '200': - description: OK - schema: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - '500': - description: Internal Server Error - /distribution/validators/{validatorAddr}/rewards: - parameters: - - in: path - name: validatorAddr - description: Bech32 OperatorAddress of validator - required: true - type: string - x-example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - get: - deprecated: true - summary: Commission and self-delegation rewards of a single validator - description: Query the commission and self-delegation rewards of validator. 
- tags: - - Distribution - produces: - - application/json - responses: - '200': - description: OK - schema: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - '400': - description: Invalid validator address - '500': - description: Internal Server Error - post: - deprecated: true - summary: Withdraw the validator's rewards - description: Withdraw the validator's self-delegation and commissions rewards - tags: - - Distribution - consumes: - - application/json - produces: - - application/json - parameters: - - in: body - name: Withdraw request body - schema: - properties: - base_req: - type: object - properties: - from: - type: string - example: cosmos1g9ahr6xhht5rmqven628nklxluzyv8z9jqjcmc - description: Sender address or Keybase name to generate a transaction - memo: - type: string - example: Sent via Cosmos Voyager 🚀 - chain_id: - type: string - example: Cosmos-Hub - account_number: - type: string - example: '0' - sequence: - type: string - example: '1' - gas: - type: string - example: '200000' - gas_adjustment: - type: string - example: '1.2' - fees: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - simulate: - type: boolean - example: false - description: >- - Estimate gas for a transaction (cannot be used in - conjunction with generate_only) - responses: - '200': - description: OK - schema: - type: object - properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: - type: string - signature: - type: object - properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - '400': - description: Invalid validator address - '401': - description: Key password is wrong - '500': - description: Internal Server Error - /distribution/community_pool: - get: - deprecated: true - summary: Community pool parameters - tags: - - Distribution - produces: - - application/json - responses: - '200': - description: OK - schema: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - '500': - description: Internal Server Error - /distribution/parameters: - get: - deprecated: true - summary: Fee distribution parameters - tags: - - Distribution - produces: - - application/json - responses: - '200': - description: OK - schema: - properties: - base_proposer_reward: - type: string - bonus_proposer_reward: - type: string - community_tax: - type: string - '500': - description: Internal Server Error - /minting/parameters: - get: - deprecated: true - summary: Minting module parameters - tags: - - Mint - produces: - - application/json - responses: - '200': - description: OK - schema: - properties: - mint_denom: - type: string - inflation_rate_change: - type: string - inflation_max: - type: string - inflation_min: - type: string - goal_bonded: - type: string - blocks_per_year: - type: string - '500': - 
description: Internal Server Error - /minting/inflation: - get: - deprecated: true - summary: Current minting inflation value - tags: - - Mint - produces: - - application/json - responses: - '200': - description: OK - schema: - type: string - '500': - description: Internal Server Error - /minting/annual-provisions: - get: - deprecated: true - summary: Current minting annual provisions value - tags: - - Mint - produces: - - application/json - responses: - '200': - description: OK - schema: - type: string - '500': - description: Internal Server Error - /cosmos/auth/v1beta1/accounts: - get: - summary: Accounts returns all the existing accounts - operationId: Accounts - responses: - '200': - description: A successful response. - schema: - type: object - properties: - accounts: - type: array - items: - type: object - properties: - type_url: + type_url: type: string description: >- A URL/resource name that uniquely identifies the type of @@ -8615,7 +7436,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -8625,13 +7446,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -8653,7 +7477,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -8690,7 +7513,41 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - title: accounts are the existing accounts + tags: + - Query + /cosmos/bank/v1beta1/balances/{address}: + get: + summary: AllBalances queries the balance of all coins for a single account. + description: >- + When called from another module, this query might consume a high amount + of + + gas if the pagination field is incorrectly set. + operationId: AllBalances + responses: + '200': + description: A successful response. + schema: + type: object + properties: + balances: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: balances is the balances of all the coins. pagination: description: pagination defines the pagination in the response. type: object @@ -8698,9 +7555,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -8710,10 +7568,12 @@ paths: was set, its value is undefined otherwise description: >- - QueryAccountsResponse is the response type for the Query/Accounts - RPC method. + QueryAllBalancesResponse is the response type for the + Query/AllBalances RPC + + method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -8731,176 +7591,459 @@ paths: properties: type_url: type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized - - protocol buffer message. 
This string must contain at - least - - one "/" character. The last segment of the URL's path - must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in - a canonical form - - (e.g., leading "." is not accepted). + value: + type: string + format: byte + parameters: + - name: address + description: address is the address to query balances for. + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + It is less efficient than using key. Only one of offset or key + should - In practice, teams usually precompile into the binary - all types that they + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. - expect it to use in the context of Any. However, for - URLs which use the + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include - scheme `http`, `https`, or no scheme, one can optionally - set up a type + a count of the total number of items available for pagination in + UIs. - server that maps type URLs to message definitions as - follows: + count_total is only respected when offset is used. It is ignored + when key + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. - * If no scheme is provided, `https` is assumed. - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/bank/v1beta1/balances/{address}/by_denom: + get: + summary: Balance queries the balance of a single coin for a single account. + operationId: Balance + responses: + '200': + description: A successful response. + schema: + type: object + properties: + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. - Note: this functionality is not currently available in - the official - protobuf release, and it is not used for type URLs - beginning with + NOTE: The amount field is an Int which implements the custom + method - type.googleapis.com. + signatures required by gogoproto. + description: >- + QueryBalanceResponse is the response type for the Query/Balance + RPC method. + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + parameters: + - name: address + description: address is the address to query balances for. + in: path + required: true + type: string + - name: denom + description: denom is the coin denom to query balances for. + in: query + required: false + type: string + tags: + - Query + /cosmos/bank/v1beta1/denom_owners/{denom}: + get: + summary: >- + DenomOwners queries for all account addresses that own a particular + token + denomination. + description: >- + When called from another module, this query might consume a high amount + of - Schemes other than `http`, `https` (or the empty scheme) - might be + gas if the pagination field is incorrectly set. - used with implementation specific semantics. - value: + + Since: cosmos-sdk 0.46 + operationId: DenomOwners + responses: + '200': + description: A successful response. + schema: + type: object + properties: + denom_owners: + type: array + items: + type: object + properties: + address: type: string - format: byte description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a + address defines the address that owns a particular + denomination. + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. - URL that describes the type of the serialized message. + NOTE: The amount field is an Int which implements the + custom method - Protobuf library provides support to pack/unpack Any values - in the form + signatures required by gogoproto. + description: >- + DenomOwner defines structure representing an account that + owns or holds a - of utility functions or additional generated methods of the - Any type. + particular denominated token. It contains the account + address and account + balance of the denominated token. - Example 1: Pack and unpack a message in C++. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + Since: cosmos-sdk 0.46 + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - Example 2: Pack and unpack a message in Java. + was set, its value is undefined otherwise + description: >- + QueryDenomOwnersResponse defines the RPC response of a DenomOwners + RPC query. - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - Example 3: Pack and unpack a message in Python. + Since: cosmos-sdk 0.46 + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + parameters: + - name: denom + description: >- + denom defines the coin denomination to query all account holders + for. + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + It is less efficient than using key. Only one of offset or key + should - Example 4: Pack and unpack a message in Go + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include - The pack methods provided by protobuf library will by - default use + a count of the total number of items available for pagination in + UIs. - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + count_total is only respected when offset is used. It is ignored + when key - methods only use the fully qualified type name after the - last '/' + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. - in the type URL, for example "foo.bar.com/x/y.z" will yield - type - name "y.z". + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/bank/v1beta1/denoms_metadata: + get: + summary: |- + DenomsMetadata queries the client metadata for all registered coin + denominations. + operationId: DenomsMetadata + responses: + '200': + description: A successful response. + schema: + type: object + properties: + metadatas: + type: array + items: + type: object + properties: + description: + type: string + denom_units: + type: array + items: + type: object + properties: + denom: + type: string + description: >- + denom represents the string name of the given + denom unit (e.g uatom). + exponent: + type: integer + format: int64 + description: >- + exponent represents power of 10 exponent that one + must + raise the base_denom to in order to equal the + given DenomUnit's denom + 1 denom = 10^exponent base_denom - JSON + (e.g. with a base_denom of uatom, one can create a + DenomUnit of 'atom' with - ==== + exponent = 6, thus: 1 atom = 10^6 uatom). + aliases: + type: array + items: + type: string + title: >- + aliases is a list of string aliases for the given + denom + description: |- + DenomUnit represents a struct that describes a given + denomination unit of the basic token. 
+ title: >- + denom_units represents the list of DenomUnit's for a + given coin + base: + type: string + description: >- + base represents the base denom (should be the DenomUnit + with exponent = 0). + display: + type: string + description: |- + display indicates the suggested denom that should be + displayed in clients. + name: + type: string + description: 'Since: cosmos-sdk 0.43' + title: 'name defines the name of the token (eg: Cosmos Atom)' + symbol: + type: string + description: >- + symbol is the token symbol usually shown on exchanges + (eg: ATOM). This can - The JSON representation of an `Any` value uses the regular + be the same as the display. - representation of the deserialized, embedded message, with - an - additional field `@type` which contains the type URL. - Example: + Since: cosmos-sdk 0.43 + uri: + type: string + description: >- + URI to a document (on or off-chain) that contains + additional information. Optional. - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + Since: cosmos-sdk 0.46 + uri_hash: + type: string + description: >- + URIHash is a sha256 hash of a document pointed by URI. + It's used to verify that - If the embedded message type is well-known and has a custom - JSON + the document didn't change. Optional. - representation, that representation will be embedded adding - a field - `value` which holds the custom JSON in addition to the - `@type` + Since: cosmos-sdk 0.46 + description: |- + Metadata represents a struct that describes + a basic token. + description: >- + metadata provides the client information for all the + registered tokens. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - field. Example (for message [google.protobuf.Duration][]): + was set, its value is undefined otherwise + description: >- + QueryDenomsMetadataResponse is the response type for the + Query/DenomsMetadata RPC - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte parameters: - name: pagination.key description: |- @@ -8948,203 +8091,124 @@ paths: in: query required: false type: boolean - format: boolean - name: pagination.reverse description: >- reverse is set to true if results are to be returned in the descending order. + + + Since: cosmos-sdk 0.43 in: query required: false type: boolean - format: boolean tags: - Query - /cosmos/auth/v1beta1/accounts/{address}: + /cosmos/bank/v1beta1/denoms_metadata/{denom}: get: - summary: Account returns account details based on address. - operationId: Account + summary: DenomsMetadata queries the client metadata of a given coin denomination. + operationId: DenomMetadata responses: '200': description: A successful response. 
schema: type: object properties: - account: + metadata: type: object properties: - type_url: + description: type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized + denom_units: + type: array + items: + type: object + properties: + denom: + type: string + description: >- + denom represents the string name of the given denom + unit (e.g uatom). + exponent: + type: integer + format: int64 + description: >- + exponent represents power of 10 exponent that one + must - protocol buffer message. This string must contain at least + raise the base_denom to in order to equal the given + DenomUnit's denom - one "/" character. The last segment of the URL's path must - represent + 1 denom = 10^exponent base_denom - the fully qualified name of the type (as in + (e.g. with a base_denom of uatom, one can create a + DenomUnit of 'atom' with - `path/google.protobuf.Duration`). The name should be in a - canonical form + exponent = 6, thus: 1 atom = 10^6 uatom). + aliases: + type: array + items: + type: string + title: >- + aliases is a list of string aliases for the given + denom + description: |- + DenomUnit represents a struct that describes a given + denomination unit of the basic token. + title: >- + denom_units represents the list of DenomUnit's for a given + coin + base: + type: string + description: >- + base represents the base denom (should be the DenomUnit + with exponent = 0). + display: + type: string + description: |- + display indicates the suggested denom that should be + displayed in clients. + name: + type: string + description: 'Since: cosmos-sdk 0.43' + title: 'name defines the name of the token (eg: Cosmos Atom)' + symbol: + type: string + description: >- + symbol is the token symbol usually shown on exchanges (eg: + ATOM). This can - (e.g., leading "." is not accepted). + be the same as the display. - In practice, teams usually precompile into the binary all - types that they + Since: cosmos-sdk 0.43 + uri: + type: string + description: >- + URI to a document (on or off-chain) that contains + additional information. Optional. - expect it to use in the context of Any. However, for URLs - which use the - scheme `http`, `https`, or no scheme, one can optionally - set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in the - official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty scheme) - might be - - used with implementation specific semantics. - value: + Since: cosmos-sdk 0.46 + uri_hash: type: string - format: byte description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message - along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values in - the form - - of utility functions or additional generated methods of the - Any type. 
- - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } - - The pack methods provided by protobuf library will by default - use - - 'type.googleapis.com/full.type.name' as the type URL and the - unpack - - methods only use the fully qualified type name after the last - '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield - type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with an - - additional field `@type` which contains the type URL. Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom - JSON - - representation, that representation will be embedded adding a - field + URIHash is a sha256 hash of a document pointed by URI. + It's used to verify that - `value` which holds the custom JSON in addition to the `@type` + the document didn't change. Optional. - field. Example (for message [google.protobuf.Duration][]): - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } + Since: cosmos-sdk 0.46 + description: |- + Metadata represents a struct that describes + a basic token. description: >- - QueryAccountResponse is the response type for the Query/Account - RPC method. + QueryDenomMetadataResponse is the response type for the + Query/DenomMetadata RPC + + method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -9162,188 +8226,21 @@ paths: properties: type_url: type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's path - must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in - a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary - all types that they - - expect it to use in the context of Any. However, for - URLs which use the - - scheme `http`, `https`, or no scheme, one can optionally - set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) 
- - Note: this functionality is not currently available in - the official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty scheme) - might be - - used with implementation specific semantics. value: type: string format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values - in the form - - of utility functions or additional generated methods of the - Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } - - The pack methods provided by protobuf library will by - default use - - 'type.googleapis.com/full.type.name' as the type URL and the - unpack - - methods only use the fully qualified type name after the - last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield - type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with - an - - additional field `@type` which contains the type URL. - Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom - JSON - - representation, that representation will be embedded adding - a field - - `value` which holds the custom JSON in addition to the - `@type` - - field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } parameters: - - name: address - description: address defines the address to query for. + - name: denom + description: denom is the coin denom to query the metadata for. in: path required: true type: string tags: - Query - /cosmos/auth/v1beta1/params: + /cosmos/bank/v1beta1/params: get: - summary: Params queries all parameters. - operationId: AuthParams + summary: Params queries the parameters of x/bank module. + operationId: BankParams responses: '200': description: A successful response. @@ -9351,29 +8248,42 @@ paths: type: object properties: params: - description: params defines the parameters of the module. 
type: object properties: - max_memo_characters: - type: string - format: uint64 - tx_sig_limit: - type: string - format: uint64 - tx_size_cost_per_byte: - type: string - format: uint64 - sig_verify_cost_ed25519: - type: string - format: uint64 - sig_verify_cost_secp256k1: - type: string - format: uint64 + send_enabled: + type: array + items: + type: object + properties: + denom: + type: string + enabled: + type: boolean + description: >- + SendEnabled maps coin denom to a send_enabled status + (whether a denom is + + sendable). + description: >- + Deprecated: Use of SendEnabled in params is deprecated. + + For genesis, use the newly added send_enabled field in the + genesis object. + + Storage, lookup, and manipulation of this information is + now in the keeper. + + + As of cosmos-sdk 0.47, this only exists for backwards + compatibility of genesis files. + default_send_enabled: + type: boolean + description: Params defines the parameters for the bank module. description: >- - QueryParamsResponse is the response type for the Query/Params RPC - method. + QueryParamsResponse defines the response type for querying x/bank + parameters. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -9391,182 +8301,182 @@ paths: properties: type_url: type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's path - must represent + value: + type: string + format: byte + tags: + - Query + /cosmos/bank/v1beta1/send_enabled: + get: + summary: SendEnabled queries for SendEnabled entries. + description: >- + This query only returns denominations that have specific SendEnabled + settings. - the fully qualified name of the type (as in + Any denomination that does not have a specific setting will use the + default - `path/google.protobuf.Duration`). The name should be in - a canonical form + params.default_send_enabled, and will not be returned by this query. - (e.g., leading "." is not accepted). + Since: cosmos-sdk 0.47 + operationId: SendEnabled + responses: + '200': + description: A successful response. + schema: + type: object + properties: + send_enabled: + type: array + items: + type: object + properties: + denom: + type: string + enabled: + type: boolean + description: >- + SendEnabled maps coin denom to a send_enabled status + (whether a denom is - In practice, teams usually precompile into the binary - all types that they + sendable). + pagination: + description: >- + pagination defines the pagination in the response. This field + is only - expect it to use in the context of Any. However, for - URLs which use the + populated if the denoms field in the request is empty. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - scheme `http`, `https`, or no scheme, one can optionally - set up a type + was set, its value is undefined otherwise + description: >- + QuerySendEnabledResponse defines the RPC response of a SendEnable + query. 
- server that maps type URLs to message definitions as - follows: + Since: cosmos-sdk 0.47 + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + parameters: + - name: denoms + description: >- + denoms is the specific denoms you want look up. Leave empty to get + all entries. + in: query + required: false + type: array + items: + type: string + collectionFormat: multi + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. - * If no scheme is provided, `https` is assumed. + It is less efficient than using key. Only one of offset or key + should - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. - Note: this functionality is not currently available in - the official + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include - protobuf release, and it is not used for type URLs - beginning with + a count of the total number of items available for pagination in + UIs. - type.googleapis.com. + count_total is only respected when offset is used. It is ignored + when key + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. - Schemes other than `http`, `https` (or the empty scheme) - might be - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values - in the form - - of utility functions or additional generated methods of the - Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... 
- - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } - - The pack methods provided by protobuf library will by - default use - - 'type.googleapis.com/full.type.name' as the type URL and the - unpack - - methods only use the fully qualified type name after the - last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield - type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with - an - - additional field `@type` which contains the type URL. - Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom - JSON + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/bank/v1beta1/spendable_balances/{address}: + get: + summary: >- + SpendableBalances queries the spendable balance of all coins for a + single - representation, that representation will be embedded adding - a field + account. + description: >- + When called from another module, this query might consume a high amount + of - `value` which holds the custom JSON in addition to the - `@type` + gas if the pagination field is incorrectly set. - field. Example (for message [google.protobuf.Duration][]): - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - tags: - - Query - /cosmos/bank/v1beta1/balances/{address}: - get: - summary: AllBalances queries the balance of all coins for a single account. - operationId: AllBalances + Since: cosmos-sdk 0.46 + operationId: SpendableBalances responses: '200': description: A successful response. @@ -9590,7 +8500,7 @@ paths: method signatures required by gogoproto. - description: balances is the balances of all the coins. + description: balances is the spendable balances of all the coins. pagination: description: pagination defines the pagination in the response. type: object @@ -9598,9 +8508,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -9610,12 +8521,15 @@ paths: was set, its value is undefined otherwise description: >- - QueryAllBalancesResponse is the response type for the - Query/AllBalances RPC + QuerySpendableBalancesResponse defines the gRPC response structure + for querying - method. + an account's spendable balances. + + + Since: cosmos-sdk 0.46 default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -9638,7 +8552,7 @@ paths: format: byte parameters: - name: address - description: address is the address to query balances for. + description: address is the address to query spendable balances for. in: path required: true type: string @@ -9688,21 +8602,34 @@ paths: in: query required: false type: boolean - format: boolean - name: pagination.reverse description: >- reverse is set to true if results are to be returned in the descending order. 
+ + + Since: cosmos-sdk 0.43 in: query required: false type: boolean - format: boolean tags: - Query - /cosmos/bank/v1beta1/balances/{address}/{denom}: + /cosmos/bank/v1beta1/spendable_balances/{address}/by_denom: get: - summary: Balance queries the balance of a single coin for a single account. - operationId: Balance + summary: >- + SpendableBalanceByDenom queries the spendable balance of a single denom + for + + a single account. + description: >- + When called from another module, this query might consume a high amount + of + + gas if the pagination field is incorrectly set. + + + Since: cosmos-sdk 0.47 + operationId: SpendableBalanceByDenom responses: '200': description: A successful response. @@ -9725,10 +8652,15 @@ paths: signatures required by gogoproto. description: >- - QueryBalanceResponse is the response type for the Query/Balance - RPC method. + QuerySpendableBalanceByDenomResponse defines the gRPC response + structure for + + querying an account's spendable balance for a specific denom. + + + Since: cosmos-sdk 0.47 default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -9757,68 +8689,58 @@ paths: type: string - name: denom description: denom is the coin denom to query balances for. - in: path - required: true + in: query + required: false type: string tags: - Query - /cosmos/bank/v1beta1/denom_owners/{denom}: + /cosmos/bank/v1beta1/supply: get: - summary: >- - DenomOwners queries for all account addresses that own a particular - token + summary: TotalSupply queries the total supply of all coins. + description: >- + When called from another module, this query might consume a high amount + of - denomination. - operationId: DenomOwners + gas if the pagination field is incorrectly set. + operationId: TotalSupply responses: '200': description: A successful response. schema: type: object properties: - denom_owners: + supply: type: array items: type: object properties: - address: + denom: + type: string + amount: type: string - description: >- - address defines the address that owns a particular - denomination. - balance: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the - custom method - - signatures required by gogoproto. description: >- - DenomOwner defines structure representing an account that - owns or holds a + Coin defines a token with a denomination and an amount. - particular denominated token. It contains the account - address and account - balance of the denominated token. + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + title: supply is the supply of the coins pagination: - description: pagination defines the pagination in the response. + description: |- + pagination defines the pagination in the response. + + Since: cosmos-sdk 0.43 type: object properties: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -9827,11 +8749,13 @@ paths: PageRequest.count_total was set, its value is undefined otherwise - description: >- - QueryDenomOwnersResponse defines the RPC response of a DenomOwners - RPC query. 
+ title: >- + QueryTotalSupplyResponse is the response type for the + Query/TotalSupply RPC + + method default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -9853,13 +8777,6 @@ paths: type: string format: byte parameters: - - name: denom - description: >- - denom defines the coin denomination to query all account holders - for. - in: path - required: true - type: string - name: pagination.key description: |- key is a value returned in PageResponse.next_key to begin @@ -9906,138 +8823,53 @@ paths: in: query required: false type: boolean - format: boolean - name: pagination.reverse description: >- reverse is set to true if results are to be returned in the descending order. + + + Since: cosmos-sdk 0.43 in: query required: false type: boolean - format: boolean tags: - Query - /cosmos/bank/v1beta1/denoms_metadata: + /cosmos/bank/v1beta1/supply/by_denom: get: - summary: |- - DenomsMetadata queries the client metadata for all registered coin - denominations. - operationId: DenomsMetadata + summary: SupplyOf queries the supply of a single coin. + description: >- + When called from another module, this query might consume a high amount + of + + gas if the pagination field is incorrectly set. + operationId: SupplyOf responses: '200': description: A successful response. schema: type: object properties: - metadatas: - type: array - items: - type: object - properties: - description: - type: string - denom_units: - type: array - items: - type: object - properties: - denom: - type: string - description: >- - denom represents the string name of the given - denom unit (e.g uatom). - exponent: - type: integer - format: int64 - description: >- - exponent represents power of 10 exponent that one - must - - raise the base_denom to in order to equal the - given DenomUnit's denom - - 1 denom = 1^exponent base_denom - - (e.g. with a base_denom of uatom, one can create a - DenomUnit of 'atom' with - - exponent = 6, thus: 1 atom = 10^6 uatom). - aliases: - type: array - items: - type: string - title: >- - aliases is a list of string aliases for the given - denom - description: |- - DenomUnit represents a struct that describes a given - denomination unit of the basic token. - title: >- - denom_units represents the list of DenomUnit's for a - given coin - base: - type: string - description: >- - base represents the base denom (should be the DenomUnit - with exponent = 0). - display: - type: string - description: |- - display indicates the suggested denom that should be - displayed in clients. - name: - type: string - title: 'name defines the name of the token (eg: Cosmos Atom)' - symbol: - type: string - description: >- - symbol is the token symbol usually shown on exchanges - (eg: ATOM). This can - - be the same as the display. - uri: - type: string - description: >- - URI to a document (on or off-chain) that contains - additional information. Optional. - uri_hash: - type: string - description: >- - URIHash is a sha256 hash of a document pointed by URI. - It's used to verify that - - the document didn't change. Optional. - description: |- - Metadata represents a struct that describes - a basic token. - description: >- - metadata provides the client information for all the - registered tokens. - pagination: - description: pagination defines the pagination in the response. 
+ amount: type: object properties: - next_key: + denom: type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: + amount: type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + description: >- + Coin defines a token with a denomination and an amount. - was set, its value is undefined otherwise - description: >- - QueryDenomsMetadataResponse is the response type for the - Query/DenomsMetadata RPC - method. + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: >- + QuerySupplyOfResponse is the response type for the Query/SupplyOf + RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -10059,159 +8891,97 @@ paths: type: string format: byte parameters: - - name: pagination.key - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. - in: query - required: false - type: string - format: byte - - name: pagination.offset - description: >- - offset is a numeric offset that can be used when key is unavailable. - - It is less efficient than using key. Only one of offset or key - should - - be set. - in: query - required: false - type: string - format: uint64 - - name: pagination.limit - description: >- - limit is the total number of results to be returned in the result - page. - - If left empty it will default to a value to be set by each app. + - name: denom + description: denom is the coin denom to query balances for. in: query required: false type: string - format: uint64 - - name: pagination.count_total - description: >- - count_total is set to true to indicate that the result set should - include - - a count of the total number of items available for pagination in - UIs. - - count_total is only respected when offset is used. It is ignored - when key - - is set. - in: query - required: false - type: boolean - format: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. - in: query - required: false - type: boolean - format: boolean tags: - Query - /cosmos/bank/v1beta1/denoms_metadata/{denom}: + /cosmos/base/tendermint/v1beta1/abci_query: get: - summary: DenomsMetadata queries the client metadata of a given coin denomination. - operationId: DenomMetadata + summary: >- + ABCIQuery defines a query handler that supports ABCI queries directly to + the + + application, bypassing Tendermint completely. The ABCI query must + contain + + a valid and supported path, including app, custom, p2p, and store. + description: 'Since: cosmos-sdk 0.46' + operationId: ABCIQuery responses: '200': description: A successful response. schema: type: object properties: - metadata: + code: + type: integer + format: int64 + log: + type: string + info: + type: string + index: + type: string + format: int64 + key: + type: string + format: byte + value: + type: string + format: byte + proof_ops: type: object properties: - description: - type: string - denom_units: + ops: type: array items: type: object properties: - denom: + type: type: string - description: >- - denom represents the string name of the given denom - unit (e.g uatom). 
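TotalSupply and SupplyOf as plain REST calls; note that SupplyOf now takes the denom as a `denom` query parameter instead of a path segment. The gateway address and the `uakt` denom are assumptions:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type coin struct {
	Denom  string `json:"denom"`
	Amount string `json:"amount"`
}

func main() {
	base := "http://localhost:1317" // assumed gateway address

	// TotalSupply is paginated like the other list queries.
	resp, err := http.Get(base + "/cosmos/bank/v1beta1/supply?pagination.limit=50")
	if err != nil {
		panic(err)
	}
	var total struct {
		Supply []coin `json:"supply"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&total); err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("supply entries:", len(total.Supply))

	// SupplyOf takes the denom as a query parameter; "uakt" is only an example denom.
	resp, err = http.Get(base + "/cosmos/bank/v1beta1/supply/by_denom?denom=uakt")
	if err != nil {
		panic(err)
	}
	var one struct {
		Amount coin `json:"amount"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&one); err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println(one.Amount.Denom, one.Amount.Amount)
}
```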
- exponent: - type: integer - format: int64 - description: >- - exponent represents power of 10 exponent that one - must + key: + type: string + format: byte + data: + type: string + format: byte + description: >- + ProofOp defines an operation used for calculating Merkle + root. The data could - raise the base_denom to in order to equal the given - DenomUnit's denom + be arbitrary format, providing necessary data for + example neighbouring node - 1 denom = 1^exponent base_denom + hash. - (e.g. with a base_denom of uatom, one can create a - DenomUnit of 'atom' with - exponent = 6, thus: 1 atom = 10^6 uatom). - aliases: - type: array - items: - type: string - title: >- - aliases is a list of string aliases for the given - denom - description: |- - DenomUnit represents a struct that describes a given - denomination unit of the basic token. - title: >- - denom_units represents the list of DenomUnit's for a given - coin - base: - type: string - description: >- - base represents the base denom (should be the DenomUnit - with exponent = 0). - display: - type: string - description: |- - display indicates the suggested denom that should be - displayed in clients. - name: - type: string - title: 'name defines the name of the token (eg: Cosmos Atom)' - symbol: - type: string - description: >- - symbol is the token symbol usually shown on exchanges (eg: - ATOM). This can + Note: This type is a duplicate of the ProofOp proto type + defined in Tendermint. + description: >- + ProofOps is Merkle proof defined by the list of ProofOps. - be the same as the display. - uri: - type: string - description: >- - URI to a document (on or off-chain) that contains - additional information. Optional. - uri_hash: - type: string - description: >- - URIHash is a sha256 hash of a document pointed by URI. - It's used to verify that - the document didn't change. Optional. - description: |- - Metadata represents a struct that describes - a basic token. + Note: This type is a duplicate of the ProofOps proto type + defined in Tendermint. + height: + type: string + format: int64 + codespace: + type: string description: >- - QueryDenomMetadataResponse is the response type for the - Query/DenomMetadata RPC + ABCIQueryResponse defines the response structure for the ABCIQuery + gRPC query. - method. + + Note: This type is a duplicate of the ResponseQuery proto type + defined in + + Tendermint. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -10229,265 +8999,199 @@ paths: properties: type_url: type: string - value: - type: string - format: byte - parameters: - - name: denom - description: denom is the coin denom to query the metadata for. - in: path - required: true - type: string - tags: - - Query - /cosmos/bank/v1beta1/params: - get: - summary: Params queries the parameters of x/bank module. - operationId: BankParams - responses: - '200': - description: A successful response. - schema: - type: object - properties: - params: - type: object - properties: - send_enabled: - type: array - items: - type: object - properties: - denom: - type: string - enabled: - type: boolean - format: boolean description: >- - SendEnabled maps coin denom to a send_enabled status - (whether a denom is + A URL/resource name that uniquely identifies the type of + the serialized - sendable). - default_send_enabled: - type: boolean - format: boolean - description: Params defines the parameters for the bank module. 
- description: >- - QueryParamsResponse defines the response type for querying x/bank - parameters. - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. value: type: string format: byte - tags: - - Query - /cosmos/bank/v1beta1/supply: - get: - summary: TotalSupply queries the total supply of all coins. - operationId: TotalSupply - responses: - '200': - description: A successful response. - schema: - type: object - properties: - supply: - type: array - items: - type: object - properties: - denom: - type: string - amount: - type: string + description: >- + Must be a valid serialized protocol buffer of the above + specified type. description: >- - Coin defines a token with a denomination and an amount. + `Any` contains an arbitrary serialized protocol buffer + message along with a + URL that describes the type of the serialized message. - NOTE: The amount field is an Int which implements the custom - method - signatures required by gogoproto. - title: supply is the supply of the coins - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + Protobuf library provides support to pack/unpack Any values + in the form - was set, its value is undefined otherwise - title: >- - QueryTotalSupplyResponse is the response type for the - Query/TotalSupply RPC + of utility functions or additional generated methods of the + Any type. 
- method - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - value: - type: string - format: byte + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } parameters: - - name: pagination.key - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. + - name: data in: query required: false type: string format: byte - - name: pagination.offset - description: >- - offset is a numeric offset that can be used when key is unavailable. - - It is less efficient than using key. Only one of offset or key - should - - be set. + - name: path in: query required: false type: string - format: uint64 - - name: pagination.limit - description: >- - limit is the total number of results to be returned in the result - page. - - If left empty it will default to a value to be set by each app. + - name: height in: query required: false type: string - format: uint64 - - name: pagination.count_total - description: >- - count_total is set to true to indicate that the result set should - include - - a count of the total number of items available for pagination in - UIs. - - count_total is only respected when offset is used. It is ignored - when key - - is set. - in: query - required: false - type: boolean - format: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. 
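The Go fragment in the `Any` documentation above uses the `anypb` package; a self-contained version with imports, using `durationpb.Duration` as a stand-in payload since `pb.Foo` is only illustrative:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/anypb"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Pack a concrete message into an Any.
	d := durationpb.New(1212 * time.Millisecond)
	packed, err := anypb.New(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(packed.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Unpack it back into a typed message.
	var out durationpb.Duration
	if err := packed.UnmarshalTo(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.AsDuration()) // 1.212s
}
```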
+ format: int64 + - name: prove in: query required: false type: boolean - format: boolean - tags: - - Query - /cosmos/bank/v1beta1/supply/{denom}: - get: - summary: SupplyOf queries the supply of a single coin. - operationId: SupplyOf - responses: - '200': - description: A successful response. - schema: - type: object - properties: - amount: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the custom - method - - signatures required by gogoproto. - description: >- - QuerySupplyOfResponse is the response type for the Query/SupplyOf - RPC method. - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - value: - type: string - format: byte - parameters: - - name: denom - description: denom is the coin denom to query balances for. - in: path - required: true - type: string tags: - - Query + - Service /cosmos/base/tendermint/v1beta1/blocks/latest: get: summary: GetLatestBlock returns the latest block. @@ -10516,6 +9220,7 @@ paths: title: PartsetHeader title: BlockID block: + title: 'Deprecated: please use `sdk_block` instead' type: object properties: header: @@ -11052,228 +9757,8 @@ paths: description: >- Commit contains the evidence that a block was committed by a set of validators. - description: >- - GetLatestBlockResponse is the response type for the - Query/GetLatestBlock RPC method. - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's path - must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in - a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary - all types that they - - expect it to use in the context of Any. However, for - URLs which use the - - scheme `http`, `https`, or no scheme, one can optionally - set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in - the official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty scheme) - might be - - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. 
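A sketch of calling the ABCIQuery endpoint described above. The store path and key bytes are placeholders, and since `data` is a byte field it travels base64-encoded in the query string:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{}
	// Supported path prefixes are app, custom, p2p and store; this one hits a raw store.
	q.Set("path", "/store/bank/key")
	// The single 0x02 byte here is only a placeholder key.
	q.Set("data", base64.StdEncoding.EncodeToString([]byte{0x02}))
	q.Set("prove", "true")

	// Assumed gateway address.
	resp, err := http.Get("http://localhost:1317/cosmos/base/tendermint/v1beta1/abci_query?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // code, value, height, proof_ops, ...
}
```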
- description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values - in the form - - of utility functions or additional generated methods of the - Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } - - The pack methods provided by protobuf library will by - default use - - 'type.googleapis.com/full.type.name' as the type URL and the - unpack - - methods only use the fully qualified type name after the - last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield - type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with - an - - additional field `@type` which contains the type URL. - Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom - JSON - - representation, that representation will be embedded adding - a field - - `value` which holds the custom JSON in addition to the - `@type` - - field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - tags: - - Service - /cosmos/base/tendermint/v1beta1/blocks/{height}: - get: - summary: GetBlockByHeight queries block for given height. - operationId: GetBlockByHeight - responses: - '200': - description: A successful response. - schema: - type: object - properties: - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - block: + sdk_block: + title: 'Since: cosmos-sdk 0.47' type: object properties: header: @@ -11351,7 +9836,14 @@ paths: title: consensus info proposer_address: type: string - format: byte + description: >- + proposer_address is the original block proposer + address, formatted as a Bech32 string. + + In Tendermint, this type is `bytes`, but in the SDK, + we convert it to a Bech32 string + + for better UX. description: Header defines the structure of a Tendermint block header. data: type: object @@ -11810,11 +10302,16 @@ paths: description: >- Commit contains the evidence that a block was committed by a set of validators. + description: >- + Block is tendermint type Block, with the Header proposer + address + + field converted to bech32 string. description: >- - GetBlockByHeightResponse is the response type for the - Query/GetBlockByHeight RPC method. + GetLatestBlockResponse is the response type for the + Query/GetLatestBlock RPC method. 
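With `block` deprecated in favour of `sdk_block`, clients reading GetLatestBlock should prefer the new field, which also carries the proposer address as a Bech32 string. A minimal sketch (gateway address assumed):

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Assumed gateway address; /blocks/{height} returns the same shape for a specific height.
	resp, err := http.Get("http://localhost:1317/cosmos/base/tendermint/v1beta1/blocks/latest")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out struct {
		SdkBlock struct {
			Header struct {
				ChainID         string `json:"chain_id"`
				Height          string `json:"height"`
				ProposerAddress string `json:"proposer_address"` // Bech32 string in sdk_block
			} `json:"header"`
		} `json:"sdk_block"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.SdkBlock.Header.ChainID, out.SdkBlock.Header.Height, out.SdkBlock.Header.ProposerAddress)
}
```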
default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -11927,7 +10424,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -11937,13 +10434,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -11965,7 +10465,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -12002,265 +10501,1579 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - parameters: - - name: height - in: path - required: true - type: string - format: int64 tags: - Service - /cosmos/base/tendermint/v1beta1/node_info: + /cosmos/base/tendermint/v1beta1/blocks/{height}: get: - summary: GetNodeInfo queries the current node info. - operationId: GetNodeInfo + summary: GetBlockByHeight queries block for given height. + operationId: GetBlockByHeight responses: '200': description: A successful response. schema: type: object properties: - default_node_info: + block_id: type: object properties: - protocol_version: - type: object - properties: - p2p: - type: string - format: uint64 - block: - type: string - format: uint64 - app: - type: string - format: uint64 - default_node_id: - type: string - listen_addr: - type: string - network: - type: string - version: - type: string - channels: + hash: type: string format: byte - moniker: - type: string - other: + part_set_header: type: object properties: - tx_index: - type: string - rpc_address: + total: + type: integer + format: int64 + hash: type: string - application_version: + format: byte + title: PartsetHeader + title: BlockID + block: + title: 'Deprecated: please use `sdk_block` instead' type: object properties: - name: - type: string - app_name: - type: string - version: - type: string - git_commit: - type: string - build_tags: - type: string - go_version: - type: string - build_deps: - type: array - items: - type: object - properties: - path: - type: string - title: module path - version: - type: string - title: module version - sum: - type: string - title: checksum - title: Module is the type for VersionInfo - cosmos_sdk_version: - type: string - description: VersionInfo is the type for the GetNodeInfoResponse message. - description: >- - GetNodeInfoResponse is the request type for the Query/GetNodeInfo - RPC method. - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's path - must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). 
The name should be in - a canonical form + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing + a block in the blockchain, - (e.g., leading "." is not accepted). + including all blockchain data structures and the rules + of the application's + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + data: + type: object + properties: + txs: + type: array + items: + type: string + format: byte + description: >- + Txs that will be applied by state @ block.Height+1. - In practice, teams usually precompile into the binary - all types that they + NOTE: not all txs here are valid. We're just agreeing + on the order first. - expect it to use in the context of Any. However, for - URLs which use the + This means that block.AppHash does not include these + txs. + title: >- + Data contains the set of transactions included in the + block + evidence: + type: object + properties: + evidence: + type: array + items: + type: object + properties: + duplicate_vote_evidence: + type: object + properties: + vote_a: + type: object + properties: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed + message in the consensus. - scheme `http`, `https`, or no scheme, one can optionally - set up a type + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or + commit vote from validators for - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. 
- * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in - the official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty scheme) - might be - - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a + consensus. + vote_b: + type: object + properties: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed + message in the consensus. - URL that describes the type of the serialized message. + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or + commit vote from validators for + consensus. + total_voting_power: + type: string + format: int64 + validator_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + DuplicateVoteEvidence contains evidence of a + validator signed two conflicting votes. + light_client_attack_evidence: + type: object + properties: + conflicting_block: + type: object + properties: + signed_header: + type: object + properties: + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules + for processing a block in the + blockchain, - Protobuf library provides support to pack/unpack Any values - in the form + including all blockchain data structures + and the rules of the application's - of utility functions or additional generated methods of the - Any type. + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: >- + hashes from the app output from the prev + block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a + Tendermint block header. + commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: >- + BlockIdFlag indicates which BlcokID the + signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: >- + CommitSig is a part of the Vote included + in a Commit. + description: >- + Commit contains the evidence that a + block was committed by a set of + validators. 
+ validator_set: + type: object + properties: + validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + proposer: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + common_height: + type: string + format: int64 + byzantine_validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + LightClientAttackEvidence contains evidence of a + set of validators attempting to mislead a light + client. + last_commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: >- + BlockIdFlag indicates which BlcokID the + signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: >- + CommitSig is a part of the Vote included in a + Commit. + description: >- + Commit contains the evidence that a block was committed by + a set of validators. + sdk_block: + title: 'Since: cosmos-sdk 0.47' + type: object + properties: + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing + a block in the blockchain, + including all blockchain data structures and the rules + of the application's - Example 1: Pack and unpack a message in C++. + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + description: >- + proposer_address is the original block proposer + address, formatted as a Bech32 string. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + In Tendermint, this type is `bytes`, but in the SDK, + we convert it to a Bech32 string - Example 2: Pack and unpack a message in Java. + for better UX. + description: Header defines the structure of a Tendermint block header. + data: + type: object + properties: + txs: + type: array + items: + type: string + format: byte + description: >- + Txs that will be applied by state @ block.Height+1. - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + NOTE: not all txs here are valid. We're just agreeing + on the order first. - Example 3: Pack and unpack a message in Python. + This means that block.AppHash does not include these + txs. + title: >- + Data contains the set of transactions included in the + block + evidence: + type: object + properties: + evidence: + type: array + items: + type: object + properties: + duplicate_vote_evidence: + type: object + properties: + vote_a: + type: object + properties: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed + message in the consensus. - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or + commit vote from validators for - Example 4: Pack and unpack a message in Go + consensus. + vote_b: + type: object + properties: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed + message in the consensus. - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... 
- foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or + commit vote from validators for - The pack methods provided by protobuf library will by - default use + consensus. + total_voting_power: + type: string + format: int64 + validator_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + DuplicateVoteEvidence contains evidence of a + validator signed two conflicting votes. + light_client_attack_evidence: + type: object + properties: + conflicting_block: + type: object + properties: + signed_header: + type: object + properties: + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules + for processing a block in the + blockchain, - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + including all blockchain data structures + and the rules of the application's - methods only use the fully qualified type name after the - last '/' + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: >- + hashes from the app output from the prev + block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a + Tendermint block header. 
+ commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: >- + BlockIdFlag indicates which BlcokID the + signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: >- + CommitSig is a part of the Vote included + in a Commit. + description: >- + Commit contains the evidence that a + block was committed by a set of + validators. + validator_set: + type: object + properties: + validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + proposer: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + common_height: + type: string + format: int64 + byzantine_validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + LightClientAttackEvidence contains evidence of a + set of validators attempting to mislead a light + client. + last_commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: >- + BlockIdFlag indicates which BlcokID the + signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: >- + CommitSig is a part of the Vote included in a + Commit. 
+ description: >- + Commit contains the evidence that a block was committed by + a set of validators. + description: >- + Block is tendermint type Block, with the Header proposer + address - in the type URL, for example "foo.bar.com/x/y.z" will yield - type + field converted to bech32 string. + description: >- + GetBlockByHeightResponse is the response type for the + Query/GetBlockByHeight RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized - name "y.z". + protocol buffer message. This string must contain at + least + one "/" character. The last segment of the URL's path + must represent + the fully qualified name of the type (as in - JSON + `path/google.protobuf.Duration`). The name should be in + a canonical form - ==== + (e.g., leading "." is not accepted). - The JSON representation of an `Any` value uses the regular - representation of the deserialized, embedded message, with - an + In practice, teams usually precompile into the binary + all types that they - additional field `@type` which contains the type URL. - Example: + expect it to use in the context of Any. However, for + URLs which use the - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + scheme `http`, `https`, or no scheme, one can optionally + set up a type - { - "@type": "type.googleapis.com/google.profile.Person", + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... 
+ foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: height + in: path + required: true + type: string + format: int64 + tags: + - Service + /cosmos/base/tendermint/v1beta1/node_info: + get: + summary: GetNodeInfo queries the current node info. + operationId: GetNodeInfo + responses: + '200': + description: A successful response. + schema: + type: object + properties: + default_node_info: + type: object + properties: + protocol_version: + type: object + properties: + p2p: + type: string + format: uint64 + block: + type: string + format: uint64 + app: + type: string + format: uint64 + default_node_id: + type: string + listen_addr: + type: string + network: + type: string + version: + type: string + channels: + type: string + format: byte + moniker: + type: string + other: + type: object + properties: + tx_index: + type: string + rpc_address: + type: string + application_version: + type: object + properties: + name: + type: string + app_name: + type: string + version: + type: string + git_commit: + type: string + build_tags: + type: string + go_version: + type: string + build_deps: + type: array + items: + type: object + properties: + path: + type: string + title: module path + version: + type: string + title: module version + sum: + type: string + title: checksum + title: Module is the type for VersionInfo + cosmos_sdk_version: + type: string + title: 'Since: cosmos-sdk 0.43' + description: VersionInfo is the type for the GetNodeInfoResponse message. + description: >- + GetNodeInfoResponse is the response type for the Query/GetNodeInfo + RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. 
However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", "firstName": , "lastName": } @@ -12294,12 +12107,11 @@ paths: properties: syncing: type: boolean - format: boolean description: >- GetSyncingResponse is the response type for the Query/GetSyncing RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -12412,7 +12224,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -12422,13 +12234,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... 
foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -12450,7 +12265,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -12609,7 +12423,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -12619,13 +12433,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -12647,7 +12464,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -12700,9 +12516,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -12715,7 +12532,7 @@ paths: GetLatestValidatorSetResponse is the response type for the Query/GetValidatorSetByHeight RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -12828,7 +12645,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -12838,13 +12655,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -12866,7 +12686,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -12950,15 +12769,16 @@ paths: in: query required: false type: boolean - format: boolean - name: pagination.reverse description: >- reverse is set to true if results are to be returned in the descending order. + + + Since: cosmos-sdk 0.43 in: query required: false type: boolean - format: boolean tags: - Service /cosmos/base/tendermint/v1beta1/validatorsets/{height}: @@ -13081,7 +12901,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -13091,13 +12911,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -13119,7 +12942,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -13172,9 +12994,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. 
total: type: string format: uint64 @@ -13187,7 +13010,7 @@ paths: GetValidatorSetByHeightResponse is the response type for the Query/GetValidatorSetByHeight RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -13300,7 +13123,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -13310,13 +13133,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -13338,7 +13164,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -13427,15 +13252,16 @@ paths: in: query required: false type: boolean - format: boolean - name: pagination.reverse description: >- reverse is set to true if results are to be returned in the descending order. + + + Since: cosmos-sdk 0.43 in: query required: false type: boolean - format: boolean tags: - Service /cosmos/distribution/v1beta1/community_pool: @@ -13473,7 +13299,7 @@ paths: RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -13560,7 +13386,7 @@ paths: QueryDelegationTotalRewardsResponse is the response type for the Query/DelegationTotalRewards RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -13622,7 +13448,7 @@ paths: QueryDelegationRewardsResponse is the response type for the Query/DelegationRewards RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -13677,7 +13503,7 @@ paths: QueryDelegatorValidatorsResponse is the response type for the Query/DelegatorValidators RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -13723,7 +13549,7 @@ paths: QueryDelegatorWithdrawAddressResponse is the response type for the Query/DelegatorWithdrawAddress RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -13770,16 +13596,105 @@ paths: type: string base_proposer_reward: type: string + description: >- + Deprecated: The base_proposer_reward field is deprecated + and is no longer used + + in the x/distribution module's reward mechanism. bonus_proposer_reward: type: string + description: >- + Deprecated: The bonus_proposer_reward field is deprecated + and is no longer used + + in the x/distribution module's reward mechanism. withdraw_addr_enabled: type: boolean - format: boolean description: >- QueryParamsResponse is the response type for the Query/Params RPC method. default: - description: An unexpected error response + description: An unexpected error response. 
+ schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + tags: + - Query + /cosmos/distribution/v1beta1/validators/{validator_address}: + get: + summary: >- + ValidatorDistributionInfo queries validator commission and + self-delegation rewards for validator + operationId: ValidatorDistributionInfo + responses: + '200': + description: A successful response. + schema: + type: object + properties: + operator_address: + type: string + description: operator_address defines the validator operator address. + self_bond_rewards: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + description: self_bond_rewards defines the self delegations rewards. + commission: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + description: commission defines the commission the validator received. + description: >- + QueryValidatorDistributionInfoResponse is the response type for + the Query/ValidatorDistributionInfo RPC method. + default: + description: An unexpected error response. schema: type: object properties: @@ -13800,6 +13715,12 @@ paths: value: type: string format: byte + parameters: + - name: validator_address + description: validator_address defines the validator address to query for. + in: path + required: true + type: string tags: - Query /cosmos/distribution/v1beta1/validators/{validator_address}/commission: @@ -13813,7 +13734,7 @@ paths: type: object properties: commission: - description: commission defines the commision the validator received. + description: commission defines the commission the validator received. type: object properties: commission: @@ -13838,7 +13759,7 @@ paths: QueryValidatorCommissionResponse is the response type for the Query/ValidatorCommission RPC method default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -13910,7 +13831,7 @@ paths: Query/ValidatorOutstandingRewards RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -13977,9 +13898,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -13992,7 +13914,7 @@ paths: QueryValidatorSlashesResponse is the response type for the Query/ValidatorSlashes RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -14081,15 +14003,16 @@ paths: in: query required: false type: boolean - format: boolean - name: pagination.reverse description: >- reverse is set to true if results are to be returned in the descending order. 
+ + + Since: cosmos-sdk 0.43 in: query required: false type: boolean - format: boolean tags: - Query /cosmos/evidence/v1beta1/evidence: @@ -14204,7 +14127,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -14214,226 +14137,232 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + any, err := anypb.New(foo) + if err != nil { ... } - - The pack methods provided by protobuf library will by - default use - - 'type.googleapis.com/full.type.name' as the type URL and the - unpack - - methods only use the fully qualified type name after the - last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield - type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with - an - - additional field `@type` which contains the type URL. - Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom - JSON - - representation, that representation will be embedded adding - a field - - `value` which holds the custom JSON in addition to the - `@type` - - field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - description: evidence returns all evidences. - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: >- - QueryAllEvidenceResponse is the response type for the - Query/AllEvidence RPC - - method. - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's path - must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in - a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary - all types that they - - expect it to use in the context of Any. However, for - URLs which use the - - scheme `http`, `https`, or no scheme, one can optionally - set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. 
- * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in - the official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty scheme) - might be - - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values - in the form - - of utility functions or additional generated methods of the - Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: evidence returns all evidences. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryAllEvidenceResponse is the response type for the + Query/AllEvidence RPC + + method. + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -14455,7 +14384,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -14539,18 +14467,19 @@ paths: in: query required: false type: boolean - format: boolean - name: pagination.reverse description: >- reverse is set to true if results are to be returned in the descending order. + + + Since: cosmos-sdk 0.43 in: query required: false type: boolean - format: boolean tags: - Query - /cosmos/evidence/v1beta1/evidence/{evidence_hash}: + /cosmos/evidence/v1beta1/evidence/{hash}: get: summary: Evidence queries evidence based on evidence hash. operationId: Evidence @@ -14659,7 +14588,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. 
foo = Foo(...) any = Any() @@ -14669,13 +14598,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -14697,7 +14629,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -14735,7 +14666,7 @@ paths: QueryEvidenceResponse is the response type for the Query/Evidence RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -14848,7 +14779,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -14858,13 +14789,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -14886,7 +14820,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -14924,11 +14857,21 @@ paths: "value": "1.212s" } parameters: - - name: evidence_hash - description: evidence_hash defines the hash of the requested evidence. + - name: hash + description: |- + hash defines the evidence hash of the requested evidence. + + Since: cosmos-sdk 0.47 in: path required: true type: string + - name: evidence_hash + description: |- + evidence_hash defines the hash of the requested evidence. + Deprecated: Use hash, a HEX encoded string, instead. + in: query + required: false + type: string format: byte tags: - Query @@ -14948,7 +14891,7 @@ paths: properties: voting_period: type: string - description: Length of the voting period. + description: Duration of the voting period. deposit_params: description: deposit_params defines the parameters related to deposit. type: object @@ -14976,7 +14919,8 @@ paths: description: >- Maximum period for Atom holders to deposit on a proposal. Initial value: 2 - months. + + months. tally_params: description: tally_params defines the parameters related to tally. type: object @@ -14987,7 +14931,8 @@ paths: description: >- Minimum percentage of total stake needed to vote for a result to be - considered valid. + + considered valid. threshold: type: string format: byte @@ -15000,12 +14945,13 @@ paths: description: >- Minimum value of Veto votes to Total votes ratio for proposal to be - vetoed. Default value: 1/3. + + vetoed. Default value: 1/3. description: >- QueryParamsResponse is the response type for the Query/Params RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -15118,7 +15064,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -15128,13 +15074,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... 
foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -15156,7 +15105,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -15223,6 +15171,7 @@ paths: proposal_id: type: string format: uint64 + description: proposal_id defines the unique id of the proposal. content: type: object properties: @@ -15323,7 +15272,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -15333,13 +15282,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -15361,7 +15313,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -15401,6 +15352,7 @@ paths: "value": "1.212s" } status: + description: status defines the proposal status. type: string enum: - PROPOSAL_STATUS_UNSPECIFIED @@ -15410,41 +15362,41 @@ paths: - PROPOSAL_STATUS_REJECTED - PROPOSAL_STATUS_FAILED default: PROPOSAL_STATUS_UNSPECIFIED + final_tally_result: description: >- - ProposalStatus enumerates the valid statuses of a - proposal. + final_tally_result is the final tally result of the + proposal. When - - PROPOSAL_STATUS_UNSPECIFIED: PROPOSAL_STATUS_UNSPECIFIED defines the default propopsal status. - - PROPOSAL_STATUS_DEPOSIT_PERIOD: PROPOSAL_STATUS_DEPOSIT_PERIOD defines a proposal status during the deposit - period. - - PROPOSAL_STATUS_VOTING_PERIOD: PROPOSAL_STATUS_VOTING_PERIOD defines a proposal status during the voting - period. - - PROPOSAL_STATUS_PASSED: PROPOSAL_STATUS_PASSED defines a proposal status of a proposal that has - passed. - - PROPOSAL_STATUS_REJECTED: PROPOSAL_STATUS_REJECTED defines a proposal status of a proposal that has - been rejected. - - PROPOSAL_STATUS_FAILED: PROPOSAL_STATUS_FAILED defines a proposal status of a proposal that has - failed. - final_tally_result: + querying a proposal via gRPC, this field is not + populated until the + + proposal's voting period has ended. type: object properties: 'yes': type: string + description: yes is the number of yes votes on a proposal. abstain: type: string + description: >- + abstain is the number of abstain votes on a + proposal. 'no': type: string + description: no is the number of no votes on a proposal. no_with_veto: type: string - description: >- - TallyResult defines a standard tally for a governance - proposal. + description: >- + no_with_veto is the number of no with veto votes on + a proposal. submit_time: type: string format: date-time + description: submit_time is the time of proposal submission. deposit_end_time: type: string format: date-time + description: deposit_end_time is the end time for deposition. total_deposit: type: array items: @@ -15463,15 +15415,21 @@ paths: custom method signatures required by gogoproto. + description: total_deposit is the total deposit on the proposal. voting_start_time: type: string format: date-time + description: >- + voting_start_time is the starting time to vote on a + proposal. voting_end_time: type: string format: date-time + description: voting_end_time is the end time of voting on a proposal. 
description: >- Proposal defines the core field members of a governance proposal. + description: proposals defines all the requested governance proposals. pagination: description: pagination defines the pagination in the response. type: object @@ -15479,9 +15437,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -15496,7 +15455,7 @@ paths: method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -15609,7 +15568,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -15619,13 +15578,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -15647,7 +15609,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -15689,7 +15650,7 @@ paths: description: |- proposal_status defines the status of the proposals. - - PROPOSAL_STATUS_UNSPECIFIED: PROPOSAL_STATUS_UNSPECIFIED defines the default propopsal status. + - PROPOSAL_STATUS_UNSPECIFIED: PROPOSAL_STATUS_UNSPECIFIED defines the default proposal status. - PROPOSAL_STATUS_DEPOSIT_PERIOD: PROPOSAL_STATUS_DEPOSIT_PERIOD defines a proposal status during the deposit period. - PROPOSAL_STATUS_VOTING_PERIOD: PROPOSAL_STATUS_VOTING_PERIOD defines a proposal status during the voting @@ -15767,15 +15728,16 @@ paths: in: query required: false type: boolean - format: boolean - name: pagination.reverse description: >- reverse is set to true if results are to be returned in the descending order. + + + Since: cosmos-sdk 0.43 in: query required: false type: boolean - format: boolean tags: - Query /cosmos/gov/v1beta1/proposals/{proposal_id}: @@ -15794,6 +15756,7 @@ paths: proposal_id: type: string format: uint64 + description: proposal_id defines the unique id of the proposal. content: type: object properties: @@ -15894,7 +15857,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -15904,13 +15867,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -15932,7 +15898,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -15970,6 +15935,7 @@ paths: "value": "1.212s" } status: + description: status defines the proposal status. type: string enum: - PROPOSAL_STATUS_UNSPECIFIED @@ -15979,41 +15945,39 @@ paths: - PROPOSAL_STATUS_REJECTED - PROPOSAL_STATUS_FAILED default: PROPOSAL_STATUS_UNSPECIFIED + final_tally_result: description: >- - ProposalStatus enumerates the valid statuses of a - proposal. 
+ final_tally_result is the final tally result of the + proposal. When - - PROPOSAL_STATUS_UNSPECIFIED: PROPOSAL_STATUS_UNSPECIFIED defines the default propopsal status. - - PROPOSAL_STATUS_DEPOSIT_PERIOD: PROPOSAL_STATUS_DEPOSIT_PERIOD defines a proposal status during the deposit - period. - - PROPOSAL_STATUS_VOTING_PERIOD: PROPOSAL_STATUS_VOTING_PERIOD defines a proposal status during the voting - period. - - PROPOSAL_STATUS_PASSED: PROPOSAL_STATUS_PASSED defines a proposal status of a proposal that has - passed. - - PROPOSAL_STATUS_REJECTED: PROPOSAL_STATUS_REJECTED defines a proposal status of a proposal that has - been rejected. - - PROPOSAL_STATUS_FAILED: PROPOSAL_STATUS_FAILED defines a proposal status of a proposal that has - failed. - final_tally_result: + querying a proposal via gRPC, this field is not populated + until the + + proposal's voting period has ended. type: object properties: 'yes': type: string + description: yes is the number of yes votes on a proposal. abstain: type: string + description: abstain is the number of abstain votes on a proposal. 'no': type: string + description: no is the number of no votes on a proposal. no_with_veto: type: string - description: >- - TallyResult defines a standard tally for a governance - proposal. + description: >- + no_with_veto is the number of no with veto votes on a + proposal. submit_time: type: string format: date-time + description: submit_time is the time of proposal submission. deposit_end_time: type: string format: date-time + description: deposit_end_time is the end time for deposition. total_deposit: type: array items: @@ -16031,12 +15995,17 @@ paths: custom method signatures required by gogoproto. + description: total_deposit is the total deposit on the proposal. voting_start_time: type: string format: date-time + description: >- + voting_start_time is the starting time to vote on a + proposal. voting_end_time: type: string format: date-time + description: voting_end_time is the end time of voting on a proposal. description: >- Proposal defines the core field members of a governance proposal. @@ -16044,7 +16013,7 @@ paths: QueryProposalResponse is the response type for the Query/Proposal RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -16157,7 +16126,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -16167,13 +16136,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -16195,7 +16167,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -16259,8 +16230,12 @@ paths: proposal_id: type: string format: uint64 + description: proposal_id defines the unique id of the proposal. depositor: type: string + description: >- + depositor defines the deposit addresses from the + proposals. amount: type: array items: @@ -16279,11 +16254,13 @@ paths: custom method signatures required by gogoproto. + description: amount to be deposited by depositor. description: >- Deposit defines an amount deposited by an account address to an active proposal. 
+ description: deposits defines the requested deposits. pagination: description: pagination defines the pagination in the response. type: object @@ -16291,9 +16268,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -16306,7 +16284,7 @@ paths: QueryDepositsResponse is the response type for the Query/Deposits RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -16419,7 +16397,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -16429,13 +16407,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -16457,7 +16438,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -16547,15 +16527,16 @@ paths: in: query required: false type: boolean - format: boolean - name: pagination.reverse description: >- reverse is set to true if results are to be returned in the descending order. + + + Since: cosmos-sdk 0.43 in: query required: false type: boolean - format: boolean tags: - Query /cosmos/gov/v1beta1/proposals/{proposal_id}/deposits/{depositor}: @@ -16576,8 +16557,12 @@ paths: proposal_id: type: string format: uint64 + description: proposal_id defines the unique id of the proposal. depositor: type: string + description: >- + depositor defines the deposit addresses from the + proposals. amount: type: array items: @@ -16595,6 +16580,7 @@ paths: custom method signatures required by gogoproto. + description: amount to be deposited by depositor. description: >- Deposit defines an amount deposited by an account address to an active @@ -16604,7 +16590,7 @@ paths: QueryDepositResponse is the response type for the Query/Deposit RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -16717,7 +16703,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -16727,13 +16713,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -16755,7 +16744,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -16817,24 +16805,28 @@ paths: type: object properties: tally: + description: tally defines the requested tally. type: object properties: 'yes': type: string + description: yes is the number of yes votes on a proposal. abstain: type: string + description: abstain is the number of abstain votes on a proposal. 'no': type: string + description: no is the number of no votes on a proposal. 
no_with_veto: type: string - description: >- - TallyResult defines a standard tally for a governance - proposal. + description: >- + no_with_veto is the number of no with veto votes on a + proposal. description: >- QueryTallyResultResponse is the response type for the Query/Tally RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -16947,7 +16939,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -16957,304 +16949,316 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + any, err := anypb.New(foo) + if err != nil { ... } - - The pack methods provided by protobuf library will by - default use - - 'type.googleapis.com/full.type.name' as the type URL and the - unpack - - methods only use the fully qualified type name after the - last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield - type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with - an - - additional field `@type` which contains the type URL. - Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom - JSON - - representation, that representation will be embedded adding - a field - - `value` which holds the custom JSON in addition to the - `@type` - - field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - parameters: - - name: proposal_id - description: proposal_id defines the unique id of the proposal. - in: path - required: true - type: string - format: uint64 - tags: - - Query - /cosmos/gov/v1beta1/proposals/{proposal_id}/votes: - get: - summary: Votes queries votes of a given proposal. - operationId: Votes - responses: - '200': - description: A successful response. - schema: - type: object - properties: - votes: - type: array - items: - type: object - properties: - proposal_id: - type: string - format: uint64 - voter: - type: string - option: - description: >- - Deprecated: Prefer to use `options` instead. This field - is set in queries - - if and only if `len(options) == 1` and that option has - weight 1. In all - - other cases, this field will default to - VOTE_OPTION_UNSPECIFIED. - type: string - enum: - - VOTE_OPTION_UNSPECIFIED - - VOTE_OPTION_YES - - VOTE_OPTION_ABSTAIN - - VOTE_OPTION_NO - - VOTE_OPTION_NO_WITH_VETO - default: VOTE_OPTION_UNSPECIFIED - options: - type: array - items: - type: object - properties: - option: - type: string - enum: - - VOTE_OPTION_UNSPECIFIED - - VOTE_OPTION_YES - - VOTE_OPTION_ABSTAIN - - VOTE_OPTION_NO - - VOTE_OPTION_NO_WITH_VETO - default: VOTE_OPTION_UNSPECIFIED - description: >- - VoteOption enumerates the valid vote options for a - given governance proposal. - - - VOTE_OPTION_UNSPECIFIED: VOTE_OPTION_UNSPECIFIED defines a no-op vote option. - - VOTE_OPTION_YES: VOTE_OPTION_YES defines a yes vote option. 
- - VOTE_OPTION_ABSTAIN: VOTE_OPTION_ABSTAIN defines an abstain vote option. - - VOTE_OPTION_NO: VOTE_OPTION_NO defines a no vote option. - - VOTE_OPTION_NO_WITH_VETO: VOTE_OPTION_NO_WITH_VETO defines a no with veto vote option. - weight: - type: string - description: >- - WeightedVoteOption defines a unit of vote for vote - split. - description: >- - Vote defines a vote on a governance proposal. - - A Vote consists of a proposal ID, the voter, and the vote - option. - description: votes defined the queried votes. - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: >- - QueryVotesResponse is the response type for the Query/Votes RPC - method. - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's path - must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in - a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary - all types that they - - expect it to use in the context of Any. However, for - URLs which use the - - scheme `http`, `https`, or no scheme, one can optionally - set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in - the official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty scheme) - might be - - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values - in the form - - of utility functions or additional generated methods of the - Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... 
- if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: proposal_id + description: proposal_id defines the unique id of the proposal. + in: path + required: true + type: string + format: uint64 + tags: + - Query + /cosmos/gov/v1beta1/proposals/{proposal_id}/votes: + get: + summary: Votes queries votes of a given proposal. + operationId: Votes + responses: + '200': + description: A successful response. + schema: + type: object + properties: + votes: + type: array + items: + type: object + properties: + proposal_id: + type: string + format: uint64 + description: proposal_id defines the unique id of the proposal. + voter: + type: string + description: voter is the voter address of the proposal. + option: + description: >- + Deprecated: Prefer to use `options` instead. This field + is set in queries + + if and only if `len(options) == 1` and that option has + weight 1. In all + + other cases, this field will default to + VOTE_OPTION_UNSPECIFIED. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + options: + type: array + items: + type: object + properties: + option: + description: >- + option defines the valid vote options, it must not + contain duplicate vote options. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + weight: + type: string + description: >- + weight is the vote weight associated with the vote + option. + description: >- + WeightedVoteOption defines a unit of vote for vote + split. + + + Since: cosmos-sdk 0.43 + description: |- + options is the weighted vote options. + + Since: cosmos-sdk 0.43 + description: >- + Vote defines a vote on a governance proposal. + + A Vote consists of a proposal ID, the voter, and the vote + option. + description: votes defines the queried votes. 
+ pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryVotesResponse is the response type for the Query/Votes RPC + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { ... 
} @@ -17276,7 +17280,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -17366,15 +17369,16 @@ paths: in: query required: false type: boolean - format: boolean - name: pagination.reverse description: >- reverse is set to true if results are to be returned in the descending order. + + + Since: cosmos-sdk 0.43 in: query required: false type: boolean - format: boolean tags: - Query /cosmos/gov/v1beta1/proposals/{proposal_id}/votes/{voter}: @@ -17393,8 +17397,10 @@ paths: proposal_id: type: string format: uint64 + description: proposal_id defines the unique id of the proposal. voter: type: string + description: voter is the voter address of the proposal. option: description: >- Deprecated: Prefer to use `options` instead. This field is @@ -17419,6 +17425,9 @@ paths: type: object properties: option: + description: >- + option defines the valid vote options, it must not + contain duplicate vote options. type: string enum: - VOTE_OPTION_UNSPECIFIED @@ -17427,20 +17436,21 @@ paths: - VOTE_OPTION_NO - VOTE_OPTION_NO_WITH_VETO default: VOTE_OPTION_UNSPECIFIED - description: >- - VoteOption enumerates the valid vote options for a - given governance proposal. - - - VOTE_OPTION_UNSPECIFIED: VOTE_OPTION_UNSPECIFIED defines a no-op vote option. - - VOTE_OPTION_YES: VOTE_OPTION_YES defines a yes vote option. - - VOTE_OPTION_ABSTAIN: VOTE_OPTION_ABSTAIN defines an abstain vote option. - - VOTE_OPTION_NO: VOTE_OPTION_NO defines a no vote option. - - VOTE_OPTION_NO_WITH_VETO: VOTE_OPTION_NO_WITH_VETO defines a no with veto vote option. weight: type: string + description: >- + weight is the vote weight associated with the vote + option. description: >- WeightedVoteOption defines a unit of vote for vote split. + + + Since: cosmos-sdk 0.43 + description: |- + options is the weighted vote options. + + Since: cosmos-sdk 0.43 description: >- Vote defines a vote on a governance proposal. @@ -17450,7 +17460,7 @@ paths: QueryVoteResponse is the response type for the Query/Vote RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -17563,7 +17573,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -17573,13 +17583,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -17601,7 +17614,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -17646,638 +17658,149 @@ paths: type: string format: uint64 - name: voter - description: voter defines the oter address for the proposals. + description: voter defines the voter address for the proposals. in: path required: true type: string tags: - Query - /cosmos/mint/v1beta1/annual_provisions: - get: - summary: AnnualProvisions current minting annual provisions value. - operationId: AnnualProvisions - responses: - '200': - description: A successful response. - schema: - type: object - properties: - annual_provisions: - type: string - format: byte - description: >- - annual_provisions is the current minting annual provisions - value. 
- description: |- - QueryAnnualProvisionsResponse is the response type for the - Query/AnnualProvisions RPC method. - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - value: - type: string - format: byte - tags: - - Query - /cosmos/mint/v1beta1/inflation: + /cosmos/gov/v1/params/{params_type}: get: - summary: Inflation returns the current minting inflation value. - operationId: Inflation + summary: Params queries all parameters of the gov module. + operationId: GovV1Params responses: '200': description: A successful response. schema: type: object properties: - inflation: - type: string - format: byte - description: inflation is the current minting inflation value. - description: >- - QueryInflationResponse is the response type for the - Query/Inflation RPC + voting_params: + description: |- + Deprecated: Prefer to use `params` instead. + voting_params defines the parameters related to voting. + type: object + properties: + voting_period: + type: string + description: Duration of the voting period. + deposit_params: + description: |- + Deprecated: Prefer to use `params` instead. + deposit_params defines the parameters related to deposit. + type: object + properties: + min_deposit: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. - method. - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - value: - type: string - format: byte - tags: - - Query - /cosmos/mint/v1beta1/params: - get: - summary: Params returns the total set of minting parameters. - operationId: MintParams - responses: - '200': - description: A successful response. - schema: - type: object - properties: + + NOTE: The amount field is an Int which implements the + custom method + + signatures required by gogoproto. + description: Minimum deposit for a proposal to enter voting period. + max_deposit_period: + type: string + description: >- + Maximum period for Atom holders to deposit on a proposal. + Initial value: 2 + + months. + tally_params: + description: |- + Deprecated: Prefer to use `params` instead. + tally_params defines the parameters related to tally. + type: object + properties: + quorum: + type: string + description: >- + Minimum percentage of total stake needed to vote for a + result to be + + considered valid. + threshold: + type: string + description: >- + Minimum proportion of Yes votes for proposal to pass. + Default value: 0.5. + veto_threshold: + type: string + description: >- + Minimum value of Veto votes to Total votes ratio for + proposal to be + + vetoed. Default value: 1/3. params: - description: params defines the parameters of the module. + description: |- + params defines all the paramaters of x/gov module. + + Since: cosmos-sdk 0.47 type: object properties: - mint_denom: + min_deposit: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. 
+ + + NOTE: The amount field is an Int which implements the + custom method + + signatures required by gogoproto. + description: Minimum deposit for a proposal to enter voting period. + max_deposit_period: type: string - title: type of coin to mint - inflation_rate_change: + description: >- + Maximum period for Atom holders to deposit on a proposal. + Initial value: 2 + + months. + voting_period: type: string - title: maximum annual change in inflation rate - inflation_max: + description: Duration of the voting period. + quorum: type: string - title: maximum inflation rate - inflation_min: + description: >- + Minimum percentage of total stake needed to vote for a + result to be + considered valid. + threshold: type: string - title: minimum inflation rate - goal_bonded: + description: >- + Minimum proportion of Yes votes for proposal to pass. + Default value: 0.5. + veto_threshold: type: string - title: goal of percent bonded atoms - blocks_per_year: + description: >- + Minimum value of Veto votes to Total votes ratio for + proposal to be + vetoed. Default value: 1/3. + min_initial_deposit_ratio: type: string - format: uint64 - title: expected blocks per year + description: >- + The ratio representing the proportion of the deposit value + that must be paid at proposal submission. description: >- QueryParamsResponse is the response type for the Query/Params RPC method. default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - value: - type: string - format: byte - tags: - - Query - /cosmos/params/v1beta1/params: - get: - summary: |- - Params queries a specific parameter of a module, given its subspace and - key. - operationId: Params - responses: - '200': - description: A successful response. - schema: - type: object - properties: - param: - description: param defines the queried parameter. - type: object - properties: - subspace: - type: string - key: - type: string - value: - type: string - description: >- - QueryParamsResponse is response type for the Query/Params RPC - method. - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - value: - type: string - format: byte - parameters: - - name: subspace - description: subspace defines the module to query the parameter for. - in: query - required: false - type: string - - name: key - description: key defines the key of the parameter in the subspace. - in: query - required: false - type: string - tags: - - Query - /cosmos/slashing/v1beta1/params: - get: - summary: Params queries the parameters of slashing module - operationId: SlashingParams - responses: - '200': - description: A successful response. - schema: - type: object - properties: - params: - type: object - properties: - signed_blocks_window: - type: string - format: int64 - min_signed_per_window: - type: string - format: byte - downtime_jail_duration: - type: string - slash_fraction_double_sign: - type: string - format: byte - slash_fraction_downtime: - type: string - format: byte - description: >- - Params represents the parameters used for by the slashing - module. 
- title: >- - QueryParamsResponse is the response type for the Query/Params RPC - method - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - value: - type: string - format: byte - tags: - - Query - /cosmos/slashing/v1beta1/signing_infos: - get: - summary: SigningInfos queries signing info of all validators - operationId: SigningInfos - responses: - '200': - description: A successful response. - schema: - type: object - properties: - info: - type: array - items: - type: object - properties: - address: - type: string - start_height: - type: string - format: int64 - title: >- - Height at which validator was first a candidate OR was - unjailed - index_offset: - type: string - format: int64 - description: >- - Index which is incremented each time the validator was a - bonded - - in a block and may have signed a precommit or not. This - in conjunction with the - - `SignedBlocksWindow` param determines the index in the - `MissedBlocksBitArray`. - jailed_until: - type: string - format: date-time - description: >- - Timestamp until which the validator is jailed due to - liveness downtime. - tombstoned: - type: boolean - format: boolean - description: >- - Whether or not a validator has been tombstoned (killed - out of validator set). It is set - - once the validator commits an equivocation or for any - other configured misbehiavor. - missed_blocks_counter: - type: string - format: int64 - description: >- - A counter kept to avoid unnecessary array reads. - - Note that `Sum(MissedBlocksBitArray)` always equals - `MissedBlocksCounter`. - description: >- - ValidatorSigningInfo defines a validator's signing info for - monitoring their - - liveness activity. - title: info is the signing info of all validators - pagination: - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: >- - PageResponse is to be embedded in gRPC response messages where - the - - corresponding request message has used PageRequest. - - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - title: >- - QuerySigningInfosResponse is the response type for the - Query/SigningInfos RPC - - method - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - value: - type: string - format: byte - parameters: - - name: pagination.key - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. - in: query - required: false - type: string - format: byte - - name: pagination.offset - description: >- - offset is a numeric offset that can be used when key is unavailable. - - It is less efficient than using key. Only one of offset or key - should - - be set. 
- in: query - required: false - type: string - format: uint64 - - name: pagination.limit - description: >- - limit is the total number of results to be returned in the result - page. - - If left empty it will default to a value to be set by each app. - in: query - required: false - type: string - format: uint64 - - name: pagination.count_total - description: >- - count_total is set to true to indicate that the result set should - include - - a count of the total number of items available for pagination in - UIs. - - count_total is only respected when offset is used. It is ignored - when key - - is set. - in: query - required: false - type: boolean - format: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. - in: query - required: false - type: boolean - format: boolean - tags: - - Query - /cosmos/slashing/v1beta1/signing_infos/{cons_address}: - get: - summary: SigningInfo queries the signing info of given cons address - operationId: SigningInfo - responses: - '200': - description: A successful response. - schema: - type: object - properties: - val_signing_info: - type: object - properties: - address: - type: string - start_height: - type: string - format: int64 - title: >- - Height at which validator was first a candidate OR was - unjailed - index_offset: - type: string - format: int64 - description: >- - Index which is incremented each time the validator was a - bonded - - in a block and may have signed a precommit or not. This in - conjunction with the - - `SignedBlocksWindow` param determines the index in the - `MissedBlocksBitArray`. - jailed_until: - type: string - format: date-time - description: >- - Timestamp until which the validator is jailed due to - liveness downtime. - tombstoned: - type: boolean - format: boolean - description: >- - Whether or not a validator has been tombstoned (killed out - of validator set). It is set - - once the validator commits an equivocation or for any - other configured misbehiavor. - missed_blocks_counter: - type: string - format: int64 - description: >- - A counter kept to avoid unnecessary array reads. - - Note that `Sum(MissedBlocksBitArray)` always equals - `MissedBlocksCounter`. - description: >- - ValidatorSigningInfo defines a validator's signing info for - monitoring their - - liveness activity. - title: >- - val_signing_info is the signing info of requested val cons - address - title: >- - QuerySigningInfoResponse is the response type for the - Query/SigningInfo RPC - - method - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - value: - type: string - format: byte - parameters: - - name: cons_address - description: cons_address is the address to query signing info of - in: path - required: true - type: string - tags: - - Query - /cosmos/staking/v1beta1/delegations/{delegator_addr}: - get: - summary: >- - DelegatorDelegations queries all delegations of a given delegator - address. - operationId: DelegatorDelegations - responses: - '200': - description: A successful response. 
- schema: - type: object - properties: - delegation_responses: - type: array - items: - type: object - properties: - delegation: - type: object - properties: - delegator_address: - type: string - description: >- - delegator_address is the bech32-encoded address of - the delegator. - validator_address: - type: string - description: >- - validator_address is the bech32-encoded address of - the validator. - shares: - type: string - description: shares define the delegation shares received. - description: >- - Delegation represents the bond with tokens held by an - account. It is - - owned by one delegator, and is associated with the - voting power of one - - validator. - balance: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the - custom method - - signatures required by gogoproto. - description: >- - DelegationResponse is equivalent to Delegation except that - it contains a - - balance in addition to shares which is more suitable for - client responses. - description: >- - delegation_responses defines all the delegations' info of a - delegator. - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: |- - QueryDelegatorDelegationsResponse is response type for the - Query/DelegatorDelegations RPC method. - default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -18390,7 +17913,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -18400,13 +17923,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -18428,7 +17954,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -18466,191 +17991,316 @@ paths: "value": "1.212s" } parameters: - - name: delegator_addr - description: delegator_addr defines the delegator address to query for. - in: path - required: true - type: string - - name: pagination.key - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. - in: query - required: false - type: string - format: byte - - name: pagination.offset - description: >- - offset is a numeric offset that can be used when key is unavailable. - - It is less efficient than using key. Only one of offset or key - should - - be set. - in: query - required: false - type: string - format: uint64 - - name: pagination.limit + - name: params_type description: >- - limit is the total number of results to be returned in the result - page. 
+ params_type defines which parameters to query for, can be one of + "voting", - If left empty it will default to a value to be set by each app. - in: query - required: false + "tallying" or "deposit". + in: path + required: true type: string - format: uint64 - - name: pagination.count_total - description: >- - count_total is set to true to indicate that the result set should - include - - a count of the total number of items available for pagination in - UIs. - - count_total is only respected when offset is used. It is ignored - when key - - is set. - in: query - required: false - type: boolean - format: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. - in: query - required: false - type: boolean - format: boolean tags: - Query - /cosmos/staking/v1beta1/delegators/{delegator_addr}/redelegations: + /cosmos/gov/v1/proposals: get: - summary: Redelegations queries redelegations of given address. - operationId: Redelegations + summary: Proposals queries all proposals based on given status. + operationId: GovV1Proposal responses: '200': description: A successful response. schema: type: object properties: - redelegation_responses: + proposals: type: array items: type: object properties: - redelegation: + id: + type: string + format: uint64 + description: id defines the unique id of the proposal. + messages: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized + + protocol buffer message. This string must contain + at least + + one "/" character. The last segment of the URL's + path must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should + be in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the + binary all types that they + + expect it to use in the context of Any. However, + for URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions + as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently + available in the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods + of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... 
+ } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL + and the unpack + + methods only use the fully qualified type name after + the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the + regular + + representation of the deserialized, embedded message, + with an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message + [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + messages are the arbitrary messages to be executed if + the proposal passes. + status: + description: status defines the proposal status. + type: string + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_DEPOSIT_PERIOD + - PROPOSAL_STATUS_VOTING_PERIOD + - PROPOSAL_STATUS_PASSED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_FAILED + default: PROPOSAL_STATUS_UNSPECIFIED + final_tally_result: + description: >- + final_tally_result is the final tally result of the + proposal. When + + querying a proposal via gRPC, this field is not + populated until the + + proposal's voting period has ended. type: object properties: - delegator_address: + yes_count: type: string - description: >- - delegator_address is the bech32-encoded address of - the delegator. - validator_src_address: + description: yes_count is the number of yes votes on a proposal. + abstain_count: type: string description: >- - validator_src_address is the validator redelegation - source operator address. - validator_dst_address: + abstain_count is the number of abstain votes on a + proposal. + no_count: + type: string + description: no_count is the number of no votes on a proposal. + no_with_veto_count: type: string description: >- - validator_dst_address is the validator redelegation - destination operator address. - entries: - type: array - items: - type: object - properties: - creation_height: - type: string - format: int64 - description: >- - creation_height defines the height which the - redelegation took place. - completion_time: - type: string - format: date-time - description: >- - completion_time defines the unix time for - redelegation completion. - initial_balance: - type: string - description: >- - initial_balance defines the initial balance - when redelegation started. 
- shares_dst: - type: string - description: >- - shares_dst is the amount of - destination-validator shares created by - redelegation. - description: >- - RedelegationEntry defines a redelegation object - with relevant metadata. - description: entries are the redelegation entries. - description: >- - Redelegation contains the list of a particular - delegator's redelegating bonds - - from a particular source validator to a particular - destination validator. - entries: + no_with_veto_count is the number of no with veto + votes on a proposal. + submit_time: + type: string + format: date-time + description: submit_time is the time of proposal submission. + deposit_end_time: + type: string + format: date-time + description: deposit_end_time is the end time for deposition. + total_deposit: type: array items: type: object properties: - redelegation_entry: - type: object - properties: - creation_height: - type: string - format: int64 - description: >- - creation_height defines the height which the - redelegation took place. - completion_time: - type: string - format: date-time - description: >- - completion_time defines the unix time for - redelegation completion. - initial_balance: - type: string - description: >- - initial_balance defines the initial balance - when redelegation started. - shares_dst: - type: string - description: >- - shares_dst is the amount of - destination-validator shares created by - redelegation. - description: >- - RedelegationEntry defines a redelegation object - with relevant metadata. - balance: + denom: + type: string + amount: type: string description: >- - RedelegationEntryResponse is equivalent to a - RedelegationEntry except that it - - contains a balance in addition to shares which is more - suitable for client + Coin defines a token with a denomination and an + amount. - responses. - description: >- - RedelegationResponse is equivalent to a Redelegation except - that its entries - contain a balance in addition to shares which is more - suitable for client + NOTE: The amount field is an Int which implements the + custom method - responses. + signatures required by gogoproto. + description: total_deposit is the total deposit on the proposal. + voting_start_time: + type: string + format: date-time + description: >- + voting_start_time is the starting time to vote on a + proposal. + voting_end_time: + type: string + format: date-time + description: voting_end_time is the end time of voting on a proposal. + metadata: + type: string + description: >- + metadata is any arbitrary metadata attached to the + proposal. + title: + type: string + description: 'Since: cosmos-sdk 0.47' + title: title is the title of the proposal + summary: + type: string + description: 'Since: cosmos-sdk 0.47' + title: summary is a short summary of the proposal + proposer: + type: string + description: 'Since: cosmos-sdk 0.47' + title: Proposer is the address of the proposal sumbitter + description: >- + Proposal defines the core field members of a governance + proposal. + description: proposals defines all the requested governance proposals. pagination: description: pagination defines the pagination in the response. type: object @@ -18658,9 +18308,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. 
total: type: string format: uint64 @@ -18670,12 +18321,12 @@ paths: was set, its value is undefined otherwise description: >- - QueryRedelegationsResponse is response type for the - Query/Redelegations RPC + QueryProposalsResponse is the response type for the + Query/Proposals RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -18788,7 +18439,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -18798,13 +18449,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -18826,7 +18480,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -18864,18 +18517,39 @@ paths: "value": "1.212s" } parameters: - - name: delegator_addr - description: delegator_addr defines the delegator address to query for. - in: path - required: true + - name: proposal_status + description: |- + proposal_status defines the status of the proposals. + + - PROPOSAL_STATUS_UNSPECIFIED: PROPOSAL_STATUS_UNSPECIFIED defines the default proposal status. + - PROPOSAL_STATUS_DEPOSIT_PERIOD: PROPOSAL_STATUS_DEPOSIT_PERIOD defines a proposal status during the deposit + period. + - PROPOSAL_STATUS_VOTING_PERIOD: PROPOSAL_STATUS_VOTING_PERIOD defines a proposal status during the voting + period. + - PROPOSAL_STATUS_PASSED: PROPOSAL_STATUS_PASSED defines a proposal status of a proposal that has + passed. + - PROPOSAL_STATUS_REJECTED: PROPOSAL_STATUS_REJECTED defines a proposal status of a proposal that has + been rejected. + - PROPOSAL_STATUS_FAILED: PROPOSAL_STATUS_FAILED defines a proposal status of a proposal that has + failed. + in: query + required: false type: string - - name: src_validator_addr - description: src_validator_addr defines the validator address to redelegate from. + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_DEPOSIT_PERIOD + - PROPOSAL_STATUS_VOTING_PERIOD + - PROPOSAL_STATUS_PASSED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_FAILED + default: PROPOSAL_STATUS_UNSPECIFIED + - name: voter + description: voter defines the voter address for the proposals. in: query required: false type: string - - name: dst_validator_addr - description: dst_validator_addr defines the validator address to redelegate to. + - name: depositor + description: depositor defines the deposit addresses from the proposals. in: query required: false type: string @@ -18925,471 +18599,129 @@ paths: in: query required: false type: boolean - format: boolean - name: pagination.reverse description: >- reverse is set to true if results are to be returned in the descending order. + + + Since: cosmos-sdk 0.43 in: query required: false type: boolean - format: boolean tags: - Query - /cosmos/staking/v1beta1/delegators/{delegator_addr}/unbonding_delegations: + /cosmos/gov/v1/proposals/{proposal_id}: get: - summary: >- - DelegatorUnbondingDelegations queries all unbonding delegations of a - given - - delegator address. - operationId: DelegatorUnbondingDelegations + summary: Proposal queries proposal details based on ProposalID. 
+ operationId: GovV1Proposal responses: '200': description: A successful response. schema: type: object properties: - unbonding_responses: - type: array - items: - type: object - properties: - delegator_address: - type: string - description: >- - delegator_address is the bech32-encoded address of the - delegator. - validator_address: - type: string - description: >- - validator_address is the bech32-encoded address of the - validator. - entries: - type: array - items: - type: object - properties: - creation_height: - type: string - format: int64 - description: >- - creation_height is the height which the unbonding - took place. - completion_time: - type: string - format: date-time - description: >- - completion_time is the unix time for unbonding - completion. - initial_balance: - type: string - description: >- - initial_balance defines the tokens initially - scheduled to receive at completion. - balance: - type: string - description: >- - balance defines the tokens to receive at - completion. - description: >- - UnbondingDelegationEntry defines an unbonding object - with relevant metadata. - description: entries are the unbonding delegation entries. - description: >- - UnbondingDelegation stores all of a single delegator's - unbonding bonds - - for a single validator in an time-ordered list. - pagination: - description: pagination defines the pagination in the response. + proposal: type: object properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: + id: type: string format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: >- - QueryUnbondingDelegatorDelegationsResponse is response type for - the - - Query/UnbondingDelegatorDelegations RPC method. - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized + description: id defines the unique id of the proposal. + messages: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized - protocol buffer message. This string must contain at - least + protocol buffer message. This string must contain at + least - one "/" character. The last segment of the URL's path - must represent + one "/" character. The last segment of the URL's + path must represent - the fully qualified name of the type (as in + the fully qualified name of the type (as in - `path/google.protobuf.Duration`). The name should be in - a canonical form + `path/google.protobuf.Duration`). The name should be + in a canonical form - (e.g., leading "." is not accepted). + (e.g., leading "." is not accepted). - In practice, teams usually precompile into the binary - all types that they + In practice, teams usually precompile into the + binary all types that they - expect it to use in the context of Any. However, for - URLs which use the + expect it to use in the context of Any. 
However, for + URLs which use the - scheme `http`, `https`, or no scheme, one can optionally - set up a type + scheme `http`, `https`, or no scheme, one can + optionally set up a type - server that maps type URLs to message definitions as - follows: + server that maps type URLs to message definitions as + follows: - * If no scheme is provided, `https` is assumed. + * If no scheme is provided, `https` is assumed. - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - Note: this functionality is not currently available in - the official + Note: this functionality is not currently available + in the official - protobuf release, and it is not used for type URLs - beginning with + protobuf release, and it is not used for type URLs + beginning with - type.googleapis.com. + type.googleapis.com. - Schemes other than `http`, `https` (or the empty scheme) - might be + Schemes other than `http`, `https` (or the empty + scheme) might be - used with implementation specific semantics. - value: - type: string - format: byte + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a + `Any` contains an arbitrary serialized protocol buffer + message along with a - URL that describes the type of the serialized message. + URL that describes the type of the serialized message. - Protobuf library provides support to pack/unpack Any values - in the form + Protobuf library provides support to pack/unpack Any + values in the form - of utility functions or additional generated methods of the - Any type. + of utility functions or additional generated methods of + the Any type. - Example 1: Pack and unpack a message in C++. + Example 1: Pack and unpack a message in C++. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... 
- } - - The pack methods provided by protobuf library will by - default use - - 'type.googleapis.com/full.type.name' as the type URL and the - unpack - - methods only use the fully qualified type name after the - last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield - type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with - an - - additional field `@type` which contains the type URL. - Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom - JSON - - representation, that representation will be embedded adding - a field - - `value` which holds the custom JSON in addition to the - `@type` - - field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - parameters: - - name: delegator_addr - description: delegator_addr defines the delegator address to query for. - in: path - required: true - type: string - - name: pagination.key - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. - in: query - required: false - type: string - format: byte - - name: pagination.offset - description: >- - offset is a numeric offset that can be used when key is unavailable. - - It is less efficient than using key. Only one of offset or key - should - - be set. - in: query - required: false - type: string - format: uint64 - - name: pagination.limit - description: >- - limit is the total number of results to be returned in the result - page. - - If left empty it will default to a value to be set by each app. - in: query - required: false - type: string - format: uint64 - - name: pagination.count_total - description: >- - count_total is set to true to indicate that the result set should - include - - a count of the total number of items available for pagination in - UIs. - - count_total is only respected when offset is used. It is ignored - when key - - is set. - in: query - required: false - type: boolean - format: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. - in: query - required: false - type: boolean - format: boolean - tags: - - Query - /cosmos/staking/v1beta1/delegators/{delegator_addr}/validators: - get: - summary: |- - DelegatorValidators queries all validators info for given delegator - address. - operationId: StakingDelegatorValidators - responses: - '200': - description: A successful response. - schema: - type: object - properties: - validators: - type: array - items: - type: object - properties: - operator_address: - type: string - description: >- - operator_address defines the address of the validator's - operator; bech encoded in JSON. - consensus_pubkey: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the - type of the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's - path must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). 
The name should be - in a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the - binary all types that they - - expect it to use in the context of Any. However, for - URLs which use the - - scheme `http`, `https`, or no scheme, one can - optionally set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results - based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available - in the official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty - scheme) might be - - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the - above specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any - values in the form - - of utility functions or additional generated methods of - the Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. + Example 2: Pack and unpack a message in Java. Foo foo = ...; Any any = Any.pack(foo); @@ -19398,7 +18730,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -19408,13 +18740,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -19436,7 +18771,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -19475,156 +18809,108 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - jailed: - type: boolean - format: boolean - description: >- - jailed defined whether the validator has been jailed - from bonded status or not. - status: - description: >- - status is the validator status - (bonded/unbonding/unbonded). - type: string - enum: - - BOND_STATUS_UNSPECIFIED - - BOND_STATUS_UNBONDED - - BOND_STATUS_UNBONDING - - BOND_STATUS_BONDED - default: BOND_STATUS_UNSPECIFIED - tokens: - type: string - description: >- - tokens define the delegated tokens (incl. - self-delegation). - delegator_shares: - type: string - description: >- - delegator_shares defines total shares issued to a - validator's delegators. - description: - description: >- - description defines the description terms for the - validator. + description: >- + messages are the arbitrary messages to be executed if the + proposal passes. + status: + description: status defines the proposal status. 
+ type: string + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_DEPOSIT_PERIOD + - PROPOSAL_STATUS_VOTING_PERIOD + - PROPOSAL_STATUS_PASSED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_FAILED + default: PROPOSAL_STATUS_UNSPECIFIED + final_tally_result: + description: >- + final_tally_result is the final tally result of the + proposal. When + + querying a proposal via gRPC, this field is not populated + until the + + proposal's voting period has ended. + type: object + properties: + yes_count: + type: string + description: yes_count is the number of yes votes on a proposal. + abstain_count: + type: string + description: >- + abstain_count is the number of abstain votes on a + proposal. + no_count: + type: string + description: no_count is the number of no votes on a proposal. + no_with_veto_count: + type: string + description: >- + no_with_veto_count is the number of no with veto votes + on a proposal. + submit_time: + type: string + format: date-time + description: submit_time is the time of proposal submission. + deposit_end_time: + type: string + format: date-time + description: deposit_end_time is the end time for deposition. + total_deposit: + type: array + items: type: object properties: - moniker: - type: string - description: >- - moniker defines a human-readable name for the - validator. - identity: - type: string - description: >- - identity defines an optional identity signature (ex. - UPort or Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: >- - security_contact defines an optional email for - security contact. - details: + denom: type: string - description: details define other optional details. - unbonding_height: - type: string - format: int64 - description: >- - unbonding_height defines, if unbonding, the height at - which this validator has begun unbonding. - unbonding_time: - type: string - format: date-time - description: >- - unbonding_time defines, if unbonding, the min time for - the validator to complete unbonding. - commission: - description: commission defines the commission parameters. - type: object - properties: - commission_rates: - description: >- - commission_rates defines the initial commission - rates to be used for creating a validator. - type: object - properties: - rate: - type: string - description: >- - rate is the commission rate charged to - delegators, as a fraction. - max_rate: - type: string - description: >- - max_rate defines the maximum commission rate - which validator can ever charge, as a fraction. - max_change_rate: - type: string - description: >- - max_change_rate defines the maximum daily - increase of the validator commission, as a - fraction. - update_time: + amount: type: string - format: date-time - description: >- - update_time is the last time the commission rate was - changed. - min_self_delegation: - type: string description: >- - min_self_delegation is the validator's self declared - minimum self delegation. - description: >- - Validator defines a validator, together with the total - amount of the - - Validator's bond shares and their exchange rate to coins. - Slashing results in - - a decrease in the exchange rate, allowing correct - calculation of future - - undelegations without iterating over delegators. When coins - are delegated to - - this validator, the validator is credited with a delegation - whose number of + Coin defines a token with a denomination and an amount. 
- bond shares is based on the amount of coins delegated - divided by the current - exchange rate. Voting power can be calculated as total - bonded shares + NOTE: The amount field is an Int which implements the + custom method - multiplied by exchange rate. - description: validators defines the the validators' info of a delegator. - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: + signatures required by gogoproto. + description: total_deposit is the total deposit on the proposal. + voting_start_time: type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: + format: date-time + description: >- + voting_start_time is the starting time to vote on a + proposal. + voting_end_time: type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: |- - QueryDelegatorValidatorsResponse is response type for the - Query/DelegatorValidators RPC method. + format: date-time + description: voting_end_time is the end time of voting on a proposal. + metadata: + type: string + description: >- + metadata is any arbitrary metadata attached to the + proposal. + title: + type: string + description: 'Since: cosmos-sdk 0.47' + title: title is the title of the proposal + summary: + type: string + description: 'Since: cosmos-sdk 0.47' + title: summary is a short summary of the proposal + proposer: + type: string + description: 'Since: cosmos-sdk 0.47' + title: Proposer is the address of the proposal sumbitter + description: >- + Proposal defines the core field members of a governance + proposal. + description: >- + QueryProposalResponse is the response type for the Query/Proposal + RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -19737,7 +19023,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -19747,13 +19033,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -19775,7 +19064,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -19813,393 +19101,87 @@ paths: "value": "1.212s" } parameters: - - name: delegator_addr - description: delegator_addr defines the delegator address to query for. + - name: proposal_id + description: proposal_id defines the unique id of the proposal. in: path required: true type: string - - name: pagination.key - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. - in: query - required: false - type: string - format: byte - - name: pagination.offset - description: >- - offset is a numeric offset that can be used when key is unavailable. - - It is less efficient than using key. Only one of offset or key - should - - be set. 
- in: query - required: false - type: string - format: uint64 - - name: pagination.limit - description: >- - limit is the total number of results to be returned in the result - page. - - If left empty it will default to a value to be set by each app. - in: query - required: false - type: string format: uint64 - - name: pagination.count_total - description: >- - count_total is set to true to indicate that the result set should - include - - a count of the total number of items available for pagination in - UIs. - - count_total is only respected when offset is used. It is ignored - when key - - is set. - in: query - required: false - type: boolean - format: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. - in: query - required: false - type: boolean - format: boolean tags: - Query - /cosmos/staking/v1beta1/delegators/{delegator_addr}/validators/{validator_addr}: + /cosmos/gov/v1/proposals/{proposal_id}/deposits: get: - summary: |- - DelegatorValidator queries validator info for given delegator validator - pair. - operationId: DelegatorValidator + summary: Deposits queries all deposits of a single proposal. + operationId: GovV1Deposit responses: '200': description: A successful response. schema: type: object properties: - validator: - type: object - properties: - operator_address: - type: string - description: >- - operator_address defines the address of the validator's - operator; bech encoded in JSON. - consensus_pubkey: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type - of the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's path - must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be - in a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary - all types that they - - expect it to use in the context of Any. However, for - URLs which use the - - scheme `http`, `https`, or no scheme, one can - optionally set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results - based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in - the official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty - scheme) might be - - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the - above specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any - values in the form - - of utility functions or additional generated methods of - the Any type. - - - Example 1: Pack and unpack a message in C++. 
- - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } - - The pack methods provided by protobuf library will by - default use - - 'type.googleapis.com/full.type.name' as the type URL and - the unpack - - methods only use the fully qualified type name after the - last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will - yield type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with - an - - additional field `@type` which contains the type URL. - Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a - custom JSON - - representation, that representation will be embedded - adding a field - - `value` which holds the custom JSON in addition to the - `@type` - - field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - jailed: - type: boolean - format: boolean - description: >- - jailed defined whether the validator has been jailed from - bonded status or not. - status: - description: >- - status is the validator status - (bonded/unbonding/unbonded). - type: string - enum: - - BOND_STATUS_UNSPECIFIED - - BOND_STATUS_UNBONDED - - BOND_STATUS_UNBONDING - - BOND_STATUS_BONDED - default: BOND_STATUS_UNSPECIFIED - tokens: - type: string - description: >- - tokens define the delegated tokens (incl. - self-delegation). - delegator_shares: - type: string - description: >- - delegator_shares defines total shares issued to a - validator's delegators. - description: - description: >- - description defines the description terms for the - validator. - type: object - properties: - moniker: - type: string - description: >- - moniker defines a human-readable name for the - validator. - identity: - type: string - description: >- - identity defines an optional identity signature (ex. - UPort or Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: >- - security_contact defines an optional email for - security contact. - details: - type: string - description: details define other optional details. - unbonding_height: - type: string - format: int64 - description: >- - unbonding_height defines, if unbonding, the height at - which this validator has begun unbonding. - unbonding_time: - type: string - format: date-time - description: >- - unbonding_time defines, if unbonding, the min time for the - validator to complete unbonding. - commission: - description: commission defines the commission parameters. - type: object - properties: - commission_rates: - description: >- - commission_rates defines the initial commission rates - to be used for creating a validator. 
+ deposits: + type: array + items: + type: object + properties: + proposal_id: + type: string + format: uint64 + description: proposal_id defines the unique id of the proposal. + depositor: + type: string + description: >- + depositor defines the deposit addresses from the + proposals. + amount: + type: array + items: type: object properties: - rate: - type: string - description: >- - rate is the commission rate charged to delegators, - as a fraction. - max_rate: + denom: type: string - description: >- - max_rate defines the maximum commission rate which - validator can ever charge, as a fraction. - max_change_rate: + amount: type: string - description: >- - max_change_rate defines the maximum daily increase - of the validator commission, as a fraction. - update_time: - type: string - format: date-time description: >- - update_time is the last time the commission rate was - changed. - min_self_delegation: - type: string - description: >- - min_self_delegation is the validator's self declared - minimum self delegation. - description: >- - Validator defines a validator, together with the total amount - of the - - Validator's bond shares and their exchange rate to coins. - Slashing results in - - a decrease in the exchange rate, allowing correct calculation - of future + Coin defines a token with a denomination and an + amount. - undelegations without iterating over delegators. When coins - are delegated to - this validator, the validator is credited with a delegation - whose number of + NOTE: The amount field is an Int which implements the + custom method - bond shares is based on the amount of coins delegated divided - by the current + signatures required by gogoproto. + description: amount to be deposited by depositor. + description: >- + Deposit defines an amount deposited by an account address to + an active - exchange rate. Voting power can be calculated as total bonded - shares + proposal. + description: deposits defines the requested deposits. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - multiplied by exchange rate. - description: |- - QueryDelegatorValidatorResponse response type for the - Query/DelegatorValidator RPC method. + was set, its value is undefined otherwise + description: >- + QueryDepositsResponse is the response type for the Query/Deposits + RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -20312,7 +19294,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -20322,13 +19304,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... 
} @@ -20350,7 +19335,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -20388,431 +19372,122 @@ paths: "value": "1.212s" } parameters: - - name: delegator_addr - description: delegator_addr defines the delegator address to query for. + - name: proposal_id + description: proposal_id defines the unique id of the proposal. in: path required: true type: string - - name: validator_addr - description: validator_addr defines the validator address to query for. - in: path - required: true + format: uint64 + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query - /cosmos/staking/v1beta1/historical_info/{height}: + /cosmos/gov/v1/proposals/{proposal_id}/deposits/{depositor}: get: - summary: HistoricalInfo queries the historical info for given height. - operationId: HistoricalInfo + summary: >- + Deposit queries single deposit information based proposalID, + depositAddr. + operationId: GovV1Deposit responses: '200': description: A successful response. schema: type: object properties: - hist: - description: hist defines the historical info at the given height. + deposit: type: object properties: - header: - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing - a block in the blockchain, - - including all blockchain data structures and the rules - of the application's - - state transition machine. 
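The hunks above replace the embedded Go snippets in this generated swagger, moving them off the deprecated ptypes helpers and onto the anypb package from google.golang.org/protobuf. A minimal, self-contained sketch of the new calls, using durationpb.Duration as a stand-in payload since no concrete pb.Foo message exists here:

    package main

    import (
        "fmt"
        "time"

        "google.golang.org/protobuf/types/known/anypb"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        // Pack: anypb.New replaces ptypes.MarshalAny.
        msg := durationpb.New(1212 * time.Millisecond)
        anyMsg, err := anypb.New(msg)
        if err != nil {
            panic(err)
        }
        // The type URL defaults to "type.googleapis.com/" + the full type name.
        fmt.Println(anyMsg.TypeUrl) // type.googleapis.com/google.protobuf.Duration

        // Unpack: Any.UnmarshalTo replaces ptypes.UnmarshalAny.
        out := &durationpb.Duration{}
        if err := anyMsg.UnmarshalTo(out); err != nil {
            panic(err)
        }
        fmt.Println(out.AsDuration()) // 1.212s
    }

The 1.212s payload simply mirrors the google.protobuf.Duration JSON example quoted throughout this file.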
- chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. - valset: + proposal_id: + type: string + format: uint64 + description: proposal_id defines the unique id of the proposal. + depositor: + type: string + description: >- + depositor defines the deposit addresses from the + proposals. + amount: type: array items: type: object properties: - operator_address: + denom: type: string - description: >- - operator_address defines the address of the - validator's operator; bech encoded in JSON. - consensus_pubkey: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the - type of the serialized + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. - protocol buffer message. This string must - contain at least - - one "/" character. The last segment of the URL's - path must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name - should be in a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the - binary all types that they - - expect it to use in the context of Any. However, - for URLs which use the - - scheme `http`, `https`, or no scheme, one can - optionally set up a type - - server that maps type URLs to message - definitions as follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup - results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently - available in the official - - protobuf release, and it is not used for type - URLs beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty - scheme) might be - - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of - the above specified type. - description: >- - `Any` contains an arbitrary serialized protocol - buffer message along with a - - URL that describes the type of the serialized - message. - - - Protobuf library provides support to pack/unpack Any - values in the form - - of utility functions or additional generated methods - of the Any type. - - - Example 1: Pack and unpack a message in C++. 
- - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } - - The pack methods provided by protobuf library will - by default use - - 'type.googleapis.com/full.type.name' as the type URL - and the unpack - - methods only use the fully qualified type name after - the last '/' - - in the type URL, for example "foo.bar.com/x/y.z" - will yield type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the - regular - - representation of the deserialized, embedded - message, with an - - additional field `@type` which contains the type - URL. Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a - custom JSON - - representation, that representation will be embedded - adding a field - - `value` which holds the custom JSON in addition to - the `@type` - - field. Example (for message - [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - jailed: - type: boolean - format: boolean - description: >- - jailed defined whether the validator has been jailed - from bonded status or not. - status: - description: >- - status is the validator status - (bonded/unbonding/unbonded). - type: string - enum: - - BOND_STATUS_UNSPECIFIED - - BOND_STATUS_UNBONDED - - BOND_STATUS_UNBONDING - - BOND_STATUS_BONDED - default: BOND_STATUS_UNSPECIFIED - tokens: - type: string - description: >- - tokens define the delegated tokens (incl. - self-delegation). - delegator_shares: - type: string - description: >- - delegator_shares defines total shares issued to a - validator's delegators. - description: - description: >- - description defines the description terms for the - validator. - type: object - properties: - moniker: - type: string - description: >- - moniker defines a human-readable name for the - validator. - identity: - type: string - description: >- - identity defines an optional identity signature - (ex. UPort or Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: >- - security_contact defines an optional email for - security contact. - details: - type: string - description: details define other optional details. - unbonding_height: - type: string - format: int64 - description: >- - unbonding_height defines, if unbonding, the height - at which this validator has begun unbonding. - unbonding_time: - type: string - format: date-time - description: >- - unbonding_time defines, if unbonding, the min time - for the validator to complete unbonding. - commission: - description: commission defines the commission parameters. 
- type: object - properties: - commission_rates: - description: >- - commission_rates defines the initial commission - rates to be used for creating a validator. - type: object - properties: - rate: - type: string - description: >- - rate is the commission rate charged to - delegators, as a fraction. - max_rate: - type: string - description: >- - max_rate defines the maximum commission rate - which validator can ever charge, as a - fraction. - max_change_rate: - type: string - description: >- - max_change_rate defines the maximum daily - increase of the validator commission, as a - fraction. - update_time: - type: string - format: date-time - description: >- - update_time is the last time the commission rate - was changed. - min_self_delegation: - type: string - description: >- - min_self_delegation is the validator's self declared - minimum self delegation. - description: >- - Validator defines a validator, together with the total - amount of the - - Validator's bond shares and their exchange rate to - coins. Slashing results in - - a decrease in the exchange rate, allowing correct - calculation of future - - undelegations without iterating over delegators. When - coins are delegated to - - this validator, the validator is credited with a - delegation whose number of - bond shares is based on the amount of coins delegated - divided by the current + NOTE: The amount field is an Int which implements the + custom method - exchange rate. Voting power can be calculated as total - bonded shares + signatures required by gogoproto. + description: amount to be deposited by depositor. + description: >- + Deposit defines an amount deposited by an account address to + an active - multiplied by exchange rate. + proposal. description: >- - QueryHistoricalInfoResponse is response type for the - Query/HistoricalInfo RPC - - method. + QueryDepositResponse is the response type for the Query/Deposit + RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -20925,7 +19600,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -20935,13 +19610,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -20963,7 +19641,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -21001,55 +19678,54 @@ paths: "value": "1.212s" } parameters: - - name: height - description: height defines at which height to query the historical info. + - name: proposal_id + description: proposal_id defines the unique id of the proposal. + in: path + required: true + type: string + format: uint64 + - name: depositor + description: depositor defines the deposit addresses from the proposals. in: path required: true type: string - format: int64 tags: - Query - /cosmos/staking/v1beta1/params: + /cosmos/gov/v1/proposals/{proposal_id}/tally: get: - summary: Parameters queries the staking parameters. - operationId: StakingParams + summary: TallyResult queries the tally of a proposal vote. + operationId: GovV1TallyResult responses: '200': description: A successful response. 
schema: type: object properties: - params: - description: params holds all the parameters of this module. + tally: + description: tally defines the requested tally. type: object properties: - unbonding_time: + yes_count: + type: string + description: yes_count is the number of yes votes on a proposal. + abstain_count: type: string - description: unbonding_time is the time duration of unbonding. - max_validators: - type: integer - format: int64 - description: max_validators is the maximum number of validators. - max_entries: - type: integer - format: int64 - description: >- - max_entries is the max entries for either unbonding - delegation or redelegation (per pair/trio). - historical_entries: - type: integer - format: int64 description: >- - historical_entries is the number of historical entries to - persist. - bond_denom: + abstain_count is the number of abstain votes on a + proposal. + no_count: type: string - description: bond_denom defines the bondable coin denomination. + description: no_count is the number of no votes on a proposal. + no_with_veto_count: + type: string + description: >- + no_with_veto_count is the number of no with veto votes on + a proposal. description: >- - QueryParamsResponse is response type for the Query/Params RPC - method. + QueryTallyResultResponse is the response type for the Query/Tally + RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -21162,7 +19838,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -21172,13 +19848,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -21200,7 +19879,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -21237,29 +19915,98 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + parameters: + - name: proposal_id + description: proposal_id defines the unique id of the proposal. + in: path + required: true + type: string + format: uint64 tags: - Query - /cosmos/staking/v1beta1/pool: + /cosmos/gov/v1/proposals/{proposal_id}/votes: get: - summary: Pool queries the pool info. - operationId: Pool + summary: Votes queries votes of a given proposal. + operationId: GovV1Votes responses: '200': description: A successful response. schema: type: object properties: - pool: - description: pool defines the pool info. + votes: + type: array + items: + type: object + properties: + proposal_id: + type: string + format: uint64 + description: proposal_id defines the unique id of the proposal. + voter: + type: string + description: voter is the voter address of the proposal. + options: + type: array + items: + type: object + properties: + option: + description: >- + option defines the valid vote options, it must not + contain duplicate vote options. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + weight: + type: string + description: >- + weight is the vote weight associated with the vote + option. 
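The new /cosmos/gov/v1/proposals/{proposal_id}/tally route documented above returns the four vote counters as decimal strings. A hedged sketch of reading it over a node's REST API; the localhost:1317 address and proposal id 1 are placeholders, not part of this change:

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    // tallyResponse mirrors QueryTallyResultResponse from the swagger above.
    type tallyResponse struct {
        Tally struct {
            YesCount        string `json:"yes_count"`
            AbstainCount    string `json:"abstain_count"`
            NoCount         string `json:"no_count"`
            NoWithVetoCount string `json:"no_with_veto_count"`
        } `json:"tally"`
    }

    func main() {
        // Placeholder REST endpoint and proposal id.
        resp, err := http.Get("http://localhost:1317/cosmos/gov/v1/proposals/1/tally")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        var out tallyResponse
        if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
            panic(err)
        }
        fmt.Printf("yes=%s no=%s abstain=%s no_with_veto=%s\n",
            out.Tally.YesCount, out.Tally.NoCount,
            out.Tally.AbstainCount, out.Tally.NoWithVetoCount)
    }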
+ description: >- + WeightedVoteOption defines a unit of vote for vote + split. + description: options is the weighted vote options. + metadata: + type: string + description: >- + metadata is any arbitrary metadata to attached to the + vote. + description: >- + Vote defines a vote on a governance proposal. + + A Vote consists of a proposal ID, the voter, and the vote + option. + description: votes defines the queried votes. + pagination: + description: pagination defines the pagination in the response. type: object properties: - not_bonded_tokens: + next_key: type: string - bonded_tokens: + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: type: string - description: QueryPoolResponse is response type for the Query/Pool RPC method. + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryVotesResponse is the response type for the Query/Votes RPC + method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -21372,7 +20119,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -21382,13 +20129,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -21410,7 +20160,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -21447,355 +20196,132 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + parameters: + - name: proposal_id + description: proposal_id defines the unique id of the proposal. + in: path + required: true + type: string + format: uint64 + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. 
+ + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query - /cosmos/staking/v1beta1/validators: + /cosmos/gov/v1/proposals/{proposal_id}/votes/{voter}: get: - summary: Validators queries all validators that match the given status. - operationId: Validators + summary: Vote queries voted information based on proposalID, voterAddr. + operationId: GovV1Vote responses: '200': description: A successful response. schema: type: object properties: - validators: - type: array - items: - type: object - properties: - operator_address: - type: string - description: >- - operator_address defines the address of the validator's - operator; bech encoded in JSON. - consensus_pubkey: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the - type of the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's - path must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be - in a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the - binary all types that they - - expect it to use in the context of Any. However, for - URLs which use the - - scheme `http`, `https`, or no scheme, one can - optionally set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results - based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available - in the official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty - scheme) might be - - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the - above specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any - values in the form - - of utility functions or additional generated methods of - the Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... 
- } - - The pack methods provided by protobuf library will by - default use - - 'type.googleapis.com/full.type.name' as the type URL and - the unpack - - methods only use the fully qualified type name after the - last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will - yield type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the - regular - - representation of the deserialized, embedded message, - with an - - additional field `@type` which contains the type URL. - Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a - custom JSON - - representation, that representation will be embedded - adding a field - - `value` which holds the custom JSON in addition to the - `@type` - - field. Example (for message - [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - jailed: - type: boolean - format: boolean - description: >- - jailed defined whether the validator has been jailed - from bonded status or not. - status: - description: >- - status is the validator status - (bonded/unbonding/unbonded). - type: string - enum: - - BOND_STATUS_UNSPECIFIED - - BOND_STATUS_UNBONDED - - BOND_STATUS_UNBONDING - - BOND_STATUS_BONDED - default: BOND_STATUS_UNSPECIFIED - tokens: - type: string - description: >- - tokens define the delegated tokens (incl. - self-delegation). - delegator_shares: - type: string - description: >- - delegator_shares defines total shares issued to a - validator's delegators. - description: - description: >- - description defines the description terms for the - validator. + vote: + type: object + properties: + proposal_id: + type: string + format: uint64 + description: proposal_id defines the unique id of the proposal. + voter: + type: string + description: voter is the voter address of the proposal. + options: + type: array + items: type: object properties: - moniker: - type: string - description: >- - moniker defines a human-readable name for the - validator. - identity: - type: string - description: >- - identity defines an optional identity signature (ex. - UPort or Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string + option: description: >- - security_contact defines an optional email for - security contact. - details: + option defines the valid vote options, it must not + contain duplicate vote options. type: string - description: details define other optional details. - unbonding_height: - type: string - format: int64 - description: >- - unbonding_height defines, if unbonding, the height at - which this validator has begun unbonding. - unbonding_time: - type: string - format: date-time - description: >- - unbonding_time defines, if unbonding, the min time for - the validator to complete unbonding. - commission: - description: commission defines the commission parameters. - type: object - properties: - commission_rates: - description: >- - commission_rates defines the initial commission - rates to be used for creating a validator. - type: object - properties: - rate: - type: string - description: >- - rate is the commission rate charged to - delegators, as a fraction. 
- max_rate: - type: string - description: >- - max_rate defines the maximum commission rate - which validator can ever charge, as a fraction. - max_change_rate: - type: string - description: >- - max_change_rate defines the maximum daily - increase of the validator commission, as a - fraction. - update_time: + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + weight: type: string - format: date-time description: >- - update_time is the last time the commission rate was - changed. - min_self_delegation: - type: string + weight is the vote weight associated with the vote + option. description: >- - min_self_delegation is the validator's self declared - minimum self delegation. - description: >- - Validator defines a validator, together with the total - amount of the - - Validator's bond shares and their exchange rate to coins. - Slashing results in - - a decrease in the exchange rate, allowing correct - calculation of future - - undelegations without iterating over delegators. When coins - are delegated to - - this validator, the validator is credited with a delegation - whose number of - - bond shares is based on the amount of coins delegated - divided by the current - - exchange rate. Voting power can be calculated as total - bonded shares - - multiplied by exchange rate. - description: validators contains all the queried validators. - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: + WeightedVoteOption defines a unit of vote for vote + split. + description: options is the weighted vote options. + metadata: type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + description: >- + metadata is any arbitrary metadata to attached to the + vote. + description: >- + Vote defines a vote on a governance proposal. - was set, its value is undefined otherwise - title: >- - QueryValidatorsResponse is response type for the Query/Validators - RPC method + A Vote consists of a proposal ID, the voter, and the vote + option. + description: >- + QueryVoteResponse is the response type for the Query/Vote RPC + method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -21908,7 +20434,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -21918,13 +20444,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -21946,7 +20475,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -21984,391 +20512,143 @@ paths: "value": "1.212s" } parameters: - - name: status - description: status enables to query for validators matching a given status. 
- in: query - required: false - type: string - - name: pagination.key - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. - in: query - required: false - type: string - format: byte - - name: pagination.offset - description: >- - offset is a numeric offset that can be used when key is unavailable. - - It is less efficient than using key. Only one of offset or key - should - - be set. - in: query - required: false + - name: proposal_id + description: proposal_id defines the unique id of the proposal. + in: path + required: true type: string format: uint64 - - name: pagination.limit - description: >- - limit is the total number of results to be returned in the result - page. - - If left empty it will default to a value to be set by each app. - in: query - required: false + - name: voter + description: voter defines the voter address for the proposals. + in: path + required: true type: string - format: uint64 - - name: pagination.count_total - description: >- - count_total is set to true to indicate that the result set should - include - - a count of the total number of items available for pagination in - UIs. - - count_total is only respected when offset is used. It is ignored - when key + tags: + - Query + /cosmos/mint/v1beta1/annual_provisions: + get: + summary: AnnualProvisions current minting annual provisions value. + operationId: AnnualProvisions + responses: + '200': + description: A successful response. + schema: + type: object + properties: + annual_provisions: + type: string + format: byte + description: >- + annual_provisions is the current minting annual provisions + value. + description: |- + QueryAnnualProvisionsResponse is the response type for the + Query/AnnualProvisions RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + tags: + - Query + /cosmos/mint/v1beta1/inflation: + get: + summary: Inflation returns the current minting inflation value. + operationId: Inflation + responses: + '200': + description: A successful response. + schema: + type: object + properties: + inflation: + type: string + format: byte + description: inflation is the current minting inflation value. + description: >- + QueryInflationResponse is the response type for the + Query/Inflation RPC - is set. - in: query - required: false - type: boolean - format: boolean - - name: pagination.reverse - description: >- - reverse is set to true if results are to be returned in the - descending order. - in: query - required: false - type: boolean - format: boolean + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte tags: - Query - /cosmos/staking/v1beta1/validators/{validator_addr}: + /cosmos/mint/v1beta1/params: get: - summary: Validator queries validator info for given validator address. - operationId: Validator + summary: Params returns the total set of minting parameters. + operationId: MintParams responses: '200': description: A successful response. 
schema: type: object properties: - validator: + params: + description: params defines the parameters of the module. type: object properties: - operator_address: + mint_denom: type: string - description: >- - operator_address defines the address of the validator's - operator; bech encoded in JSON. - consensus_pubkey: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type - of the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's path - must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be - in a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary - all types that they - - expect it to use in the context of Any. However, for - URLs which use the - - scheme `http`, `https`, or no scheme, one can - optionally set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results - based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in - the official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty - scheme) might be - - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the - above specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any - values in the form - - of utility functions or additional generated methods of - the Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } - - The pack methods provided by protobuf library will by - default use - - 'type.googleapis.com/full.type.name' as the type URL and - the unpack - - methods only use the fully qualified type name after the - last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will - yield type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with - an - - additional field `@type` which contains the type URL. 
- Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a - custom JSON - - representation, that representation will be embedded - adding a field - - `value` which holds the custom JSON in addition to the - `@type` - - field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - jailed: - type: boolean - format: boolean - description: >- - jailed defined whether the validator has been jailed from - bonded status or not. - status: - description: >- - status is the validator status - (bonded/unbonding/unbonded). - type: string - enum: - - BOND_STATUS_UNSPECIFIED - - BOND_STATUS_UNBONDED - - BOND_STATUS_UNBONDING - - BOND_STATUS_BONDED - default: BOND_STATUS_UNSPECIFIED - tokens: + title: type of coin to mint + inflation_rate_change: type: string - description: >- - tokens define the delegated tokens (incl. - self-delegation). - delegator_shares: + title: maximum annual change in inflation rate + inflation_max: type: string - description: >- - delegator_shares defines total shares issued to a - validator's delegators. - description: - description: >- - description defines the description terms for the - validator. - type: object - properties: - moniker: - type: string - description: >- - moniker defines a human-readable name for the - validator. - identity: - type: string - description: >- - identity defines an optional identity signature (ex. - UPort or Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: >- - security_contact defines an optional email for - security contact. - details: - type: string - description: details define other optional details. - unbonding_height: + title: maximum inflation rate + inflation_min: type: string - format: int64 - description: >- - unbonding_height defines, if unbonding, the height at - which this validator has begun unbonding. - unbonding_time: + title: minimum inflation rate + goal_bonded: type: string - format: date-time - description: >- - unbonding_time defines, if unbonding, the min time for the - validator to complete unbonding. - commission: - description: commission defines the commission parameters. - type: object - properties: - commission_rates: - description: >- - commission_rates defines the initial commission rates - to be used for creating a validator. - type: object - properties: - rate: - type: string - description: >- - rate is the commission rate charged to delegators, - as a fraction. - max_rate: - type: string - description: >- - max_rate defines the maximum commission rate which - validator can ever charge, as a fraction. - max_change_rate: - type: string - description: >- - max_change_rate defines the maximum daily increase - of the validator commission, as a fraction. - update_time: - type: string - format: date-time - description: >- - update_time is the last time the commission rate was - changed. - min_self_delegation: + title: goal of percent bonded atoms + blocks_per_year: type: string - description: >- - min_self_delegation is the validator's self declared - minimum self delegation. - description: >- - Validator defines a validator, together with the total amount - of the - - Validator's bond shares and their exchange rate to coins. 
- Slashing results in - - a decrease in the exchange rate, allowing correct calculation - of future - - undelegations without iterating over delegators. When coins - are delegated to - - this validator, the validator is credited with a delegation - whose number of - - bond shares is based on the amount of coins delegated divided - by the current - - exchange rate. Voting power can be calculated as total bonded - shares - - multiplied by exchange rate. - title: >- - QueryValidatorResponse is response type for the Query/Validator - RPC method + format: uint64 + title: expected blocks per year + description: >- + QueryParamsResponse is the response type for the Query/Params RPC + method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -22386,221 +20666,511 @@ paths: properties: type_url: type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's path - must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in - a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary - all types that they - - expect it to use in the context of Any. However, for - URLs which use the - - scheme `http`, `https`, or no scheme, one can optionally - set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in - the official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty scheme) - might be - - used with implementation specific semantics. value: type: string format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values - in the form - - of utility functions or additional generated methods of the - Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... 
- } - - The pack methods provided by protobuf library will by - default use - - 'type.googleapis.com/full.type.name' as the type URL and the - unpack - - methods only use the fully qualified type name after the - last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield - type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with - an - - additional field `@type` which contains the type URL. - Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom - JSON - - representation, that representation will be embedded adding - a field - - `value` which holds the custom JSON in addition to the - `@type` - - field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - parameters: - - name: validator_addr - description: validator_addr defines the validator address to query for. - in: path - required: true - type: string tags: - Query - /cosmos/staking/v1beta1/validators/{validator_addr}/delegations: + /cosmos/params/v1beta1/params: get: - summary: ValidatorDelegations queries delegate info for given validator. - operationId: ValidatorDelegations + summary: |- + Params queries a specific parameter of a module, given its subspace and + key. + operationId: Params responses: '200': description: A successful response. schema: type: object properties: - delegation_responses: + param: + description: param defines the queried parameter. + type: object + properties: + subspace: + type: string + key: + type: string + value: + type: string + description: >- + QueryParamsResponse is response type for the Query/Params RPC + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: type: array items: type: object properties: - delegation: - type: object - properties: - delegator_address: - type: string - description: >- - delegator_address is the bech32-encoded address of - the delegator. - validator_address: - type: string - description: >- - validator_address is the bech32-encoded address of - the validator. - shares: - type: string - description: shares define the delegation shares received. - description: >- - Delegation represents the bond with tokens held by an - account. It is - - owned by one delegator, and is associated with the - voting power of one + type_url: + type: string + value: + type: string + format: byte + parameters: + - name: subspace + description: subspace defines the module to query the parameter for. + in: query + required: false + type: string + - name: key + description: key defines the key of the parameter in the subspace. + in: query + required: false + type: string + tags: + - Query + /cosmos/params/v1beta1/subspaces: + get: + summary: >- + Subspaces queries for all registered subspaces and all keys for a + subspace. + description: 'Since: cosmos-sdk 0.46' + operationId: Subspaces + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + subspaces: + type: array + items: + type: object + properties: + subspace: + type: string + keys: + type: array + items: + type: string + description: >- + Subspace defines a parameter subspace name and all the keys + that exist for + + the subspace. + + + Since: cosmos-sdk 0.46 + description: >- + QuerySubspacesResponse defines the response types for querying for + all + + registered subspaces and all keys for a subspace. + + + Since: cosmos-sdk 0.46 + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + tags: + - Query + /cosmos/slashing/v1beta1/params: + get: + summary: Params queries the parameters of slashing module + operationId: SlashingParams + responses: + '200': + description: A successful response. + schema: + type: object + properties: + params: + type: object + properties: + signed_blocks_window: + type: string + format: int64 + min_signed_per_window: + type: string + format: byte + downtime_jail_duration: + type: string + slash_fraction_double_sign: + type: string + format: byte + slash_fraction_downtime: + type: string + format: byte + description: >- + Params represents the parameters used for by the slashing + module. + title: >- + QueryParamsResponse is the response type for the Query/Params RPC + method + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + tags: + - Query + /cosmos/slashing/v1beta1/signing_infos: + get: + summary: SigningInfos queries signing info of all validators + operationId: SigningInfos + responses: + '200': + description: A successful response. + schema: + type: object + properties: + info: + type: array + items: + type: object + properties: + address: + type: string + start_height: + type: string + format: int64 + title: >- + Height at which validator was first a candidate OR was + unjailed + index_offset: + type: string + format: int64 + description: >- + Index which is incremented each time the validator was a + bonded + + in a block and may have signed a precommit or not. This + in conjunction with the + + `SignedBlocksWindow` param determines the index in the + `MissedBlocksBitArray`. + jailed_until: + type: string + format: date-time + description: >- + Timestamp until which the validator is jailed due to + liveness downtime. + tombstoned: + type: boolean + description: >- + Whether or not a validator has been tombstoned (killed + out of validator set). It is set + + once the validator commits an equivocation or for any + other configured misbehiavor. + missed_blocks_counter: + type: string + format: int64 + description: >- + A counter kept to avoid unnecessary array reads. + + Note that `Sum(MissedBlocksBitArray)` always equals + `MissedBlocksCounter`. + description: >- + ValidatorSigningInfo defines a validator's signing info for + monitoring their + + liveness activity. 
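ValidatorSigningInfo, as described above, reports missed_blocks_counter relative to the slashing module's signed_blocks_window parameter, and both arrive as string-encoded integers in the JSON response. A small illustrative sketch of turning them into a missed-block percentage; the sample values are hypothetical:

    package main

    import (
        "fmt"
        "strconv"
    )

    // missedRatio converts the string-encoded missed_blocks_counter and
    // signed_blocks_window fields into the fraction of the window missed.
    func missedRatio(missedBlocksCounter, signedBlocksWindow string) (float64, error) {
        missed, err := strconv.ParseInt(missedBlocksCounter, 10, 64)
        if err != nil {
            return 0, err
        }
        window, err := strconv.ParseInt(signedBlocksWindow, 10, 64)
        if err != nil {
            return 0, err
        }
        if window == 0 {
            return 0, fmt.Errorf("signed_blocks_window is zero")
        }
        return float64(missed) / float64(window), nil
    }

    func main() {
        // Hypothetical values: 30 missed blocks in a 10000-block window.
        ratio, err := missedRatio("30", "10000")
        if err != nil {
            panic(err)
        }
        fmt.Printf("missed %.2f%% of the window\n", 100*ratio)
    }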
+ title: info is the signing info of all validators + pagination: + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + PageResponse is to be embedded in gRPC response messages where + the + + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: >- + QuerySigningInfosResponse is the response type for the + Query/SigningInfos RPC + + method + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + parameters: + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/slashing/v1beta1/signing_infos/{cons_address}: + get: + summary: SigningInfo queries the signing info of given cons address + operationId: SigningInfo + responses: + '200': + description: A successful response. + schema: + type: object + properties: + val_signing_info: + type: object + properties: + address: + type: string + start_height: + type: string + format: int64 + title: >- + Height at which validator was first a candidate OR was + unjailed + index_offset: + type: string + format: int64 + description: >- + Index which is incremented each time the validator was a + bonded + + in a block and may have signed a precommit or not. This in + conjunction with the + + `SignedBlocksWindow` param determines the index in the + `MissedBlocksBitArray`. + jailed_until: + type: string + format: date-time + description: >- + Timestamp until which the validator is jailed due to + liveness downtime. + tombstoned: + type: boolean + description: >- + Whether or not a validator has been tombstoned (killed out + of validator set). 
It is set + + once the validator commits an equivocation or for any + other configured misbehiavor. + missed_blocks_counter: + type: string + format: int64 + description: >- + A counter kept to avoid unnecessary array reads. + + Note that `Sum(MissedBlocksBitArray)` always equals + `MissedBlocksCounter`. + description: >- + ValidatorSigningInfo defines a validator's signing info for + monitoring their + + liveness activity. + title: >- + val_signing_info is the signing info of requested val cons + address + title: >- + QuerySigningInfoResponse is the response type for the + Query/SigningInfo RPC + + method + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + value: + type: string + format: byte + parameters: + - name: cons_address + description: cons_address is the address to query signing info of + in: path + required: true + type: string + tags: + - Query + /cosmos/staking/v1beta1/delegations/{delegator_addr}: + get: + summary: >- + DelegatorDelegations queries all delegations of a given delegator + address. + description: >- + When called from another module, this query might consume a high amount + of + + gas if the pagination field is incorrectly set. + operationId: DelegatorDelegations + responses: + '200': + description: A successful response. + schema: + type: object + properties: + delegation_responses: + type: array + items: + type: object + properties: + delegation: + type: object + properties: + delegator_address: + type: string + description: >- + delegator_address is the bech32-encoded address of + the delegator. + validator_address: + type: string + description: >- + validator_address is the bech32-encoded address of + the validator. + shares: + type: string + description: shares define the delegation shares received. + description: >- + Delegation represents the bond with tokens held by an + account. It is + + owned by one delegator, and is associated with the + voting power of one validator. balance: @@ -22624,6 +21194,9 @@ paths: balance in addition to shares which is more suitable for client responses. + description: >- + delegation_responses defines all the delegations' info of a + delegator. pagination: description: pagination defines the pagination in the response. type: object @@ -22631,9 +21204,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -22642,11 +21216,11 @@ paths: PageRequest.count_total was set, its value is undefined otherwise - title: |- - QueryValidatorDelegationsResponse is response type for the - Query/ValidatorDelegations RPC method + description: |- + QueryDelegatorDelegationsResponse is response type for the + Query/DelegatorDelegations RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -22759,7 +21333,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -22769,13 +21343,16 @@ paths: any.Unpack(foo) ... 
- Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -22797,7 +21374,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -22835,8 +21411,8 @@ paths: "value": "1.212s" } parameters: - - name: validator_addr - description: validator_addr defines the validator address to query for. + - name: delegator_addr + description: delegator_addr defines the delegator address to query for. in: path required: true type: string @@ -22886,346 +21462,196 @@ paths: in: query required: false type: boolean - format: boolean - name: pagination.reverse description: >- reverse is set to true if results are to be returned in the descending order. + + + Since: cosmos-sdk 0.43 in: query required: false type: boolean - format: boolean tags: - Query - /cosmos/staking/v1beta1/validators/{validator_addr}/delegations/{delegator_addr}: + /cosmos/staking/v1beta1/delegators/{delegator_addr}/redelegations: get: - summary: Delegation queries delegate info for given validator delegator pair. - operationId: Delegation + summary: Redelegations queries redelegations of given address. + description: >- + When called from another module, this query might consume a high amount + of + + gas if the pagination field is incorrectly set. + operationId: Redelegations responses: '200': description: A successful response. schema: type: object properties: - delegation_response: - type: object - properties: - delegation: - type: object - properties: - delegator_address: - type: string - description: >- - delegator_address is the bech32-encoded address of the - delegator. - validator_address: - type: string - description: >- - validator_address is the bech32-encoded address of the - validator. - shares: - type: string - description: shares define the delegation shares received. - description: >- - Delegation represents the bond with tokens held by an - account. It is - - owned by one delegator, and is associated with the voting - power of one - - validator. - balance: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the - custom method - - signatures required by gogoproto. - description: >- - DelegationResponse is equivalent to Delegation except that it - contains a - - balance in addition to shares which is more suitable for - client responses. - description: >- - QueryDelegationResponse is response type for the Query/Delegation - RPC method. - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: + redelegation_responses: type: array items: type: object properties: - type_url: - type: string + redelegation: + type: object + properties: + delegator_address: + type: string + description: >- + delegator_address is the bech32-encoded address of + the delegator. + validator_src_address: + type: string + description: >- + validator_src_address is the validator redelegation + source operator address. + validator_dst_address: + type: string + description: >- + validator_dst_address is the validator redelegation + destination operator address. 
+ entries: + type: array + items: + type: object + properties: + creation_height: + type: string + format: int64 + description: >- + creation_height defines the height which the + redelegation took place. + completion_time: + type: string + format: date-time + description: >- + completion_time defines the unix time for + redelegation completion. + initial_balance: + type: string + description: >- + initial_balance defines the initial balance + when redelegation started. + shares_dst: + type: string + description: >- + shares_dst is the amount of + destination-validator shares created by + redelegation. + unbonding_id: + type: string + format: uint64 + title: >- + Incrementing id that uniquely identifies this + entry + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + Strictly positive if this entry's unbonding + has been stopped by external modules + description: >- + RedelegationEntry defines a redelegation object + with relevant metadata. + description: entries are the redelegation entries. description: >- - A URL/resource name that uniquely identifies the type of - the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's path - must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in - a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary - all types that they - - expect it to use in the context of Any. However, for - URLs which use the - - scheme `http`, `https`, or no scheme, one can optionally - set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in - the official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. + Redelegation contains the list of a particular + delegator's redelegating bonds + from a particular source validator to a particular + destination validator. + entries: + type: array + items: + type: object + properties: + redelegation_entry: + type: object + properties: + creation_height: + type: string + format: int64 + description: >- + creation_height defines the height which the + redelegation took place. + completion_time: + type: string + format: date-time + description: >- + completion_time defines the unix time for + redelegation completion. + initial_balance: + type: string + description: >- + initial_balance defines the initial balance + when redelegation started. + shares_dst: + type: string + description: >- + shares_dst is the amount of + destination-validator shares created by + redelegation. + unbonding_id: + type: string + format: uint64 + title: >- + Incrementing id that uniquely identifies this + entry + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + Strictly positive if this entry's unbonding + has been stopped by external modules + description: >- + RedelegationEntry defines a redelegation object + with relevant metadata. 
+ balance: + type: string + description: >- + RedelegationEntryResponse is equivalent to a + RedelegationEntry except that it - Schemes other than `http`, `https` (or the empty scheme) - might be + contains a balance in addition to shares which is more + suitable for client - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. + responses. description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values - in the form - - of utility functions or additional generated methods of the - Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } - - The pack methods provided by protobuf library will by - default use - - 'type.googleapis.com/full.type.name' as the type URL and the - unpack - - methods only use the fully qualified type name after the - last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield - type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with - an - - additional field `@type` which contains the type URL. - Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom - JSON - - representation, that representation will be embedded adding - a field - - `value` which holds the custom JSON in addition to the - `@type` + RedelegationResponse is equivalent to a Redelegation except + that its entries - field. Example (for message [google.protobuf.Duration][]): + contain a balance in addition to shares which is more + suitable for client - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - parameters: - - name: validator_addr - description: validator_addr defines the validator address to query for. - in: path - required: true - type: string - - name: delegator_addr - description: delegator_addr defines the delegator address to query for. - in: path - required: true - type: string - tags: - - Query - /cosmos/staking/v1beta1/validators/{validator_addr}/delegations/{delegator_addr}/unbonding_delegation: - get: - summary: |- - UnbondingDelegation queries unbonding info for given validator delegator - pair. - operationId: UnbondingDelegation - responses: - '200': - description: A successful response. - schema: - type: object - properties: - unbond: + responses. + pagination: + description: pagination defines the pagination in the response. 
type: object properties: - delegator_address: + next_key: type: string - description: >- - delegator_address is the bech32-encoded address of the - delegator. - validator_address: + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: type: string - description: >- - validator_address is the bech32-encoded address of the - validator. - entries: - type: array - items: - type: object - properties: - creation_height: - type: string - format: int64 - description: >- - creation_height is the height which the unbonding - took place. - completion_time: - type: string - format: date-time - description: >- - completion_time is the unix time for unbonding - completion. - initial_balance: - type: string - description: >- - initial_balance defines the tokens initially - scheduled to receive at completion. - balance: - type: string - description: balance defines the tokens to receive at completion. - description: >- - UnbondingDelegationEntry defines an unbonding object - with relevant metadata. - description: entries are the unbonding delegation entries. - description: >- - UnbondingDelegation stores all of a single delegator's - unbonding bonds + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - for a single validator in an time-ordered list. + was set, its value is undefined otherwise description: >- - QueryDelegationResponse is response type for the - Query/UnbondingDelegation + QueryRedelegationsResponse is response type for the + Query/Redelegations RPC - RPC method. + method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -23338,7 +21764,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -23348,13 +21774,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -23376,7 +21805,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -23414,24 +21842,92 @@ paths: "value": "1.212s" } parameters: - - name: validator_addr - description: validator_addr defines the validator address to query for. - in: path - required: true - type: string - name: delegator_addr description: delegator_addr defines the delegator address to query for. in: path required: true type: string + - name: src_validator_addr + description: src_validator_addr defines the validator address to redelegate from. + in: query + required: false + type: string + - name: dst_validator_addr + description: dst_validator_addr defines the validator address to redelegate to. + in: query + required: false + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. 
Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean tags: - Query - /cosmos/staking/v1beta1/validators/{validator_addr}/unbonding_delegations: + /cosmos/staking/v1beta1/delegators/{delegator_addr}/unbonding_delegations: get: summary: >- - ValidatorUnbondingDelegations queries unbonding delegations of a - validator. - operationId: ValidatorUnbondingDelegations + DelegatorUnbondingDelegations queries all unbonding delegations of a + given + + delegator address. + description: >- + When called from another module, this query might consume a high amount + of + + gas if the pagination field is incorrectly set. + operationId: DelegatorUnbondingDelegations responses: '200': description: A successful response. @@ -23480,6 +21976,18 @@ paths: description: >- balance defines the tokens to receive at completion. + unbonding_id: + type: string + format: uint64 + title: >- + Incrementing id that uniquely identifies this + entry + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + Strictly positive if this entry's unbonding has + been stopped by external modules description: >- UnbondingDelegationEntry defines an unbonding object with relevant metadata. @@ -23496,9 +22004,10 @@ paths: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -23508,12 +22017,12 @@ paths: was set, its value is undefined otherwise description: >- - QueryValidatorUnbondingDelegationsResponse is response type for + QueryUnbondingDelegatorDelegationsResponse is response type for the - Query/ValidatorUnbondingDelegations RPC method. + Query/UnbondingDelegatorDelegations RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -23626,7 +22135,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -23636,13 +22145,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... 
} @@ -23664,7 +22176,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -23702,8 +22213,8 @@ paths: "value": "1.212s" } parameters: - - name: validator_addr - description: validator_addr defines the validator address to query for. + - name: delegator_addr + description: delegator_addr defines the delegator address to query for. in: path required: true type: string @@ -23753,307 +22264,391 @@ paths: in: query required: false type: boolean - format: boolean - name: pagination.reverse description: >- reverse is set to true if results are to be returned in the descending order. + + + Since: cosmos-sdk 0.43 in: query required: false type: boolean - format: boolean tags: - Query - /cosmos/tx/v1beta1/simulate: - post: - summary: Simulate simulates executing a transaction for estimating gas usage. - operationId: Simulate + /cosmos/staking/v1beta1/delegators/{delegator_addr}/validators: + get: + summary: |- + DelegatorValidators queries all validators info for given delegator + address. + description: >- + When called from another module, this query might consume a high amount + of + + gas if the pagination field is incorrectly set. + operationId: StakingDelegatorValidators responses: '200': description: A successful response. schema: type: object properties: - gas_info: - description: gas_info is the information about gas used in the simulation. - type: object - properties: - gas_wanted: - type: string - format: uint64 - description: >- - GasWanted is the maximum units of work we allow this tx to - perform. - gas_used: - type: string - format: uint64 - description: GasUsed is the amount of gas actually consumed. - result: - description: result is the result of the simulation. - type: object - properties: - data: - type: string - format: byte - description: >- - Data is any data returned from message or handler - execution. It MUST be - - length prefixed in order to separate data from multiple - message executions. - log: - type: string - description: >- - Log contains the log information from message or handler - execution. - events: - type: array - items: - type: object - properties: - type: - type: string - attributes: - type: array - items: - type: object - properties: - key: - type: string - format: byte - value: - type: string - format: byte - index: - type: boolean - format: boolean - description: >- - EventAttribute is a single key-value pair, - associated with an event. - description: >- - Event allows application developers to attach additional - information to - - ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx - and ResponseDeliverTx. - - Later, transactions may be queried using these events. - description: >- - Events contains a slice of Event objects that were emitted - during message - - or handler execution. - description: |- - SimulateResponse is the response type for the - Service.SimulateRPC method. - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: + validators: type: array items: type: object properties: - type_url: + operator_address: type: string description: >- - A URL/resource name that uniquely identifies the type of - the serialized + operator_address defines the address of the validator's + operator; bech encoded in JSON. 
+ consensus_pubkey: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized - protocol buffer message. This string must contain at - least + protocol buffer message. This string must contain at + least - one "/" character. The last segment of the URL's path - must represent + one "/" character. The last segment of the URL's + path must represent - the fully qualified name of the type (as in + the fully qualified name of the type (as in - `path/google.protobuf.Duration`). The name should be in - a canonical form + `path/google.protobuf.Duration`). The name should be + in a canonical form - (e.g., leading "." is not accepted). + (e.g., leading "." is not accepted). - In practice, teams usually precompile into the binary - all types that they + In practice, teams usually precompile into the + binary all types that they - expect it to use in the context of Any. However, for - URLs which use the + expect it to use in the context of Any. However, for + URLs which use the - scheme `http`, `https`, or no scheme, one can optionally - set up a type + scheme `http`, `https`, or no scheme, one can + optionally set up a type - server that maps type URLs to message definitions as - follows: + server that maps type URLs to message definitions as + follows: - * If no scheme is provided, `https` is assumed. + * If no scheme is provided, `https` is assumed. - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - Note: this functionality is not currently available in - the official + Note: this functionality is not currently available + in the official - protobuf release, and it is not used for type URLs - beginning with + protobuf release, and it is not used for type URLs + beginning with - type.googleapis.com. + type.googleapis.com. - Schemes other than `http`, `https` (or the empty scheme) - might be + Schemes other than `http`, `https` (or the empty + scheme) might be - used with implementation specific semantics. - value: - type: string - format: byte + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a + `Any` contains an arbitrary serialized protocol buffer + message along with a - URL that describes the type of the serialized message. + URL that describes the type of the serialized message. 
- Protobuf library provides support to pack/unpack Any values - in the form + Protobuf library provides support to pack/unpack Any + values in the form - of utility functions or additional generated methods of the - Any type. + of utility functions or additional generated methods of + the Any type. - Example 1: Pack and unpack a message in C++. + Example 1: Pack and unpack a message in C++. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - Example 2: Pack and unpack a message in Java. + Example 2: Pack and unpack a message in Java. - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - The pack methods provided by protobuf library will by - default use + The pack methods provided by protobuf library will by + default use - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + 'type.googleapis.com/full.type.name' as the type URL and + the unpack - methods only use the fully qualified type name after the - last '/' + methods only use the fully qualified type name after the + last '/' - in the type URL, for example "foo.bar.com/x/y.z" will yield - type + in the type URL, for example "foo.bar.com/x/y.z" will + yield type - name "y.z". + name "y.z". - JSON + JSON - ==== - The JSON representation of an `Any` value uses the regular + The JSON representation of an `Any` value uses the + regular - representation of the deserialized, embedded message, with - an + representation of the deserialized, embedded message, + with an - additional field `@type` which contains the type URL. - Example: + additional field `@type` which contains the type URL. + Example: - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - If the embedded message type is well-known and has a custom - JSON + If the embedded message type is well-known and has a + custom JSON - representation, that representation will be embedded adding - a field + representation, that representation will be embedded + adding a field - `value` which holds the custom JSON in addition to the - `@type` + `value` which holds the custom JSON in addition to the + `@type` - field. Example (for message [google.protobuf.Duration][]): + field. 
Example (for message + [google.protobuf.Duration][]): - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - parameters: - - name: body - in: body - required: true - schema: - $ref: '#/definitions/cosmos.tx.v1beta1.SimulateRequest' - tags: - - Service - /cosmos/tx/v1beta1/txs: - get: - summary: GetTxsEvent fetches txs by event. - operationId: GetTxsEvent - responses: - '200': - description: A successful response. - schema: - $ref: '#/definitions/cosmos.tx.v1beta1.GetTxsEventResponse' + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + jailed: + type: boolean + description: >- + jailed defined whether the validator has been jailed + from bonded status or not. + status: + description: >- + status is the validator status + (bonded/unbonding/unbonded). + type: string + enum: + - BOND_STATUS_UNSPECIFIED + - BOND_STATUS_UNBONDED + - BOND_STATUS_UNBONDING + - BOND_STATUS_BONDED + default: BOND_STATUS_UNSPECIFIED + tokens: + type: string + description: >- + tokens define the delegated tokens (incl. + self-delegation). + delegator_shares: + type: string + description: >- + delegator_shares defines total shares issued to a + validator's delegators. + description: + description: >- + description defines the description terms for the + validator. + type: object + properties: + moniker: + type: string + description: >- + moniker defines a human-readable name for the + validator. + identity: + type: string + description: >- + identity defines an optional identity signature (ex. + UPort or Keybase). + website: + type: string + description: website defines an optional website link. + security_contact: + type: string + description: >- + security_contact defines an optional email for + security contact. + details: + type: string + description: details define other optional details. + unbonding_height: + type: string + format: int64 + description: >- + unbonding_height defines, if unbonding, the height at + which this validator has begun unbonding. + unbonding_time: + type: string + format: date-time + description: >- + unbonding_time defines, if unbonding, the min time for + the validator to complete unbonding. + commission: + description: commission defines the commission parameters. + type: object + properties: + commission_rates: + description: >- + commission_rates defines the initial commission + rates to be used for creating a validator. + type: object + properties: + rate: + type: string + description: >- + rate is the commission rate charged to + delegators, as a fraction. + max_rate: + type: string + description: >- + max_rate defines the maximum commission rate + which validator can ever charge, as a fraction. + max_change_rate: + type: string + description: >- + max_change_rate defines the maximum daily + increase of the validator commission, as a + fraction. + update_time: + type: string + format: date-time + description: >- + update_time is the last time the commission rate was + changed. + min_self_delegation: + type: string + description: >- + min_self_delegation is the validator's self declared + minimum self delegation. 
+ + + Since: cosmos-sdk 0.46 + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + strictly positive if this validator's unbonding has been + stopped by external modules + unbonding_ids: + type: array + items: + type: string + format: uint64 + title: >- + list of unbonding ids, each uniquely identifing an + unbonding of this validator + description: >- + Validator defines a validator, together with the total + amount of the + + Validator's bond shares and their exchange rate to coins. + Slashing results in + + a decrease in the exchange rate, allowing correct + calculation of future + + undelegations without iterating over delegators. When coins + are delegated to + + this validator, the validator is credited with a delegation + whose number of + + bond shares is based on the amount of coins delegated + divided by the current + + exchange rate. Voting power can be calculated as total + bonded shares + + multiplied by exchange rate. + description: validators defines the validators' info of a delegator. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + QueryDelegatorValidatorsResponse is response type for the + Query/DelegatorValidators RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -24166,7 +22761,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -24176,13 +22771,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -24204,7 +22802,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -24242,14 +22839,11 @@ paths: "value": "1.212s" } parameters: - - name: events - description: events is the list of transaction event type. - in: query - required: false - type: array - items: - type: string - collectionFormat: multi + - name: delegator_addr + description: delegator_addr defines the delegator address to query for. + in: path + required: true + type: string - name: pagination.key description: |- key is a value returned in PageResponse.next_key to begin @@ -24296,126 +22890,39 @@ paths: in: query required: false type: boolean - format: boolean - name: pagination.reverse description: >- reverse is set to true if results are to be returned in the descending order. + + + Since: cosmos-sdk 0.43 in: query required: false type: boolean - format: boolean - - name: order_by - description: |2- - - ORDER_BY_UNSPECIFIED: ORDER_BY_UNSPECIFIED specifies an unknown sorting order. OrderBy defaults to ASC in this case. 
- - ORDER_BY_ASC: ORDER_BY_ASC defines ascending order - - ORDER_BY_DESC: ORDER_BY_DESC defines descending order - in: query - required: false - type: string - enum: - - ORDER_BY_UNSPECIFIED - - ORDER_BY_ASC - - ORDER_BY_DESC - default: ORDER_BY_UNSPECIFIED tags: - - Service - post: - summary: BroadcastTx broadcast transaction. - operationId: BroadcastTx + - Query + /cosmos/staking/v1beta1/delegators/{delegator_addr}/validators/{validator_addr}: + get: + summary: |- + DelegatorValidator queries validator info for given delegator validator + pair. + operationId: DelegatorValidator responses: '200': description: A successful response. schema: type: object properties: - tx_response: + validator: type: object properties: - height: - type: string - format: int64 - title: The block height - txhash: - type: string - description: The transaction hash. - codespace: - type: string - title: Namespace for the Code - code: - type: integer - format: int64 - description: Response code. - data: - type: string - description: Result bytes, if any. - raw_log: + operator_address: type: string description: >- - The output of the application's logger (raw string). May - be - - non-deterministic. - logs: - type: array - items: - type: object - properties: - msg_index: - type: integer - format: int64 - log: - type: string - events: - type: array - items: - type: object - properties: - type: - type: string - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - description: >- - Attribute defines an attribute wrapper where - the key and value are - - strings instead of raw bytes. - description: >- - StringEvent defines en Event object wrapper where - all the attributes - - contain key/value pairs that are strings instead - of raw bytes. - description: >- - Events contains a slice of Event objects that were - emitted during some - - execution. - description: >- - ABCIMessageLog defines a structure containing an indexed - tx ABCI message log. - description: >- - The output of the application's logger (typed). May be - non-deterministic. - info: - type: string - description: Additional information. May be non-deterministic. - gas_wanted: - type: string - format: int64 - description: Amount of gas requested for transaction. - gas_used: - type: string - format: int64 - description: Amount of gas consumed by transaction. - tx: + operator_address defines the address of the validator's + operator; bech encoded in JSON. + consensus_pubkey: type: object properties: type_url: @@ -24515,7 +23022,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -24525,13 +23032,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -24553,7 +23063,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -24590,26 +23099,152 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - timestamp: + jailed: + type: boolean + description: >- + jailed defined whether the validator has been jailed from + bonded status or not. 
+ status: + description: >- + status is the validator status + (bonded/unbonding/unbonded). + type: string + enum: + - BOND_STATUS_UNSPECIFIED + - BOND_STATUS_UNBONDED + - BOND_STATUS_UNBONDING + - BOND_STATUS_BONDED + default: BOND_STATUS_UNSPECIFIED + tokens: type: string description: >- - Time of the previous block. For heights > 1, it's the - weighted median of + tokens define the delegated tokens (incl. + self-delegation). + delegator_shares: + type: string + description: >- + delegator_shares defines total shares issued to a + validator's delegators. + description: + description: >- + description defines the description terms for the + validator. + type: object + properties: + moniker: + type: string + description: >- + moniker defines a human-readable name for the + validator. + identity: + type: string + description: >- + identity defines an optional identity signature (ex. + UPort or Keybase). + website: + type: string + description: website defines an optional website link. + security_contact: + type: string + description: >- + security_contact defines an optional email for + security contact. + details: + type: string + description: details define other optional details. + unbonding_height: + type: string + format: int64 + description: >- + unbonding_height defines, if unbonding, the height at + which this validator has begun unbonding. + unbonding_time: + type: string + format: date-time + description: >- + unbonding_time defines, if unbonding, the min time for the + validator to complete unbonding. + commission: + description: commission defines the commission parameters. + type: object + properties: + commission_rates: + description: >- + commission_rates defines the initial commission rates + to be used for creating a validator. + type: object + properties: + rate: + type: string + description: >- + rate is the commission rate charged to delegators, + as a fraction. + max_rate: + type: string + description: >- + max_rate defines the maximum commission rate which + validator can ever charge, as a fraction. + max_change_rate: + type: string + description: >- + max_change_rate defines the maximum daily increase + of the validator commission, as a fraction. + update_time: + type: string + format: date-time + description: >- + update_time is the last time the commission rate was + changed. + min_self_delegation: + type: string + description: >- + min_self_delegation is the validator's self declared + minimum self delegation. - the timestamps of the valid votes in the block.LastCommit. - For height == 1, - it's genesis time. + Since: cosmos-sdk 0.46 + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + strictly positive if this validator's unbonding has been + stopped by external modules + unbonding_ids: + type: array + items: + type: string + format: uint64 + title: >- + list of unbonding ids, each uniquely identifing an + unbonding of this validator description: >- - TxResponse defines a structure containing relevant tx data and - metadata. The + Validator defines a validator, together with the total amount + of the - tags are stringified and the log is JSON decoded. + Validator's bond shares and their exchange rate to coins. + Slashing results in + + a decrease in the exchange rate, allowing correct calculation + of future + + undelegations without iterating over delegators. 
When coins + are delegated to + + this validator, the validator is credited with a delegation + whose number of + + bond shares is based on the amount of coins delegated divided + by the current + + exchange rate. Voting power can be calculated as total bonded + shares + + multiplied by exchange rate. description: |- - BroadcastTxResponse is the response type for the - Service.BroadcastTx method. + QueryDelegatorValidatorResponse response type for the + Query/DelegatorValidator RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -24722,7 +23357,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -24732,13 +23367,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -24760,7 +23398,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -24798,155 +23435,551 @@ paths: "value": "1.212s" } parameters: - - name: body - in: body + - name: delegator_addr + description: delegator_addr defines the delegator address to query for. + in: path required: true - schema: - type: object - properties: - tx_bytes: - type: string - format: byte - description: tx_bytes is the raw transaction. - mode: - type: string - enum: - - BROADCAST_MODE_UNSPECIFIED - - BROADCAST_MODE_BLOCK - - BROADCAST_MODE_SYNC - - BROADCAST_MODE_ASYNC - default: BROADCAST_MODE_UNSPECIFIED - description: >- - BroadcastMode specifies the broadcast mode for the - TxService.Broadcast RPC method. - - - BROADCAST_MODE_UNSPECIFIED: zero-value for mode ordering - - BROADCAST_MODE_BLOCK: BROADCAST_MODE_BLOCK defines a tx broadcasting mode where the client waits for - the tx to be committed in a block. - - BROADCAST_MODE_SYNC: BROADCAST_MODE_SYNC defines a tx broadcasting mode where the client waits for - a CheckTx execution response only. - - BROADCAST_MODE_ASYNC: BROADCAST_MODE_ASYNC defines a tx broadcasting mode where the client returns - immediately. - description: >- - BroadcastTxRequest is the request type for the - Service.BroadcastTxRequest - - RPC method. + type: string + - name: validator_addr + description: validator_addr defines the validator address to query for. + in: path + required: true + type: string tags: - - Service - /cosmos/tx/v1beta1/txs/{hash}: + - Query + /cosmos/staking/v1beta1/historical_info/{height}: get: - summary: GetTx fetches a tx by hash. - operationId: GetTx + summary: HistoricalInfo queries the historical info for given height. + operationId: HistoricalInfo responses: '200': description: A successful response. - schema: - $ref: '#/definitions/cosmos.tx.v1beta1.GetTxResponse' - default: - description: An unexpected error response schema: type: object properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized + hist: + description: hist defines the historical info at the given height. 
+ type: object + properties: + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing + a block in the blockchain, - protocol buffer message. This string must contain at - least + including all blockchain data structures and the rules + of the application's - one "/" character. The last segment of the URL's path - must represent + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + title: prev block info + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + valset: + type: array + items: + type: object + properties: + operator_address: + type: string + description: >- + operator_address defines the address of the + validator's operator; bech encoded in JSON. + consensus_pubkey: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized - the fully qualified name of the type (as in + protocol buffer message. This string must + contain at least - `path/google.protobuf.Duration`). The name should be in - a canonical form + one "/" character. The last segment of the URL's + path must represent - (e.g., leading "." is not accepted). + the fully qualified name of the type (as in + `path/google.protobuf.Duration`). The name + should be in a canonical form - In practice, teams usually precompile into the binary - all types that they + (e.g., leading "." is not accepted). - expect it to use in the context of Any. However, for - URLs which use the - scheme `http`, `https`, or no scheme, one can optionally - set up a type + In practice, teams usually precompile into the + binary all types that they - server that maps type URLs to message definitions as - follows: + expect it to use in the context of Any. However, + for URLs which use the + scheme `http`, `https`, or no scheme, one can + optionally set up a type - * If no scheme is provided, `https` is assumed. + server that maps type URLs to message + definitions as follows: - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - Note: this functionality is not currently available in - the official + * If no scheme is provided, `https` is assumed. 
- protobuf release, and it is not used for type URLs - beginning with + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup + results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - type.googleapis.com. + Note: this functionality is not currently + available in the official + protobuf release, and it is not used for type + URLs beginning with - Schemes other than `http`, `https` (or the empty scheme) - might be + type.googleapis.com. - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - URL that describes the type of the serialized message. + Schemes other than `http`, `https` (or the empty + scheme) might be + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of + the above specified type. + description: >- + `Any` contains an arbitrary serialized protocol + buffer message along with a - Protobuf library provides support to pack/unpack Any values - in the form + URL that describes the type of the serialized + message. - of utility functions or additional generated methods of the - Any type. + Protobuf library provides support to pack/unpack Any + values in the form - Example 1: Pack and unpack a message in C++. + of utility functions or additional generated methods + of the Any type. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will + by default use + + 'type.googleapis.com/full.type.name' as the type URL + and the unpack + + methods only use the fully qualified type name after + the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" + will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the + regular + + representation of the deserialized, embedded + message, with an + + additional field `@type` which contains the type + URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to + the `@type` + + field. 
Example (for message + [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + jailed: + type: boolean + description: >- + jailed defined whether the validator has been jailed + from bonded status or not. + status: + description: >- + status is the validator status + (bonded/unbonding/unbonded). + type: string + enum: + - BOND_STATUS_UNSPECIFIED + - BOND_STATUS_UNBONDED + - BOND_STATUS_UNBONDING + - BOND_STATUS_BONDED + default: BOND_STATUS_UNSPECIFIED + tokens: + type: string + description: >- + tokens define the delegated tokens (incl. + self-delegation). + delegator_shares: + type: string + description: >- + delegator_shares defines total shares issued to a + validator's delegators. + description: + description: >- + description defines the description terms for the + validator. + type: object + properties: + moniker: + type: string + description: >- + moniker defines a human-readable name for the + validator. + identity: + type: string + description: >- + identity defines an optional identity signature + (ex. UPort or Keybase). + website: + type: string + description: website defines an optional website link. + security_contact: + type: string + description: >- + security_contact defines an optional email for + security contact. + details: + type: string + description: details define other optional details. + unbonding_height: + type: string + format: int64 + description: >- + unbonding_height defines, if unbonding, the height + at which this validator has begun unbonding. + unbonding_time: + type: string + format: date-time + description: >- + unbonding_time defines, if unbonding, the min time + for the validator to complete unbonding. + commission: + description: commission defines the commission parameters. + type: object + properties: + commission_rates: + description: >- + commission_rates defines the initial commission + rates to be used for creating a validator. + type: object + properties: + rate: + type: string + description: >- + rate is the commission rate charged to + delegators, as a fraction. + max_rate: + type: string + description: >- + max_rate defines the maximum commission rate + which validator can ever charge, as a + fraction. + max_change_rate: + type: string + description: >- + max_change_rate defines the maximum daily + increase of the validator commission, as a + fraction. + update_time: + type: string + format: date-time + description: >- + update_time is the last time the commission rate + was changed. + min_self_delegation: + type: string + description: >- + min_self_delegation is the validator's self declared + minimum self delegation. + + + Since: cosmos-sdk 0.46 + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + strictly positive if this validator's unbonding has + been stopped by external modules + unbonding_ids: + type: array + items: + type: string + format: uint64 + title: >- + list of unbonding ids, each uniquely identifing an + unbonding of this validator + description: >- + Validator defines a validator, together with the total + amount of the + + Validator's bond shares and their exchange rate to + coins. Slashing results in + + a decrease in the exchange rate, allowing correct + calculation of future + + undelegations without iterating over delegators. 
When + coins are delegated to + + this validator, the validator is credited with a + delegation whose number of + + bond shares is based on the amount of coins delegated + divided by the current + + exchange rate. Voting power can be calculated as total + bonded shares + + multiplied by exchange rate. + description: >- + QueryHistoricalInfoResponse is response type for the + Query/HistoricalInfo RPC + + method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } Example 2: Pack and unpack a message in Java. @@ -24957,7 +23990,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -24967,13 +24000,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... 
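The hunk above adds the staking Query/Params endpoint at /cosmos/staking/v1beta1/params. Assuming a node whose REST API is reachable (the SDK's conventional default port of 1317 is used below; adjust the base URL for your deployment), the documented QueryParamsResponse shape can be consumed roughly like this:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
        "net/http"
    )

    // stakingParams mirrors a subset of the QueryParamsResponse schema above.
    type stakingParams struct {
        Params struct {
            UnbondingTime string `json:"unbonding_time"`
            MaxValidators int64  `json:"max_validators"`
            BondDenom     string `json:"bond_denom"`
        } `json:"params"`
    }

    func main() {
        // Base URL is an assumption; point it at your node's API endpoint.
        resp, err := http.Get("http://localhost:1317/cosmos/staking/v1beta1/params")
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        var out stakingParams
        if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("bond denom %s, max validators %d, unbonding time %s\n",
            out.Params.BondDenom, out.Params.MaxValidators, out.Params.UnbondingTime)
    }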
} @@ -24995,7 +24031,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -25033,34 +24068,60 @@ paths: "value": "1.212s" } parameters: - - name: hash - description: hash is the tx hash to query, encoded as a hex string. + - name: height + description: height defines at which height to query the historical info. in: path required: true type: string + format: int64 tags: - - Service - /cosmos/upgrade/v1beta1/applied_plan/{name}: + - Query + /cosmos/staking/v1beta1/params: get: - summary: AppliedPlan queries a previously applied upgrade plan by its name. - operationId: AppliedPlan + summary: Parameters queries the staking parameters. + operationId: StakingParams responses: '200': description: A successful response. schema: type: object properties: - height: - type: string - format: int64 - description: height is the block height at which the plan was applied. + params: + description: params holds all the parameters of this module. + type: object + properties: + unbonding_time: + type: string + description: unbonding_time is the time duration of unbonding. + max_validators: + type: integer + format: int64 + description: max_validators is the maximum number of validators. + max_entries: + type: integer + format: int64 + description: >- + max_entries is the max entries for either unbonding + delegation or redelegation (per pair/trio). + historical_entries: + type: integer + format: int64 + description: >- + historical_entries is the number of historical entries to + persist. + bond_denom: + type: string + description: bond_denom defines the bondable coin denomination. + min_commission_rate: + type: string + title: >- + min_commission_rate is the chain-wide minimum commission + rate that a validator can charge their delegators description: >- - QueryAppliedPlanResponse is the response type for the - Query/AppliedPlan RPC - + QueryParamsResponse is response type for the Query/Params RPC method. default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -25173,7 +24234,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -25183,13 +24244,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -25211,7 +24275,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -25248,305 +24311,78 @@ paths: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - parameters: - - name: name - description: name is the name of the applied plan to query for. - in: path - required: true - type: string tags: - Query - /cosmos/upgrade/v1beta1/current_plan: + /cosmos/staking/v1beta1/pool: get: - summary: CurrentPlan queries the current upgrade plan. - operationId: CurrentPlan + summary: Pool queries the pool info. + operationId: Pool responses: '200': description: A successful response. schema: type: object properties: - plan: - description: plan is the current upgrade plan. + pool: + description: pool defines the pool info. 
type: object properties: - name: + not_bonded_tokens: type: string - description: >- - Sets the name for the upgrade. This name will be used by - the upgraded - - version of the software to apply any special "on-upgrade" - commands during - - the first BeginBlock method after the upgrade is applied. - It is also used - - to detect whether a software version can handle a given - upgrade. If no - - upgrade handler with this name has been set in the - software, it will be - - assumed that the software is out-of-date when the upgrade - Time or Height is - - reached and the software will exit. - time: + bonded_tokens: type: string - format: date-time - description: >- - Deprecated: Time based upgrades have been deprecated. Time - based upgrade logic + description: QueryPoolResponse is response type for the Query/Pool RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized - has been removed from the SDK. + protocol buffer message. This string must contain at + least - If this field is not empty, an error will be thrown. - height: - type: string - format: int64 - description: |- - The height at which the upgrade must be performed. - Only used if Time is not set. - info: - type: string - title: >- - Any application specific upgrade info to be included - on-chain + one "/" character. The last segment of the URL's path + must represent - such as a git commit that validators could automatically - upgrade to - upgraded_client_state: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type - of the serialized + the fully qualified name of the type (as in - protocol buffer message. This string must contain at - least + `path/google.protobuf.Duration`). The name should be in + a canonical form - one "/" character. The last segment of the URL's path - must represent + (e.g., leading "." is not accepted). - the fully qualified name of the type (as in - `path/google.protobuf.Duration`). The name should be - in a canonical form + In practice, teams usually precompile into the binary + all types that they - (e.g., leading "." is not accepted). + expect it to use in the context of Any. However, for + URLs which use the + scheme `http`, `https`, or no scheme, one can optionally + set up a type - In practice, teams usually precompile into the binary - all types that they + server that maps type URLs to message definitions as + follows: - expect it to use in the context of Any. However, for - URLs which use the - scheme `http`, `https`, or no scheme, one can - optionally set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results - based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) 
- - Note: this functionality is not currently available in - the official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty - scheme) might be - - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the - above specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any - values in the form - - of utility functions or additional generated methods of - the Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } - - The pack methods provided by protobuf library will by - default use - - 'type.googleapis.com/full.type.name' as the type URL and - the unpack - - methods only use the fully qualified type name after the - last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will - yield type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with - an - - additional field `@type` which contains the type URL. - Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a - custom JSON - - representation, that representation will be embedded - adding a field - - `value` which holds the custom JSON in addition to the - `@type` - - field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - description: >- - QueryCurrentPlanResponse is the response type for the - Query/CurrentPlan RPC - - method. - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's path - must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in - a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary - all types that they - - expect it to use in the context of Any. 
However, for - URLs which use the - - scheme `http`, `https`, or no scheme, one can optionally - set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. + * If no scheme is provided, `https` is assumed. * An HTTP GET on the URL must yield a [google.protobuf.Type][] @@ -25610,7 +24446,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -25620,13 +24456,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -25648,7 +24487,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -25687,670 +24525,364 @@ paths: } tags: - Query - /cosmos/upgrade/v1beta1/module_versions: + /cosmos/staking/v1beta1/validators: get: - summary: ModuleVersions queries the list of module versions from state. - operationId: ModuleVersions + summary: Validators queries all validators that match the given status. + description: >- + When called from another module, this query might consume a high amount + of + + gas if the pagination field is incorrectly set. + operationId: Validators responses: '200': description: A successful response. schema: type: object properties: - module_versions: - type: array - items: - type: object - properties: - name: - type: string - title: name of the app module - version: - type: string - format: uint64 - title: consensus version of the app module - description: ModuleVersion specifies a module and its consensus version. - description: >- - module_versions is a list of module names with their consensus - versions. - description: >- - QueryModuleVersionsResponse is the response type for the - Query/ModuleVersions - - RPC method. - default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: + validators: type: array items: type: object properties: - type_url: + operator_address: type: string description: >- - A URL/resource name that uniquely identifies the type of - the serialized + operator_address defines the address of the validator's + operator; bech encoded in JSON. + consensus_pubkey: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized - protocol buffer message. This string must contain at - least + protocol buffer message. This string must contain at + least - one "/" character. The last segment of the URL's path - must represent + one "/" character. The last segment of the URL's + path must represent - the fully qualified name of the type (as in + the fully qualified name of the type (as in - `path/google.protobuf.Duration`). The name should be in - a canonical form + `path/google.protobuf.Duration`). The name should be + in a canonical form - (e.g., leading "." is not accepted). + (e.g., leading "." is not accepted). - In practice, teams usually precompile into the binary - all types that they + In practice, teams usually precompile into the + binary all types that they - expect it to use in the context of Any. 
However, for - URLs which use the + expect it to use in the context of Any. However, for + URLs which use the - scheme `http`, `https`, or no scheme, one can optionally - set up a type + scheme `http`, `https`, or no scheme, one can + optionally set up a type - server that maps type URLs to message definitions as - follows: + server that maps type URLs to message definitions as + follows: - * If no scheme is provided, `https` is assumed. + * If no scheme is provided, `https` is assumed. - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - Note: this functionality is not currently available in - the official + Note: this functionality is not currently available + in the official - protobuf release, and it is not used for type URLs - beginning with + protobuf release, and it is not used for type URLs + beginning with - type.googleapis.com. + type.googleapis.com. - Schemes other than `http`, `https` (or the empty scheme) - might be + Schemes other than `http`, `https` (or the empty + scheme) might be - used with implementation specific semantics. - value: - type: string - format: byte + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. + `Any` contains an arbitrary serialized protocol buffer + message along with a + URL that describes the type of the serialized message. - Protobuf library provides support to pack/unpack Any values - in the form - of utility functions or additional generated methods of the - Any type. + Protobuf library provides support to pack/unpack Any + values in the form + of utility functions or additional generated methods of + the Any type. - Example 1: Pack and unpack a message in C++. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + Example 1: Pack and unpack a message in C++. - Example 2: Pack and unpack a message in Java. + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + Example 2: Pack and unpack a message in Java. - Example 3: Pack and unpack a message in Python. + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + Example 3: Pack and unpack a message in Python. - Example 4: Pack and unpack a message in Go + foo = Foo(...) 
+ any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + Example 4: Pack and unpack a message in Go - The pack methods provided by protobuf library will by - default use + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + The pack methods provided by protobuf library will by + default use - methods only use the fully qualified type name after the - last '/' + 'type.googleapis.com/full.type.name' as the type URL and + the unpack - in the type URL, for example "foo.bar.com/x/y.z" will yield - type + methods only use the fully qualified type name after the + last '/' - name "y.z". + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + name "y.z". - JSON - ==== + JSON - The JSON representation of an `Any` value uses the regular - representation of the deserialized, embedded message, with - an + The JSON representation of an `Any` value uses the + regular - additional field `@type` which contains the type URL. - Example: + representation of the deserialized, embedded message, + with an - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + additional field `@type` which contains the type URL. + Example: - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - If the embedded message type is well-known and has a custom - JSON + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - representation, that representation will be embedded adding - a field + If the embedded message type is well-known and has a + custom JSON - `value` which holds the custom JSON in addition to the - `@type` + representation, that representation will be embedded + adding a field - field. Example (for message [google.protobuf.Duration][]): + `value` which holds the custom JSON in addition to the + `@type` - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - parameters: - - name: module_name - description: |- - module_name is a field to query a specific module - consensus version from state. Leaving this empty will - fetch the full list of module versions from state. - in: query - required: false - type: string - tags: - - Query - /cosmos/upgrade/v1beta1/upgraded_consensus_state/{last_height}: - get: - summary: |- - UpgradedConsensusState queries the consensus state that will serve - as a trusted kernel for the next version of this chain. It will only be - stored at the last height of this chain. - UpgradedConsensusState RPC not supported with legacy querier - operationId: UpgradedConsensusState - responses: - '200': - description: A successful response. - schema: - type: object - properties: - upgraded_consensus_state: - type: string - format: byte - description: >- - QueryUpgradedConsensusStateResponse is the response type for the - Query/UpgradedConsensusState + field. Example (for message + [google.protobuf.Duration][]): - RPC method. 
- default: - description: An unexpected error response - schema: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + jailed: + type: boolean + description: >- + jailed defined whether the validator has been jailed + from bonded status or not. + status: + description: >- + status is the validator status + (bonded/unbonding/unbonded). + type: string + enum: + - BOND_STATUS_UNSPECIFIED + - BOND_STATUS_UNBONDED + - BOND_STATUS_UNBONDING + - BOND_STATUS_BONDED + default: BOND_STATUS_UNSPECIFIED + tokens: type: string description: >- - A URL/resource name that uniquely identifies the type of - the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's path - must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in - a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary - all types that they - - expect it to use in the context of Any. However, for - URLs which use the - - scheme `http`, `https`, or no scheme, one can optionally - set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in - the official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty scheme) - might be - - used with implementation specific semantics. - value: + tokens define the delegated tokens (incl. + self-delegation). + delegator_shares: type: string - format: byte description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values - in the form - - of utility functions or additional generated methods of the - Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... 
- } - - The pack methods provided by protobuf library will by - default use - - 'type.googleapis.com/full.type.name' as the type URL and the - unpack - - methods only use the fully qualified type name after the - last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield - type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with - an - - additional field `@type` which contains the type URL. - Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom - JSON - - representation, that representation will be embedded adding - a field - - `value` which holds the custom JSON in addition to the - `@type` - - field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - parameters: - - name: last_height - description: |- - last height of the current chain must be sent in request - as this is the height under which next consensus state is stored - in: path - required: true - type: string - format: int64 - tags: - - Query - /cosmos/authz/v1beta1/grants: - get: - summary: Returns list of `Authorization`, granted to the grantee by the granter. - operationId: Grants - responses: - '200': - description: A successful response. - schema: - type: object - properties: - grants: - type: array - items: - type: object - properties: - authorization: + delegator_shares defines total shares issued to a + validator's delegators. + description: + description: >- + description defines the description terms for the + validator. type: object properties: - type_url: + moniker: type: string description: >- - A URL/resource name that uniquely identifies the - type of the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's - path must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be - in a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the - binary all types that they - - expect it to use in the context of Any. However, for - URLs which use the - - scheme `http`, `https`, or no scheme, one can - optionally set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results - based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available - in the official - - protobuf release, and it is not used for type URLs - beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty - scheme) might be - - used with implementation specific semantics. - value: + moniker defines a human-readable name for the + validator. + identity: type: string - format: byte description: >- - Must be a valid serialized protocol buffer of the - above specified type. 
+ identity defines an optional identity signature (ex. + UPort or Keybase). + website: + type: string + description: website defines an optional website link. + security_contact: + type: string + description: >- + security_contact defines an optional email for + security contact. + details: + type: string + description: details define other optional details. + unbonding_height: + type: string + format: int64 description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any - values in the form - - of utility functions or additional generated methods of - the Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } - - The pack methods provided by protobuf library will by - default use - - 'type.googleapis.com/full.type.name' as the type URL and - the unpack - - methods only use the fully qualified type name after the - last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will - yield type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the - regular + unbonding_height defines, if unbonding, the height at + which this validator has begun unbonding. + unbonding_time: + type: string + format: date-time + description: >- + unbonding_time defines, if unbonding, the min time for + the validator to complete unbonding. + commission: + description: commission defines the commission parameters. + type: object + properties: + commission_rates: + description: >- + commission_rates defines the initial commission + rates to be used for creating a validator. + type: object + properties: + rate: + type: string + description: >- + rate is the commission rate charged to + delegators, as a fraction. + max_rate: + type: string + description: >- + max_rate defines the maximum commission rate + which validator can ever charge, as a fraction. + max_change_rate: + type: string + description: >- + max_change_rate defines the maximum daily + increase of the validator commission, as a + fraction. + update_time: + type: string + format: date-time + description: >- + update_time is the last time the commission rate was + changed. + min_self_delegation: + type: string + description: >- + min_self_delegation is the validator's self declared + minimum self delegation. - representation of the deserialized, embedded message, - with an - additional field `@type` which contains the type URL. 
- Example: + Since: cosmos-sdk 0.46 + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + strictly positive if this validator's unbonding has been + stopped by external modules + unbonding_ids: + type: array + items: + type: string + format: uint64 + title: >- + list of unbonding ids, each uniquely identifing an + unbonding of this validator + description: >- + Validator defines a validator, together with the total + amount of the - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + Validator's bond shares and their exchange rate to coins. + Slashing results in - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + a decrease in the exchange rate, allowing correct + calculation of future - If the embedded message type is well-known and has a - custom JSON + undelegations without iterating over delegators. When coins + are delegated to - representation, that representation will be embedded - adding a field + this validator, the validator is credited with a delegation + whose number of - `value` which holds the custom JSON in addition to the - `@type` + bond shares is based on the amount of coins delegated + divided by the current - field. Example (for message - [google.protobuf.Duration][]): + exchange rate. Voting power can be calculated as total + bonded shares - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - expiration: - type: string - format: date-time - description: |- - Grant gives permissions to execute - the provide method with expiration time. - description: >- - authorizations is a list of grants granted for grantee by - granter. + multiplied by exchange rate. + description: validators contains all the queried validators. pagination: - description: pagination defines an pagination for the response. + description: pagination defines the pagination in the response. type: object properties: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -26359,11 +24891,11 @@ paths: PageRequest.count_total was set, its value is undefined otherwise - description: >- - QueryGrantsResponse is the response type for the - Query/Authorizations RPC method. + title: >- + QueryValidatorsResponse is response type for the Query/Validators + RPC method default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -26476,7 +25008,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -26486,13 +25018,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... 
} @@ -26514,7 +25049,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -26552,18 +25086,8 @@ paths: "value": "1.212s" } parameters: - - name: granter - in: query - required: false - type: string - - name: grantee - in: query - required: false - type: string - - name: msg_type_url - description: >- - Optional, msg_type_url, when set, will query only grants matching - given msg type. + - name: status + description: status enables to query for validators matching a given status. in: query required: false type: string @@ -26613,43 +25137,37 @@ paths: in: query required: false type: boolean - format: boolean - name: pagination.reverse description: >- reverse is set to true if results are to be returned in the descending order. + + + Since: cosmos-sdk 0.43 in: query required: false type: boolean - format: boolean tags: - Query - /cosmos/feegrant/v1beta1/allowance/{granter}/{grantee}: + /cosmos/staking/v1beta1/validators/{validator_addr}: get: - summary: Allowance returns fee granted to the grantee by the granter. - operationId: Allowance + summary: Validator queries validator info for given validator address. + operationId: Validator responses: '200': description: A successful response. schema: type: object properties: - allowance: - description: allowance is a allowance granted for grantee by granter. + validator: type: object properties: - granter: - type: string - description: >- - granter is the address of the user granting an allowance - of their funds. - grantee: + operator_address: type: string description: >- - grantee is the address of the user being granted an - allowance of another user's funds. - allowance: - description: allowance can be any of basic and filtered fee allowance. + operator_address defines the address of the validator's + operator; bech encoded in JSON. + consensus_pubkey: type: object properties: type_url: @@ -26716,14 +25234,262 @@ paths: description: >- Must be a valid serialized protocol buffer of the above specified type. - title: >- - Grant is stored in the KVStore to record a grant with full - context - description: >- - QueryAllowanceResponse is the response type for the - Query/Allowance RPC method. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". 
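The Validators endpoint added in this hunk accepts a status filter plus the standard pagination flags. A rough Go sketch of a filtered query; the base URL and the status / pagination.limit query-parameter names (the usual grpc-gateway mapping of the parameters listed above) are assumptions to adapt to your setup:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
        "net/http"
        "net/url"
    )

    // validatorsPage mirrors the parts of QueryValidatorsResponse used here.
    type validatorsPage struct {
        Validators []struct {
            OperatorAddress string `json:"operator_address"`
            Status          string `json:"status"`
            Tokens          string `json:"tokens"`
        } `json:"validators"`
        Pagination struct {
            NextKey string `json:"next_key"`
            Total   string `json:"total"`
        } `json:"pagination"`
    }

    func main() {
        q := url.Values{}
        q.Set("status", "BOND_STATUS_BONDED") // one of the documented BondStatus values
        q.Set("pagination.limit", "10")

        resp, err := http.Get("http://localhost:1317/cosmos/staking/v1beta1/validators?" + q.Encode())
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        var page validatorsPage
        if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
            log.Fatal(err)
        }
        for _, v := range page.Validators {
            fmt.Println(v.OperatorAddress, v.Status, v.Tokens)
        }
        fmt.Println("next_key:", page.Pagination.NextKey)
    }

To fetch further pages, the returned pagination.next_key would be passed back as the pagination.key query parameter, per the PageRequest/PageResponse definitions later in the file.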
+ + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + jailed: + type: boolean + description: >- + jailed defined whether the validator has been jailed from + bonded status or not. + status: + description: >- + status is the validator status + (bonded/unbonding/unbonded). + type: string + enum: + - BOND_STATUS_UNSPECIFIED + - BOND_STATUS_UNBONDED + - BOND_STATUS_UNBONDING + - BOND_STATUS_BONDED + default: BOND_STATUS_UNSPECIFIED + tokens: + type: string + description: >- + tokens define the delegated tokens (incl. + self-delegation). + delegator_shares: + type: string + description: >- + delegator_shares defines total shares issued to a + validator's delegators. + description: + description: >- + description defines the description terms for the + validator. + type: object + properties: + moniker: + type: string + description: >- + moniker defines a human-readable name for the + validator. + identity: + type: string + description: >- + identity defines an optional identity signature (ex. + UPort or Keybase). + website: + type: string + description: website defines an optional website link. + security_contact: + type: string + description: >- + security_contact defines an optional email for + security contact. + details: + type: string + description: details define other optional details. + unbonding_height: + type: string + format: int64 + description: >- + unbonding_height defines, if unbonding, the height at + which this validator has begun unbonding. + unbonding_time: + type: string + format: date-time + description: >- + unbonding_time defines, if unbonding, the min time for the + validator to complete unbonding. + commission: + description: commission defines the commission parameters. + type: object + properties: + commission_rates: + description: >- + commission_rates defines the initial commission rates + to be used for creating a validator. + type: object + properties: + rate: + type: string + description: >- + rate is the commission rate charged to delegators, + as a fraction. + max_rate: + type: string + description: >- + max_rate defines the maximum commission rate which + validator can ever charge, as a fraction. + max_change_rate: + type: string + description: >- + max_change_rate defines the maximum daily increase + of the validator commission, as a fraction. + update_time: + type: string + format: date-time + description: >- + update_time is the last time the commission rate was + changed. + min_self_delegation: + type: string + description: >- + min_self_delegation is the validator's self declared + minimum self delegation. 
+ + + Since: cosmos-sdk 0.46 + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + strictly positive if this validator's unbonding has been + stopped by external modules + unbonding_ids: + type: array + items: + type: string + format: uint64 + title: >- + list of unbonding ids, each uniquely identifing an + unbonding of this validator + description: >- + Validator defines a validator, together with the total amount + of the + + Validator's bond shares and their exchange rate to coins. + Slashing results in + + a decrease in the exchange rate, allowing correct calculation + of future + + undelegations without iterating over delegators. When coins + are delegated to + + this validator, the validator is credited with a delegation + whose number of + + bond shares is based on the amount of coins delegated divided + by the current + + exchange rate. Voting power can be calculated as total bonded + shares + + multiplied by exchange rate. + title: >- + QueryValidatorResponse is response type for the Query/Validator + RPC method default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -26836,7 +25602,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -26846,13 +25612,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -26874,7 +25643,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -26912,131 +25680,89 @@ paths: "value": "1.212s" } parameters: - - name: granter - description: >- - granter is the address of the user granting an allowance of their - funds. - in: path - required: true - type: string - - name: grantee - description: >- - grantee is the address of the user being granted an allowance of - another user's funds. + - name: validator_addr + description: validator_addr defines the validator address to query for. in: path required: true type: string tags: - Query - /cosmos/feegrant/v1beta1/allowances/{grantee}: + /cosmos/staking/v1beta1/validators/{validator_addr}/delegations: get: - summary: Allowances returns all the grants for address. - operationId: Allowances + summary: ValidatorDelegations queries delegate info for given validator. + description: >- + When called from another module, this query might consume a high amount + of + + gas if the pagination field is incorrectly set. + operationId: ValidatorDelegations responses: '200': description: A successful response. schema: type: object properties: - allowances: + delegation_responses: type: array items: type: object properties: - granter: - type: string - description: >- - granter is the address of the user granting an allowance - of their funds. - grantee: - type: string - description: >- - grantee is the address of the user being granted an - allowance of another user's funds. - allowance: - description: >- - allowance can be any of basic and filtered fee - allowance. 
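The Validator comment above spells out the shares model: delegated coins are converted to shares at the current exchange rate (tokens per delegator share), slashing only lowers that rate, and voting power is total bonded shares multiplied by the rate. A toy arithmetic sketch of that relationship, using big.Rat purely to stay self-contained (the modules themselves use fixed-point Dec values):

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        tokens := big.NewRat(1_000_000, 1) // validator's bonded tokens
        shares := big.NewRat(1_250_000, 1) // total delegator_shares

        // Exchange rate: tokens per share. It only drops when the validator
        // is slashed, which is why shares stay valid without iterating
        // over delegators.
        rate := new(big.Rat).Quo(tokens, shares)

        // Delegating 100000 coins issues coins / rate new shares.
        delegated := big.NewRat(100_000, 1)
        newShares := new(big.Rat).Quo(delegated, rate)
        fmt.Println("shares issued:", newShares.FloatString(6))

        // Token value (and voting-power contribution) = shares * rate.
        value := new(big.Rat).Mul(newShares, rate)
        fmt.Println("token value:  ", value.FloatString(6))
    }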
+ delegation: type: object properties: - type_url: + delegator_address: type: string description: >- - A URL/resource name that uniquely identifies the - type of the serialized - - protocol buffer message. This string must contain at - least - - one "/" character. The last segment of the URL's - path must represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be - in a canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the - binary all types that they - - expect it to use in the context of Any. However, for - URLs which use the - - scheme `http`, `https`, or no scheme, one can - optionally set up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results - based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + delegator_address is the bech32-encoded address of + the delegator. + validator_address: + type: string + description: >- + validator_address is the bech32-encoded address of + the validator. + shares: + type: string + description: shares define the delegation shares received. + description: >- + Delegation represents the bond with tokens held by an + account. It is - Note: this functionality is not currently available - in the official + owned by one delegator, and is associated with the + voting power of one - protobuf release, and it is not used for type URLs - beginning with + validator. + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. - type.googleapis.com. + NOTE: The amount field is an Int which implements the + custom method - Schemes other than `http`, `https` (or the empty - scheme) might be + signatures required by gogoproto. + description: >- + DelegationResponse is equivalent to Delegation except that + it contains a - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the - above specified type. - title: >- - Grant is stored in the KVStore to record a grant with full - context - description: allowances are allowance's granted for grantee by granter. + balance in addition to shares which is more suitable for + client responses. pagination: - description: pagination defines an pagination for the response. + description: pagination defines the pagination in the response. type: object properties: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -27045,11 +25771,11 @@ paths: PageRequest.count_total was set, its value is undefined otherwise - description: >- - QueryAllowancesResponse is the response type for the - Query/Allowances RPC method. 
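The DelegationResponse documented above pairs the raw Delegation with a balance Coin whose amount is serialized as a string, since it is an arbitrary-precision integer. A small decoding sketch; the addresses are placeholders and uakt is only used as an example denomination:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
        "math/big"
    )

    // delegationResponse mirrors the DelegationResponse shape documented above.
    type delegationResponse struct {
        Delegation struct {
            DelegatorAddress string `json:"delegator_address"`
            ValidatorAddress string `json:"validator_address"`
            Shares           string `json:"shares"`
        } `json:"delegation"`
        Balance struct {
            Denom  string `json:"denom"`
            Amount string `json:"amount"`
        } `json:"balance"`
    }

    func main() {
        // Sample payload with placeholder addresses; amounts come back as strings.
        raw := []byte(`{
          "delegation": {
            "delegator_address": "akash1...",
            "validator_address": "akashvaloper1...",
            "shares": "100000.000000000000000000"
          },
          "balance": {"denom": "uakt", "amount": "100000"}
        }`)

        var dr delegationResponse
        if err := json.Unmarshal(raw, &dr); err != nil {
            log.Fatal(err)
        }

        // The amount is an arbitrary-precision integer; parse it accordingly.
        amt, ok := new(big.Int).SetString(dr.Balance.Amount, 10)
        if !ok {
            log.Fatal("invalid amount")
        }
        fmt.Println(dr.Balance.Denom, amt)
    }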
+ title: |- + QueryValidatorDelegationsResponse is response type for the + Query/ValidatorDelegations RPC method default: - description: An unexpected error response + description: An unexpected error response. schema: type: object properties: @@ -27162,7 +25888,7 @@ paths: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -27172,13 +25898,16 @@ paths: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -27200,7 +25929,6 @@ paths: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -27238,7 +25966,8 @@ paths: "value": "1.212s" } parameters: - - name: grantee + - name: validator_addr + description: validator_addr defines the validator address to query for. in: path required: true type: string @@ -27288,6483 +26017,29906 @@ paths: in: query required: false type: boolean - format: boolean - name: pagination.reverse description: >- reverse is set to true if results are to be returned in the descending order. + + + Since: cosmos-sdk 0.43 in: query required: false type: boolean - format: boolean tags: - Query -definitions: - akash.audit.v1beta3.Provider: - type: object - properties: - owner: - type: string - auditor: - type: string - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: Provider stores owner auditor and attributes details - akash.audit.v1beta3.QueryProvidersResponse: - type: object - properties: - providers: - type: array - items: - type: object - properties: - owner: - type: string - auditor: - type: string - attributes: - type: array - items: + /cosmos/staking/v1beta1/validators/{validator_addr}/delegations/{delegator_addr}: + get: + summary: Delegation queries delegate info for given validator delegator pair. + operationId: Delegation + responses: + '200': + description: A successful response. + schema: + type: object + properties: + delegation_response: type: object properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: Provider stores owner auditor and attributes details - pagination: - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + delegation: + type: object + properties: + delegator_address: + type: string + description: >- + delegator_address is the bech32-encoded address of the + delegator. + validator_address: + type: string + description: >- + validator_address is the bech32-encoded address of the + validator. + shares: + type: string + description: shares define the delegation shares received. + description: >- + Delegation represents the bond with tokens held by an + account. It is - was set, its value is undefined otherwise - description: |- - PageResponse is to be embedded in gRPC response messages where the - corresponding request message has used PageRequest. 
+ owned by one delegator, and is associated with the voting + power of one - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - title: QueryProvidersResponse is response type for the Query/Providers RPC method - akash.base.v1beta3.Attribute: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - cosmos.base.query.v1beta1.PageRequest: - type: object - properties: - key: - type: string - format: byte - description: |- - key is a value returned in PageResponse.next_key to begin - querying the next page most efficiently. Only one of offset or key - should be set. - offset: - type: string - format: uint64 - description: |- - offset is a numeric offset that can be used when key is unavailable. - It is less efficient than using key. Only one of offset or key should - be set. - limit: - type: string - format: uint64 - description: >- - limit is the total number of results to be returned in the result - page. + validator. + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. - If left empty it will default to a value to be set by each app. - count_total: - type: boolean - description: >- - count_total is set to true to indicate that the result set should - include - a count of the total number of items available for pagination in UIs. + NOTE: The amount field is an Int which implements the + custom method - count_total is only respected when offset is used. It is ignored when - key + signatures required by gogoproto. + description: >- + DelegationResponse is equivalent to Delegation except that it + contains a - is set. - format: boolean - reverse: - type: boolean - description: >- - reverse is set to true if results are to be returned in the descending - order. + balance in addition to shares which is more suitable for + client responses. + description: >- + QueryDelegationResponse is response type for the Query/Delegation + RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + protocol buffer message. This string must contain at + least - Since: cosmos-sdk 0.43 - format: boolean - description: |- - message SomeRequest { - Foo some_parameter = 1; - PageRequest pagination = 2; - } - title: |- - PageRequest is to be embedded in gRPC request messages for efficient - pagination. Ex: - cosmos.base.query.v1beta1.PageResponse: - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: |- - total is total number of results available if PageRequest.count_total - was set, its value is undefined otherwise - description: |- - PageResponse is to be embedded in gRPC response messages where the - corresponding request message has used PageRequest. + one "/" character. 
The last segment of the URL's path + must represent - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - google.protobuf.Any: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized + the fully qualified name of the type (as in - protocol buffer message. This string must contain at least + `path/google.protobuf.Duration`). The name should be in + a canonical form - one "/" character. The last segment of the URL's path must represent + (e.g., leading "." is not accepted). - the fully qualified name of the type (as in - `path/google.protobuf.Duration`). The name should be in a canonical - form + In practice, teams usually precompile into the binary + all types that they - (e.g., leading "." is not accepted). + expect it to use in the context of Any. However, for + URLs which use the + scheme `http`, `https`, or no scheme, one can optionally + set up a type - In practice, teams usually precompile into the binary all types that - they + server that maps type URLs to message definitions as + follows: - expect it to use in the context of Any. However, for URLs which use - the - scheme `http`, `https`, or no scheme, one can optionally set up a type + * If no scheme is provided, `https` is assumed. - server that maps type URLs to message definitions as follows: + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + Note: this functionality is not currently available in + the official - * If no scheme is provided, `https` is assumed. + protobuf release, and it is not used for type URLs + beginning with - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + type.googleapis.com. - Note: this functionality is not currently available in the official - protobuf release, and it is not used for type URLs beginning with + Schemes other than `http`, `https` (or the empty scheme) + might be - type.googleapis.com. + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + URL that describes the type of the serialized message. - Schemes other than `http`, `https` (or the empty scheme) might be - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above specified - type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message along with - a + Protobuf library provides support to pack/unpack Any values + in the form - URL that describes the type of the serialized message. + of utility functions or additional generated methods of the + Any type. 
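Reviewer note: the regenerated `Any` descriptions above replace the deprecated ptypes.MarshalAny/UnmarshalAny snippets with the anypb helpers. A minimal runnable sketch of that updated Go flow, using durationpb.Duration as a stand-in for the doc's placeholder Foo message (the concrete type and values are illustrative assumptions, not part of this patch):

    package main

    import (
        "fmt"
        "log"
        "time"

        "google.golang.org/protobuf/types/known/anypb"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        // Pack: anypb.New wraps a concrete message and records its type URL,
        // here "type.googleapis.com/google.protobuf.Duration".
        d := durationpb.New(1212 * time.Millisecond)
        packed, err := anypb.New(d)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("type_url:", packed.GetTypeUrl())

        // Unpack: UnmarshalTo fails if the Any does not hold the expected type.
        out := &durationpb.Duration{}
        if err := packed.UnmarshalTo(out); err != nil {
            log.Fatal(err)
        }
        fmt.Println("value:", out.AsDuration()) // 1.212s
    }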
- Protobuf library provides support to pack/unpack Any values in the form + Example 1: Pack and unpack a message in C++. - of utility functions or additional generated methods of the Any type. + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + Example 2: Pack and unpack a message in Java. - Example 1: Pack and unpack a message in C++. + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + Example 3: Pack and unpack a message in Python. - Example 2: Pack and unpack a message in Java. + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + Example 4: Pack and unpack a message in Go - Example 3: Pack and unpack a message in Python. + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + The pack methods provided by protobuf library will by + default use - Example 4: Pack and unpack a message in Go + 'type.googleapis.com/full.type.name' as the type URL and the + unpack - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + methods only use the fully qualified type name after the + last '/' - The pack methods provided by protobuf library will by default use + in the type URL, for example "foo.bar.com/x/y.z" will yield + type - 'type.googleapis.com/full.type.name' as the type URL and the unpack + name "y.z". - methods only use the fully qualified type name after the last '/' - in the type URL, for example "foo.bar.com/x/y.z" will yield type - name "y.z". + JSON + The JSON representation of an `Any` value uses the regular - JSON + representation of the deserialized, embedded message, with + an - ==== + additional field `@type` which contains the type URL. + Example: - The JSON representation of an `Any` value uses the regular + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - representation of the deserialized, embedded message, with an + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - additional field `@type` which contains the type URL. Example: + If the embedded message type is well-known and has a custom + JSON - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + representation, that representation will be embedded adding + a field - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + `value` which holds the custom JSON in addition to the + `@type` - If the embedded message type is well-known and has a custom JSON + field. Example (for message [google.protobuf.Duration][]): - representation, that representation will be embedded adding a field + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: validator_addr + description: validator_addr defines the validator address to query for. 
+ in: path + required: true + type: string + - name: delegator_addr + description: delegator_addr defines the delegator address to query for. + in: path + required: true + type: string + tags: + - Query + /cosmos/staking/v1beta1/validators/{validator_addr}/delegations/{delegator_addr}/unbonding_delegation: + get: + summary: |- + UnbondingDelegation queries unbonding info for given validator delegator + pair. + operationId: UnbondingDelegation + responses: + '200': + description: A successful response. + schema: + type: object + properties: + unbond: + type: object + properties: + delegator_address: + type: string + description: >- + delegator_address is the bech32-encoded address of the + delegator. + validator_address: + type: string + description: >- + validator_address is the bech32-encoded address of the + validator. + entries: + type: array + items: + type: object + properties: + creation_height: + type: string + format: int64 + description: >- + creation_height is the height which the unbonding + took place. + completion_time: + type: string + format: date-time + description: >- + completion_time is the unix time for unbonding + completion. + initial_balance: + type: string + description: >- + initial_balance defines the tokens initially + scheduled to receive at completion. + balance: + type: string + description: balance defines the tokens to receive at completion. + unbonding_id: + type: string + format: uint64 + title: Incrementing id that uniquely identifies this entry + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + Strictly positive if this entry's unbonding has been + stopped by external modules + description: >- + UnbondingDelegationEntry defines an unbonding object + with relevant metadata. + description: entries are the unbonding delegation entries. + description: >- + UnbondingDelegation stores all of a single delegator's + unbonding bonds - `value` which holds the custom JSON in addition to the `@type` + for a single validator in an time-ordered list. + description: >- + QueryDelegationResponse is response type for the + Query/UnbondingDelegation - field. Example (for message [google.protobuf.Duration][]): + RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - grpc.gateway.runtime.Error: - type: object - properties: - error: - type: string - code: - type: integer - format: int32 - message: - type: string - details: - type: array - items: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized + protocol buffer message. This string must contain at + least - protocol buffer message. This string must contain at least + one "/" character. The last segment of the URL's path + must represent - one "/" character. The last segment of the URL's path must - represent + the fully qualified name of the type (as in - the fully qualified name of the type (as in + `path/google.protobuf.Duration`). The name should be in + a canonical form - `path/google.protobuf.Duration`). The name should be in a - canonical form + (e.g., leading "." is not accepted). 
- (e.g., leading "." is not accepted). + In practice, teams usually precompile into the binary + all types that they - In practice, teams usually precompile into the binary all types - that they + expect it to use in the context of Any. However, for + URLs which use the - expect it to use in the context of Any. However, for URLs which - use the + scheme `http`, `https`, or no scheme, one can optionally + set up a type - scheme `http`, `https`, or no scheme, one can optionally set up - a type + server that maps type URLs to message definitions as + follows: - server that maps type URLs to message definitions as follows: + * If no scheme is provided, `https` is assumed. - * If no scheme is provided, `https` is assumed. + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + Note: this functionality is not currently available in + the official - Note: this functionality is not currently available in the - official + protobuf release, and it is not used for type URLs + beginning with - protobuf release, and it is not used for type URLs beginning - with + type.googleapis.com. - type.googleapis.com. + Schemes other than `http`, `https` (or the empty scheme) + might be - Schemes other than `http`, `https` (or the empty scheme) might - be + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message along - with a + URL that describes the type of the serialized message. - URL that describes the type of the serialized message. + Protobuf library provides support to pack/unpack Any values + in the form - Protobuf library provides support to pack/unpack Any values in the - form + of utility functions or additional generated methods of the + Any type. - of utility functions or additional generated methods of the Any - type. + Example 1: Pack and unpack a message in C++. - Example 1: Pack and unpack a message in C++. + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + Example 2: Pack and unpack a message in Java. - Example 2: Pack and unpack a message in Java. + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - Foo foo = ...; - Any any = Any.pack(foo); - ... 
- if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + Example 3: Pack and unpack a message in Python. - Example 3: Pack and unpack a message in Python. + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + Example 4: Pack and unpack a message in Go - Example 4: Pack and unpack a message in Go + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + The pack methods provided by protobuf library will by + default use - The pack methods provided by protobuf library will by default use + 'type.googleapis.com/full.type.name' as the type URL and the + unpack - 'type.googleapis.com/full.type.name' as the type URL and the unpack + methods only use the fully qualified type name after the + last '/' - methods only use the fully qualified type name after the last '/' + in the type URL, for example "foo.bar.com/x/y.z" will yield + type - in the type URL, for example "foo.bar.com/x/y.z" will yield type + name "y.z". - name "y.z". + JSON - JSON - ==== + The JSON representation of an `Any` value uses the regular - The JSON representation of an `Any` value uses the regular + representation of the deserialized, embedded message, with + an - representation of the deserialized, embedded message, with an + additional field `@type` which contains the type URL. + Example: - additional field `@type` which contains the type URL. Example: + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + If the embedded message type is well-known and has a custom + JSON - If the embedded message type is well-known and has a custom JSON + representation, that representation will be embedded adding + a field - representation, that representation will be embedded adding a field + `value` which holds the custom JSON in addition to the + `@type` - `value` which holds the custom JSON in addition to the `@type` + field. Example (for message [google.protobuf.Duration][]): - field. Example (for message [google.protobuf.Duration][]): + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: validator_addr + description: validator_addr defines the validator address to query for. + in: path + required: true + type: string + - name: delegator_addr + description: delegator_addr defines the delegator address to query for. + in: path + required: true + type: string + tags: + - Query + /cosmos/staking/v1beta1/validators/{validator_addr}/unbonding_delegations: + get: + summary: >- + ValidatorUnbondingDelegations queries unbonding delegations of a + validator. 
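Reviewer note: for the validator/delegator pair queries added above, a rough sketch of hitting the Delegation REST route from Go and decoding only the fields shown in its 200 schema. The node URL, port, and both bech32 addresses are placeholders, not values from this patch:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"
        "net/http"
    )

    // Mirrors only the parts of QueryDelegationResponse used here.
    type delegationResponse struct {
        DelegationResponse struct {
            Delegation struct {
                DelegatorAddress string `json:"delegator_address"`
                ValidatorAddress string `json:"validator_address"`
                Shares           string `json:"shares"`
            } `json:"delegation"`
            Balance struct {
                Denom  string `json:"denom"`
                Amount string `json:"amount"`
            } `json:"balance"`
        } `json:"delegation_response"`
    }

    func main() {
        // Placeholder node and addresses; substitute real values.
        endpoint := "http://localhost:1317/cosmos/staking/v1beta1/validators/" +
            "akashvaloper1example.../delegations/akash1example..."

        resp, err := http.Get(endpoint)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        var out delegationResponse
        if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
            log.Fatal(err)
        }
        fmt.Println("shares:", out.DelegationResponse.Delegation.Shares)
        fmt.Println("balance:", out.DelegationResponse.Balance.Amount,
            out.DelegationResponse.Balance.Denom)
    }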
+ description: >- + When called from another module, this query might consume a high amount + of - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - akash.cert.v1beta3.Certificate: - type: object - properties: - state: - type: string - enum: - - invalid - - valid - - revoked - default: invalid - description: >- - - invalid: Prefix should start with 0 in enum. So declaring dummy - state - - valid: CertificateValid denotes state for deployment active - - revoked: CertificateRevoked denotes state for deployment closed - title: State is an enum which refers to state of deployment - cert: - type: string - format: byte - pubkey: - type: string - format: byte - title: Certificate stores state, certificate and it's public key - akash.cert.v1beta3.Certificate.State: - type: string - enum: - - invalid - - valid - - revoked - default: invalid - description: |- - - invalid: Prefix should start with 0 in enum. So declaring dummy state - - valid: CertificateValid denotes state for deployment active - - revoked: CertificateRevoked denotes state for deployment closed - title: State is an enum which refers to state of deployment - akash.cert.v1beta3.CertificateFilter: - type: object - properties: - owner: - type: string - serial: - type: string - state: - type: string - title: CertificateFilter defines filters used to filter certificates - akash.cert.v1beta3.CertificateResponse: - type: object - properties: - certificate: - type: object - properties: - state: - type: string - enum: - - invalid - - valid - - revoked - default: invalid + gas if the pagination field is incorrectly set. + operationId: ValidatorUnbondingDelegations + responses: + '200': + description: A successful response. + schema: + type: object + properties: + unbonding_responses: + type: array + items: + type: object + properties: + delegator_address: + type: string + description: >- + delegator_address is the bech32-encoded address of the + delegator. + validator_address: + type: string + description: >- + validator_address is the bech32-encoded address of the + validator. + entries: + type: array + items: + type: object + properties: + creation_height: + type: string + format: int64 + description: >- + creation_height is the height which the unbonding + took place. + completion_time: + type: string + format: date-time + description: >- + completion_time is the unix time for unbonding + completion. + initial_balance: + type: string + description: >- + initial_balance defines the tokens initially + scheduled to receive at completion. + balance: + type: string + description: >- + balance defines the tokens to receive at + completion. + unbonding_id: + type: string + format: uint64 + title: >- + Incrementing id that uniquely identifies this + entry + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + Strictly positive if this entry's unbonding has + been stopped by external modules + description: >- + UnbondingDelegationEntry defines an unbonding object + with relevant metadata. + description: entries are the unbonding delegation entries. + description: >- + UnbondingDelegation stores all of a single delegator's + unbonding bonds + + for a single validator in an time-ordered list. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. 
It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise description: >- - - invalid: Prefix should start with 0 in enum. So declaring dummy - state - - valid: CertificateValid denotes state for deployment active - - revoked: CertificateRevoked denotes state for deployment closed - title: State is an enum which refers to state of deployment - cert: - type: string - format: byte - pubkey: - type: string - format: byte - title: Certificate stores state, certificate and it's public key - serial: - type: string - title: >- - CertificateResponse contains a single X509 certificate and its serial - number - akash.cert.v1beta3.QueryCertificatesResponse: - type: object - properties: - certificates: - type: array - items: - type: object - properties: - certificate: - type: object - properties: - state: - type: string - enum: - - invalid - - valid - - revoked - default: invalid + QueryValidatorUnbondingDelegationsResponse is response type for + the + + Query/ValidatorUnbondingDelegations RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. description: >- - - invalid: Prefix should start with 0 in enum. 
So declaring - dummy state - - valid: CertificateValid denotes state for deployment active - - revoked: CertificateRevoked denotes state for deployment closed - title: State is an enum which refers to state of deployment - cert: - type: string - format: byte - pubkey: - type: string - format: byte - title: Certificate stores state, certificate and it's public key - serial: - type: string - title: >- - CertificateResponse contains a single X509 certificate and its - serial number - pagination: - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + `Any` contains an arbitrary serialized protocol buffer + message along with a - was set, its value is undefined otherwise - description: |- - PageResponse is to be embedded in gRPC response messages where the - corresponding request message has used PageRequest. + URL that describes the type of the serialized message. - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - title: >- - QueryCertificatesResponse is response type for the Query/Certificates RPC - method - akash.base.v1beta3.CPU: - type: object - properties: - units: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: CPU stores resource units and cpu config attributes - akash.base.v1beta3.Endpoint: - type: object - properties: - kind: - type: string - enum: - - SHARED_HTTP - - RANDOM_PORT - - LEASED_IP - default: SHARED_HTTP - description: |- - - SHARED_HTTP: Describes an endpoint that becomes a Kubernetes Ingress - - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort - - LEASED_IP: Describes an endpoint that becomes a leased IP - title: >- - This describes how the endpoint is implemented when the lease is - deployed - sequence_number: - type: integer - format: int64 - title: Endpoint describes a publicly accessible IP service - akash.base.v1beta3.Endpoint.Kind: - type: string - enum: - - SHARED_HTTP - - RANDOM_PORT - - LEASED_IP - default: SHARED_HTTP - description: |- - - SHARED_HTTP: Describes an endpoint that becomes a Kubernetes Ingress - - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort - - LEASED_IP: Describes an endpoint that becomes a leased IP - title: This describes how the endpoint is implemented when the lease is deployed - akash.base.v1beta3.GPU: - type: object - properties: - units: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: GPU stores resource units and cpu config attributes - akash.base.v1beta3.Memory: - type: object - properties: - quantity: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: Memory stores resource quantity and memory attributes - 
akash.base.v1beta3.PlacementRequirements: - type: object - properties: - signed_by: - title: SignedBy list of keys that tenants expect to have signatures from - type: object - properties: - all_of: - type: array - items: - type: string - title: all_of all keys in this list must have signed attributes - any_of: - type: array - items: - type: string - title: >- - any_of at least of of the keys from the list must have signed - attributes - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: Attribute list of attributes tenant expects from the provider - title: PlacementRequirements - akash.base.v1beta3.ResourceValue: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - akash.base.v1beta3.Resources: - type: object - properties: - id: - type: integer - format: int64 - cpu: - type: object - properties: - units: + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: validator_addr + description: validator_addr defines the validator address to query for. + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/tx/v1beta1/decode: + post: + summary: TxDecode decodes the transaction. + description: 'Since: cosmos-sdk 0.47' + operationId: TxDecode + responses: + '200': + description: A successful response. + schema: + $ref: '#/definitions/cosmos.tx.v1beta1.TxDecodeResponse' + default: + description: An unexpected error response. + schema: type: object properties: - val: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. 
+ + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: body + in: body + required: true + schema: + type: object + properties: + tx_bytes: type: string format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: CPU stores resource units and cpu config attributes - memory: + description: tx_bytes is the raw transaction. + description: |- + TxDecodeRequest is the request type for the Service.TxDecode + RPC method. + + Since: cosmos-sdk 0.47 + tags: + - Service + /cosmos/tx/v1beta1/decode/amino: + post: + summary: TxDecodeAmino decodes an Amino transaction from encoded bytes to JSON. + description: 'Since: cosmos-sdk 0.47' + operationId: TxDecodeAmino + responses: + '200': + description: A successful response. + schema: + type: object + properties: + amino_json: + type: string + description: >- + TxDecodeAminoResponse is the response type for the + Service.TxDecodeAmino + + RPC method. + + + Since: cosmos-sdk 0.47 + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. 
However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: body + in: body + required: true + schema: + type: object + properties: + amino_binary: + type: string + format: byte + description: >- + TxDecodeAminoRequest is the request type for the + Service.TxDecodeAmino + + RPC method. + + + Since: cosmos-sdk 0.47 + tags: + - Service + /cosmos/tx/v1beta1/encode: + post: + summary: TxEncode encodes the transaction. 
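Reviewer note: the decode/amino route documented above takes a base64-encoded amino_binary blob and returns amino_json. A hedged Go sketch of that round trip; the node endpoint and the request payload are assumptions for illustration only:

    package main

    import (
        "bytes"
        "encoding/json"
        "fmt"
        "log"
        "net/http"
    )

    func main() {
        // amino_binary is declared as format: byte, i.e. base64 in JSON.
        // The payload below is a placeholder, not a real transaction.
        reqBody, err := json.Marshal(map[string]string{
            "amino_binary": "CgWq...placeholder...",
        })
        if err != nil {
            log.Fatal(err)
        }

        resp, err := http.Post(
            "http://localhost:1317/cosmos/tx/v1beta1/decode/amino", // placeholder node
            "application/json",
            bytes.NewReader(reqBody),
        )
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        var out struct {
            AminoJSON string `json:"amino_json"`
        }
        if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
            log.Fatal(err)
        }
        fmt.Println(out.AminoJSON)
    }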
+ description: 'Since: cosmos-sdk 0.47' + operationId: TxEncode + responses: + '200': + description: A successful response. + schema: + type: object + properties: + tx_bytes: + type: string + format: byte + description: tx_bytes is the encoded transaction bytes. + description: |- + TxEncodeResponse is the response type for the + Service.TxEncode method. + + Since: cosmos-sdk 0.47 + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". 
+ + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: body + in: body + required: true + schema: + $ref: '#/definitions/cosmos.tx.v1beta1.TxEncodeRequest' + tags: + - Service + /cosmos/tx/v1beta1/encode/amino: + post: + summary: TxEncodeAmino encodes an Amino transaction from JSON to encoded bytes. + description: 'Since: cosmos-sdk 0.47' + operationId: TxEncodeAmino + responses: + '200': + description: A successful response. + schema: + type: object + properties: + amino_binary: + type: string + format: byte + description: >- + TxEncodeAminoResponse is the response type for the + Service.TxEncodeAmino + + RPC method. + + + Since: cosmos-sdk 0.47 + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. 
+ + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: body + in: body + required: true + schema: + type: object + properties: + amino_json: + type: string + description: >- + TxEncodeAminoRequest is the request type for the + Service.TxEncodeAmino + + RPC method. + + + Since: cosmos-sdk 0.47 + tags: + - Service + /cosmos/tx/v1beta1/simulate: + post: + summary: Simulate simulates executing a transaction for estimating gas usage. + operationId: Simulate + responses: + '200': + description: A successful response. + schema: + type: object + properties: + gas_info: + description: gas_info is the information about gas used in the simulation. + type: object + properties: + gas_wanted: + type: string + format: uint64 + description: >- + GasWanted is the maximum units of work we allow this tx to + perform. + gas_used: + type: string + format: uint64 + description: GasUsed is the amount of gas actually consumed. + result: + description: result is the result of the simulation. + type: object + properties: + data: + type: string + format: byte + description: >- + Data is any data returned from message or handler + execution. It MUST be + + length prefixed in order to separate data from multiple + message executions. + + Deprecated. This field is still populated, but prefer + msg_response instead + + because it also contains the Msg response typeURL. + log: + type: string + description: >- + Log contains the log information from message or handler + execution. 
+ events: + type: array + items: + type: object + properties: + type: + type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + index: + type: boolean + description: >- + EventAttribute is a single key-value pair, + associated with an event. + description: >- + Event allows application developers to attach additional + information to + + ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx + and ResponseDeliverTx. + + Later, transactions may be queried using these events. + description: >- + Events contains a slice of Event objects that were emitted + during message + + or handler execution. + msg_responses: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's + path must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the + binary all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available + in the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the + regular + + representation of the deserialized, embedded message, + with an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message + [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + msg_responses contains the Msg handler responses type + packed in Anys. + + + Since: cosmos-sdk 0.46 + description: |- + SimulateResponse is the response type for the + Service.SimulateRPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. 
+ + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: body + in: body + required: true + schema: + $ref: '#/definitions/cosmos.tx.v1beta1.SimulateRequest' + tags: + - Service + /cosmos/tx/v1beta1/txs: + get: + summary: GetTxsEvent fetches txs by event. + operationId: GetTxsEvent + responses: + '200': + description: A successful response. + schema: + $ref: '#/definitions/cosmos.tx.v1beta1.GetTxsEventResponse' + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) 
+ + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: events + description: events is the list of transaction event type. + in: query + required: false + type: array + items: + type: string + collectionFormat: multi + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + - name: order_by + description: |2- + - ORDER_BY_UNSPECIFIED: ORDER_BY_UNSPECIFIED specifies an unknown sorting order. OrderBy defaults to ASC in this case. + - ORDER_BY_ASC: ORDER_BY_ASC defines ascending order + - ORDER_BY_DESC: ORDER_BY_DESC defines descending order + in: query + required: false + type: string + enum: + - ORDER_BY_UNSPECIFIED + - ORDER_BY_ASC + - ORDER_BY_DESC + default: ORDER_BY_UNSPECIFIED + - name: page + description: >- + page is the page number to query, starts at 1. If not provided, will + default to first page. + in: query + required: false + type: string + format: uint64 + - name: limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + tags: + - Service + post: + summary: BroadcastTx broadcast transaction. + operationId: BroadcastTx + responses: + '200': + description: A successful response. + schema: + type: object + properties: + tx_response: + type: object + properties: + height: + type: string + format: int64 + title: The block height + txhash: + type: string + description: The transaction hash. + codespace: + type: string + title: Namespace for the Code + code: + type: integer + format: int64 + description: Response code. + data: + type: string + description: Result bytes, if any. + raw_log: + type: string + description: >- + The output of the application's logger (raw string). May + be + + non-deterministic. + logs: + type: array + items: + type: object + properties: + msg_index: + type: integer + format: int64 + log: + type: string + events: + type: array + items: + type: object + properties: + type: + type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + description: >- + Attribute defines an attribute wrapper where + the key and value are + + strings instead of raw bytes. + description: >- + StringEvent defines en Event object wrapper where + all the attributes + + contain key/value pairs that are strings instead + of raw bytes. + description: >- + Events contains a slice of Event objects that were + emitted during some + + execution. + description: >- + ABCIMessageLog defines a structure containing an indexed + tx ABCI message log. + description: >- + The output of the application's logger (typed). May be + non-deterministic. + info: + type: string + description: Additional information. May be non-deterministic. + gas_wanted: + type: string + format: int64 + description: Amount of gas requested for transaction. + gas_used: + type: string + format: int64 + description: Amount of gas consumed by transaction. + tx: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type + of the serialized + + protocol buffer message. 
This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + timestamp: + type: string + description: >- + Time of the previous block. For heights > 1, it's the + weighted median of + + the timestamps of the valid votes in the block.LastCommit. + For height == 1, + + it's genesis time. + events: + type: array + items: + type: object + properties: + type: + type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + index: + type: boolean + description: >- + EventAttribute is a single key-value pair, + associated with an event. + description: >- + Event allows application developers to attach additional + information to + + ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx + and ResponseDeliverTx. + + Later, transactions may be queried using these events. + description: >- + Events defines all the events emitted by processing a + transaction. Note, + + these events include those emitted by processing all the + messages and those + + emitted from the ante. Whereas Logs contains the events, + with + + additional metadata, emitted only by processing the + messages. + + + Since: cosmos-sdk 0.42.11, 0.44.5, 0.45 + description: >- + TxResponse defines a structure containing relevant tx data and + metadata. The + + tags are stringified and the log is JSON decoded. + description: |- + BroadcastTxResponse is the response type for the + Service.BroadcastTx method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. 
+ + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: body + in: body + required: true + schema: + type: object + properties: + tx_bytes: + type: string + format: byte + description: tx_bytes is the raw transaction. + mode: + type: string + enum: + - BROADCAST_MODE_UNSPECIFIED + - BROADCAST_MODE_BLOCK + - BROADCAST_MODE_SYNC + - BROADCAST_MODE_ASYNC + default: BROADCAST_MODE_UNSPECIFIED + description: >- + BroadcastMode specifies the broadcast mode for the + TxService.Broadcast RPC method. + + - BROADCAST_MODE_UNSPECIFIED: zero-value for mode ordering + - BROADCAST_MODE_BLOCK: DEPRECATED: use BROADCAST_MODE_SYNC instead, + BROADCAST_MODE_BLOCK is not supported by the SDK from v0.47.x + onwards. + - BROADCAST_MODE_SYNC: BROADCAST_MODE_SYNC defines a tx broadcasting mode where the client waits for + a CheckTx execution response only. + - BROADCAST_MODE_ASYNC: BROADCAST_MODE_ASYNC defines a tx broadcasting mode where the client returns + immediately. + description: >- + BroadcastTxRequest is the request type for the + Service.BroadcastTxRequest + + RPC method. + tags: + - Service + /cosmos/tx/v1beta1/txs/block/{height}: + get: + summary: GetBlockWithTxs fetches a block with decoded txs. + description: 'Since: cosmos-sdk 0.45.2' + operationId: GetBlockWithTxs + responses: + '200': + description: A successful response. + schema: + $ref: '#/definitions/cosmos.tx.v1beta1.GetBlockWithTxsResponse' + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. 
+ Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: height + description: height is the height of the block to query. + in: path + required: true + type: string + format: int64 + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Service + /cosmos/tx/v1beta1/txs/{hash}: + get: + summary: GetTx fetches a tx by hash. + operationId: GetTx + responses: + '200': + description: A successful response. + schema: + $ref: '#/definitions/cosmos.tx.v1beta1.GetTxResponse' + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. 
+ * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: hash + description: hash is the tx hash to query, encoded as a hex string. + in: path + required: true + type: string + tags: + - Service + /cosmos/upgrade/v1beta1/applied_plan/{name}: + get: + summary: AppliedPlan queries a previously applied upgrade plan by its name. + operationId: AppliedPlan + responses: + '200': + description: A successful response. + schema: + type: object + properties: + height: + type: string + format: int64 + description: height is the block height at which the plan was applied. + description: >- + QueryAppliedPlanResponse is the response type for the + Query/AppliedPlan RPC + + method. + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. 
+ Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: name + description: name is the name of the applied plan to query for. + in: path + required: true + type: string + tags: + - Query + /cosmos/upgrade/v1beta1/authority: + get: + summary: Returns the account with authority to conduct upgrades + description: 'Since: cosmos-sdk 0.46' + operationId: Authority + responses: + '200': + description: A successful response. + schema: + type: object + properties: + address: + type: string + description: 'Since: cosmos-sdk 0.46' + title: QueryAuthorityResponse is the response type for Query/Authority + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... 
+ if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + tags: + - Query + /cosmos/upgrade/v1beta1/current_plan: + get: + summary: CurrentPlan queries the current upgrade plan. + operationId: CurrentPlan + responses: + '200': + description: A successful response. + schema: + type: object + properties: + plan: + description: plan is the current upgrade plan. + type: object + properties: + name: + type: string + description: >- + Sets the name for the upgrade. This name will be used by + the upgraded + + version of the software to apply any special "on-upgrade" + commands during + + the first BeginBlock method after the upgrade is applied. + It is also used + + to detect whether a software version can handle a given + upgrade. If no + + upgrade handler with this name has been set in the + software, it will be + + assumed that the software is out-of-date when the upgrade + Time or Height is + + reached and the software will exit. + time: + type: string + format: date-time + description: >- + Deprecated: Time based upgrades have been deprecated. Time + based upgrade logic + + has been removed from the SDK. + + If this field is not empty, an error will be thrown. + height: + type: string + format: int64 + description: The height at which the upgrade must be performed. + info: + type: string + title: >- + Any application specific upgrade info to be included + on-chain + + such as a git commit that validators could automatically + upgrade to + upgraded_client_state: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type + of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. 
However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + QueryCurrentPlanResponse is the response type for the + Query/CurrentPlan RPC + + method. + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. 
+ Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + tags: + - Query + /cosmos/upgrade/v1beta1/module_versions: + get: + summary: ModuleVersions queries the list of module versions from state. + description: 'Since: cosmos-sdk 0.43' + operationId: ModuleVersions + responses: + '200': + description: A successful response. + schema: + type: object + properties: + module_versions: + type: array + items: + type: object + properties: + name: + type: string + title: name of the app module + version: + type: string + format: uint64 + title: consensus version of the app module + description: |- + ModuleVersion specifies a module and its consensus version. + + Since: cosmos-sdk 0.43 + description: >- + module_versions is a list of module names with their consensus + versions. + description: >- + QueryModuleVersionsResponse is the response type for the + Query/ModuleVersions + + RPC method. + + + Since: cosmos-sdk 0.43 + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. 
+ + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: module_name + description: |- + module_name is a field to query a specific module + consensus version from state. Leaving this empty will + fetch the full list of module versions from state. + in: query + required: false + type: string + tags: + - Query + /cosmos/upgrade/v1beta1/upgraded_consensus_state/{last_height}: + get: + summary: >- + UpgradedConsensusState queries the consensus state that will serve + + as a trusted kernel for the next version of this chain. It will only be + + stored at the last height of this chain. + + UpgradedConsensusState RPC not supported with legacy querier + + This rpc is deprecated now that IBC has its own replacement + + (https://github.com/cosmos/ibc-go/blob/2c880a22e9f9cc75f62b527ca94aa75ce1106001/proto/ibc/core/client/v1/query.proto#L54) + operationId: UpgradedConsensusState + responses: + '200': + description: A successful response. + schema: + type: object + properties: + upgraded_consensus_state: + type: string + format: byte + title: 'Since: cosmos-sdk 0.43' + description: >- + QueryUpgradedConsensusStateResponse is the response type for the + Query/UpgradedConsensusState + + RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. 
The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: last_height + description: |- + last height of the current chain must be sent in request + as this is the height under which next consensus state is stored + in: path + required: true + type: string + format: int64 + tags: + - Query + /cosmos/authz/v1beta1/grants: + get: + summary: Returns list of `Authorization`, granted to the grantee by the granter. + operationId: Grants + responses: + '200': + description: A successful response. + schema: + type: object + properties: + grants: + type: array + items: + type: object + properties: + authorization: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's + path must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the + binary all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available + in the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the + regular + + representation of the deserialized, embedded message, + with an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message + [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + expiration: + type: string + format: date-time + title: >- + time when the grant will expire and will be pruned. If + null, then the grant + + doesn't have a time expiration (other conditions in + `authorization` + + may apply to invalidate the grant) + description: |- + Grant gives permissions to execute + the provide method with expiration time. + description: >- + authorizations is a list of grants granted for grantee by + granter. + pagination: + description: pagination defines an pagination for the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryGrantsResponse is the response type for the + Query/Authorizations RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) 
+ + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: granter + in: query + required: false + type: string + - name: grantee + in: query + required: false + type: string + - name: msg_type_url + description: >- + Optional, msg_type_url, when set, will query only grants matching + given msg type. + in: query + required: false + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/authz/v1beta1/grants/grantee/{grantee}: + get: + summary: GranteeGrants returns a list of `GrantAuthorization` by grantee. + description: 'Since: cosmos-sdk 0.46' + operationId: GranteeGrants + responses: + '200': + description: A successful response. + schema: + type: object + properties: + grants: + type: array + items: + type: object + properties: + granter: + type: string + grantee: + type: string + authorization: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's + path must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the + binary all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available + in the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... 
+ + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the + regular + + representation of the deserialized, embedded message, + with an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message + [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + expiration: + type: string + format: date-time + title: >- + GrantAuthorization extends a grant with both the addresses + of the grantee and granter. + + It is used in genesis.proto and query.proto + description: grants is a list of grants granted to the grantee. + pagination: + description: pagination defines an pagination for the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryGranteeGrantsResponse is the response type for the + Query/GranteeGrants RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. 
(Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: grantee + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/authz/v1beta1/grants/granter/{granter}: + get: + summary: GranterGrants returns list of `GrantAuthorization`, granted by granter. + description: 'Since: cosmos-sdk 0.46' + operationId: GranterGrants + responses: + '200': + description: A successful response. + schema: + type: object + properties: + grants: + type: array + items: + type: object + properties: + granter: + type: string + grantee: + type: string + authorization: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's + path must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the + binary all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available + in the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... 
+ + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the + regular + + representation of the deserialized, embedded message, + with an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message + [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + expiration: + type: string + format: date-time + title: >- + GrantAuthorization extends a grant with both the addresses + of the grantee and granter. + + It is used in genesis.proto and query.proto + description: grants is a list of grants granted by the granter. + pagination: + description: pagination defines an pagination for the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryGranterGrantsResponse is the response type for the + Query/GranterGrants RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. 
(Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: granter + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/feegrant/v1beta1/allowance/{granter}/{grantee}: + get: + summary: Allowance returns fee granted to the grantee by the granter. + operationId: Allowance + responses: + '200': + description: A successful response. + schema: + type: object + properties: + allowance: + description: allowance is a allowance granted for grantee by granter. + type: object + properties: + granter: + type: string + description: >- + granter is the address of the user granting an allowance + of their funds. + grantee: + type: string + description: >- + grantee is the address of the user being granted an + allowance of another user's funds. + allowance: + description: >- + allowance can be any of basic, periodic, allowed fee + allowance. + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type + of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + title: >- + Grant is stored in the KVStore to record a grant with full + context + description: >- + QueryAllowanceResponse is the response type for the + Query/Allowance RPC method. + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. 
+ Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: granter + description: >- + granter is the address of the user granting an allowance of their + funds. + in: path + required: true + type: string + - name: grantee + description: >- + grantee is the address of the user being granted an allowance of + another user's funds. + in: path + required: true + type: string + tags: + - Query + /cosmos/feegrant/v1beta1/allowances/{grantee}: + get: + summary: Allowances returns all the grants for address. + operationId: Allowances + responses: + '200': + description: A successful response. + schema: + type: object + properties: + allowances: + type: array + items: + type: object + properties: + granter: + type: string + description: >- + granter is the address of the user granting an allowance + of their funds. + grantee: + type: string + description: >- + grantee is the address of the user being granted an + allowance of another user's funds. + allowance: + description: >- + allowance can be any of basic, periodic, allowed fee + allowance. + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's + path must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the + binary all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available + in the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + title: >- + Grant is stored in the KVStore to record a grant with full + context + description: allowances are allowance's granted for grantee by granter. + pagination: + description: pagination defines an pagination for the response. 
+ type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryAllowancesResponse is the response type for the + Query/Allowances RPC method. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: grantee + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/feegrant/v1beta1/issued/{granter}: + get: + summary: AllowancesByGranter returns all the grants given by an address + description: 'Since: cosmos-sdk 0.46' + operationId: AllowancesByGranter + responses: + '200': + description: A successful response. + schema: + type: object + properties: + allowances: + type: array + items: + type: object + properties: + granter: + type: string + description: >- + granter is the address of the user granting an allowance + of their funds. + grantee: + type: string + description: >- + grantee is the address of the user being granted an + allowance of another user's funds. + allowance: + description: >- + allowance can be any of basic, periodic, allowed fee + allowance. + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. 
The last segment of the URL's + path must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the + binary all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available + in the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + title: >- + Grant is stored in the KVStore to record a grant with full + context + description: allowances that have been issued by the granter. + pagination: + description: pagination defines an pagination for the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryAllowancesByGranterResponse is the response type for the + Query/AllowancesByGranter RPC method. + + + Since: cosmos-sdk 0.46 + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. 
(Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: granter + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/nft/v1beta1/balance/{owner}/{class_id}: + get: + summary: >- + Balance queries the number of NFTs of a given class owned by the owner, + same as balanceOf in ERC721 + operationId: NftBalance + responses: + '200': + description: A successful response. + schema: + type: object + properties: + amount: + type: string + format: uint64 + title: >- + amount is the number of all NFTs of a given class owned by the + owner + title: >- + QueryBalanceResponse is the response type for the Query/Balance + RPC method + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. 
+ + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: owner + description: owner is the owner address of the nft + in: path + required: true + type: string + - name: class_id + description: class_id associated with the nft + in: path + required: true + type: string + tags: + - Query + /cosmos/nft/v1beta1/classes: + get: + summary: Classes queries all NFT classes + operationId: Classes + responses: + '200': + description: A successful response. + schema: + type: object + properties: + classes: + type: array + items: + type: object + properties: + id: + type: string + title: >- + id defines the unique identifier of the NFT + classification, similar to the contract address of + ERC721 + name: + type: string + title: >- + name defines the human-readable name of the NFT + classification. Optional + symbol: + type: string + title: >- + symbol is an abbreviated name for nft classification. + Optional + description: + type: string + title: >- + description is a brief description of nft + classification. Optional + uri: + type: string + title: >- + uri for the class metadata stored off chain. It can + define schema for Class and NFT `Data` attributes. + Optional + uri_hash: + type: string + title: >- + uri_hash is a hash of the document pointed by uri. + Optional + data: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's + path must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the + binary all types that they + + expect it to use in the context of Any. 
However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available + in the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the + regular + + representation of the deserialized, embedded message, + with an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message + [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + title: >- + data is the app specific metadata of the NFT class. + Optional + description: Class defines the class of the nft type. + description: class defines the class of the nft type. + pagination: + description: pagination defines the pagination in the response. 
+ type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + title: >- + QueryClassesResponse is the response type for the Query/Classes + RPC method + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/nft/v1beta1/classes/{class_id}: + get: + summary: Class queries an NFT class based on its id + operationId: Class + responses: + '200': + description: A successful response. + schema: + type: object + properties: + class: + type: object + properties: + id: + type: string + title: >- + id defines the unique identifier of the NFT + classification, similar to the contract address of ERC721 + name: + type: string + title: >- + name defines the human-readable name of the NFT + classification. Optional + symbol: + type: string + title: >- + symbol is an abbreviated name for nft classification. + Optional + description: + type: string + title: >- + description is a brief description of nft classification. + Optional + uri: + type: string + title: >- + uri for the class metadata stored off chain. It can define + schema for Class and NFT `Data` attributes. Optional + uri_hash: + type: string + title: >- + uri_hash is a hash of the document pointed by uri. 
+ Optional + data: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type + of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. 
+ Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + title: >- + data is the app specific metadata of the NFT class. + Optional + description: Class defines the class of the nft type. + title: >- + QueryClassResponse is the response type for the Query/Class RPC + method + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... 
+ } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: class_id + description: class_id associated with the nft + in: path + required: true + type: string + tags: + - Query + /cosmos/nft/v1beta1/nfts: + get: + summary: >- + NFTs queries all NFTs of a given class or owner,choose at least one of + the two, similar to tokenByIndex in + + ERC721Enumerable + operationId: NFTs + responses: + '200': + description: A successful response. + schema: + type: object + properties: + nfts: + type: array + items: + type: object + properties: + class_id: + type: string + title: >- + class_id associated with the NFT, similar to the + contract address of ERC721 + id: + type: string + title: id is a unique identifier of the NFT + uri: + type: string + title: uri for the NFT metadata stored off chain + uri_hash: + type: string + title: uri_hash is a hash of the document pointed by uri + data: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's + path must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the + binary all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available + in the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. 
+ value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the + regular + + representation of the deserialized, embedded message, + with an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message + [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + title: data is an app specific data of the NFT. Optional + description: NFT defines the NFT. + title: NFT defines the NFT + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + title: >- + QueryNFTsResponse is the response type for the Query/NFTs RPC + methods + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). 
The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: class_id + description: class_id associated with the nft. + in: query + required: false + type: string + - name: owner + description: owner is the owner address of the nft. 
+ in: query + required: false + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/nft/v1beta1/nfts/{class_id}/{id}: + get: + summary: NFT queries an NFT based on its class and id. + operationId: NFT + responses: + '200': + description: A successful response. + schema: + type: object + properties: + nft: + type: object + properties: + class_id: + type: string + title: >- + class_id associated with the NFT, similar to the contract + address of ERC721 + id: + type: string + title: id is a unique identifier of the NFT + uri: + type: string + title: uri for the NFT metadata stored off chain + uri_hash: + type: string + title: uri_hash is a hash of the document pointed by uri + data: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type + of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. 
+ value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + title: data is an app specific data of the NFT. Optional + description: NFT defines the NFT. + title: owner is the owner address of the nft + title: QueryNFTResponse is the response type for the Query/NFT RPC method + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. 
+ + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: class_id + description: class_id associated with the nft + in: path + required: true + type: string + - name: id + description: id is a unique identifier of the NFT + in: path + required: true + type: string + tags: + - Query + /cosmos/nft/v1beta1/owner/{class_id}/{id}: + get: + summary: >- + Owner queries the owner of the NFT based on its class and id, same as + ownerOf in ERC721 + operationId: Owner + responses: + '200': + description: A successful response. 
+ schema: + type: object + properties: + owner: + type: string + title: owner is the owner address of the nft + title: >- + QueryOwnerResponse is the response type for the Query/Owner RPC + method + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. 
+ Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: class_id + description: class_id associated with the nft + in: path + required: true + type: string + - name: id + description: id is a unique identifier of the NFT + in: path + required: true + type: string + tags: + - Query + /cosmos/nft/v1beta1/supply/{class_id}: + get: + summary: >- + Supply queries the number of NFTs from the given class, same as + totalSupply of ERC721. + operationId: Supply + responses: + '200': + description: A successful response. + schema: + type: object + properties: + amount: + type: string + format: uint64 + title: amount is the number of all NFTs from the given class + title: >- + QuerySupplyResponse is the response type for the Query/Supply RPC + method + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. 
+ + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: class_id + description: class_id associated with the nft + in: path + required: true + type: string + tags: + - Query + /cosmos/group/v1/group_info/{group_id}: + get: + summary: GroupInfo queries group info based on group id. + operationId: GroupInfo + responses: + '200': + description: A successful response. + schema: + type: object + properties: + info: + description: info is the GroupInfo of the group. + type: object + properties: + id: + type: string + format: uint64 + description: id is the unique ID of the group. + admin: + type: string + description: admin is the account address of the group's admin. + metadata: + type: string + description: >- + metadata is any arbitrary metadata to attached to the + group. + version: + type: string + format: uint64 + title: >- + version is used to track changes to a group's membership + structure that + + would break existing proposals. Whenever any members + weight is changed, + + or any member is added or removed this version is + incremented and will + + cause proposals based on older versions of this group to + fail + total_weight: + type: string + description: total_weight is the sum of the group members' weights. + created_at: + type: string + format: date-time + description: >- + created_at is a timestamp specifying when a group was + created. + description: QueryGroupInfoResponse is the Query/GroupInfo response type. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. 
This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: group_id + description: group_id is the unique ID of the group. + in: path + required: true + type: string + format: uint64 + tags: + - Query + /cosmos/group/v1/group_members/{group_id}: + get: + summary: GroupMembers queries members of a group by group id. + operationId: GroupMembers + responses: + '200': + description: A successful response. + schema: + type: object + properties: + members: + type: array + items: + type: object + properties: + group_id: + type: string + format: uint64 + description: group_id is the unique ID of the group. + member: + description: member is the member data. + type: object + properties: + address: + type: string + description: address is the member's account address. + weight: + type: string + description: >- + weight is the member's voting weight that should be + greater than 0. + metadata: + type: string + description: >- + metadata is any arbitrary metadata attached to the + member. + added_at: + type: string + format: date-time + description: >- + added_at is a timestamp specifying when a member was + added. + description: >- + GroupMember represents the relationship between a group and + a member. + description: members are the members of the group with given group_id. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryGroupMembersResponse is the Query/GroupMembersResponse + response type. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) 
+ + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: group_id + description: group_id is the unique ID of the group. + in: path + required: true + type: string + format: uint64 + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/group/v1/group_policies_by_admin/{admin}: + get: + summary: GroupPoliciesByAdmin queries group policies by admin address. + operationId: GroupPoliciesByAdmin + responses: + '200': + description: A successful response. + schema: + type: object + properties: + group_policies: + type: array + items: + type: object + properties: + address: + type: string + description: address is the account address of group policy. + group_id: + type: string + format: uint64 + description: group_id is the unique ID of the group. + admin: + type: string + description: admin is the account address of the group admin. + metadata: + type: string + description: >- + metadata is any arbitrary metadata attached to the group + policy. + version: + type: string + format: uint64 + description: >- + version is used to track changes to a group's + GroupPolicyInfo structure that + + would create a different result on a running proposal. + decision_policy: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's + path must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the + binary all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available + in the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. 
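
A minimal sketch of calling the GroupMembers endpoint documented above (/cosmos/group/v1/group_members/{group_id}) through a node's gRPC-gateway REST API. The base URL http://localhost:1317 and the group id 1 are placeholders, not part of this diff, and only a subset of the documented schema is decoded:

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    // Subset of QueryGroupMembersResponse as described in the schema above.
    type groupMembersResponse struct {
        Members []struct {
            GroupID string `json:"group_id"`
            Member  struct {
                Address  string `json:"address"`
                Weight   string `json:"weight"`
                Metadata string `json:"metadata"`
            } `json:"member"`
        } `json:"members"`
        Pagination struct {
            NextKey string `json:"next_key"`
            Total   string `json:"total"`
        } `json:"pagination"`
    }

    func main() {
        // group_id is a uint64 path parameter; 1 is a placeholder value.
        resp, err := http.Get("http://localhost:1317/cosmos/group/v1/group_members/1")
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        var out groupMembersResponse
        if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
            panic(err)
        }
        for _, m := range out.Members {
            fmt.Printf("group %s: %s weight=%s\n", m.GroupID, m.Member.Address, m.Member.Weight)
        }
    }
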
+ + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the + regular + + representation of the deserialized, embedded message, + with an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message + [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + created_at: + type: string + format: date-time + description: >- + created_at is a timestamp specifying when a group policy + was created. + description: >- + GroupPolicyInfo represents the high-level on-chain + information for a group policy. + description: >- + group_policies are the group policies info with provided + admin. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryGroupPoliciesByAdminResponse is the + Query/GroupPoliciesByAdmin response type. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. 
However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: admin + description: admin is the admin address of the group policy. + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. 
+ in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/group/v1/group_policies_by_group/{group_id}: + get: + summary: GroupPoliciesByGroup queries group policies by group id. + operationId: GroupPoliciesByGroup + responses: + '200': + description: A successful response. + schema: + type: object + properties: + group_policies: + type: array + items: + type: object + properties: + address: + type: string + description: address is the account address of group policy. + group_id: + type: string + format: uint64 + description: group_id is the unique ID of the group. + admin: + type: string + description: admin is the account address of the group admin. + metadata: + type: string + description: >- + metadata is any arbitrary metadata attached to the group + policy. + version: + type: string + format: uint64 + description: >- + version is used to track changes to a group's + GroupPolicyInfo structure that + + would create a different result on a running proposal. + decision_policy: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's + path must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the + binary all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available + in the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. 
+ + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the + regular + + representation of the deserialized, embedded message, + with an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message + [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + created_at: + type: string + format: date-time + description: >- + created_at is a timestamp specifying when a group policy + was created. + description: >- + GroupPolicyInfo represents the high-level on-chain + information for a group policy. + description: >- + group_policies are the group policies info associated with the + provided group. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryGroupPoliciesByGroupResponse is the + Query/GroupPoliciesByGroup response type. + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. 
+ Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: group_id + description: group_id is the unique ID of the group policy's group. + in: path + required: true + type: string + format: uint64 + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/group/v1/group_policy_info/{address}: + get: + summary: >- + GroupPolicyInfo queries group policy info based on account address of + group policy. + operationId: GroupPolicyInfo + responses: + '200': + description: A successful response. + schema: + type: object + properties: + info: + type: object + properties: + address: + type: string + description: address is the account address of group policy. + group_id: + type: string + format: uint64 + description: group_id is the unique ID of the group. + admin: + type: string + description: admin is the account address of the group admin. + metadata: + type: string + description: >- + metadata is any arbitrary metadata attached to the group + policy. + version: + type: string + format: uint64 + description: >- + version is used to track changes to a group's + GroupPolicyInfo structure that + + would create a different result on a running proposal. + decision_policy: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type + of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). 
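
The pagination parameters documented above (pagination.key, pagination.limit, next_key) can be chained page by page against GroupPoliciesByGroup. A rough sketch under the same hypothetical http://localhost:1317 endpoint, treating next_key as an opaque token that must be URL-escaped when passed back:

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
        "net/url"
    )

    // Subset of QueryGroupPoliciesByGroupResponse.
    type groupPoliciesResponse struct {
        GroupPolicies []struct {
            Address string `json:"address"`
            GroupID string `json:"group_id"`
            Admin   string `json:"admin"`
        } `json:"group_policies"`
        Pagination struct {
            NextKey string `json:"next_key"`
        } `json:"pagination"`
    }

    func main() {
        base := "http://localhost:1317/cosmos/group/v1/group_policies_by_group/1?pagination.limit=50"
        key := ""
        for {
            u := base
            if key != "" {
                u += "&pagination.key=" + url.QueryEscape(key)
            }
            resp, err := http.Get(u)
            if err != nil {
                panic(err)
            }
            var page groupPoliciesResponse
            err = json.NewDecoder(resp.Body).Decode(&page)
            resp.Body.Close()
            if err != nil {
                panic(err)
            }
            for _, p := range page.GroupPolicies {
                fmt.Println(p.Address, "admin:", p.Admin)
            }
            // next_key is empty once there are no more results, per the schema above.
            if page.Pagination.NextKey == "" {
                break
            }
            key = page.Pagination.NextKey
        }
    }
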
+ + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + created_at: + type: string + format: date-time + description: >- + created_at is a timestamp specifying when a group policy + was created. + description: >- + GroupPolicyInfo represents the high-level on-chain information + for a group policy. 
+ description: >- + QueryGroupPolicyInfoResponse is the Query/GroupPolicyInfo response + type. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. 
+ Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: address + description: address is the account address of the group policy. + in: path + required: true + type: string + tags: + - Query + /cosmos/group/v1/groups_by_admin/{admin}: + get: + summary: GroupsByAdmin queries groups by admin address. + operationId: GroupsByAdmin + responses: + '200': + description: A successful response. + schema: + type: object + properties: + groups: + type: array + items: + type: object + properties: + id: + type: string + format: uint64 + description: id is the unique ID of the group. + admin: + type: string + description: admin is the account address of the group's admin. + metadata: + type: string + description: >- + metadata is any arbitrary metadata to attached to the + group. + version: + type: string + format: uint64 + title: >- + version is used to track changes to a group's membership + structure that + + would break existing proposals. Whenever any members + weight is changed, + + or any member is added or removed this version is + incremented and will + + cause proposals based on older versions of this group to + fail + total_weight: + type: string + description: total_weight is the sum of the group members' weights. + created_at: + type: string + format: date-time + description: >- + created_at is a timestamp specifying when a group was + created. + description: >- + GroupInfo represents the high-level on-chain information for + a group. + description: groups are the groups info with the provided admin. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryGroupsByAdminResponse is the Query/GroupsByAdminResponse + response type. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. 
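
Because decision_policy in GroupPolicyInfo is an Any (type_url plus serialized value above, rendered by the gateway as JSON with an @type field), a client can keep it as raw JSON instead of committing to a concrete policy type. A sketch under the same hypothetical localhost:1317 assumption; the address shown is a placeholder, not a real account:

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    // Subset of QueryGroupPolicyInfoResponse.
    type groupPolicyInfoResponse struct {
        Info struct {
            Address        string          `json:"address"`
            GroupID        string          `json:"group_id"`
            Admin          string          `json:"admin"`
            Version        string          `json:"version"`
            DecisionPolicy json.RawMessage `json:"decision_policy"` // left raw: it is an Any
        } `json:"info"`
    }

    func main() {
        addr := "akash1exampleexampleexampleexampleexample" // placeholder bech32 address
        resp, err := http.Get("http://localhost:1317/cosmos/group/v1/group_policy_info/" + addr)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        var out groupPolicyInfoResponse
        if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
            panic(err)
        }
        fmt.Printf("policy %s (group %s), decision policy: %s\n",
            out.Info.Address, out.Info.GroupID, string(out.Info.DecisionPolicy))
    }
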
However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: admin + description: admin is the account address of a group's admin. + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. 
+ in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/group/v1/groups_by_member/{address}: + get: + summary: GroupsByMember queries groups by member address. + operationId: GroupsByMember + responses: + '200': + description: A successful response. + schema: + type: object + properties: + groups: + type: array + items: + type: object + properties: + id: + type: string + format: uint64 + description: id is the unique ID of the group. + admin: + type: string + description: admin is the account address of the group's admin. + metadata: + type: string + description: >- + metadata is any arbitrary metadata to attached to the + group. + version: + type: string + format: uint64 + title: >- + version is used to track changes to a group's membership + structure that + + would break existing proposals. Whenever any members + weight is changed, + + or any member is added or removed this version is + incremented and will + + cause proposals based on older versions of this group to + fail + total_weight: + type: string + description: total_weight is the sum of the group members' weights. + created_at: + type: string + format: date-time + description: >- + created_at is a timestamp specifying when a group was + created. + description: >- + GroupInfo represents the high-level on-chain information for + a group. + description: groups are the groups info with the provided group member. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryGroupsByMemberResponse is the Query/GroupsByMember response + type. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. 
The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: address + description: address is the group member address. 
+ in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/group/v1/proposal/{proposal_id}: + get: + summary: Proposal queries a proposal based on proposal id. + operationId: GroupProposal + responses: + '200': + description: A successful response. + schema: + type: object + properties: + proposal: + description: proposal is the proposal info. + type: object + properties: + id: + type: string + format: uint64 + description: id is the unique id of the proposal. + group_policy_address: + type: string + description: >- + group_policy_address is the account address of group + policy. + metadata: + type: string + description: >- + metadata is any arbitrary metadata attached to the + proposal. + proposers: + type: array + items: + type: string + description: proposers are the account addresses of the proposers. + submit_time: + type: string + format: date-time + description: >- + submit_time is a timestamp specifying when a proposal was + submitted. + group_version: + type: string + format: uint64 + description: >- + group_version tracks the version of the group at proposal + submission. + + This field is here for informational purposes only. + group_policy_version: + type: string + format: uint64 + description: >- + group_policy_version tracks the version of the group + policy at proposal submission. + + When a decision policy is changed, existing proposals from + previous policy + + versions will become invalid with the `ABORTED` status. + + This field is here for informational purposes only. + status: + description: >- + status represents the high level position in the life + cycle of the proposal. Initial value is Submitted. + type: string + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_SUBMITTED + - PROPOSAL_STATUS_ACCEPTED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_ABORTED + - PROPOSAL_STATUS_WITHDRAWN + default: PROPOSAL_STATUS_UNSPECIFIED + final_tally_result: + description: >- + final_tally_result contains the sums of all weighted votes + for this + + proposal for each vote option. It is empty at submission, + and only + + populated after tallying, at voting period end or at + proposal execution, + + whichever happens first. 
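
GroupsByMember takes the member address as a path parameter; per the pagination notes above, count_total is honored with offset-based paging, so the sketch below sets pagination.offset=0 and pagination.count_total=true. Same hypothetical localhost:1317 endpoint and placeholder address:

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
    )

    // Subset of QueryGroupsByMemberResponse.
    type groupsByMemberResponse struct {
        Groups []struct {
            ID          string `json:"id"`
            Admin       string `json:"admin"`
            TotalWeight string `json:"total_weight"`
        } `json:"groups"`
        Pagination struct {
            Total string `json:"total"`
        } `json:"pagination"`
    }

    func main() {
        addr := "akash1exampleexampleexampleexampleexample" // placeholder member address
        u := "http://localhost:1317/cosmos/group/v1/groups_by_member/" + addr +
            "?pagination.offset=0&pagination.count_total=true"
        resp, err := http.Get(u)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()

        var out groupsByMemberResponse
        if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
            panic(err)
        }
        fmt.Printf("%s total groups, %d returned\n", out.Pagination.Total, len(out.Groups))
        for _, g := range out.Groups {
            fmt.Println("group", g.ID, "admin", g.Admin, "total weight", g.TotalWeight)
        }
    }
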
+ type: object + properties: + yes_count: + type: string + description: yes_count is the weighted sum of yes votes. + abstain_count: + type: string + description: abstain_count is the weighted sum of abstainers. + no_count: + type: string + description: no_count is the weighted sum of no votes. + no_with_veto_count: + type: string + description: no_with_veto_count is the weighted sum of veto. + voting_period_end: + type: string + format: date-time + description: >- + voting_period_end is the timestamp before which voting + must be done. + + Unless a successful MsgExec is called before (to execute a + proposal whose + + tally is successful before the voting period ends), + tallying will be done + + at this point, and the `final_tally_result`and `status` + fields will be + + accordingly updated. + executor_result: + description: >- + executor_result is the final result of the proposal + execution. Initial value is NotRun. + type: string + enum: + - PROPOSAL_EXECUTOR_RESULT_UNSPECIFIED + - PROPOSAL_EXECUTOR_RESULT_NOT_RUN + - PROPOSAL_EXECUTOR_RESULT_SUCCESS + - PROPOSAL_EXECUTOR_RESULT_FAILURE + default: PROPOSAL_EXECUTOR_RESULT_UNSPECIFIED + messages: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's + path must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be + in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the + binary all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available + in the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods of + the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... 
+ if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and + the unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the + regular + + representation of the deserialized, embedded message, + with an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message + [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + messages is a list of `sdk.Msg`s that will be executed if + the proposal passes. + title: + type: string + description: 'Since: cosmos-sdk 0.47' + title: title is the title of the proposal + summary: + type: string + description: 'Since: cosmos-sdk 0.47' + title: summary is a short summary of the proposal + description: QueryProposalResponse is the Query/Proposal response type. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. 
+ value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: proposal_id + description: proposal_id is the unique ID of a proposal. + in: path + required: true + type: string + format: uint64 + tags: + - Query + /cosmos/group/v1/proposals/{proposal_id}/tally: + get: + summary: >- + TallyResult returns the tally result of a proposal. If the proposal is + + still in voting period, then this query computes the current tally + state, + + which might not be final. On the other hand, if the proposal is final, + + then it simply returns the `final_tally_result` state stored in the + + proposal itself. + operationId: GroupTallyResult + responses: + '200': + description: A successful response. + schema: + type: object + properties: + tally: + description: tally defines the requested tally. + type: object + properties: + yes_count: + type: string + description: yes_count is the weighted sum of yes votes. + abstain_count: + type: string + description: abstain_count is the weighted sum of abstainers. + no_count: + type: string + description: no_count is the weighted sum of no votes. + no_with_veto_count: + type: string + description: no_with_veto_count is the weighted sum of veto. + description: QueryTallyResultResponse is the Query/TallyResult response type. + default: + description: An unexpected error response. 
+ schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. 
+ Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: proposal_id + description: proposal_id is the unique id of a proposal. + in: path + required: true + type: string + format: uint64 + tags: + - Query + /cosmos/group/v1/proposals_by_group_policy/{address}: + get: + summary: >- + ProposalsByGroupPolicy queries proposals based on account address of + group policy. + operationId: ProposalsByGroupPolicy + responses: + '200': + description: A successful response. + schema: + type: object + properties: + proposals: + type: array + items: + type: object + properties: + id: + type: string + format: uint64 + description: id is the unique id of the proposal. + group_policy_address: + type: string + description: >- + group_policy_address is the account address of group + policy. + metadata: + type: string + description: >- + metadata is any arbitrary metadata attached to the + proposal. + proposers: + type: array + items: + type: string + description: proposers are the account addresses of the proposers. + submit_time: + type: string + format: date-time + description: >- + submit_time is a timestamp specifying when a proposal + was submitted. + group_version: + type: string + format: uint64 + description: >- + group_version tracks the version of the group at + proposal submission. + + This field is here for informational purposes only. + group_policy_version: + type: string + format: uint64 + description: >- + group_policy_version tracks the version of the group + policy at proposal submission. + + When a decision policy is changed, existing proposals + from previous policy + + versions will become invalid with the `ABORTED` status. + + This field is here for informational purposes only. + status: + description: >- + status represents the high level position in the life + cycle of the proposal. Initial value is Submitted. + type: string + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_SUBMITTED + - PROPOSAL_STATUS_ACCEPTED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_ABORTED + - PROPOSAL_STATUS_WITHDRAWN + default: PROPOSAL_STATUS_UNSPECIFIED + final_tally_result: + description: >- + final_tally_result contains the sums of all weighted + votes for this + + proposal for each vote option. It is empty at + submission, and only + + populated after tallying, at voting period end or at + proposal execution, + + whichever happens first. + type: object + properties: + yes_count: + type: string + description: yes_count is the weighted sum of yes votes. + abstain_count: + type: string + description: abstain_count is the weighted sum of abstainers. + no_count: + type: string + description: no_count is the weighted sum of no votes. + no_with_veto_count: + type: string + description: no_with_veto_count is the weighted sum of veto. + voting_period_end: + type: string + format: date-time + description: >- + voting_period_end is the timestamp before which voting + must be done. 
+ + Unless a successful MsgExec is called before (to execute + a proposal whose + + tally is successful before the voting period ends), + tallying will be done + + at this point, and the `final_tally_result`and `status` + fields will be + + accordingly updated. + executor_result: + description: >- + executor_result is the final result of the proposal + execution. Initial value is NotRun. + type: string + enum: + - PROPOSAL_EXECUTOR_RESULT_UNSPECIFIED + - PROPOSAL_EXECUTOR_RESULT_NOT_RUN + - PROPOSAL_EXECUTOR_RESULT_SUCCESS + - PROPOSAL_EXECUTOR_RESULT_FAILURE + default: PROPOSAL_EXECUTOR_RESULT_UNSPECIFIED + messages: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the + type of the serialized + + protocol buffer message. This string must contain + at least + + one "/" character. The last segment of the URL's + path must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should + be in a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the + binary all types that they + + expect it to use in the context of Any. However, + for URLs which use the + + scheme `http`, `https`, or no scheme, one can + optionally set up a type + + server that maps type URLs to message definitions + as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results + based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently + available in the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty + scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the + above specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any + values in the form + + of utility functions or additional generated methods + of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL + and the unpack + + methods only use the fully qualified type name after + the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will + yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the + regular + + representation of the deserialized, embedded message, + with an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a + custom JSON + + representation, that representation will be embedded + adding a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message + [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + messages is a list of `sdk.Msg`s that will be executed + if the proposal passes. + title: + type: string + description: 'Since: cosmos-sdk 0.47' + title: title is the title of the proposal + summary: + type: string + description: 'Since: cosmos-sdk 0.47' + title: summary is a short summary of the proposal + description: >- + Proposal defines a group proposal. Any member of a group can + submit a proposal + + for a group policy to decide upon. + + A proposal consists of a set of `sdk.Msg`s that will be + executed if the proposal + + passes as well as some optional metadata associated with the + proposal. + description: proposals are the proposals with given group policy. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryProposalsByGroupPolicyResponse is the + Query/ProposalByGroupPolicy response type. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. 
+ * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: address + description: >- + address is the account address of the group policy related to + proposals. + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. 
+ in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/group/v1/vote_by_proposal_voter/{proposal_id}/{voter}: + get: + summary: VoteByProposalVoter queries a vote by proposal id and voter. + operationId: VoteByProposalVoter + responses: + '200': + description: A successful response. + schema: + type: object + properties: + vote: + description: vote is the vote with given proposal_id and voter. + type: object + properties: + proposal_id: + type: string + format: uint64 + description: proposal is the unique ID of the proposal. + voter: + type: string + description: voter is the account address of the voter. + option: + description: option is the voter's choice on the proposal. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + metadata: + type: string + description: metadata is any arbitrary metadata attached to the vote. + submit_time: + type: string + format: date-time + description: submit_time is the timestamp when the vote was submitted. + description: >- + QueryVoteByProposalVoterResponse is the Query/VoteByProposalVoter + response type. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) 
+ + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: proposal_id + description: proposal_id is the unique ID of a proposal. + in: path + required: true + type: string + format: uint64 + - name: voter + description: voter is a proposal voter account address. + in: path + required: true + type: string + tags: + - Query + /cosmos/group/v1/votes_by_proposal/{proposal_id}: + get: + summary: VotesByProposal queries a vote by proposal id. + operationId: VotesByProposal + responses: + '200': + description: A successful response. + schema: + type: object + properties: + votes: + type: array + items: + type: object + properties: + proposal_id: + type: string + format: uint64 + description: proposal is the unique ID of the proposal. + voter: + type: string + description: voter is the account address of the voter. + option: + description: option is the voter's choice on the proposal. 
+ type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + metadata: + type: string + description: metadata is any arbitrary metadata attached to the vote. + submit_time: + type: string + format: date-time + description: >- + submit_time is the timestamp when the vote was + submitted. + description: Vote represents a vote for a proposal. + description: votes are the list of votes for given proposal_id. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryVotesByProposalResponse is the Query/VotesByProposal response + type. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... 
+ if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: proposal_id + description: proposal_id is the unique ID of a proposal. + in: path + required: true + type: string + format: uint64 + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. + + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query + /cosmos/group/v1/votes_by_voter/{voter}: + get: + summary: VotesByVoter queries a vote by voter. + operationId: VotesByVoter + responses: + '200': + description: A successful response. + schema: + type: object + properties: + votes: + type: array + items: + type: object + properties: + proposal_id: + type: string + format: uint64 + description: proposal is the unique ID of the proposal. + voter: + type: string + description: voter is the account address of the voter. + option: + description: option is the voter's choice on the proposal. 
+ type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + metadata: + type: string + description: metadata is any arbitrary metadata attached to the vote. + submit_time: + type: string + format: date-time + description: >- + submit_time is the timestamp when the vote was + submitted. + description: Vote represents a vote for a proposal. + description: votes are the list of votes by given voter. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: QueryVotesByVoterResponse is the Query/VotesByVoter response type. + default: + description: An unexpected error response. + schema: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... 
+ if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + parameters: + - name: voter + description: voter is a proposal voter account address. + in: path + required: true + type: string + - name: pagination.key + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + in: query + required: false + type: string + format: byte + - name: pagination.offset + description: >- + offset is a numeric offset that can be used when key is unavailable. + + It is less efficient than using key. Only one of offset or key + should + + be set. + in: query + required: false + type: string + format: uint64 + - name: pagination.limit + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + in: query + required: false + type: string + format: uint64 + - name: pagination.count_total + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in + UIs. + + count_total is only respected when offset is used. It is ignored + when key + + is set. + in: query + required: false + type: boolean + - name: pagination.reverse + description: >- + reverse is set to true if results are to be returned in the + descending order. 
+ + + Since: cosmos-sdk 0.43 + in: query + required: false + type: boolean + tags: + - Query +definitions: + akash.audit.v1.AuditedProvider: + type: object + properties: + owner: + type: string + auditor: + type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Provider stores owner auditor and attributes details + akash.audit.v1.QueryProvidersResponse: + type: object + properties: + providers: + type: array + items: + type: object + properties: + owner: + type: string + auditor: + type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Provider stores owner auditor and attributes details + pagination: + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: QueryProvidersResponse is response type for the Query/Providers RPC method + akash.base.attributes.v1.Attribute: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + cosmos.base.query.v1beta1.PageRequest: + type: object + properties: + key: + type: string + format: byte + description: |- + key is a value returned in PageResponse.next_key to begin + querying the next page most efficiently. Only one of offset or key + should be set. + offset: + type: string + format: uint64 + description: |- + offset is a numeric offset that can be used when key is unavailable. + It is less efficient than using key. Only one of offset or key should + be set. + limit: + type: string + format: uint64 + description: >- + limit is the total number of results to be returned in the result + page. + + If left empty it will default to a value to be set by each app. + count_total: + type: boolean + description: >- + count_total is set to true to indicate that the result set should + include + + a count of the total number of items available for pagination in UIs. + + count_total is only respected when offset is used. It is ignored when + key + + is set. + reverse: + type: boolean + description: >- + reverse is set to true if results are to be returned in the descending + order. + + + Since: cosmos-sdk 0.43 + description: |- + message SomeRequest { + Foo some_parameter = 1; + PageRequest pagination = 2; + } + title: |- + PageRequest is to be embedded in gRPC request messages for efficient + pagination. Ex: + cosmos.base.query.v1beta1.PageResponse: + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. 
+ total: + type: string + format: uint64 + title: |- + total is total number of results available if PageRequest.count_total + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + google.protobuf.Any: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a canonical + form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types that + they + + expect it to use in the context of Any. However, for URLs which use + the + + scheme `http`, `https`, or no scheme, one can optionally set up a type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the official + + protobuf release, and it is not used for type URLs beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above specified + type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along with + a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. 
Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + grpc.gateway.runtime.Error: + type: object + properties: + error: + type: string + code: + type: integer + format: int32 + message: + type: string + details: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up + a type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might + be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + akash.cert.v1.Certificate: + type: object + properties: + state: + type: string + enum: + - invalid + - valid + - revoked + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. So declaring dummy + state + - valid: CertificateValid denotes state for deployment active + - revoked: CertificateRevoked denotes state for deployment closed + title: State is an enum which refers to state of deployment + cert: + type: string + format: byte + pubkey: + type: string + format: byte + title: Certificate stores state, certificate and it's public key + akash.cert.v1.CertificateFilter: + type: object + properties: + owner: + type: string + serial: + type: string + state: + type: string + title: CertificateFilter defines filters used to filter certificates + akash.cert.v1.CertificateResponse: + type: object + properties: + certificate: + type: object + properties: + state: + type: string + enum: + - invalid + - valid + - revoked + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. So declaring dummy + state + - valid: CertificateValid denotes state for deployment active + - revoked: CertificateRevoked denotes state for deployment closed + title: State is an enum which refers to state of deployment + cert: + type: string + format: byte + pubkey: + type: string + format: byte + title: Certificate stores state, certificate and it's public key + serial: + type: string + title: >- + CertificateResponse contains a single X509 certificate and its serial + number + akash.cert.v1.QueryCertificatesResponse: + type: object + properties: + certificates: + type: array + items: + type: object + properties: + certificate: + type: object + properties: + state: + type: string + enum: + - invalid + - valid + - revoked + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. 
So declaring + dummy state + - valid: CertificateValid denotes state for deployment active + - revoked: CertificateRevoked denotes state for deployment closed + title: State is an enum which refers to state of deployment + cert: + type: string + format: byte + pubkey: + type: string + format: byte + title: Certificate stores state, certificate and it's public key + serial: + type: string + title: >- + CertificateResponse contains a single X509 certificate and its + serial number + pagination: + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: >- + QueryCertificatesResponse is response type for the Query/Certificates RPC + method + akash.cert.v1.State: + type: string + enum: + - invalid + - valid + - revoked + default: invalid + description: |- + - invalid: Prefix should start with 0 in enum. So declaring dummy state + - valid: CertificateValid denotes state for deployment active + - revoked: CertificateRevoked denotes state for deployment closed + title: State is an enum which refers to state of deployment + akash.base.attributes.v1.PlacementRequirements: + type: object + properties: + signed_by: + title: SignedBy list of keys that tenants expect to have signatures from + type: object + properties: + all_of: + type: array + items: + type: string + title: all_of all keys in this list must have signed attributes + any_of: + type: array + items: + type: string + title: >- + any_of at least of of the keys from the list must have signed + attributes + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Attribute list of attributes tenant expects from the provider + title: PlacementRequirements + akash.base.attributes.v1.SignedBy: + type: object + properties: + all_of: + type: array + items: + type: string + title: all_of all keys in this list must have signed attributes + any_of: + type: array + items: + type: string + title: >- + any_of at least of of the keys from the list must have signed + attributes + title: >- + SignedBy represents validation accounts that tenant expects signatures for + provider attributes + + AllOf has precedence i.e. 
if there is at least one entry AnyOf is ignored + regardless to how many + + entries there + + this behaviour to be discussed + akash.base.resources.v1beta4.CPU: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: CPU stores resource units and cpu config attributes + akash.base.resources.v1beta4.Endpoint: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: |- + - SHARED_HTTP: Describes an endpoint that becomes a Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is implemented when the lease is + deployed + sequence_number: + type: integer + format: int64 + title: Endpoint describes a publicly accessible IP service + akash.base.resources.v1beta4.Endpoint.Kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: |- + - SHARED_HTTP: Describes an endpoint that becomes a Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: This describes how the endpoint is implemented when the lease is deployed + akash.base.resources.v1beta4.GPU: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: GPU stores resource units and cpu config attributes + akash.base.resources.v1beta4.Memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Memory stores resource quantity and memory attributes + akash.base.resources.v1beta4.ResourceValue: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + akash.base.resources.v1beta4.Resources: + type: object + properties: + id: + type: integer + format: int64 + cpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: CPU stores resource units and cpu config attributes + memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Memory stores resource quantity and memory attributes + storage: + type: array + items: + type: object + properties: + name: + type: string + 
quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Storage stores resource quantity and storage attributes + gpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: GPU stores resource units and cpu config attributes + endpoints: + type: array + items: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: >- + - SHARED_HTTP: Describes an endpoint that becomes a Kubernetes + Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is implemented when the lease is + deployed + sequence_number: + type: integer + format: int64 + title: Endpoint describes a publicly accessible IP service + title: |- + Resources describes all available resources types for deployment/node etc + if field is nil resource is not present in the given data-structure + akash.base.resources.v1beta4.Storage: + type: object + properties: + name: + type: string + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Storage stores resource quantity and storage attributes + akash.deployment.v1.Deployment: + type: object + properties: + id: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + title: DeploymentID stores owner and sequence number + state: + type: string + enum: + - invalid + - active + - closed + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. So declaring dummy + state + - active: DeploymentActive denotes state for deployment active + - closed: DeploymentClosed denotes state for deployment closed + title: State is an enum which refers to state of deployment + hash: + type: string + format: byte + created_at: + type: string + format: int64 + title: Deployment stores deploymentID, state and checksum details + akash.deployment.v1.Deployment.State: + type: string + enum: + - invalid + - active + - closed + default: invalid + description: |- + - invalid: Prefix should start with 0 in enum. 
So declaring dummy state + - active: DeploymentActive denotes state for deployment active + - closed: DeploymentClosed denotes state for deployment closed + title: State is an enum which refers to state of deployment + akash.deployment.v1.DeploymentID: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + title: DeploymentID stores owner and sequence number + akash.deployment.v1.GroupID: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + title: GroupID stores owner, deployment sequence number and group sequence number + akash.deployment.v1beta4.DeploymentFilters: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + state: + type: string + title: DeploymentFilters defines filters used to filter deployments + akash.deployment.v1beta4.Group: + type: object + properties: + id: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + title: >- + GroupID stores owner, deployment sequence number and group sequence + number + state: + type: string + enum: + - invalid + - open + - paused + - insufficient_funds + - closed + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. So declaring dummy + state + - open: GroupOpen denotes state for group open + - paused: GroupOrdered denotes state for group ordered + - insufficient_funds: GroupInsufficientFunds denotes state for group insufficient_funds + - closed: GroupClosed denotes state for group closed + title: State is an enum which refers to state of group + group_spec: + type: object + properties: + name: + type: string + requirements: + type: object + properties: + signed_by: + title: >- + SignedBy list of keys that tenants expect to have signatures + from + type: object + properties: + all_of: + type: array + items: + type: string + title: all_of all keys in this list must have signed attributes + any_of: + type: array + items: + type: string + title: >- + any_of at least of of the keys from the list must have + signed attributes + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Attribute list of attributes tenant expects from the provider + title: PlacementRequirements + resources: + type: array + items: + type: object + properties: + resource: + type: object + properties: + id: + type: integer + format: int64 + cpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: CPU stores resource units and cpu config attributes + memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Memory stores resource quantity and memory attributes + storage: + type: array + items: + type: object + properties: + name: + type: string + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory 
and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Storage stores resource quantity and storage + attributes + gpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: GPU stores resource units and cpu config attributes + endpoints: + type: array + items: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: >- + - SHARED_HTTP: Describes an endpoint that becomes + a Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is implemented + when the lease is deployed + sequence_number: + type: integer + format: int64 + title: Endpoint describes a publicly accessible IP service + title: >- + Resources describes all available resources types for + deployment/node etc + + if field is nil resource is not present in the given + data-structure + count: + type: integer + format: int64 + price: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + title: >- + ResourceUnit extends Resources and adds Count along with the + Price + title: Spec stores group specifications + created_at: + type: string + format: int64 + title: Group stores group id, state and specifications of group + akash.deployment.v1beta4.Group.State: + type: string + enum: + - invalid + - open + - paused + - insufficient_funds + - closed + default: invalid + description: |- + - invalid: Prefix should start with 0 in enum. 
So declaring dummy state + - open: GroupOpen denotes state for group open + - paused: GroupOrdered denotes state for group ordered + - insufficient_funds: GroupInsufficientFunds denotes state for group insufficient_funds + - closed: GroupClosed denotes state for group closed + title: State is an enum which refers to state of group + akash.deployment.v1beta4.GroupSpec: + type: object + properties: + name: + type: string + requirements: + type: object + properties: + signed_by: + title: SignedBy list of keys that tenants expect to have signatures from + type: object + properties: + all_of: + type: array + items: + type: string + title: all_of all keys in this list must have signed attributes + any_of: + type: array + items: + type: string + title: >- + any_of at least of of the keys from the list must have signed + attributes + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Attribute list of attributes tenant expects from the provider + title: PlacementRequirements + resources: + type: array + items: + type: object + properties: + resource: + type: object + properties: + id: + type: integer + format: int64 + cpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: CPU stores resource units and cpu config attributes + memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Memory stores resource quantity and memory attributes + storage: + type: array + items: + type: object + properties: + name: + type: string + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Storage stores resource quantity and storage attributes + gpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: GPU stores resource units and cpu config attributes + endpoints: + type: array + items: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: >- + - SHARED_HTTP: Describes an endpoint that becomes a + Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is implemented when + the lease is deployed + sequence_number: + type: integer + format: int64 + title: Endpoint describes a publicly accessible IP service + title: >- + Resources describes all available resources types for + deployment/node etc + + if field is nil resource is not 
present in the given + data-structure + count: + type: integer + format: int64 + price: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + title: ResourceUnit extends Resources and adds Count along with the Price + title: Spec stores group specifications + akash.deployment.v1beta4.Params: + type: object + properties: + min_deposits: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + title: Params defines the parameters for the x/deployment module + akash.deployment.v1beta4.QueryDeploymentResponse: + type: object + properties: + deployment: + type: object + properties: + id: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + title: DeploymentID stores owner and sequence number + state: + type: string + enum: + - invalid + - active + - closed + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. So declaring dummy + state + - active: DeploymentActive denotes state for deployment active + - closed: DeploymentClosed denotes state for deployment closed + title: State is an enum which refers to state of deployment + hash: + type: string + format: byte + created_at: + type: string + format: int64 + title: Deployment stores deploymentID, state and checksum details + groups: + type: array + items: + type: object + properties: + id: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + title: >- + GroupID stores owner, deployment sequence number and group + sequence number + state: + type: string + enum: + - invalid + - open + - paused + - insufficient_funds + - closed + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. 
So declaring + dummy state + - open: GroupOpen denotes state for group open + - paused: GroupOrdered denotes state for group ordered + - insufficient_funds: GroupInsufficientFunds denotes state for group insufficient_funds + - closed: GroupClosed denotes state for group closed + title: State is an enum which refers to state of group + group_spec: + type: object + properties: + name: + type: string + requirements: + type: object + properties: + signed_by: + title: >- + SignedBy list of keys that tenants expect to have + signatures from + type: object + properties: + all_of: + type: array + items: + type: string + title: >- + all_of all keys in this list must have signed + attributes + any_of: + type: array + items: + type: string + title: >- + any_of at least of of the keys from the list must + have signed attributes + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Attribute list of attributes tenant expects from the + provider + title: PlacementRequirements + resources: + type: array + items: + type: object + properties: + resource: + type: object + properties: + id: + type: integer + format: int64 + cpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + CPU stores resource units and cpu config + attributes + memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Memory stores resource quantity and memory + attributes + storage: + type: array + items: + type: object + properties: + name: + type: string + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Storage stores resource quantity and storage + attributes + gpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + GPU stores resource units and cpu config + attributes + endpoints: + type: array + items: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: >- + - SHARED_HTTP: Describes an endpoint that + becomes a Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is + implemented when the lease is deployed + sequence_number: + type: integer + format: int64 + title: >- + Endpoint describes a publicly accessible IP + service + title: >- + Resources describes all available resources types for + deployment/node etc + 
+ if field is nil resource is not present in the given + data-structure + count: + type: integer + format: int64 + price: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a + decimal amount. + + + NOTE: The amount field is an Dec which implements the + custom method + + signatures required by gogoproto. + title: >- + ResourceUnit extends Resources and adds Count along with + the Price + title: Spec stores group specifications + created_at: + type: string + format: int64 + title: Group stores group id, state and specifications of group + escrow_account: + type: object + properties: + id: + title: unique identifier for this escrow account + type: object + properties: + scope: + type: string + xid: + type: string + owner: + type: string + title: bech32 encoded account address of the owner of this escrow account + state: + title: current state of this escrow account + type: string + enum: + - invalid + - open + - closed + - overdrawn + default: invalid + description: |- + - invalid: AccountStateInvalid is an invalid state + - open: AccountOpen is the state when an account is open + - closed: AccountClosed is the state when an account is closed + - overdrawn: AccountOverdrawn is the state when an account is overdrawn + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + title: unspent coins received from the owner's wallet + transferred: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + title: total coins spent by this account + settled_at: + type: string + format: int64 + title: block height at which this account was last settled + depositor: + type: string + description: >- + bech32 encoded account address of the depositor. + + If depositor is same as the owner, then any incoming coins are + added to the Balance. + + If depositor isn't same as the owner, then any incoming coins are + added to the Funds. + funds: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + title: Account stores state for an escrow account + title: >- + QueryDeploymentResponse is response type for the Query/Deployment RPC + method + akash.deployment.v1beta4.QueryDeploymentsResponse: + type: object + properties: + deployments: + type: array + items: + type: object + properties: + deployment: + type: object + properties: + id: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + title: DeploymentID stores owner and sequence number + state: + type: string + enum: + - invalid + - active + - closed + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. 
So declaring + dummy state + - active: DeploymentActive denotes state for deployment active + - closed: DeploymentClosed denotes state for deployment closed + title: State is an enum which refers to state of deployment + hash: + type: string + format: byte + created_at: + type: string + format: int64 + title: Deployment stores deploymentID, state and checksum details + groups: + type: array + items: + type: object + properties: + id: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + title: >- + GroupID stores owner, deployment sequence number and group + sequence number + state: + type: string + enum: + - invalid + - open + - paused + - insufficient_funds + - closed + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. So + declaring dummy state + - open: GroupOpen denotes state for group open + - paused: GroupOrdered denotes state for group ordered + - insufficient_funds: GroupInsufficientFunds denotes state for group insufficient_funds + - closed: GroupClosed denotes state for group closed + title: State is an enum which refers to state of group + group_spec: + type: object + properties: + name: + type: string + requirements: + type: object + properties: + signed_by: + title: >- + SignedBy list of keys that tenants expect to have + signatures from + type: object + properties: + all_of: + type: array + items: + type: string + title: >- + all_of all keys in this list must have signed + attributes + any_of: + type: array + items: + type: string + title: >- + any_of at least of of the keys from the list + must have signed attributes + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Attribute list of attributes tenant expects from + the provider + title: PlacementRequirements + resources: + type: array + items: + type: object + properties: + resource: + type: object + properties: + id: + type: integer + format: int64 + cpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: >- + Unit stores cpu, memory and storage + metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + CPU stores resource units and cpu config + attributes + memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: >- + Unit stores cpu, memory and storage + metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Memory stores resource quantity and memory + attributes + storage: + type: array + items: + type: object + properties: + name: + type: string + quantity: + type: object + properties: + val: + type: string + format: byte + title: >- + Unit stores cpu, memory and storage + metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Storage stores resource quantity and + storage attributes + gpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: >- + Unit stores cpu, memory and storage + metrics + attributes: + type: array + items: + type: 
object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + GPU stores resource units and cpu config + attributes + endpoints: + type: array + items: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: >- + - SHARED_HTTP: Describes an endpoint + that becomes a Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is + implemented when the lease is deployed + sequence_number: + type: integer + format: int64 + title: >- + Endpoint describes a publicly accessible + IP service + title: >- + Resources describes all available resources + types for deployment/node etc + + if field is nil resource is not present in the + given data-structure + count: + type: integer + format: int64 + price: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and + a decimal amount. + + + NOTE: The amount field is an Dec which + implements the custom method + + signatures required by gogoproto. + title: >- + ResourceUnit extends Resources and adds Count along + with the Price + title: Spec stores group specifications + created_at: + type: string + format: int64 + title: Group stores group id, state and specifications of group + escrow_account: + type: object + properties: + id: + title: unique identifier for this escrow account + type: object + properties: + scope: + type: string + xid: + type: string + owner: + type: string + title: >- + bech32 encoded account address of the owner of this escrow + account + state: + title: current state of this escrow account + type: string + enum: + - invalid + - open + - closed + - overdrawn + default: invalid + description: |- + - invalid: AccountStateInvalid is an invalid state + - open: AccountOpen is the state when an account is open + - closed: AccountClosed is the state when an account is closed + - overdrawn: AccountOverdrawn is the state when an account is overdrawn + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + title: unspent coins received from the owner's wallet + transferred: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + title: total coins spent by this account + settled_at: + type: string + format: int64 + title: block height at which this account was last settled + depositor: + type: string + description: >- + bech32 encoded account address of the depositor. + + If depositor is same as the owner, then any incoming coins + are added to the Balance. + + If depositor isn't same as the owner, then any incoming + coins are added to the Funds. + funds: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. 
+ + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + title: Account stores state for an escrow account + title: >- + QueryDeploymentResponse is response type for the Query/Deployment + RPC method + pagination: + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: >- + QueryDeploymentsResponse is response type for the Query/Deployments RPC + method + akash.deployment.v1beta4.QueryGroupResponse: + type: object + properties: + group: + type: object + properties: + id: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + title: >- + GroupID stores owner, deployment sequence number and group + sequence number + state: + type: string + enum: + - invalid + - open + - paused + - insufficient_funds + - closed + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. So declaring dummy + state + - open: GroupOpen denotes state for group open + - paused: GroupOrdered denotes state for group ordered + - insufficient_funds: GroupInsufficientFunds denotes state for group insufficient_funds + - closed: GroupClosed denotes state for group closed + title: State is an enum which refers to state of group + group_spec: + type: object + properties: + name: + type: string + requirements: + type: object + properties: + signed_by: + title: >- + SignedBy list of keys that tenants expect to have + signatures from + type: object + properties: + all_of: + type: array + items: + type: string + title: >- + all_of all keys in this list must have signed + attributes + any_of: + type: array + items: + type: string + title: >- + any_of at least of of the keys from the list must have + signed attributes + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Attribute list of attributes tenant expects from the + provider + title: PlacementRequirements + resources: + type: array + items: + type: object + properties: + resource: + type: object + properties: + id: + type: integer + format: int64 + cpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: CPU stores resource units and cpu config attributes + memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Memory stores resource quantity and memory + attributes + 
storage: + type: array + items: + type: object + properties: + name: + type: string + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Storage stores resource quantity and storage + attributes + gpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: GPU stores resource units and cpu config attributes + endpoints: + type: array + items: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: >- + - SHARED_HTTP: Describes an endpoint that + becomes a Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is implemented + when the lease is deployed + sequence_number: + type: integer + format: int64 + title: >- + Endpoint describes a publicly accessible IP + service + title: >- + Resources describes all available resources types for + deployment/node etc + + if field is nil resource is not present in the given + data-structure + count: + type: integer + format: int64 + price: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a + decimal amount. + + + NOTE: The amount field is an Dec which implements the + custom method + + signatures required by gogoproto. + title: >- + ResourceUnit extends Resources and adds Count along with the + Price + title: Spec stores group specifications + created_at: + type: string + format: int64 + title: Group stores group id, state and specifications of group + title: QueryGroupResponse is response type for the Query/Group RPC method + akash.deployment.v1beta4.QueryParamsResponse: + type: object + properties: + params: + description: params defines the parameters of the module. + type: object + properties: + min_deposits: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + title: Params defines the parameters for the x/deployment module + description: QueryParamsResponse is the response type for the Query/Params RPC method. 
+ akash.deployment.v1beta4.ResourceUnit: + type: object + properties: + resource: + type: object + properties: + id: + type: integer + format: int64 + cpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: CPU stores resource units and cpu config attributes + memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Memory stores resource quantity and memory attributes + storage: + type: array + items: + type: object + properties: + name: + type: string + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Storage stores resource quantity and storage attributes + gpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: GPU stores resource units and cpu config attributes + endpoints: + type: array + items: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: >- + - SHARED_HTTP: Describes an endpoint that becomes a + Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is implemented when the + lease is deployed + sequence_number: + type: integer + format: int64 + title: Endpoint describes a publicly accessible IP service + title: >- + Resources describes all available resources types for deployment/node + etc + + if field is nil resource is not present in the given data-structure + count: + type: integer + format: int64 + price: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + DecCoin defines a token with a denomination and a decimal amount. + + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. 
+ title: ResourceUnit extends Resources and adds Count along with the Price + akash.escrow.v1.Account: + type: object + properties: + id: + title: unique identifier for this escrow account + type: object + properties: + scope: + type: string + xid: + type: string + owner: + type: string + title: bech32 encoded account address of the owner of this escrow account + state: + title: current state of this escrow account + type: string + enum: + - invalid + - open + - closed + - overdrawn + default: invalid + description: |- + - invalid: AccountStateInvalid is an invalid state + - open: AccountOpen is the state when an account is open + - closed: AccountClosed is the state when an account is closed + - overdrawn: AccountOverdrawn is the state when an account is overdrawn + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + DecCoin defines a token with a denomination and a decimal amount. + + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. + title: unspent coins received from the owner's wallet + transferred: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + DecCoin defines a token with a denomination and a decimal amount. + + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. + title: total coins spent by this account + settled_at: + type: string + format: int64 + title: block height at which this account was last settled + depositor: + type: string + description: >- + bech32 encoded account address of the depositor. + + If depositor is same as the owner, then any incoming coins are added + to the Balance. + + If depositor isn't same as the owner, then any incoming coins are + added to the Funds. + funds: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + DecCoin defines a token with a denomination and a decimal amount. + + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. + title: Account stores state for an escrow account + akash.escrow.v1.Account.State: + type: string + enum: + - invalid + - open + - closed + - overdrawn + default: invalid + description: |- + - invalid: AccountStateInvalid is an invalid state + - open: AccountOpen is the state when an account is open + - closed: AccountClosed is the state when an account is closed + - overdrawn: AccountOverdrawn is the state when an account is overdrawn + title: State stores state for an escrow account + akash.escrow.v1.AccountID: + type: object + properties: + scope: + type: string + xid: + type: string + title: AccountID is the account identifier + cosmos.base.v1beta1.Coin: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + cosmos.base.v1beta1.DecCoin: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + DecCoin defines a token with a denomination and a decimal amount. + + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. + akash.deployment.v1.MsgDepositDeploymentResponse: + type: object + description: >- + MsgCreateDeploymentResponse defines the Msg/CreateDeployment response + type. 
+ akash.deployment.v1beta4.MsgCloseDeploymentResponse: + type: object + description: MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. + akash.deployment.v1beta4.MsgCloseGroupResponse: + type: object + description: MsgCloseGroupResponse defines the Msg/CloseGroup response type. + akash.deployment.v1beta4.MsgCreateDeploymentResponse: + type: object + description: >- + MsgCreateDeploymentResponse defines the Msg/CreateDeployment response + type. + akash.deployment.v1beta4.MsgPauseGroupResponse: + type: object + description: MsgPauseGroupResponse defines the Msg/PauseGroup response type. + akash.deployment.v1beta4.MsgStartGroupResponse: + type: object + description: MsgStartGroupResponse defines the Msg/StartGroup response type. + akash.deployment.v1beta4.MsgUpdateDeploymentResponse: + type: object + description: >- + MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response + type. + akash.deployment.v1beta4.MsgUpdateParamsResponse: + type: object + description: |- + MsgUpdateParamsResponse defines the response structure for executing a + MsgUpdateParams message. + + Since: akash v1.0.0 + akash.escrow.v1.FractionalPayment: + type: object + properties: + account_id: + type: object + properties: + scope: + type: string + xid: + type: string + title: AccountID is the account identifier + payment_id: + type: string + owner: + type: string + state: + type: string + enum: + - invalid + - open + - closed + - overdrawn + default: invalid + description: >- + - invalid: PaymentStateInvalid is the state when the payment is + invalid + - open: PaymentStateOpen is the state when the payment is open + - closed: PaymentStateClosed is the state when the payment is closed + - overdrawn: PaymentStateOverdrawn is the state when the payment is overdrawn + title: State defines payment state + rate: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + DecCoin defines a token with a denomination and a decimal amount. + + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + DecCoin defines a token with a denomination and a decimal amount. + + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. + withdrawn: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + title: Payment stores state for a payment + akash.escrow.v1.FractionalPayment.State: + type: string + enum: + - invalid + - open + - closed + - overdrawn + default: invalid + description: |- + - invalid: PaymentStateInvalid is the state when the payment is invalid + - open: PaymentStateOpen is the state when the payment is open + - closed: PaymentStateClosed is the state when the payment is closed + - overdrawn: PaymentStateOverdrawn is the state when the payment is overdrawn + title: State defines payment state + akash.market.v1.BidID: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + oseq: + type: integer + format: int64 + provider: + type: string + description: |- + BidID stores owner and all other seq numbers + A successful bid becomes a Lease(ID). 
+ akash.market.v1.Lease: + type: object + properties: + id: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + oseq: + type: integer + format: int64 + provider: + type: string + title: LeaseID stores bid details of lease + state: + type: string + enum: + - invalid + - active + - insufficient_funds + - closed + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. So declaring dummy + state + - active: LeaseActive denotes state for lease active + - insufficient_funds: LeaseInsufficientFunds denotes state for lease insufficient_funds + - closed: LeaseClosed denotes state for lease closed + title: State is an enum which refers to state of lease + price: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + DecCoin defines a token with a denomination and a decimal amount. + + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. + created_at: + type: string + format: int64 + closed_on: + type: string + format: int64 + title: Lease stores LeaseID, state of lease and price + akash.market.v1.Lease.State: + type: string + enum: + - invalid + - active + - insufficient_funds + - closed + default: invalid + description: |- + - invalid: Prefix should start with 0 in enum. So declaring dummy state + - active: LeaseActive denotes state for lease active + - insufficient_funds: LeaseInsufficientFunds denotes state for lease insufficient_funds + - closed: LeaseClosed denotes state for lease closed + title: State is an enum which refers to state of lease + akash.market.v1.LeaseFilters: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + oseq: + type: integer + format: int64 + provider: + type: string + state: + type: string + title: LeaseFilters defines flags for lease list filter + akash.market.v1.LeaseID: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + oseq: + type: integer + format: int64 + provider: + type: string + title: LeaseID stores bid details of lease + akash.market.v1.OrderID: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + oseq: + type: integer + format: int64 + title: OrderID stores owner and all other seq numbers + akash.market.v1beta5.Bid: + type: object + properties: + id: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + oseq: + type: integer + format: int64 + provider: + type: string + description: |- + BidID stores owner and all other seq numbers + A successful bid becomes a Lease(ID). + state: + type: string + enum: + - invalid + - open + - active + - lost + - closed + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. So declaring dummy + state + - open: BidOpen denotes state for bid open + - active: BidMatched denotes state for bid open + - lost: BidLost denotes state for bid lost + - closed: BidClosed denotes state for bid closed + title: BidState is an enum which refers to state of bid + price: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + DecCoin defines a token with a denomination and a decimal amount. 
+ + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. + created_at: + type: string + format: int64 + resources_offer: + type: array + items: + type: object + properties: + resources: + type: object + properties: + id: + type: integer + format: int64 + cpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: CPU stores resource units and cpu config attributes + memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Memory stores resource quantity and memory attributes + storage: + type: array + items: + type: object + properties: + name: + type: string + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Storage stores resource quantity and storage attributes + gpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: GPU stores resource units and cpu config attributes + endpoints: + type: array + items: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: >- + - SHARED_HTTP: Describes an endpoint that becomes a + Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is implemented when + the lease is deployed + sequence_number: + type: integer + format: int64 + title: Endpoint describes a publicly accessible IP service + title: >- + Resources describes all available resources types for + deployment/node etc + + if field is nil resource is not present in the given + data-structure + count: + type: integer + format: int64 + title: |- + ResourceOffer describes resources that provider is offering + for deployment + title: Bid stores BidID, state of bid and price + akash.market.v1beta5.Bid.State: + type: string + enum: + - invalid + - open + - active + - lost + - closed + default: invalid + description: |- + - invalid: Prefix should start with 0 in enum. 
So declaring dummy state + - open: BidOpen denotes state for bid open + - active: BidMatched denotes state for bid open + - lost: BidLost denotes state for bid lost + - closed: BidClosed denotes state for bid closed + title: BidState is an enum which refers to state of bid + akash.market.v1beta5.BidFilters: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + oseq: + type: integer + format: int64 + provider: + type: string + state: + type: string + title: BidFilters defines flags for bid list filter + akash.market.v1beta5.Order: + type: object + properties: + id: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + oseq: + type: integer + format: int64 + title: OrderID stores owner and all other seq numbers + state: + type: string + enum: + - invalid + - open + - active + - closed + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. So declaring dummy + state + - open: OrderOpen denotes state for order open + - active: OrderMatched denotes state for order matched + - closed: OrderClosed denotes state for order lost + title: State is an enum which refers to state of order + spec: + type: object + properties: + name: + type: string + requirements: + type: object + properties: + signed_by: + title: >- + SignedBy list of keys that tenants expect to have signatures + from + type: object + properties: + all_of: + type: array + items: + type: string + title: all_of all keys in this list must have signed attributes + any_of: + type: array + items: + type: string + title: >- + any_of at least of of the keys from the list must have + signed attributes + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Attribute list of attributes tenant expects from the provider + title: PlacementRequirements + resources: + type: array + items: + type: object + properties: + resource: + type: object + properties: + id: + type: integer + format: int64 + cpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: CPU stores resource units and cpu config attributes + memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Memory stores resource quantity and memory attributes + storage: + type: array + items: + type: object + properties: + name: + type: string + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Storage stores resource quantity and storage + attributes + gpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + 
attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: GPU stores resource units and cpu config attributes + endpoints: + type: array + items: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: >- + - SHARED_HTTP: Describes an endpoint that becomes + a Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is implemented + when the lease is deployed + sequence_number: + type: integer + format: int64 + title: Endpoint describes a publicly accessible IP service + title: >- + Resources describes all available resources types for + deployment/node etc + + if field is nil resource is not present in the given + data-structure + count: + type: integer + format: int64 + price: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + title: >- + ResourceUnit extends Resources and adds Count along with the + Price + title: Spec stores group specifications + created_at: + type: string + format: int64 + title: Order stores orderID, state of order and other details + akash.market.v1beta5.Order.State: + type: string + enum: + - invalid + - open + - active + - closed + default: invalid + description: |- + - invalid: Prefix should start with 0 in enum. So declaring dummy state + - open: OrderOpen denotes state for order open + - active: OrderMatched denotes state for order matched + - closed: OrderClosed denotes state for order lost + title: State is an enum which refers to state of order + akash.market.v1beta5.OrderFilters: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + oseq: + type: integer + format: int64 + state: + type: string + title: OrderFilters defines flags for order list filter + akash.market.v1beta5.Params: + type: object + properties: + bid_min_deposit: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + order_max_bids: + type: integer + format: int64 + title: Params is the params for the x/market module + akash.market.v1beta5.QueryBidResponse: + type: object + properties: + bid: + type: object + properties: + id: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + oseq: + type: integer + format: int64 + provider: + type: string + description: |- + BidID stores owner and all other seq numbers + A successful bid becomes a Lease(ID). + state: + type: string + enum: + - invalid + - open + - active + - lost + - closed + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. 
So declaring dummy + state + - open: BidOpen denotes state for bid open + - active: BidMatched denotes state for bid open + - lost: BidLost denotes state for bid lost + - closed: BidClosed denotes state for bid closed + title: BidState is an enum which refers to state of bid + price: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + created_at: + type: string + format: int64 + resources_offer: + type: array + items: + type: object + properties: + resources: + type: object + properties: + id: + type: integer + format: int64 + cpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: CPU stores resource units and cpu config attributes + memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Memory stores resource quantity and memory attributes + storage: + type: array + items: + type: object + properties: + name: + type: string + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Storage stores resource quantity and storage + attributes + gpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: GPU stores resource units and cpu config attributes + endpoints: + type: array + items: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: >- + - SHARED_HTTP: Describes an endpoint that becomes + a Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is implemented + when the lease is deployed + sequence_number: + type: integer + format: int64 + title: Endpoint describes a publicly accessible IP service + title: >- + Resources describes all available resources types for + deployment/node etc + + if field is nil resource is not present in the given + data-structure + count: + type: integer + format: int64 + title: |- + ResourceOffer describes resources that provider is offering + for deployment + title: Bid stores BidID, state of bid and price + escrow_account: + type: object + properties: + id: + title: unique identifier for this escrow account + type: object + properties: + scope: + type: string + xid: + type: string + owner: + type: string + title: bech32 encoded account address of the 
owner of this escrow account + state: + title: current state of this escrow account + type: string + enum: + - invalid + - open + - closed + - overdrawn + default: invalid + description: |- + - invalid: AccountStateInvalid is an invalid state + - open: AccountOpen is the state when an account is open + - closed: AccountClosed is the state when an account is closed + - overdrawn: AccountOverdrawn is the state when an account is overdrawn + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + title: unspent coins received from the owner's wallet + transferred: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + title: total coins spent by this account + settled_at: + type: string + format: int64 + title: block height at which this account was last settled + depositor: + type: string + description: >- + bech32 encoded account address of the depositor. + + If depositor is same as the owner, then any incoming coins are + added to the Balance. + + If depositor isn't same as the owner, then any incoming coins are + added to the Funds. + funds: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + title: Account stores state for an escrow account + title: QueryBidResponse is response type for the Query/Bid RPC method + akash.market.v1beta5.QueryBidsResponse: + type: object + properties: + bids: + type: array + items: + type: object + properties: + bid: + type: object + properties: + id: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + oseq: + type: integer + format: int64 + provider: + type: string + description: |- + BidID stores owner and all other seq numbers + A successful bid becomes a Lease(ID). + state: + type: string + enum: + - invalid + - open + - active + - lost + - closed + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. So declaring + dummy state + - open: BidOpen denotes state for bid open + - active: BidMatched denotes state for bid open + - lost: BidLost denotes state for bid lost + - closed: BidClosed denotes state for bid closed + title: BidState is an enum which refers to state of bid + price: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. 
+ created_at: + type: string + format: int64 + resources_offer: + type: array + items: + type: object + properties: + resources: + type: object + properties: + id: + type: integer + format: int64 + cpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + CPU stores resource units and cpu config + attributes + memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Memory stores resource quantity and memory + attributes + storage: + type: array + items: + type: object + properties: + name: + type: string + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Storage stores resource quantity and storage + attributes + gpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + GPU stores resource units and cpu config + attributes + endpoints: + type: array + items: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: >- + - SHARED_HTTP: Describes an endpoint that + becomes a Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is + implemented when the lease is deployed + sequence_number: + type: integer + format: int64 + title: >- + Endpoint describes a publicly accessible IP + service + title: >- + Resources describes all available resources types for + deployment/node etc + + if field is nil resource is not present in the given + data-structure + count: + type: integer + format: int64 + title: >- + ResourceOffer describes resources that provider is + offering + + for deployment + title: Bid stores BidID, state of bid and price + escrow_account: + type: object + properties: + id: + title: unique identifier for this escrow account + type: object + properties: + scope: + type: string + xid: + type: string + owner: + type: string + title: >- + bech32 encoded account address of the owner of this escrow + account + state: + title: current state of this escrow account + type: string + enum: + - invalid + - open + - closed + - overdrawn + default: invalid + description: |- + - invalid: AccountStateInvalid is an invalid state + - open: AccountOpen is the state when an account is open + - closed: AccountClosed is the state when an account is closed + - overdrawn: AccountOverdrawn is the state when an account is overdrawn + balance: + type: object + properties: + denom: + type: string + amount: + type: 
string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + title: unspent coins received from the owner's wallet + transferred: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + title: total coins spent by this account + settled_at: + type: string + format: int64 + title: block height at which this account was last settled + depositor: + type: string + description: >- + bech32 encoded account address of the depositor. + + If depositor is same as the owner, then any incoming coins + are added to the Balance. + + If depositor isn't same as the owner, then any incoming + coins are added to the Funds. + funds: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + title: Account stores state for an escrow account + title: QueryBidResponse is response type for the Query/Bid RPC method + pagination: + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: QueryBidsResponse is response type for the Query/Bids RPC method + akash.market.v1beta5.QueryLeaseResponse: + type: object + properties: + lease: + type: object + properties: + id: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + oseq: + type: integer + format: int64 + provider: + type: string + title: LeaseID stores bid details of lease + state: + type: string + enum: + - invalid + - active + - insufficient_funds + - closed + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. So declaring dummy + state + - active: LeaseActive denotes state for lease active + - insufficient_funds: LeaseInsufficientFunds denotes state for lease insufficient_funds + - closed: LeaseClosed denotes state for lease closed + title: State is an enum which refers to state of lease + price: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. 
+ created_at: + type: string + format: int64 + closed_on: + type: string + format: int64 + title: Lease stores LeaseID, state of lease and price + escrow_payment: + type: object + properties: + account_id: + type: object + properties: + scope: + type: string + xid: + type: string + title: AccountID is the account identifier + payment_id: + type: string + owner: + type: string + state: + type: string + enum: + - invalid + - open + - closed + - overdrawn + default: invalid + description: >- + - invalid: PaymentStateInvalid is the state when the payment is + invalid + - open: PaymentStateOpen is the state when the payment is open + - closed: PaymentStateClosed is the state when the payment is closed + - overdrawn: PaymentStateOverdrawn is the state when the payment is overdrawn + title: State defines payment state + rate: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + withdrawn: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + title: Payment stores state for a payment + title: QueryLeaseResponse is response type for the Query/Lease RPC method + akash.market.v1beta5.QueryLeasesResponse: + type: object + properties: + leases: + type: array + items: + type: object + properties: + lease: + type: object + properties: + id: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + oseq: + type: integer + format: int64 + provider: + type: string + title: LeaseID stores bid details of lease + state: + type: string + enum: + - invalid + - active + - insufficient_funds + - closed + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. So declaring + dummy state + - active: LeaseActive denotes state for lease active + - insufficient_funds: LeaseInsufficientFunds denotes state for lease insufficient_funds + - closed: LeaseClosed denotes state for lease closed + title: State is an enum which refers to state of lease + price: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. 
+ created_at: + type: string + format: int64 + closed_on: + type: string + format: int64 + title: Lease stores LeaseID, state of lease and price + escrow_payment: + type: object + properties: + account_id: + type: object + properties: + scope: + type: string + xid: + type: string + title: AccountID is the account identifier + payment_id: + type: string + owner: + type: string + state: + type: string + enum: + - invalid + - open + - closed + - overdrawn + default: invalid + description: >- + - invalid: PaymentStateInvalid is the state when the payment + is invalid + - open: PaymentStateOpen is the state when the payment is open + - closed: PaymentStateClosed is the state when the payment is closed + - overdrawn: PaymentStateOverdrawn is the state when the payment is overdrawn + title: State defines payment state + rate: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + withdrawn: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + title: Payment stores state for a payment + title: QueryLeaseResponse is response type for the Query/Lease RPC method + pagination: + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: QueryLeasesResponse is response type for the Query/Leases RPC method + akash.market.v1beta5.QueryOrderResponse: + type: object + properties: + order: + type: object + properties: + id: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + oseq: + type: integer + format: int64 + title: OrderID stores owner and all other seq numbers + state: + type: string + enum: + - invalid + - open + - active + - closed + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. 
So declaring dummy + state + - open: OrderOpen denotes state for order open + - active: OrderMatched denotes state for order matched + - closed: OrderClosed denotes state for order lost + title: State is an enum which refers to state of order + spec: + type: object + properties: + name: + type: string + requirements: + type: object + properties: + signed_by: + title: >- + SignedBy list of keys that tenants expect to have + signatures from + type: object + properties: + all_of: + type: array + items: + type: string + title: >- + all_of all keys in this list must have signed + attributes + any_of: + type: array + items: + type: string + title: >- + any_of at least of of the keys from the list must have + signed attributes + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Attribute list of attributes tenant expects from the + provider + title: PlacementRequirements + resources: + type: array + items: + type: object + properties: + resource: + type: object + properties: + id: + type: integer + format: int64 + cpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: CPU stores resource units and cpu config attributes + memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Memory stores resource quantity and memory + attributes + storage: + type: array + items: + type: object + properties: + name: + type: string + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Storage stores resource quantity and storage + attributes + gpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: GPU stores resource units and cpu config attributes + endpoints: + type: array + items: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: >- + - SHARED_HTTP: Describes an endpoint that + becomes a Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is implemented + when the lease is deployed + sequence_number: + type: integer + format: int64 + title: >- + Endpoint describes a publicly accessible IP + service + title: >- + Resources describes all available resources types for + deployment/node etc + + if field is nil resource is not present in the given + data-structure + count: + type: integer + format: int64 
+ price: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a + decimal amount. + + + NOTE: The amount field is an Dec which implements the + custom method + + signatures required by gogoproto. + title: >- + ResourceUnit extends Resources and adds Count along with the + Price + title: Spec stores group specifications + created_at: + type: string + format: int64 + title: Order stores orderID, state of order and other details + title: QueryOrderResponse is response type for the Query/Order RPC method + akash.market.v1beta5.QueryOrdersResponse: + type: object + properties: + orders: + type: array + items: + type: object + properties: + id: + type: object + properties: + owner: + type: string + dseq: + type: string + format: uint64 + gseq: + type: integer + format: int64 + oseq: + type: integer + format: int64 + title: OrderID stores owner and all other seq numbers + state: + type: string + enum: + - invalid + - open + - active + - closed + default: invalid + description: >- + - invalid: Prefix should start with 0 in enum. So declaring + dummy state + - open: OrderOpen denotes state for order open + - active: OrderMatched denotes state for order matched + - closed: OrderClosed denotes state for order lost + title: State is an enum which refers to state of order + spec: + type: object + properties: + name: + type: string + requirements: + type: object + properties: + signed_by: + title: >- + SignedBy list of keys that tenants expect to have + signatures from + type: object + properties: + all_of: + type: array + items: + type: string + title: >- + all_of all keys in this list must have signed + attributes + any_of: + type: array + items: + type: string + title: >- + any_of at least of of the keys from the list must + have signed attributes + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Attribute list of attributes tenant expects from the + provider + title: PlacementRequirements + resources: + type: array + items: + type: object + properties: + resource: + type: object + properties: + id: + type: integer + format: int64 + cpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + CPU stores resource units and cpu config + attributes + memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Memory stores resource quantity and memory + attributes + storage: + type: array + items: + type: object + properties: + name: + type: string + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + Storage stores resource quantity and storage + attributes + gpu: + type: object + properties: + units: + type: 
object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: >- + GPU stores resource units and cpu config + attributes + endpoints: + type: array + items: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: >- + - SHARED_HTTP: Describes an endpoint that + becomes a Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is + implemented when the lease is deployed + sequence_number: + type: integer + format: int64 + title: >- + Endpoint describes a publicly accessible IP + service + title: >- + Resources describes all available resources types for + deployment/node etc + + if field is nil resource is not present in the given + data-structure + count: + type: integer + format: int64 + price: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a + decimal amount. + + + NOTE: The amount field is an Dec which implements the + custom method + + signatures required by gogoproto. + title: >- + ResourceUnit extends Resources and adds Count along with + the Price + title: Spec stores group specifications + created_at: + type: string + format: int64 + title: Order stores orderID, state of order and other details + pagination: + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: QueryOrdersResponse is response type for the Query/Orders RPC method + akash.market.v1beta5.QueryParamsResponse: + type: object + properties: + params: + description: params defines the parameters of the module. + type: object + properties: + bid_min_deposit: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + order_max_bids: + type: integer + format: int64 + title: Params is the params for the x/market module + description: QueryParamsResponse is the response type for the Query/Params RPC method. 
+ akash.market.v1beta5.ResourceOffer: + type: object + properties: + resources: + type: object + properties: + id: + type: integer + format: int64 + cpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: CPU stores resource units and cpu config attributes + memory: + type: object + properties: + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Memory stores resource quantity and memory attributes + storage: + type: array + items: + type: object + properties: + name: + type: string + quantity: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: Storage stores resource quantity and storage attributes + gpu: + type: object + properties: + units: + type: object + properties: + val: + type: string + format: byte + title: Unit stores cpu, memory and storage metrics + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + title: GPU stores resource units and cpu config attributes + endpoints: + type: array + items: + type: object + properties: + kind: + type: string + enum: + - SHARED_HTTP + - RANDOM_PORT + - LEASED_IP + default: SHARED_HTTP + description: >- + - SHARED_HTTP: Describes an endpoint that becomes a + Kubernetes Ingress + - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort + - LEASED_IP: Describes an endpoint that becomes a leased IP + title: >- + This describes how the endpoint is implemented when the + lease is deployed + sequence_number: + type: integer + format: int64 + title: Endpoint describes a publicly accessible IP service + title: >- + Resources describes all available resources types for deployment/node + etc + + if field is nil resource is not present in the given data-structure + count: + type: integer + format: int64 + title: |- + ResourceOffer describes resources that provider is offering + for deployment + akash.market.v1beta5.MsgCloseBidResponse: + type: object + description: MsgCloseBidResponse defines the Msg/CloseBid response type. + akash.market.v1beta5.MsgCloseLeaseResponse: + type: object + description: MsgCloseLeaseResponse defines the Msg/CloseLease response type. + akash.market.v1beta5.MsgCreateBidResponse: + type: object + description: MsgCreateBidResponse defines the Msg/CreateBid response type. + akash.market.v1beta5.MsgCreateLeaseResponse: + type: object + title: MsgCreateLeaseResponse is the response from creating a lease + akash.market.v1beta5.MsgUpdateParamsResponse: + type: object + description: |- + MsgUpdateParamsResponse defines the response structure for executing a + MsgUpdateParams message. + + Since: akash v1.0.0 + akash.market.v1beta5.MsgWithdrawLeaseResponse: + type: object + description: MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. 
+ akash.provider.v1beta4.Info: + type: object + properties: + email: + type: string + website: + type: string + title: Info + akash.provider.v1beta4.Provider: + type: object + properties: + owner: + type: string + host_uri: + type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + info: + type: object + properties: + email: + type: string + website: + type: string + title: Info + title: Provider stores owner and host details + akash.provider.v1beta4.QueryProviderResponse: + type: object + properties: + provider: + type: object + properties: + owner: + type: string + host_uri: + type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + info: + type: object + properties: + email: + type: string + website: + type: string + title: Info + title: Provider stores owner and host details + title: QueryProviderResponse is response type for the Query/Provider RPC method + akash.provider.v1beta4.QueryProvidersResponse: + type: object + properties: + providers: + type: array + items: + type: object + properties: + owner: + type: string + host_uri: + type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + title: Attribute represents key value pair + info: + type: object + properties: + email: + type: string + website: + type: string + title: Info + title: Provider stores owner and host details + pagination: + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: QueryProvidersResponse is response type for the Query/Providers RPC method + akash.take.v1.DenomTakeRate: + type: object + properties: + denom: + type: string + rate: + type: integer + format: int64 + title: DenomTakeRate describes take rate for specified denom + akash.take.v1.Params: + type: object + properties: + denom_take_rates: + type: array + items: + type: object + properties: + denom: + type: string + rate: + type: integer + format: int64 + title: DenomTakeRate describes take rate for specified denom + title: denom -> % take rate + default_take_rate: + type: integer + format: int64 + title: Params defines the parameters for the x/take package + akash.take.v1.QueryParamsResponse: + type: object + properties: + params: + description: params defines the parameters of the module. + type: object + properties: + denom_take_rates: + type: array + items: + type: object + properties: + denom: + type: string + rate: + type: integer + format: int64 + title: DenomTakeRate describes take rate for specified denom + title: denom -> % take rate + default_take_rate: + type: integer + format: int64 + title: Params defines the parameters for the x/take package + description: QueryParamsResponse is the response type for the Query/Params RPC method. 
+ akash.take.v1.MsgUpdateParamsResponse: + type: object + description: |- + MsgUpdateParamsResponse defines the response structure for executing a + MsgUpdateParams message. + + Since: akash v1.0.0 + cosmos.auth.v1beta1.AddressBytesToStringResponse: + type: object + properties: + address_string: + type: string + description: >- + AddressBytesToStringResponse is the response type for AddressString rpc + method. + + + Since: cosmos-sdk 0.46 + cosmos.auth.v1beta1.AddressStringToBytesResponse: + type: object + properties: + address_bytes: + type: string + format: byte + description: >- + AddressStringToBytesResponse is the response type for AddressBytes rpc + method. + + + Since: cosmos-sdk 0.46 + cosmos.auth.v1beta1.BaseAccount: + type: object + properties: + address: + type: string + pub_key: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up a + type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above specified + type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + account_number: + type: string + format: uint64 + sequence: + type: string + format: uint64 + description: >- + BaseAccount defines a base account type. It contains all the necessary + fields + + for basic account functionality. Any custom account type should extend + this + + type for additional functionality (e.g. vesting). + cosmos.auth.v1beta1.Bech32PrefixResponse: + type: object + properties: + bech32_prefix: + type: string + description: |- + Bech32PrefixResponse is the response type for Bech32Prefix rpc method. + + Since: cosmos-sdk 0.46 + cosmos.auth.v1beta1.Params: + type: object + properties: + max_memo_characters: + type: string + format: uint64 + tx_sig_limit: + type: string + format: uint64 + tx_size_cost_per_byte: + type: string + format: uint64 + sig_verify_cost_ed25519: + type: string + format: uint64 + sig_verify_cost_secp256k1: + type: string + format: uint64 + description: Params defines the parameters for the auth module. + cosmos.auth.v1beta1.QueryAccountAddressByIDResponse: + type: object + properties: + account_address: + type: string + description: 'Since: cosmos-sdk 0.46.2' + title: >- + QueryAccountAddressByIDResponse is the response type for + AccountAddressByID rpc method + cosmos.auth.v1beta1.QueryAccountInfoResponse: + type: object + properties: + info: + description: info is the account info which is represented by BaseAccount. + type: object + properties: + address: + type: string + pub_key: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally set + up a type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. 
+ * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might + be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + account_number: + type: string + format: uint64 + sequence: + type: string + format: uint64 + description: |- + QueryAccountInfoResponse is the Query/AccountInfo response type. + + Since: cosmos-sdk 0.47 + cosmos.auth.v1beta1.QueryAccountResponse: + type: object + properties: + account: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). 
+ + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up a + type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above specified + type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + QueryAccountResponse is the response type for the Query/Account RPC + method. 
+ cosmos.auth.v1beta1.QueryAccountsResponse: + type: object + properties: + accounts: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up + a type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might + be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. 
Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + title: accounts are the existing accounts + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryAccountsResponse is the response type for the Query/Accounts RPC + method. + + + Since: cosmos-sdk 0.43 + cosmos.auth.v1beta1.QueryModuleAccountByNameResponse: + type: object + properties: + account: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up a + type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above specified + type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... 
+ if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + QueryModuleAccountByNameResponse is the response type for the + Query/ModuleAccountByName RPC method. + cosmos.auth.v1beta1.QueryModuleAccountsResponse: + type: object + properties: + accounts: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up + a type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might + be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. 
+ + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + QueryModuleAccountsResponse is the response type for the + Query/ModuleAccounts RPC method. + + + Since: cosmos-sdk 0.46 + cosmos.auth.v1beta1.QueryParamsResponse: + type: object + properties: + params: + description: params defines the parameters of the module. + type: object + properties: + max_memo_characters: + type: string + format: uint64 + tx_sig_limit: + type: string + format: uint64 + tx_size_cost_per_byte: + type: string + format: uint64 + sig_verify_cost_ed25519: + type: string + format: uint64 + sig_verify_cost_secp256k1: + type: string + format: uint64 + description: QueryParamsResponse is the response type for the Query/Params RPC method. + cosmos.bank.v1beta1.DenomOwner: + type: object + properties: + address: + type: string + description: address defines the address that owns a particular denomination. + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: |- + DenomOwner defines structure representing an account that owns or holds a + particular denominated token. It contains the account address and account + balance of the denominated token. + + Since: cosmos-sdk 0.46 + cosmos.bank.v1beta1.DenomUnit: + type: object + properties: + denom: + type: string + description: denom represents the string name of the given denom unit (e.g uatom). 
+ exponent: + type: integer + format: int64 + description: >- + exponent represents power of 10 exponent that one must + + raise the base_denom to in order to equal the given DenomUnit's denom + + 1 denom = 10^exponent base_denom + + (e.g. with a base_denom of uatom, one can create a DenomUnit of 'atom' + with + + exponent = 6, thus: 1 atom = 10^6 uatom). + aliases: + type: array + items: + type: string + title: aliases is a list of string aliases for the given denom + description: |- + DenomUnit represents a struct that describes a given + denomination unit of the basic token. + cosmos.bank.v1beta1.Metadata: + type: object + properties: + description: + type: string + denom_units: + type: array + items: + type: object + properties: + denom: + type: string + description: >- + denom represents the string name of the given denom unit (e.g + uatom). + exponent: + type: integer + format: int64 + description: >- + exponent represents power of 10 exponent that one must + + raise the base_denom to in order to equal the given DenomUnit's + denom + + 1 denom = 10^exponent base_denom + + (e.g. with a base_denom of uatom, one can create a DenomUnit of + 'atom' with + + exponent = 6, thus: 1 atom = 10^6 uatom). + aliases: + type: array + items: + type: string + title: aliases is a list of string aliases for the given denom + description: |- + DenomUnit represents a struct that describes a given + denomination unit of the basic token. + title: denom_units represents the list of DenomUnit's for a given coin + base: + type: string + description: >- + base represents the base denom (should be the DenomUnit with exponent + = 0). + display: + type: string + description: |- + display indicates the suggested denom that should be + displayed in clients. + name: + type: string + description: 'Since: cosmos-sdk 0.43' + title: 'name defines the name of the token (eg: Cosmos Atom)' + symbol: + type: string + description: >- + symbol is the token symbol usually shown on exchanges (eg: ATOM). This + can + + be the same as the display. + + + Since: cosmos-sdk 0.43 + uri: + type: string + description: >- + URI to a document (on or off-chain) that contains additional + information. Optional. + + + Since: cosmos-sdk 0.46 + uri_hash: + type: string + description: >- + URIHash is a sha256 hash of a document pointed by URI. It's used to + verify that + + the document didn't change. Optional. + + + Since: cosmos-sdk 0.46 + description: |- + Metadata represents a struct that describes + a basic token. + cosmos.bank.v1beta1.Params: + type: object + properties: + send_enabled: + type: array + items: + type: object + properties: + denom: + type: string + enabled: + type: boolean + description: >- + SendEnabled maps coin denom to a send_enabled status (whether a + denom is + + sendable). + description: >- + Deprecated: Use of SendEnabled in params is deprecated. + + For genesis, use the newly added send_enabled field in the genesis + object. + + Storage, lookup, and manipulation of this information is now in the + keeper. + + + As of cosmos-sdk 0.47, this only exists for backwards compatibility of + genesis files. + default_send_enabled: + type: boolean + description: Params defines the parameters for the bank module. + cosmos.bank.v1beta1.QueryAllBalancesResponse: + type: object + properties: + balances: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. 
+ + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: balances is the balances of all the coins. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryAllBalancesResponse is the response type for the Query/AllBalances + RPC + + method. + cosmos.bank.v1beta1.QueryBalanceResponse: + type: object + properties: + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: >- + QueryBalanceResponse is the response type for the Query/Balance RPC + method. + cosmos.bank.v1beta1.QueryDenomMetadataResponse: + type: object + properties: + metadata: + type: object + properties: + description: + type: string + denom_units: + type: array + items: + type: object + properties: + denom: + type: string + description: >- + denom represents the string name of the given denom unit + (e.g uatom). + exponent: + type: integer + format: int64 + description: >- + exponent represents power of 10 exponent that one must + + raise the base_denom to in order to equal the given + DenomUnit's denom + + 1 denom = 10^exponent base_denom + + (e.g. with a base_denom of uatom, one can create a DenomUnit + of 'atom' with + + exponent = 6, thus: 1 atom = 10^6 uatom). + aliases: + type: array + items: + type: string + title: aliases is a list of string aliases for the given denom + description: |- + DenomUnit represents a struct that describes a given + denomination unit of the basic token. + title: denom_units represents the list of DenomUnit's for a given coin + base: + type: string + description: >- + base represents the base denom (should be the DenomUnit with + exponent = 0). + display: + type: string + description: |- + display indicates the suggested denom that should be + displayed in clients. + name: + type: string + description: 'Since: cosmos-sdk 0.43' + title: 'name defines the name of the token (eg: Cosmos Atom)' + symbol: + type: string + description: >- + symbol is the token symbol usually shown on exchanges (eg: ATOM). + This can + + be the same as the display. + + + Since: cosmos-sdk 0.43 + uri: + type: string + description: >- + URI to a document (on or off-chain) that contains additional + information. Optional. + + + Since: cosmos-sdk 0.46 + uri_hash: + type: string + description: >- + URIHash is a sha256 hash of a document pointed by URI. It's used + to verify that + + the document didn't change. Optional. + + + Since: cosmos-sdk 0.46 + description: |- + Metadata represents a struct that describes + a basic token. + description: >- + QueryDenomMetadataResponse is the response type for the + Query/DenomMetadata RPC + + method. 
+ cosmos.bank.v1beta1.QueryDenomOwnersResponse: + type: object + properties: + denom_owners: + type: array + items: + type: object + properties: + address: + type: string + description: address defines the address that owns a particular denomination. + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: >- + DenomOwner defines structure representing an account that owns or + holds a + + particular denominated token. It contains the account address and + account + + balance of the denominated token. + + + Since: cosmos-sdk 0.46 + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryDenomOwnersResponse defines the RPC response of a DenomOwners RPC + query. + + + Since: cosmos-sdk 0.46 + cosmos.bank.v1beta1.QueryDenomsMetadataResponse: + type: object + properties: + metadatas: + type: array + items: + type: object + properties: + description: + type: string + denom_units: + type: array + items: + type: object + properties: + denom: + type: string + description: >- + denom represents the string name of the given denom unit + (e.g uatom). + exponent: + type: integer + format: int64 + description: >- + exponent represents power of 10 exponent that one must + + raise the base_denom to in order to equal the given + DenomUnit's denom + + 1 denom = 10^exponent base_denom + + (e.g. with a base_denom of uatom, one can create a + DenomUnit of 'atom' with + + exponent = 6, thus: 1 atom = 10^6 uatom). + aliases: + type: array + items: + type: string + title: aliases is a list of string aliases for the given denom + description: |- + DenomUnit represents a struct that describes a given + denomination unit of the basic token. + title: denom_units represents the list of DenomUnit's for a given coin + base: + type: string + description: >- + base represents the base denom (should be the DenomUnit with + exponent = 0). + display: + type: string + description: |- + display indicates the suggested denom that should be + displayed in clients. + name: + type: string + description: 'Since: cosmos-sdk 0.43' + title: 'name defines the name of the token (eg: Cosmos Atom)' + symbol: + type: string + description: >- + symbol is the token symbol usually shown on exchanges (eg: + ATOM). This can + + be the same as the display. + + + Since: cosmos-sdk 0.43 + uri: + type: string + description: >- + URI to a document (on or off-chain) that contains additional + information. Optional. + + + Since: cosmos-sdk 0.46 + uri_hash: + type: string + description: >- + URIHash is a sha256 hash of a document pointed by URI. It's used + to verify that + + the document didn't change. Optional. + + + Since: cosmos-sdk 0.46 + description: |- + Metadata represents a struct that describes + a basic token. + description: >- + metadata provides the client information for all the registered + tokens. 
+ pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryDenomsMetadataResponse is the response type for the + Query/DenomsMetadata RPC + + method. + cosmos.bank.v1beta1.QueryParamsResponse: + type: object + properties: + params: + type: object + properties: + send_enabled: + type: array + items: + type: object + properties: + denom: + type: string + enabled: + type: boolean + description: >- + SendEnabled maps coin denom to a send_enabled status (whether a + denom is + + sendable). + description: >- + Deprecated: Use of SendEnabled in params is deprecated. + + For genesis, use the newly added send_enabled field in the genesis + object. + + Storage, lookup, and manipulation of this information is now in + the keeper. + + + As of cosmos-sdk 0.47, this only exists for backwards + compatibility of genesis files. + default_send_enabled: + type: boolean + description: Params defines the parameters for the bank module. + description: >- + QueryParamsResponse defines the response type for querying x/bank + parameters. + cosmos.bank.v1beta1.QuerySendEnabledResponse: + type: object + properties: + send_enabled: + type: array + items: + type: object + properties: + denom: + type: string + enabled: + type: boolean + description: >- + SendEnabled maps coin denom to a send_enabled status (whether a + denom is + + sendable). + pagination: + description: |- + pagination defines the pagination in the response. This field is only + populated if the denoms field in the request is empty. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + QuerySendEnabledResponse defines the RPC response of a SendEnable query. + + Since: cosmos-sdk 0.47 + cosmos.bank.v1beta1.QuerySpendableBalanceByDenomResponse: + type: object + properties: + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: >- + QuerySpendableBalanceByDenomResponse defines the gRPC response structure + for + + querying an account's spendable balance for a specific denom. + + + Since: cosmos-sdk 0.47 + cosmos.bank.v1beta1.QuerySpendableBalancesResponse: + type: object + properties: + balances: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: balances is the spendable balances of all the coins. 
+ pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QuerySpendableBalancesResponse defines the gRPC response structure for + querying + + an account's spendable balances. + + + Since: cosmos-sdk 0.46 + cosmos.bank.v1beta1.QuerySupplyOfResponse: + type: object + properties: + amount: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: >- + QuerySupplyOfResponse is the response type for the Query/SupplyOf RPC + method. + cosmos.bank.v1beta1.QueryTotalSupplyResponse: + type: object + properties: + supply: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + title: supply is the supply of the coins + pagination: + description: |- + pagination defines the pagination in the response. + + Since: cosmos-sdk 0.43 + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + title: >- + QueryTotalSupplyResponse is the response type for the Query/TotalSupply + RPC + + method + cosmos.bank.v1beta1.SendEnabled: + type: object + properties: + denom: + type: string + enabled: + type: boolean + description: |- + SendEnabled maps coin denom to a send_enabled status (whether a denom is + sendable). + cosmos.base.tendermint.v1beta1.ABCIQueryResponse: + type: object + properties: + code: + type: integer + format: int64 + log: + type: string + info: + type: string + index: + type: string + format: int64 + key: + type: string + format: byte + value: + type: string + format: byte + proof_ops: + type: object + properties: + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + description: >- + ProofOp defines an operation used for calculating Merkle root. + The data could + + be arbitrary format, providing necessary data for example + neighbouring node + + hash. + + + Note: This type is a duplicate of the ProofOp proto type defined + in Tendermint. + description: >- + ProofOps is Merkle proof defined by the list of ProofOps. + + + Note: This type is a duplicate of the ProofOps proto type defined in + Tendermint. + height: + type: string + format: int64 + codespace: + type: string + description: >- + ABCIQueryResponse defines the response structure for the ABCIQuery gRPC + query. + + + Note: This type is a duplicate of the ResponseQuery proto type defined in + + Tendermint. 
+ cosmos.base.tendermint.v1beta1.Block: + type: object + properties: + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block in + the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + description: >- + proposer_address is the original block proposer address, formatted + as a Bech32 string. + + In Tendermint, this type is `bytes`, but in the SDK, we convert it + to a Bech32 string + + for better UX. + description: Header defines the structure of a Tendermint block header. + data: + type: object + properties: + txs: + type: array + items: + type: string + format: byte + description: >- + Txs that will be applied by state @ block.Height+1. + + NOTE: not all txs here are valid. We're just agreeing on the + order first. + + This means that block.AppHash does not include these txs. + title: Data contains the set of transactions included in the block + evidence: + type: object + properties: + evidence: + type: array + items: + type: object + properties: + duplicate_vote_evidence: + type: object + properties: + vote_a: + type: object + properties: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed message in the + consensus. + + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote + from validators for + + consensus. + vote_b: + type: object + properties: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed message in the + consensus. 
+ + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote + from validators for + + consensus. + total_voting_power: + type: string + format: int64 + validator_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + DuplicateVoteEvidence contains evidence of a validator + signed two conflicting votes. + light_client_attack_evidence: + type: object + properties: + conflicting_block: + type: object + properties: + signed_header: + type: object + properties: + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and + the rules of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: >- + hashes from the app output from the prev + block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint + block header. + commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: >- + BlockIdFlag indicates which BlcokID the + signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: >- + CommitSig is a part of the Vote included + in a Commit. + description: >- + Commit contains the evidence that a block was + committed by a set of validators. 
+ validator_set: + type: object + properties: + validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + proposer: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use + with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + common_height: + type: string + format: int64 + byzantine_validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use with + Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + LightClientAttackEvidence contains evidence of a set of + validators attempting to mislead a light client. + last_commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: BlockIdFlag indicates which BlcokID the signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: CommitSig is a part of the Vote included in a Commit. + description: >- + Commit contains the evidence that a block was committed by a set of + validators. + description: |- + Block is tendermint type Block, with the Header proposer address + field converted to bech32 string. + cosmos.base.tendermint.v1beta1.GetBlockByHeightResponse: + type: object + properties: + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + block: + title: 'Deprecated: please use `sdk_block` instead' + type: object + properties: + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block + in the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + data: + type: object + properties: + txs: + type: array + items: + type: string + format: byte + description: >- + Txs that will be applied by state @ block.Height+1. + + NOTE: not all txs here are valid. We're just agreeing on the + order first. + + This means that block.AppHash does not include these txs. + title: Data contains the set of transactions included in the block + evidence: + type: object + properties: + evidence: + type: array + items: + type: object + properties: + duplicate_vote_evidence: + type: object + properties: + vote_a: + type: object + properties: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed message in the + consensus. + + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote + from validators for + + consensus. + vote_b: + type: object + properties: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed message in the + consensus. + + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote + from validators for + + consensus. 
+ total_voting_power: + type: string + format: int64 + validator_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + DuplicateVoteEvidence contains evidence of a validator + signed two conflicting votes. + light_client_attack_evidence: + type: object + properties: + conflicting_block: + type: object + properties: + signed_header: + type: object + properties: + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules + for processing a block in the + blockchain, + + including all blockchain data structures + and the rules of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: >- + hashes from the app output from the prev + block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint + block header. + commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: >- + BlockIdFlag indicates which BlcokID the + signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: >- + CommitSig is a part of the Vote included + in a Commit. + description: >- + Commit contains the evidence that a block + was committed by a set of validators. 
+ validator_set: + type: object + properties: + validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + proposer: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + common_height: + type: string + format: int64 + byzantine_validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use + with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + LightClientAttackEvidence contains evidence of a set of + validators attempting to mislead a light client. + last_commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: BlockIdFlag indicates which BlcokID the signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: CommitSig is a part of the Vote included in a Commit. + description: >- + Commit contains the evidence that a block was committed by a set + of validators. 
+ sdk_block: + title: 'Since: cosmos-sdk 0.47' type: object properties: - quantity: + header: type: object properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: Memory stores resource quantity and memory attributes - storage: - type: array - items: - type: object - properties: - name: - type: string - quantity: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: + version: + title: basic block info type: object properties: - key: + block: type: string - value: + format: uint64 + app: type: string - title: Attribute represents key value pair - title: Storage stores resource quantity and storage attributes - gpu: - type: object - properties: - units: - type: object - properties: - val: + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block + in the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: type: string format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: GPU stores resource units and cpu config attributes - endpoints: - type: array - items: - type: object - properties: - kind: - type: string - enum: - - SHARED_HTTP - - RANDOM_PORT - - LEASED_IP - default: SHARED_HTTP - description: >- - - SHARED_HTTP: Describes an endpoint that becomes a Kubernetes - Ingress - - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort - - LEASED_IP: Describes an endpoint that becomes a leased IP - title: >- - This describes how the endpoint is implemented when the lease is - deployed - sequence_number: - type: integer - format: int64 - title: Endpoint describes a publicly accessible IP service - title: |- - Resources describes all available resources types for deployment/node etc - if field is nil resource is not present in the given data-structure - akash.base.v1beta3.SignedBy: - type: object - properties: - all_of: - type: array - items: - type: string - title: all_of all keys in this list must have signed attributes - any_of: - type: array - items: - type: string - title: >- - any_of at least of of the keys from the list must have signed - attributes - title: >- - SignedBy represents validation accounts that tenant expects signatures for - provider attributes + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + 
proposer_address: + type: string + description: >- + proposer_address is the original block proposer address, + formatted as a Bech32 string. - AllOf has precedence i.e. if there is at least one entry AnyOf is ignored - regardless to how many + In Tendermint, this type is `bytes`, but in the SDK, we + convert it to a Bech32 string - entries there + for better UX. + description: Header defines the structure of a Tendermint block header. + data: + type: object + properties: + txs: + type: array + items: + type: string + format: byte + description: >- + Txs that will be applied by state @ block.Height+1. - this behaviour to be discussed - akash.base.v1beta3.Storage: - type: object - properties: - name: - type: string - quantity: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: Storage stores resource quantity and storage attributes - akash.deployment.v1beta3.Deployment: - type: object - properties: - deployment_id: - type: object - properties: - owner: - type: string - dseq: - type: string - format: uint64 - title: DeploymentID stores owner and sequence number - state: - type: string - enum: - - invalid - - active - - closed - default: invalid - description: >- - - invalid: Prefix should start with 0 in enum. So declaring dummy - state - - active: DeploymentActive denotes state for deployment active - - closed: DeploymentClosed denotes state for deployment closed - title: State is an enum which refers to state of deployment - version: - type: string - format: byte - created_at: - type: string - format: int64 - title: Deployment stores deploymentID, state and version details - akash.deployment.v1beta3.Deployment.State: - type: string - enum: - - invalid - - active - - closed - default: invalid - description: |- - - invalid: Prefix should start with 0 in enum. So declaring dummy state - - active: DeploymentActive denotes state for deployment active - - closed: DeploymentClosed denotes state for deployment closed - title: State is an enum which refers to state of deployment - akash.deployment.v1beta3.DeploymentFilters: - type: object - properties: - owner: - type: string - dseq: - type: string - format: uint64 - state: - type: string - title: DeploymentFilters defines filters used to filter deployments - akash.deployment.v1beta3.DeploymentID: - type: object - properties: - owner: - type: string - dseq: - type: string - format: uint64 - title: DeploymentID stores owner and sequence number - akash.deployment.v1beta3.Group: - type: object - properties: - group_id: - type: object - properties: - owner: - type: string - dseq: - type: string - format: uint64 - gseq: - type: integer - format: int64 - title: >- - GroupID stores owner, deployment sequence number and group sequence - number - state: - type: string - enum: - - invalid - - open - - paused - - insufficient_funds - - closed - default: invalid - description: >- - - invalid: Prefix should start with 0 in enum. 
So declaring dummy - state - - open: GroupOpen denotes state for group open - - paused: GroupOrdered denotes state for group ordered - - insufficient_funds: GroupInsufficientFunds denotes state for group insufficient_funds - - closed: GroupClosed denotes state for group closed - title: State is an enum which refers to state of group - group_spec: - type: object - properties: - name: - type: string - requirements: + NOTE: not all txs here are valid. We're just agreeing on the + order first. + + This means that block.AppHash does not include these txs. + title: Data contains the set of transactions included in the block + evidence: type: object properties: - signed_by: - title: >- - SignedBy list of keys that tenants expect to have signatures - from - type: object - properties: - all_of: - type: array - items: - type: string - title: all_of all keys in this list must have signed attributes - any_of: - type: array - items: - type: string - title: >- - any_of at least of of the keys from the list must have - signed attributes - attributes: + evidence: type: array items: type: object properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: Attribute list of attributes tenant expects from the provider - title: PlacementRequirements - resources: - type: array - items: - type: object - properties: - resource: - type: object - properties: - id: - type: integer - format: int64 - cpu: + duplicate_vote_evidence: type: object properties: - units: + vote_a: type: object properties: - val: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed message in the + consensus. + + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: type: string format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: CPU stores resource units and cpu config attributes - memory: - type: object - properties: - quantity: + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote + from validators for + + consensus. 
+ vote_b: type: object properties: - val: + type: type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: Memory stores resource quantity and memory attributes - storage: - type: array - items: - type: object - properties: - name: - type: string - quantity: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed message in the + consensus. + + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: type: object properties: - key: - type: string - value: + hash: type: string - title: Attribute represents key value pair - title: >- - Storage stores resource quantity and storage - attributes - gpu: + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote + from validators for + + consensus. + total_voting_power: + type: string + format: int64 + validator_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + DuplicateVoteEvidence contains evidence of a validator + signed two conflicting votes. + light_client_attack_evidence: type: object properties: - units: + conflicting_block: type: object properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: + signed_header: + type: object + properties: + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules + for processing a block in the + blockchain, + + including all blockchain data structures + and the rules of the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: >- + hashes from the app output from the prev + block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint + block header. + commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: >- + BlockIdFlag indicates which BlcokID the + signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: >- + CommitSig is a part of the Vote included + in a Commit. + description: >- + Commit contains the evidence that a block + was committed by a set of validators. 
+ validator_set: + type: object + properties: + validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + proposer: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + common_height: + type: string + format: int64 + byzantine_validators: type: array items: type: object properties: - key: + address: type: string - value: + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use + with Tendermint Validators + voting_power: type: string - title: Attribute represents key value pair - title: GPU stores resource units and cpu config attributes - endpoints: - type: array - items: - type: object - properties: - kind: - type: string - enum: - - SHARED_HTTP - - RANDOM_PORT - - LEASED_IP - default: SHARED_HTTP - description: >- - - SHARED_HTTP: Describes an endpoint that becomes - a Kubernetes Ingress - - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort - - LEASED_IP: Describes an endpoint that becomes a leased IP - title: >- - This describes how the endpoint is implemented - when the lease is deployed - sequence_number: - type: integer - format: int64 - title: Endpoint describes a publicly accessible IP service - title: >- - Resources describes all available resources types for - deployment/node etc - - if field is nil resource is not present in the given - data-structure - count: - type: integer - format: int64 - price: + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + LightClientAttackEvidence contains evidence of a set of + validators attempting to mislead a light client. + last_commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: type: object properties: - denom: + block_id_flag: type: string - amount: + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: BlockIdFlag indicates which BlcokID the signature is for + validator_address: type: string - description: >- - DecCoin defines a token with a denomination and a decimal - amount. - - - NOTE: The amount field is an Dec which implements the custom - method - - signatures required by gogoproto. 
- title: >- - ResourceUnit extends Resources and adds Count along with the - Price - title: GroupSpec stores group specifications - created_at: - type: string - format: int64 - title: Group stores group id, state and specifications of group - akash.deployment.v1beta3.Group.State: - type: string - enum: - - invalid - - open - - paused - - insufficient_funds - - closed - default: invalid - description: |- - - invalid: Prefix should start with 0 in enum. So declaring dummy state - - open: GroupOpen denotes state for group open - - paused: GroupOrdered denotes state for group ordered - - insufficient_funds: GroupInsufficientFunds denotes state for group insufficient_funds - - closed: GroupClosed denotes state for group closed - title: State is an enum which refers to state of group - akash.deployment.v1beta3.GroupID: - type: object - properties: - owner: - type: string - dseq: - type: string - format: uint64 - gseq: - type: integer - format: int64 - title: GroupID stores owner, deployment sequence number and group sequence number - akash.deployment.v1beta3.GroupSpec: + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: CommitSig is a part of the Vote included in a Commit. + description: >- + Commit contains the evidence that a block was committed by a set + of validators. + description: |- + Block is tendermint type Block, with the Header proposer address + field converted to bech32 string. + description: >- + GetBlockByHeightResponse is the response type for the + Query/GetBlockByHeight RPC method. + cosmos.base.tendermint.v1beta1.GetLatestBlockResponse: type: object properties: - name: - type: string - requirements: + block_id: type: object properties: - signed_by: - title: SignedBy list of keys that tenants expect to have signatures from + hash: + type: string + format: byte + part_set_header: type: object properties: - all_of: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + block: + title: 'Deprecated: please use `sdk_block` instead' + type: object + properties: + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block + in the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. 
+ data: + type: object + properties: + txs: type: array items: type: string - title: all_of all keys in this list must have signed attributes - any_of: + format: byte + description: >- + Txs that will be applied by state @ block.Height+1. + + NOTE: not all txs here are valid. We're just agreeing on the + order first. + + This means that block.AppHash does not include these txs. + title: Data contains the set of transactions included in the block + evidence: + type: object + properties: + evidence: type: array items: - type: string - title: >- - any_of at least of of the keys from the list must have signed - attributes - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: Attribute list of attributes tenant expects from the provider - title: PlacementRequirements - resources: - type: array - items: - type: object - properties: - resource: - type: object - properties: - id: - type: integer - format: int64 - cpu: - type: object - properties: - units: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: CPU stores resource units and cpu config attributes - memory: type: object properties: - quantity: + duplicate_vote_evidence: type: object properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: Memory stores resource quantity and memory attributes - storage: - type: array - items: - type: object - properties: - name: - type: string - quantity: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: + vote_a: type: object properties: - key: - type: string - value: + type: type: string - title: Attribute represents key value pair - title: Storage stores resource quantity and storage attributes - gpu: - type: object - properties: - units: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: GPU stores resource units and cpu config attributes - endpoints: - type: array - items: - type: object - properties: - kind: - type: string - enum: - - SHARED_HTTP - - RANDOM_PORT - - LEASED_IP - default: SHARED_HTTP - description: >- - - SHARED_HTTP: Describes an endpoint that becomes a - Kubernetes Ingress - - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort - - LEASED_IP: Describes an endpoint that becomes a leased IP - title: >- - This describes how the endpoint is implemented when - the lease is deployed - sequence_number: - type: integer - format: int64 - title: Endpoint describes a publicly accessible IP service - title: >- - Resources describes all available resources types for - deployment/node etc + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed 
message in the + consensus. - if field is nil resource is not present in the given - data-structure - count: - type: integer - format: int64 - price: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a decimal - amount. + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote + from validators for + consensus. + vote_b: + type: object + properties: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed message in the + consensus. - NOTE: The amount field is an Dec which implements the custom - method + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote + from validators for - signatures required by gogoproto. - title: ResourceUnit extends Resources and adds Count along with the Price - title: GroupSpec stores group specifications - akash.deployment.v1beta3.QueryDeploymentResponse: - type: object - properties: - deployment: - type: object - properties: - deployment_id: - type: object - properties: - owner: - type: string - dseq: - type: string - format: uint64 - title: DeploymentID stores owner and sequence number - state: - type: string - enum: - - invalid - - active - - closed - default: invalid - description: >- - - invalid: Prefix should start with 0 in enum. So declaring dummy - state - - active: DeploymentActive denotes state for deployment active - - closed: DeploymentClosed denotes state for deployment closed - title: State is an enum which refers to state of deployment - version: - type: string - format: byte - created_at: - type: string - format: int64 - title: Deployment stores deploymentID, state and version details - groups: - type: array - items: - type: object - properties: - group_id: - type: object - properties: - owner: - type: string - dseq: - type: string - format: uint64 - gseq: - type: integer - format: int64 - title: >- - GroupID stores owner, deployment sequence number and group - sequence number - state: - type: string - enum: - - invalid - - open - - paused - - insufficient_funds - - closed - default: invalid - description: >- - - invalid: Prefix should start with 0 in enum. 
So declaring - dummy state - - open: GroupOpen denotes state for group open - - paused: GroupOrdered denotes state for group ordered - - insufficient_funds: GroupInsufficientFunds denotes state for group insufficient_funds - - closed: GroupClosed denotes state for group closed - title: State is an enum which refers to state of group - group_spec: - type: object - properties: - name: - type: string - requirements: - type: object - properties: - signed_by: - title: >- - SignedBy list of keys that tenants expect to have - signatures from + consensus. + total_voting_power: + type: string + format: int64 + validator_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + DuplicateVoteEvidence contains evidence of a validator + signed two conflicting votes. + light_client_attack_evidence: type: object properties: - all_of: - type: array - items: - type: string - title: >- - all_of all keys in this list must have signed - attributes - any_of: - type: array - items: - type: string - title: >- - any_of at least of of the keys from the list must - have signed attributes - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: >- - Attribute list of attributes tenant expects from the - provider - title: PlacementRequirements - resources: - type: array - items: - type: object - properties: - resource: - type: object - properties: - id: - type: integer - format: int64 - cpu: - type: object - properties: - units: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: + conflicting_block: + type: object + properties: + signed_header: + type: object + properties: + header: type: object properties: - key: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules + for processing a block in the + blockchain, + + including all blockchain data structures + and the rules of the application's + + state transition machine. 
+ chain_id: type: string - value: + height: type: string - title: Attribute represents key value pair - title: >- - CPU stores resource units and cpu config - attributes - memory: - type: object - properties: - quantity: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: >- + hashes from the app output from the prev + block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint + block header. + commit: type: object properties: - key: - type: string - value: + height: type: string - title: Attribute represents key value pair - title: >- - Memory stores resource quantity and memory - attributes - storage: - type: array - items: + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: >- + BlockIdFlag indicates which BlcokID the + signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: >- + CommitSig is a part of the Vote included + in a Commit. + description: >- + Commit contains the evidence that a block + was committed by a set of validators. 
+ validator_set: type: object properties: - name: - type: string - quantity: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: + validators: type: array items: type: object properties: - key: + address: type: string - value: + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: type: string - title: Attribute represents key value pair - title: >- - Storage stores resource quantity and storage - attributes - gpu: - type: object - properties: - units: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: + format: int64 + proposer_priority: + type: string + format: int64 + proposer: type: object properties: - key: + address: type: string - value: + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: type: string - title: Attribute represents key value pair - title: >- - GPU stores resource units and cpu config - attributes - endpoints: - type: array - items: - type: object - properties: - kind: + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: type: string - enum: - - SHARED_HTTP - - RANDOM_PORT - - LEASED_IP - default: SHARED_HTTP - description: >- - - SHARED_HTTP: Describes an endpoint that - becomes a Kubernetes Ingress - - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort - - LEASED_IP: Describes an endpoint that becomes a leased IP - title: >- - This describes how the endpoint is - implemented when the lease is deployed - sequence_number: - type: integer format: int64 - title: >- - Endpoint describes a publicly accessible IP - service - title: >- - Resources describes all available resources types for - deployment/node etc - - if field is nil resource is not present in the given - data-structure - count: + common_height: + type: string + format: int64 + byzantine_validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use + with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + LightClientAttackEvidence contains evidence of a set of + validators attempting to mislead a light client. + last_commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: type: integer format: int64 - price: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a - decimal amount. - - - NOTE: The amount field is an Dec which implements the - custom method - - signatures required by gogoproto. 
- title: >- - ResourceUnit extends Resources and adds Count along with - the Price - title: GroupSpec stores group specifications - created_at: - type: string - format: int64 - title: Group stores group id, state and specifications of group - escrow_account: + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: BlockIdFlag indicates which BlcokID the signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: CommitSig is a part of the Vote included in a Commit. + description: >- + Commit contains the evidence that a block was committed by a set + of validators. + sdk_block: + title: 'Since: cosmos-sdk 0.47' type: object properties: - id: - title: unique identifier for this escrow account - type: object - properties: - scope: - type: string - xid: - type: string - owner: - type: string - title: bech32 encoded account address of the owner of this escrow account - state: - title: current state of this escrow account - type: string - enum: - - invalid - - open - - closed - - overdrawn - default: invalid - description: |- - - invalid: AccountStateInvalid is an invalid state - - open: AccountOpen is the state when an account is open - - closed: AccountClosed is the state when an account is closed - - overdrawn: AccountOverdrawn is the state when an account is overdrawn - balance: + header: type: object properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a decimal amount. - + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block + in the blockchain, - NOTE: The amount field is an Dec which implements the custom - method + including all blockchain data structures and the rules of the + application's - signatures required by gogoproto. - title: unspent coins received from the owner's wallet - transferred: - type: object - properties: - denom: - type: string - amount: + state transition machine. + chain_id: type: string - description: >- - DecCoin defines a token with a denomination and a decimal amount. - - - NOTE: The amount field is an Dec which implements the custom - method - - signatures required by gogoproto. - title: total coins spent by this account - settled_at: - type: string - format: int64 - title: block height at which this account was last settled - depositor: - type: string - description: >- - bech32 encoded account address of the depositor. - - If depositor is same as the owner, then any incoming coins are - added to the Balance. - - If depositor isn't same as the owner, then any incoming coins are - added to the Funds. - funds: - type: object - properties: - denom: + height: type: string - amount: + format: int64 + time: type: string - description: >- - DecCoin defines a token with a denomination and a decimal amount. - - - NOTE: The amount field is an Dec which implements the custom - method - - signatures required by gogoproto. 
- title: Account stores state for an escrow account - title: >- - QueryDeploymentResponse is response type for the Query/Deployment RPC - method - akash.deployment.v1beta3.QueryDeploymentsResponse: - type: object - properties: - deployments: - type: array - items: - type: object - properties: - deployment: - type: object - properties: - deployment_id: - type: object - properties: - owner: - type: string - dseq: - type: string - format: uint64 - title: DeploymentID stores owner and sequence number - state: - type: string - enum: - - invalid - - active - - closed - default: invalid - description: >- - - invalid: Prefix should start with 0 in enum. So declaring - dummy state - - active: DeploymentActive denotes state for deployment active - - closed: DeploymentClosed denotes state for deployment closed - title: State is an enum which refers to state of deployment - version: - type: string - format: byte - created_at: - type: string - format: int64 - title: Deployment stores deploymentID, state and version details - groups: - type: array - items: + format: date-time + last_block_id: type: object properties: - group_id: + hash: + type: string + format: byte + part_set_header: type: object properties: - owner: - type: string - dseq: - type: string - format: uint64 - gseq: + total: type: integer format: int64 - title: >- - GroupID stores owner, deployment sequence number and group - sequence number - state: - type: string - enum: - - invalid - - open - - paused - - insufficient_funds - - closed - default: invalid - description: >- - - invalid: Prefix should start with 0 in enum. So - declaring dummy state - - open: GroupOpen denotes state for group open - - paused: GroupOrdered denotes state for group ordered - - insufficient_funds: GroupInsufficientFunds denotes state for group insufficient_funds - - closed: GroupClosed denotes state for group closed - title: State is an enum which refers to state of group - group_spec: - type: object - properties: - name: + hash: type: string - requirements: - type: object - properties: - signed_by: - title: >- - SignedBy list of keys that tenants expect to have - signatures from - type: object - properties: - all_of: - type: array - items: - type: string - title: >- - all_of all keys in this list must have signed - attributes - any_of: - type: array - items: - type: string - title: >- - any_of at least of of the keys from the list - must have signed attributes - attributes: - type: array - items: + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + description: >- + proposer_address is the original block proposer address, + formatted as a Bech32 string. + + In Tendermint, this type is `bytes`, but in the SDK, we + convert it to a Bech32 string + + for better UX. + description: Header defines the structure of a Tendermint block header. + data: + type: object + properties: + txs: + type: array + items: + type: string + format: byte + description: >- + Txs that will be applied by state @ block.Height+1. + + NOTE: not all txs here are valid. 
We're just agreeing on the + order first. + + This means that block.AppHash does not include these txs. + title: Data contains the set of transactions included in the block + evidence: + type: object + properties: + evidence: + type: array + items: + type: object + properties: + duplicate_vote_evidence: + type: object + properties: + vote_a: + type: object + properties: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed message in the + consensus. + + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: type: object properties: - key: + hash: type: string - value: + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote + from validators for + + consensus. + vote_b: + type: object + properties: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed message in the + consensus. + + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: type: string - title: Attribute represents key value pair - title: >- - Attribute list of attributes tenant expects from - the provider - title: PlacementRequirements - resources: - type: array - items: + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote + from validators for + + consensus. + total_voting_power: + type: string + format: int64 + validator_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + DuplicateVoteEvidence contains evidence of a validator + signed two conflicting votes. + light_client_attack_evidence: + type: object + properties: + conflicting_block: type: object properties: - resource: + signed_header: type: object properties: - id: - type: integer - format: int64 - cpu: + header: type: object properties: - units: + version: + title: basic block info type: object properties: - val: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules + for processing a block in the + blockchain, + + including all blockchain data structures + and the rules of the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: type: string format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte title: >- - Unit stores cpu, memory and storage - metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: >- - CPU stores resource units and cpu config - attributes - memory: + hashes from the app output from the prev + block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint + block header. + commit: type: object properties: - quantity: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: type: object properties: - val: + hash: type: string format: byte - title: >- - Unit stores cpu, memory and storage - metrics - attributes: + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: type: array items: type: object properties: - key: + block_id_flag: type: string - value: + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: >- + BlockIdFlag indicates which BlcokID the + signature is for + validator_address: type: string - title: Attribute represents key value pair - title: >- - Memory stores resource quantity and memory - attributes - storage: + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: >- + CommitSig is a part of the Vote included + in a Commit. + description: >- + Commit contains the evidence that a block + was committed by a set of validators. 
+ validator_set: + type: object + properties: + validators: type: array items: type: object properties: - name: + address: type: string - quantity: + format: byte + pub_key: type: object properties: - val: + ed25519: + type: string + format: byte + secp256k1: type: string format: byte title: >- - Unit stores cpu, memory and storage - metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: >- - Storage stores resource quantity and - storage attributes - gpu: + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + proposer: type: object properties: - units: + address: + type: string + format: byte + pub_key: type: object properties: - val: + ed25519: + type: string + format: byte + secp256k1: type: string format: byte title: >- - Unit stores cpu, memory and storage - metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: >- - GPU stores resource units and cpu config - attributes - endpoints: - type: array - items: - type: object - properties: - kind: - type: string - enum: - - SHARED_HTTP - - RANDOM_PORT - - LEASED_IP - default: SHARED_HTTP - description: >- - - SHARED_HTTP: Describes an endpoint - that becomes a Kubernetes Ingress - - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort - - LEASED_IP: Describes an endpoint that becomes a leased IP - title: >- - This describes how the endpoint is - implemented when the lease is deployed - sequence_number: - type: integer - format: int64 - title: >- - Endpoint describes a publicly accessible - IP service - title: >- - Resources describes all available resources - types for deployment/node etc - - if field is nil resource is not present in the - given data-structure - count: - type: integer - format: int64 - price: - type: object - properties: - denom: - type: string - amount: + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: type: string - description: >- - DecCoin defines a token with a denomination and - a decimal amount. - - - NOTE: The amount field is an Dec which - implements the custom method - - signatures required by gogoproto. - title: >- - ResourceUnit extends Resources and adds Count along - with the Price - title: GroupSpec stores group specifications - created_at: + format: int64 + common_height: + type: string + format: int64 + byzantine_validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use + with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + LightClientAttackEvidence contains evidence of a set of + validators attempting to mislead a light client. 
+ last_commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: type: string - format: int64 - title: Group stores group id, state and specifications of group - escrow_account: - type: object - properties: - id: - title: unique identifier for this escrow account + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: type: object properties: - scope: + block_id_flag: type: string - xid: + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: BlockIdFlag indicates which BlcokID the signature is for + validator_address: type: string - owner: - type: string - title: >- - bech32 encoded account address of the owner of this escrow - account - state: - title: current state of this escrow account - type: string - enum: - - invalid - - open - - closed - - overdrawn - default: invalid - description: |- - - invalid: AccountStateInvalid is an invalid state - - open: AccountOpen is the state when an account is open - - closed: AccountClosed is the state when an account is closed - - overdrawn: AccountOverdrawn is the state when an account is overdrawn - balance: - type: object - properties: - denom: + format: byte + timestamp: type: string - amount: + format: date-time + signature: type: string + format: byte + description: CommitSig is a part of the Vote included in a Commit. + description: >- + Commit contains the evidence that a block was committed by a set + of validators. + description: |- + Block is tendermint type Block, with the Header proposer address + field converted to bech32 string. + description: >- + GetLatestBlockResponse is the response type for the Query/GetLatestBlock + RPC method. + cosmos.base.tendermint.v1beta1.GetLatestValidatorSetResponse: + type: object + properties: + block_height: + type: string + format: int64 + validators: + type: array + items: + type: object + properties: + address: + type: string + pub_key: + type: object + properties: + type_url: + type: string description: >- - DecCoin defines a token with a denomination and a decimal - amount. + A URL/resource name that uniquely identifies the type of the + serialized + protocol buffer message. This string must contain at least - NOTE: The amount field is an Dec which implements the custom - method + one "/" character. The last segment of the URL's path must + represent - signatures required by gogoproto. - title: unspent coins received from the owner's wallet - transferred: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a decimal - amount. + the fully qualified name of the type (as in + `path/google.protobuf.Duration`). The name should be in a + canonical form - NOTE: The amount field is an Dec which implements the custom - method + (e.g., leading "." is not accepted). - signatures required by gogoproto. - title: total coins spent by this account - settled_at: - type: string - format: int64 - title: block height at which this account was last settled - depositor: + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. 
However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally set + up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: type: string + format: byte description: >- - bech32 encoded account address of the depositor. + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a - If depositor is same as the owner, then any incoming coins - are added to the Balance. + URL that describes the type of the serialized message. - If depositor isn't same as the owner, then any incoming - coins are added to the Funds. - funds: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a decimal - amount. + Protobuf library provides support to pack/unpack Any values in + the form - NOTE: The amount field is an Dec which implements the custom - method + of utility functions or additional generated methods of the Any + type. - signatures required by gogoproto. - title: Account stores state for an escrow account - title: >- - QueryDeploymentResponse is response type for the Query/Deployment - RPC method - pagination: - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - was set, its value is undefined otherwise - description: |- - PageResponse is to be embedded in gRPC response messages where the - corresponding request message has used PageRequest. + Example 1: Pack and unpack a message in C++. - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - title: >- - QueryDeploymentsResponse is response type for the Query/Deployments RPC - method - akash.deployment.v1beta3.QueryGroupResponse: - type: object - properties: - group: - type: object - properties: - group_id: - type: object - properties: - owner: - type: string - dseq: - type: string - format: uint64 - gseq: - type: integer - format: int64 - title: >- - GroupID stores owner, deployment sequence number and group - sequence number - state: - type: string - enum: - - invalid - - open - - paused - - insufficient_funds - - closed - default: invalid - description: >- - - invalid: Prefix should start with 0 in enum. 
So declaring dummy - state - - open: GroupOpen denotes state for group open - - paused: GroupOrdered denotes state for group ordered - - insufficient_funds: GroupInsufficientFunds denotes state for group insufficient_funds - - closed: GroupClosed denotes state for group closed - title: State is an enum which refers to state of group - group_spec: - type: object - properties: - name: - type: string - requirements: - type: object - properties: - signed_by: - title: >- - SignedBy list of keys that tenants expect to have - signatures from - type: object - properties: - all_of: - type: array - items: - type: string - title: >- - all_of all keys in this list must have signed - attributes - any_of: - type: array - items: - type: string - title: >- - any_of at least of of the keys from the list must have - signed attributes - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: >- - Attribute list of attributes tenant expects from the - provider - title: PlacementRequirements - resources: - type: array - items: - type: object - properties: - resource: - type: object - properties: - id: - type: integer - format: int64 - cpu: - type: object - properties: - units: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: CPU stores resource units and cpu config attributes - memory: - type: object - properties: - quantity: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: >- - Memory stores resource quantity and memory - attributes - storage: - type: array - items: - type: object - properties: - name: - type: string - quantity: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: >- - Storage stores resource quantity and storage - attributes - gpu: - type: object - properties: - units: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: GPU stores resource units and cpu config attributes - endpoints: - type: array - items: - type: object - properties: - kind: - type: string - enum: - - SHARED_HTTP - - RANDOM_PORT - - LEASED_IP - default: SHARED_HTTP - description: >- - - SHARED_HTTP: Describes an endpoint that - becomes a Kubernetes Ingress - - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort - - LEASED_IP: Describes an endpoint that becomes a leased IP - title: >- - This describes how the endpoint is implemented - when the lease is deployed - sequence_number: - type: integer - format: int64 - title: >- - Endpoint describes a publicly accessible IP - service - title: >- - Resources describes all available resources types for - deployment/node etc + Foo foo = ...; 
+ Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - if field is nil resource is not present in the given - data-structure - count: - type: integer - format: int64 - price: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a - decimal amount. + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - NOTE: The amount field is an Dec which implements the - custom method + If the embedded message type is well-known and has a custom JSON - signatures required by gogoproto. - title: >- - ResourceUnit extends Resources and adds Count along with the - Price - title: GroupSpec stores group specifications - created_at: + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + description: Validator is the type for the validator-set. + pagination: + description: pagination defines an pagination for the response. + type: object + properties: + next_key: type: string - format: int64 - title: Group stores group id, state and specifications of group - title: QueryGroupResponse is response type for the Query/Group RPC method - akash.deployment.v1beta3.ResourceUnit: + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + GetLatestValidatorSetResponse is the response type for the + Query/GetValidatorSetByHeight RPC method. 
+ cosmos.base.tendermint.v1beta1.GetNodeInfoResponse: type: object properties: - resource: + default_node_info: type: object properties: - id: - type: integer - format: int64 - cpu: + protocol_version: type: object properties: - units: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: CPU stores resource units and cpu config attributes - memory: + p2p: + type: string + format: uint64 + block: + type: string + format: uint64 + app: + type: string + format: uint64 + default_node_id: + type: string + listen_addr: + type: string + network: + type: string + version: + type: string + channels: + type: string + format: byte + moniker: + type: string + other: type: object properties: - quantity: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: Memory stores resource quantity and memory attributes - storage: + tx_index: + type: string + rpc_address: + type: string + application_version: + type: object + properties: + name: + type: string + app_name: + type: string + version: + type: string + git_commit: + type: string + build_tags: + type: string + go_version: + type: string + build_deps: type: array items: type: object properties: - name: + path: type: string - quantity: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: Storage stores resource quantity and storage attributes - gpu: - type: object - properties: - units: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: GPU stores resource units and cpu config attributes - endpoints: - type: array - items: + title: module path + version: + type: string + title: module version + sum: + type: string + title: checksum + title: Module is the type for VersionInfo + cosmos_sdk_version: + type: string + title: 'Since: cosmos-sdk 0.43' + description: VersionInfo is the type for the GetNodeInfoResponse message. + description: >- + GetNodeInfoResponse is the response type for the Query/GetNodeInfo RPC + method. + cosmos.base.tendermint.v1beta1.GetSyncingResponse: + type: object + properties: + syncing: + type: boolean + description: >- + GetSyncingResponse is the response type for the Query/GetSyncing RPC + method. 
+ cosmos.base.tendermint.v1beta1.GetValidatorSetByHeightResponse: + type: object + properties: + block_height: + type: string + format: int64 + validators: + type: array + items: + type: object + properties: + address: + type: string + pub_key: type: object properties: - kind: + type_url: type: string - enum: - - SHARED_HTTP - - RANDOM_PORT - - LEASED_IP - default: SHARED_HTTP description: >- - - SHARED_HTTP: Describes an endpoint that becomes a - Kubernetes Ingress - - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort - - LEASED_IP: Describes an endpoint that becomes a leased IP - title: >- - This describes how the endpoint is implemented when the - lease is deployed - sequence_number: - type: integer - format: int64 - title: Endpoint describes a publicly accessible IP service - title: >- - Resources describes all available resources types for deployment/node - etc + A URL/resource name that uniquely identifies the type of the + serialized - if field is nil resource is not present in the given data-structure - count: - type: integer - format: int64 - price: + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally set + up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... 
+ foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + description: Validator is the type for the validator-set. + pagination: + description: pagination defines an pagination for the response. type: object properties: - denom: + next_key: type: string - amount: + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: type: string - description: |- - DecCoin defines a token with a denomination and a decimal amount. + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. - title: ResourceUnit extends Resources and adds Count along with the Price - akash.escrow.v1beta3.Account: + was set, its value is undefined otherwise + description: >- + GetValidatorSetByHeightResponse is the response type for the + Query/GetValidatorSetByHeight RPC method. + cosmos.base.tendermint.v1beta1.Header: type: object properties: - id: - title: unique identifier for this escrow account + version: + title: basic block info type: object properties: - scope: + block: type: string - xid: + format: uint64 + app: type: string - owner: + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block in the + blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. + chain_id: type: string - title: bech32 encoded account address of the owner of this escrow account - state: - title: current state of this escrow account + height: type: string - enum: - - invalid - - open - - closed - - overdrawn - default: invalid - description: |- - - invalid: AccountStateInvalid is an invalid state - - open: AccountOpen is the state when an account is open - - closed: AccountClosed is the state when an account is closed - - overdrawn: AccountOverdrawn is the state when an account is overdrawn - balance: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - DecCoin defines a token with a denomination and a decimal amount. - - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. 
- title: unspent coins received from the owner's wallet - transferred: + format: int64 + time: + type: string + format: date-time + last_block_id: type: object properties: - denom: - type: string - amount: + hash: type: string - description: |- - DecCoin defines a token with a denomination and a decimal amount. - - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. - title: total coins spent by this account - settled_at: + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: type: string - format: int64 - title: block height at which this account was last settled - depositor: + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: type: string description: >- - bech32 encoded account address of the depositor. - - If depositor is same as the owner, then any incoming coins are added - to the Balance. + proposer_address is the original block proposer address, formatted as + a Bech32 string. - If depositor isn't same as the owner, then any incoming coins are - added to the Funds. - funds: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - DecCoin defines a token with a denomination and a decimal amount. + In Tendermint, this type is `bytes`, but in the SDK, we convert it to + a Bech32 string - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. - title: Account stores state for an escrow account - akash.escrow.v1beta3.Account.State: - type: string - enum: - - invalid - - open - - closed - - overdrawn - default: invalid - description: |- - - invalid: AccountStateInvalid is an invalid state - - open: AccountOpen is the state when an account is open - - closed: AccountClosed is the state when an account is closed - - overdrawn: AccountOverdrawn is the state when an account is overdrawn - title: State stores state for an escrow account - akash.escrow.v1beta3.AccountID: + for better UX. + description: Header defines the structure of a Tendermint block header. + cosmos.base.tendermint.v1beta1.Module: type: object properties: - scope: + path: type: string - xid: + title: module path + version: type: string - title: AccountID is the account identifier - cosmos.base.v1beta1.DecCoin: + title: module version + sum: + type: string + title: checksum + title: Module is the type for VersionInfo + cosmos.base.tendermint.v1beta1.ProofOp: type: object properties: - denom: + type: type: string - amount: + key: type: string - description: |- - DecCoin defines a token with a denomination and a decimal amount. - - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. - akash.deployment.v1beta3.MsgCloseDeploymentResponse: - type: object - description: MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. 
- akash.deployment.v1beta3.MsgCloseGroupResponse: - type: object - description: MsgCloseGroupResponse defines the Msg/CloseGroup response type. - akash.deployment.v1beta3.MsgCreateDeploymentResponse: - type: object - description: >- - MsgCreateDeploymentResponse defines the Msg/CreateDeployment response - type. - akash.deployment.v1beta3.MsgDepositDeploymentResponse: - type: object - description: >- - MsgCreateDeploymentResponse defines the Msg/CreateDeployment response - type. - akash.deployment.v1beta3.MsgPauseGroupResponse: - type: object - description: MsgPauseGroupResponse defines the Msg/PauseGroup response type. - akash.deployment.v1beta3.MsgStartGroupResponse: - type: object - description: MsgStartGroupResponse defines the Msg/StartGroup response type. - akash.deployment.v1beta3.MsgUpdateDeploymentResponse: - type: object + format: byte + data: + type: string + format: byte description: >- - MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response - type. - cosmos.base.v1beta1.Coin: + ProofOp defines an operation used for calculating Merkle root. The data + could + + be arbitrary format, providing necessary data for example neighbouring + node + + hash. + + + Note: This type is a duplicate of the ProofOp proto type defined in + Tendermint. + cosmos.base.tendermint.v1beta1.ProofOps: type: object properties: - denom: - type: string - amount: - type: string - description: |- - Coin defines a token with a denomination and an amount. + ops: + type: array + items: + type: object + properties: + type: + type: string + key: + type: string + format: byte + data: + type: string + format: byte + description: >- + ProofOp defines an operation used for calculating Merkle root. The + data could - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. - akash.escrow.v1beta3.FractionalPayment: + be arbitrary format, providing necessary data for example + neighbouring node + + hash. + + + Note: This type is a duplicate of the ProofOp proto type defined in + Tendermint. + description: >- + ProofOps is Merkle proof defined by the list of ProofOps. + + + Note: This type is a duplicate of the ProofOps proto type defined in + Tendermint. + cosmos.base.tendermint.v1beta1.Validator: type: object properties: - account_id: - type: object - properties: - scope: - type: string - xid: - type: string - title: AccountID is the account identifier - payment_id: - type: string - owner: - type: string - state: + address: type: string - enum: - - invalid - - open - - closed - - overdrawn - default: invalid - description: >- - - invalid: PaymentStateInvalid is the state when the payment is - invalid - - open: PaymentStateOpen is the state when the payment is open - - closed: PaymentStateClosed is the state when the payment is closed - - overdrawn: PaymentStateOverdrawn is the state when the payment is overdrawn - title: Payment State - rate: + pub_key: type: object properties: - denom: - type: string - amount: + type_url: type: string - description: |- - DecCoin defines a token with a denomination and a decimal amount. + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. - balance: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - DecCoin defines a token with a denomination and a decimal amount. + protocol buffer message. 
This string must contain at least - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. - withdrawn: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - Coin defines a token with a denomination and an amount. + one "/" character. The last segment of the URL's path must + represent - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. - title: Payment stores state for a payment - akash.escrow.v1beta3.FractionalPayment.State: - type: string - enum: - - invalid - - open - - closed - - overdrawn - default: invalid - description: |- - - invalid: PaymentStateInvalid is the state when the payment is invalid - - open: PaymentStateOpen is the state when the payment is open - - closed: PaymentStateClosed is the state when the payment is closed - - overdrawn: PaymentStateOverdrawn is the state when the payment is overdrawn - title: Payment State - akash.market.v1beta3.Bid: - type: object - properties: - bid_id: - type: object - properties: - owner: - type: string - dseq: - type: string - format: uint64 - gseq: - type: integer - format: int64 - oseq: - type: integer - format: int64 - provider: + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up a + type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might be + + used with implementation specific semantics. + value: type: string - description: |- - BidID stores owner and all other seq numbers - A successful bid becomes a Lease(ID). - state: - type: string - enum: - - invalid - - open - - active - - lost - - closed - default: invalid + format: byte + description: >- + Must be a valid serialized protocol buffer of the above specified + type. description: >- - - invalid: Prefix should start with 0 in enum. So declaring dummy - state - - open: BidOpen denotes state for bid open - - active: BidMatched denotes state for bid open - - lost: BidLost denotes state for bid lost - - closed: BidClosed denotes state for bid closed - title: State is an enum which refers to state of bid - price: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - DecCoin defines a token with a denomination and a decimal amount. + `Any` contains an arbitrary serialized protocol buffer message along + with a - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. - created_at: + URL that describes the type of the serialized message. 
+ + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + voting_power: type: string format: int64 - title: Bid stores BidID, state of bid and price - akash.market.v1beta3.Bid.State: - type: string - enum: - - invalid - - open - - active - - lost - - closed - default: invalid - description: |- - - invalid: Prefix should start with 0 in enum. So declaring dummy state - - open: BidOpen denotes state for bid open - - active: BidMatched denotes state for bid open - - lost: BidLost denotes state for bid lost - - closed: BidClosed denotes state for bid closed - title: State is an enum which refers to state of bid - akash.market.v1beta3.BidFilters: + proposer_priority: + type: string + format: int64 + description: Validator is the type for the validator-set. 
+ cosmos.base.tendermint.v1beta1.VersionInfo: type: object properties: - owner: + name: type: string - dseq: + app_name: type: string - format: uint64 - gseq: - type: integer - format: int64 - oseq: - type: integer - format: int64 - provider: + version: type: string - state: + git_commit: type: string - title: BidFilters defines flags for bid list filter - akash.market.v1beta3.BidID: - type: object - properties: - owner: + build_tags: type: string - dseq: + go_version: type: string - format: uint64 - gseq: - type: integer - format: int64 - oseq: - type: integer - format: int64 - provider: + build_deps: + type: array + items: + type: object + properties: + path: + type: string + title: module path + version: + type: string + title: module version + sum: + type: string + title: checksum + title: Module is the type for VersionInfo + cosmos_sdk_version: type: string - description: |- - BidID stores owner and all other seq numbers - A successful bid becomes a Lease(ID). - akash.market.v1beta3.Lease: + title: 'Since: cosmos-sdk 0.43' + description: VersionInfo is the type for the GetNodeInfoResponse message. + tendermint.crypto.PublicKey: type: object properties: - lease_id: - type: object - properties: - owner: - type: string - dseq: - type: string - format: uint64 - gseq: - type: integer - format: int64 - oseq: - type: integer - format: int64 - provider: - type: string - title: LeaseID stores bid details of lease - state: + ed25519: type: string - enum: - - invalid - - active - - insufficient_funds - - closed - default: invalid - description: >- - - invalid: Prefix should start with 0 in enum. So declaring dummy - state - - active: LeaseActive denotes state for lease active - - insufficient_funds: LeaseInsufficientFunds denotes state for lease insufficient_funds - - closed: LeaseClosed denotes state for lease closed - title: State is an enum which refers to state of lease - price: + format: byte + secp256k1: + type: string + format: byte + title: PublicKey defines the keys available for use with Tendermint Validators + tendermint.p2p.DefaultNodeInfo: + type: object + properties: + protocol_version: type: object properties: - denom: + p2p: type: string - amount: + format: uint64 + block: type: string - description: |- - DecCoin defines a token with a denomination and a decimal amount. - - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. - created_at: + format: uint64 + app: + type: string + format: uint64 + default_node_id: type: string - format: int64 - closed_on: + listen_addr: type: string - format: int64 - title: Lease stores LeaseID, state of lease and price - akash.market.v1beta3.Lease.State: - type: string - enum: - - invalid - - active - - insufficient_funds - - closed - default: invalid - description: |- - - invalid: Prefix should start with 0 in enum. 
So declaring dummy state - - active: LeaseActive denotes state for lease active - - insufficient_funds: LeaseInsufficientFunds denotes state for lease insufficient_funds - - closed: LeaseClosed denotes state for lease closed - title: State is an enum which refers to state of lease - akash.market.v1beta3.LeaseFilters: - type: object - properties: - owner: + network: type: string - dseq: + version: type: string - format: uint64 - gseq: - type: integer - format: int64 - oseq: - type: integer - format: int64 - provider: + channels: type: string - state: + format: byte + moniker: type: string - title: LeaseFilters defines flags for lease list filter - akash.market.v1beta3.LeaseID: + other: + type: object + properties: + tx_index: + type: string + rpc_address: + type: string + tendermint.p2p.DefaultNodeInfoOther: type: object properties: - owner: + tx_index: type: string - dseq: + rpc_address: + type: string + tendermint.p2p.ProtocolVersion: + type: object + properties: + p2p: type: string format: uint64 - gseq: - type: integer - format: int64 - oseq: - type: integer - format: int64 - provider: + block: type: string - title: LeaseID stores bid details of lease - akash.market.v1beta3.Order: + format: uint64 + app: + type: string + format: uint64 + tendermint.types.Block: type: object properties: - order_id: + header: type: object properties: - owner: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block in + the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. + chain_id: type: string - dseq: + height: type: string - format: uint64 - gseq: - type: integer - format: int64 - oseq: - type: integer format: int64 - title: OrderID stores owner and all other seq numbers - state: - type: string - enum: - - invalid - - open - - active - - closed - default: invalid - description: >- - - invalid: Prefix should start with 0 in enum. 
So declaring dummy - state - - open: OrderOpen denotes state for order open - - active: OrderMatched denotes state for order matched - - closed: OrderClosed denotes state for order lost - title: State is an enum which refers to state of order - spec: - type: object - properties: - name: + time: type: string - requirements: + format: date-time + last_block_id: type: object properties: - signed_by: - title: >- - SignedBy list of keys that tenants expect to have signatures - from + hash: + type: string + format: byte + part_set_header: type: object properties: - all_of: - type: array - items: - type: string - title: all_of all keys in this list must have signed attributes - any_of: - type: array - items: - type: string - title: >- - any_of at least of of the keys from the list must have - signed attributes - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: Attribute list of attributes tenant expects from the provider - title: PlacementRequirements - resources: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + data: + type: object + properties: + txs: + type: array + items: + type: string + format: byte + description: >- + Txs that will be applied by state @ block.Height+1. + + NOTE: not all txs here are valid. We're just agreeing on the + order first. + + This means that block.AppHash does not include these txs. + title: Data contains the set of transactions included in the block + evidence: + type: object + properties: + evidence: type: array items: type: object properties: - resource: + duplicate_vote_evidence: type: object properties: - id: - type: integer - format: int64 - cpu: + vote_a: type: object properties: - units: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed message in the + consensus. 
+ + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: type: object properties: - val: + hash: type: string format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: CPU stores resource units and cpu config attributes - memory: + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote + from validators for + + consensus. + vote_b: type: object properties: - quantity: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed message in the + consensus. + + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote + from validators for + + consensus. + total_voting_power: + type: string + format: int64 + validator_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + DuplicateVoteEvidence contains evidence of a validator + signed two conflicting votes. + light_client_attack_evidence: + type: object + properties: + conflicting_block: + type: object + properties: + signed_header: + type: object + properties: + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and + the rules of the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: >- + hashes from the app output from the prev + block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint + block header. + commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: >- + BlockIdFlag indicates which BlcokID the + signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: >- + CommitSig is a part of the Vote included + in a Commit. + description: >- + Commit contains the evidence that a block was + committed by a set of validators. 
+ validator_set: type: object properties: - val: + validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + proposer: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use + with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: Memory stores resource quantity and memory attributes - storage: + format: int64 + common_height: + type: string + format: int64 + byzantine_validators: type: array items: type: object properties: - name: + address: type: string - quantity: + format: byte + pub_key: type: object properties: - val: + ed25519: type: string format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: >- - Storage stores resource quantity and storage - attributes - gpu: - type: object - properties: - units: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: + secp256k1: type: string - title: Attribute represents key value pair - title: GPU stores resource units and cpu config attributes - endpoints: - type: array - items: - type: object - properties: - kind: - type: string - enum: - - SHARED_HTTP - - RANDOM_PORT - - LEASED_IP - default: SHARED_HTTP - description: >- - - SHARED_HTTP: Describes an endpoint that becomes - a Kubernetes Ingress - - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort - - LEASED_IP: Describes an endpoint that becomes a leased IP + format: byte title: >- - This describes how the endpoint is implemented - when the lease is deployed - sequence_number: - type: integer + PublicKey defines the keys available for use with + Tendermint Validators + voting_power: + type: string format: int64 - title: Endpoint describes a publicly accessible IP service - title: >- - Resources describes all available resources types for - deployment/node etc - - if field is nil resource is not present in the given - data-structure - count: - type: integer - format: int64 - price: - type: object - properties: - denom: + proposer_priority: + type: string + format: int64 + total_voting_power: type: string - amount: + format: int64 + timestamp: type: string + format: date-time description: >- - DecCoin defines a token with a denomination and a decimal - amount. - - - NOTE: The amount field is an Dec which implements the custom - method - - signatures required by gogoproto. 
- title: >- - ResourceUnit extends Resources and adds Count along with the - Price - title: GroupSpec stores group specifications - created_at: + LightClientAttackEvidence contains evidence of a set of + validators attempting to mislead a light client. + last_commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: BlockIdFlag indicates which BlcokID the signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: CommitSig is a part of the Vote included in a Commit. + description: >- + Commit contains the evidence that a block was committed by a set of + validators. + tendermint.types.BlockID: + type: object + properties: + hash: type: string - format: int64 - title: Order stores orderID, state of order and other details - akash.market.v1beta3.Order.State: + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + tendermint.types.BlockIDFlag: type: string enum: - - invalid - - open - - active - - closed - default: invalid - description: |- - - invalid: Prefix should start with 0 in enum. So declaring dummy state - - open: OrderOpen denotes state for order open - - active: OrderMatched denotes state for order matched - - closed: OrderClosed denotes state for order lost - title: State is an enum which refers to state of order - akash.market.v1beta3.OrderFilters: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: BlockIdFlag indicates which BlcokID the signature is for + tendermint.types.Commit: type: object properties: - owner: - type: string - dseq: + height: type: string - format: uint64 - gseq: - type: integer format: int64 - oseq: + round: type: integer - format: int64 - state: - type: string - title: OrderFilters defines flags for order list filter - akash.market.v1beta3.OrderID: + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: BlockIdFlag indicates which BlcokID the signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: CommitSig is a part of the Vote included in a Commit. + description: >- + Commit contains the evidence that a block was committed by a set of + validators. 
+ tendermint.types.CommitSig: type: object properties: - owner: + block_id_flag: type: string - dseq: + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: BlockIdFlag indicates which BlcokID the signature is for + validator_address: type: string - format: uint64 - gseq: - type: integer - format: int64 - oseq: - type: integer - format: int64 - title: OrderID stores owner and all other seq numbers - akash.market.v1beta3.QueryBidResponse: + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: CommitSig is a part of the Vote included in a Commit. + tendermint.types.Data: type: object properties: - bid: + txs: + type: array + items: + type: string + format: byte + description: >- + Txs that will be applied by state @ block.Height+1. + + NOTE: not all txs here are valid. We're just agreeing on the order + first. + + This means that block.AppHash does not include these txs. + title: Data contains the set of transactions included in the block + tendermint.types.DuplicateVoteEvidence: + type: object + properties: + vote_a: type: object properties: - bid_id: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: |- + SignedMsgType is a type of signed message in the consensus. + + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: type: object properties: - owner: - type: string - dseq: - type: string - format: uint64 - gseq: - type: integer - format: int64 - oseq: - type: integer - format: int64 - provider: + hash: type: string - description: |- - BidID stores owner and all other seq numbers - A successful bid becomes a Lease(ID). - state: + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote from validators + for + + consensus. + vote_b: + type: object + properties: + type: type: string enum: - - invalid - - open - - active - - lost - - closed - default: invalid - description: >- - - invalid: Prefix should start with 0 in enum. So declaring dummy - state - - open: BidOpen denotes state for bid open - - active: BidMatched denotes state for bid open - - lost: BidLost denotes state for bid lost - - closed: BidClosed denotes state for bid closed - title: State is an enum which refers to state of bid - price: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: |- + SignedMsgType is a type of signed message in the consensus. + + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: type: object properties: - denom: - type: string - amount: + hash: type: string - description: >- - DecCoin defines a token with a denomination and a decimal amount. 
- - - NOTE: The amount field is an Dec which implements the custom - method - - signatures required by gogoproto. - created_at: + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: type: string - format: int64 - title: Bid stores BidID, state of bid and price - escrow_account: + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote from validators + for + + consensus. + total_voting_power: + type: string + format: int64 + validator_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + DuplicateVoteEvidence contains evidence of a validator signed two + conflicting votes. + tendermint.types.Evidence: + type: object + properties: + duplicate_vote_evidence: type: object properties: - id: - title: unique identifier for this escrow account + vote_a: type: object properties: - scope: + type: type: string - xid: + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: |- + SignedMsgType is a type of signed message in the consensus. + + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: type: string - owner: - type: string - title: bech32 encoded account address of the owner of this escrow account - state: - title: current state of this escrow account - type: string - enum: - - invalid - - open - - closed - - overdrawn - default: invalid - description: |- - - invalid: AccountStateInvalid is an invalid state - - open: AccountOpen is the state when an account is open - - closed: AccountClosed is the state when an account is closed - - overdrawn: AccountOverdrawn is the state when an account is overdrawn - balance: - type: object - properties: - denom: + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: type: string - amount: + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: type: string + format: byte description: >- - DecCoin defines a token with a denomination and a decimal amount. - - - NOTE: The amount field is an Dec which implements the custom - method + Vote represents a prevote, precommit, or commit vote from + validators for - signatures required by gogoproto. - title: unspent coins received from the owner's wallet - transferred: + consensus. + vote_b: type: object properties: - denom: + type: type: string - amount: + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: |- + SignedMsgType is a type of signed message in the consensus. 
+ + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: type: string + format: byte description: >- - DecCoin defines a token with a denomination and a decimal amount. - - - NOTE: The amount field is an Dec which implements the custom - method + Vote represents a prevote, precommit, or commit vote from + validators for - signatures required by gogoproto. - title: total coins spent by this account - settled_at: + consensus. + total_voting_power: type: string format: int64 - title: block height at which this account was last settled - depositor: + validator_power: type: string - description: >- - bech32 encoded account address of the depositor. - - If depositor is same as the owner, then any incoming coins are - added to the Balance. - - If depositor isn't same as the owner, then any incoming coins are - added to the Funds. - funds: + format: int64 + timestamp: + type: string + format: date-time + description: >- + DuplicateVoteEvidence contains evidence of a validator signed two + conflicting votes. + light_client_attack_evidence: + type: object + properties: + conflicting_block: type: object properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a decimal amount. - - - NOTE: The amount field is an Dec which implements the custom - method - - signatures required by gogoproto. - title: Account stores state for an escrow account - title: QueryBidResponse is response type for the Query/Bid RPC method - akash.market.v1beta3.QueryBidsResponse: - type: object - properties: - bids: - type: array - items: - type: object - properties: - bid: - type: object - properties: - bid_id: - type: object - properties: - owner: - type: string - dseq: - type: string - format: uint64 - gseq: - type: integer - format: int64 - oseq: - type: integer - format: int64 - provider: - type: string - description: |- - BidID stores owner and all other seq numbers - A successful bid becomes a Lease(ID). - state: - type: string - enum: - - invalid - - open - - active - - lost - - closed - default: invalid - description: >- - - invalid: Prefix should start with 0 in enum. So declaring - dummy state - - open: BidOpen denotes state for bid open - - active: BidMatched denotes state for bid open - - lost: BidLost denotes state for bid lost - - closed: BidClosed denotes state for bid closed - title: State is an enum which refers to state of bid - price: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a decimal - amount. - - - NOTE: The amount field is an Dec which implements the custom - method - - signatures required by gogoproto. 
- created_at: - type: string - format: int64 - title: Bid stores BidID, state of bid and price - escrow_account: - type: object - properties: - id: - title: unique identifier for this escrow account - type: object - properties: - scope: - type: string - xid: - type: string - owner: - type: string - title: >- - bech32 encoded account address of the owner of this escrow - account - state: - title: current state of this escrow account - type: string - enum: - - invalid - - open - - closed - - overdrawn - default: invalid - description: |- - - invalid: AccountStateInvalid is an invalid state - - open: AccountOpen is the state when an account is open - - closed: AccountClosed is the state when an account is closed - - overdrawn: AccountOverdrawn is the state when an account is overdrawn - balance: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a decimal - amount. - + signed_header: + type: object + properties: + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing + a block in the blockchain, - NOTE: The amount field is an Dec which implements the custom - method + including all blockchain data structures and the rules + of the application's - signatures required by gogoproto. - title: unspent coins received from the owner's wallet - transferred: + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: >- + BlockIdFlag indicates which BlcokID the + signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: >- + CommitSig is a part of the Vote included in a + Commit. 
+ description: >- + Commit contains the evidence that a block was committed by + a set of validators. + validator_set: + type: object + properties: + validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use with + Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + proposer: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use with + Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + common_height: + type: string + format: int64 + byzantine_validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: type: object properties: - denom: + ed25519: type: string - amount: + format: byte + secp256k1: type: string - description: >- - DecCoin defines a token with a denomination and a decimal - amount. - - - NOTE: The amount field is an Dec which implements the custom - method - - signatures required by gogoproto. - title: total coins spent by this account - settled_at: + format: byte + title: >- + PublicKey defines the keys available for use with Tendermint + Validators + voting_power: type: string format: int64 - title: block height at which this account was last settled - depositor: + proposer_priority: type: string - description: >- - bech32 encoded account address of the depositor. - - If depositor is same as the owner, then any incoming coins - are added to the Balance. - - If depositor isn't same as the owner, then any incoming - coins are added to the Funds. - funds: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a decimal - amount. - - - NOTE: The amount field is an Dec which implements the custom - method - - signatures required by gogoproto. - title: Account stores state for an escrow account - title: QueryBidResponse is response type for the Query/Bid RPC method - pagination: - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: |- - PageResponse is to be embedded in gRPC response messages where the - corresponding request message has used PageRequest. 
- - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - title: QueryBidsResponse is response type for the Query/Bids RPC method - akash.market.v1beta3.QueryLeaseResponse: - type: object - properties: - lease: - type: object - properties: - lease_id: - type: object - properties: - owner: - type: string - dseq: - type: string - format: uint64 - gseq: - type: integer - format: int64 - oseq: - type: integer - format: int64 - provider: - type: string - title: LeaseID stores bid details of lease - state: - type: string - enum: - - invalid - - active - - insufficient_funds - - closed - default: invalid - description: >- - - invalid: Prefix should start with 0 in enum. So declaring dummy - state - - active: LeaseActive denotes state for lease active - - insufficient_funds: LeaseInsufficientFunds denotes state for lease insufficient_funds - - closed: LeaseClosed denotes state for lease closed - title: State is an enum which refers to state of lease - price: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a decimal amount. - - - NOTE: The amount field is an Dec which implements the custom - method - - signatures required by gogoproto. - created_at: - type: string - format: int64 - closed_on: + format: int64 + total_voting_power: type: string format: int64 - title: Lease stores LeaseID, state of lease and price - escrow_payment: - type: object - properties: - account_id: - type: object - properties: - scope: - type: string - xid: - type: string - title: AccountID is the account identifier - payment_id: - type: string - owner: - type: string - state: + timestamp: type: string - enum: - - invalid - - open - - closed - - overdrawn - default: invalid - description: >- - - invalid: PaymentStateInvalid is the state when the payment is - invalid - - open: PaymentStateOpen is the state when the payment is open - - closed: PaymentStateClosed is the state when the payment is closed - - overdrawn: PaymentStateOverdrawn is the state when the payment is overdrawn - title: Payment State - rate: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a decimal amount. - - - NOTE: The amount field is an Dec which implements the custom - method - - signatures required by gogoproto. - balance: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a decimal amount. - - - NOTE: The amount field is an Dec which implements the custom - method - - signatures required by gogoproto. - withdrawn: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the custom - method - - signatures required by gogoproto. - title: Payment stores state for a payment - title: QueryLeaseResponse is response type for the Query/Lease RPC method - akash.market.v1beta3.QueryLeasesResponse: + format: date-time + description: >- + LightClientAttackEvidence contains evidence of a set of validators + attempting to mislead a light client. 
+ tendermint.types.EvidenceList: type: object properties: - leases: + evidence: type: array items: type: object properties: - lease: + duplicate_vote_evidence: type: object properties: - lease_id: + vote_a: type: object properties: - owner: + type: type: string - dseq: + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed message in the + consensus. + + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: type: string - format: uint64 - gseq: - type: integer format: int64 - oseq: + round: type: integer - format: int64 - provider: - type: string - title: LeaseID stores bid details of lease - state: - type: string - enum: - - invalid - - active - - insufficient_funds - - closed - default: invalid - description: >- - - invalid: Prefix should start with 0 in enum. So declaring - dummy state - - active: LeaseActive denotes state for lease active - - insufficient_funds: LeaseInsufficientFunds denotes state for lease insufficient_funds - - closed: LeaseClosed denotes state for lease closed - title: State is an enum which refers to state of lease - price: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a decimal - amount. - - - NOTE: The amount field is an Dec which implements the custom - method - - signatures required by gogoproto. - created_at: - type: string - format: int64 - closed_on: - type: string - format: int64 - title: Lease stores LeaseID, state of lease and price - escrow_payment: - type: object - properties: - account_id: - type: object - properties: - scope: - type: string - xid: + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: type: string - title: AccountID is the account identifier - payment_id: - type: string - owner: - type: string - state: - type: string - enum: - - invalid - - open - - closed - - overdrawn - default: invalid - description: >- - - invalid: PaymentStateInvalid is the state when the payment - is invalid - - open: PaymentStateOpen is the state when the payment is open - - closed: PaymentStateClosed is the state when the payment is closed - - overdrawn: PaymentStateOverdrawn is the state when the payment is overdrawn - title: Payment State - rate: - type: object - properties: - denom: + format: date-time + validator_address: type: string - amount: + format: byte + validator_index: + type: integer + format: int32 + signature: type: string + format: byte description: >- - DecCoin defines a token with a denomination and a decimal - amount. - - - NOTE: The amount field is an Dec which implements the custom - method + Vote represents a prevote, precommit, or commit vote from + validators for - signatures required by gogoproto. - balance: + consensus. + vote_b: type: object properties: - denom: - type: string - amount: + type: type: string - description: >- - DecCoin defines a token with a denomination and a decimal - amount. 
- - - NOTE: The amount field is an Dec which implements the custom - method + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed message in the + consensus. - signatures required by gogoproto. - withdrawn: - type: object - properties: - denom: - type: string - amount: + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: type: string - description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the custom - method - - signatures required by gogoproto. - title: Payment stores state for a payment - title: QueryLeaseResponse is response type for the Query/Lease RPC method - pagination: - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: |- - PageResponse is to be embedded in gRPC response messages where the - corresponding request message has used PageRequest. - - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - title: QueryLeasesResponse is response type for the Query/Leases RPC method - akash.market.v1beta3.QueryOrderResponse: - type: object - properties: - order: - type: object - properties: - order_id: - type: object - properties: - owner: - type: string - dseq: - type: string - format: uint64 - gseq: - type: integer - format: int64 - oseq: - type: integer - format: int64 - title: OrderID stores owner and all other seq numbers - state: - type: string - enum: - - invalid - - open - - active - - closed - default: invalid - description: >- - - invalid: Prefix should start with 0 in enum. 
So declaring dummy - state - - open: OrderOpen denotes state for order open - - active: OrderMatched denotes state for order matched - - closed: OrderClosed denotes state for order lost - title: State is an enum which refers to state of order - spec: - type: object - properties: - name: - type: string - requirements: - type: object - properties: - signed_by: - title: >- - SignedBy list of keys that tenants expect to have - signatures from - type: object - properties: - all_of: - type: array - items: - type: string - title: >- - all_of all keys in this list must have signed - attributes - any_of: - type: array - items: - type: string - title: >- - any_of at least of of the keys from the list must have - signed attributes - attributes: - type: array - items: + format: int64 + round: + type: integer + format: int32 + block_id: type: object properties: - key: - type: string - value: + hash: type: string - title: Attribute represents key value pair - title: >- - Attribute list of attributes tenant expects from the - provider - title: PlacementRequirements - resources: - type: array - items: + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote from + validators for + + consensus. + total_voting_power: + type: string + format: int64 + validator_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + DuplicateVoteEvidence contains evidence of a validator signed + two conflicting votes. + light_client_attack_evidence: + type: object + properties: + conflicting_block: type: object properties: - resource: + signed_header: type: object properties: - id: - type: integer - format: int64 - cpu: + header: type: object properties: - units: + version: + title: basic block info type: object properties: - val: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for + processing a block in the blockchain, + + including all blockchain data structures and the + rules of the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: type: string format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: CPU stores resource units and cpu config attributes - memory: + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint block + header. + commit: type: object properties: - quantity: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: type: object properties: - val: + hash: type: string format: byte - title: Unit stores cpu, memory and storage metrics - attributes: + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: type: array items: type: object properties: - key: + block_id_flag: type: string - value: + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: >- + BlockIdFlag indicates which BlcokID the + signature is for + validator_address: type: string - title: Attribute represents key value pair - title: >- - Memory stores resource quantity and memory - attributes - storage: + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: >- + CommitSig is a part of the Vote included in a + Commit. + description: >- + Commit contains the evidence that a block was + committed by a set of validators. 
+ validator_set: + type: object + properties: + validators: type: array items: type: object properties: - name: + address: type: string - quantity: + format: byte + pub_key: type: object properties: - val: + ed25519: type: string format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: >- - Storage stores resource quantity and storage - attributes - gpu: + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use + with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + proposer: type: object properties: - units: + address: + type: string + format: byte + pub_key: type: object properties: - val: + ed25519: type: string format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: GPU stores resource units and cpu config attributes - endpoints: - type: array - items: - type: object - properties: - kind: - type: string - enum: - - SHARED_HTTP - - RANDOM_PORT - - LEASED_IP - default: SHARED_HTTP - description: >- - - SHARED_HTTP: Describes an endpoint that - becomes a Kubernetes Ingress - - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort - - LEASED_IP: Describes an endpoint that becomes a leased IP - title: >- - This describes how the endpoint is implemented - when the lease is deployed - sequence_number: - type: integer - format: int64 - title: >- - Endpoint describes a publicly accessible IP - service - title: >- - Resources describes all available resources types for - deployment/node etc - - if field is nil resource is not present in the given - data-structure - count: - type: integer - format: int64 - price: - type: object - properties: - denom: - type: string - amount: + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use + with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: type: string - description: >- - DecCoin defines a token with a denomination and a - decimal amount. - + format: int64 + common_height: + type: string + format: int64 + byzantine_validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use with + Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + LightClientAttackEvidence contains evidence of a set of + validators attempting to mislead a light client. 
+ tendermint.types.Header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block in the + blockchain, - NOTE: The amount field is an Dec which implements the - custom method + including all blockchain data structures and the rules of the + application's - signatures required by gogoproto. - title: >- - ResourceUnit extends Resources and adds Count along with the - Price - title: GroupSpec stores group specifications - created_at: + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: type: string - format: int64 - title: Order stores orderID, state of order and other details - title: QueryOrderResponse is response type for the Query/Order RPC method - akash.market.v1beta3.QueryOrdersResponse: + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + tendermint.types.LightBlock: type: object properties: - orders: - type: array - items: - type: object - properties: - order_id: + signed_header: + type: object + properties: + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block + in the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. 
+ commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: BlockIdFlag indicates which BlcokID the signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: CommitSig is a part of the Vote included in a Commit. + description: >- + Commit contains the evidence that a block was committed by a set + of validators. + validator_set: + type: object + properties: + validators: + type: array + items: type: object properties: - owner: + address: type: string - dseq: + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use with Tendermint + Validators + voting_power: type: string - format: uint64 - gseq: - type: integer - format: int64 - oseq: - type: integer format: int64 - title: OrderID stores owner and all other seq numbers - state: - type: string - enum: - - invalid - - open - - active - - closed - default: invalid - description: >- - - invalid: Prefix should start with 0 in enum. So declaring - dummy state - - open: OrderOpen denotes state for order open - - active: OrderMatched denotes state for order matched - - closed: OrderClosed denotes state for order lost - title: State is an enum which refers to state of order - spec: - type: object - properties: - name: + proposer_priority: type: string - requirements: - type: object - properties: - signed_by: - title: >- - SignedBy list of keys that tenants expect to have - signatures from - type: object - properties: - all_of: - type: array - items: - type: string - title: >- - all_of all keys in this list must have signed - attributes - any_of: - type: array - items: - type: string - title: >- - any_of at least of of the keys from the list must - have signed attributes - attributes: - type: array - items: + format: int64 + proposer: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use with Tendermint + Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + tendermint.types.LightClientAttackEvidence: + type: object + properties: + conflicting_block: + type: object + properties: + signed_header: + type: object + properties: + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a + block in the blockchain, + + including all blockchain data structures and the rules of + the application's + + state transition machine. 
+ chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: type: object properties: - key: - type: string - value: + total: + type: integer + format: int64 + hash: type: string - title: Attribute represents key value pair - title: >- - Attribute list of attributes tenant expects from the - provider - title: PlacementRequirements - resources: - type: array - items: + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: type: object properties: - resource: + hash: + type: string + format: byte + part_set_header: type: object properties: - id: + total: type: integer format: int64 - cpu: - type: object - properties: - units: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: >- - CPU stores resource units and cpu config - attributes - memory: - type: object - properties: - quantity: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: >- - Memory stores resource quantity and memory - attributes - storage: - type: array - items: - type: object - properties: - name: - type: string - quantity: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: >- - Storage stores resource quantity and storage - attributes - gpu: - type: object - properties: - units: - type: object - properties: - val: - type: string - format: byte - title: Unit stores cpu, memory and storage metrics - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - title: >- - GPU stores resource units and cpu config - attributes - endpoints: - type: array - items: - type: object - properties: - kind: - type: string - enum: - - SHARED_HTTP - - RANDOM_PORT - - LEASED_IP - default: SHARED_HTTP - description: >- - - SHARED_HTTP: Describes an endpoint that - becomes a Kubernetes Ingress - - RANDOM_PORT: Describes an endpoint that becomes a Kubernetes NodePort - - LEASED_IP: Describes an endpoint that becomes a leased IP - title: >- - This describes how the endpoint is 
- implemented when the lease is deployed - sequence_number: - type: integer - format: int64 - title: >- - Endpoint describes a publicly accessible IP - service - title: >- - Resources describes all available resources types for - deployment/node etc - - if field is nil resource is not present in the given - data-structure - count: - type: integer - format: int64 - price: - type: object - properties: - denom: - type: string - amount: + hash: type: string - description: >- - DecCoin defines a token with a denomination and a - decimal amount. - - - NOTE: The amount field is an Dec which implements the - custom method - - signatures required by gogoproto. - title: >- - ResourceUnit extends Resources and adds Count along with - the Price - title: GroupSpec stores group specifications - created_at: - type: string - format: int64 - title: Order stores orderID, state of order and other details - pagination: - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: |- - PageResponse is to be embedded in gRPC response messages where the - corresponding request message has used PageRequest. - - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - title: QueryOrdersResponse is response type for the Query/Orders RPC method - akash.market.v1beta3.MsgCloseBidResponse: - type: object - description: MsgCloseBidResponse defines the Msg/CloseBid response type. - akash.market.v1beta3.MsgCloseLeaseResponse: - type: object - description: MsgCloseLeaseResponse defines the Msg/CloseLease response type. - akash.market.v1beta3.MsgCreateBidResponse: - type: object - description: MsgCreateBidResponse defines the Msg/CreateBid response type. - akash.market.v1beta3.MsgCreateLeaseResponse: - type: object - title: MsgCreateLeaseResponse is the response from creating a lease - akash.market.v1beta3.MsgWithdrawLeaseResponse: - type: object - description: MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. - akash.provider.v1beta3.Provider: - type: object - properties: - owner: - type: string - host_uri: + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: >- + BlockIdFlag indicates which BlcokID the signature is + for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: CommitSig is a part of the Vote included in a Commit. + description: >- + Commit contains the evidence that a block was committed by a + set of validators. 
+ validator_set: + type: object + properties: + validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use with + Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + proposer: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use with + Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + common_height: type: string - attributes: + format: int64 + byzantine_validators: type: array items: type: object properties: - key: + address: type: string - value: + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use with Tendermint + Validators + voting_power: type: string - title: Attribute represents key value pair - info: - type: object - properties: - email: - type: string - website: - type: string - title: ProviderInfo - title: Provider stores owner and host details - akash.provider.v1beta3.ProviderInfo: + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + LightClientAttackEvidence contains evidence of a set of validators + attempting to mislead a light client. + tendermint.types.PartSetHeader: type: object properties: - email: - type: string - website: + total: + type: integer + format: int64 + hash: type: string - title: ProviderInfo - akash.provider.v1beta3.QueryProviderResponse: + format: byte + title: PartsetHeader + tendermint.types.SignedHeader: type: object properties: - provider: + header: type: object properties: - owner: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block in + the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. 
+ chain_id: type: string - host_uri: + height: type: string - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - title: Attribute represents key value pair - info: + format: int64 + time: + type: string + format: date-time + last_block_id: type: object properties: - email: - type: string - website: + hash: type: string - title: ProviderInfo - title: Provider stores owner and host details - title: QueryProviderResponse is response type for the Query/Provider RPC method - akash.provider.v1beta3.QueryProvidersResponse: - type: object - properties: - providers: - type: array - items: - type: object - properties: - owner: - type: string - host_uri: - type: string - attributes: - type: array - items: + format: byte + part_set_header: type: object properties: - key: - type: string - value: + total: + type: integer + format: int64 + hash: type: string - title: Attribute represents key value pair - info: - type: object - properties: - email: - type: string - website: - type: string - title: ProviderInfo - title: Provider stores owner and host details - pagination: - type: object - properties: - next_key: + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: type: string format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: + title: hashes of block data + data_hash: type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: |- - PageResponse is to be embedded in gRPC response messages where the - corresponding request message has used PageRequest. - - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - title: QueryProvidersResponse is response type for the Query/Providers RPC method - CheckTxResult: - type: object - properties: - code: - type: integer - data: - type: string - gas_used: - type: integer - gas_wanted: - type: integer - info: - type: string - log: - type: string - tags: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - example: - code: 0 - data: data - log: log - gas_used: 5000 - gas_wanted: 10000 - info: info - tags: - - '' - - '' - DeliverTxResult: - type: object - properties: - code: - type: integer - data: - type: string - gas_used: - type: integer - gas_wanted: - type: integer - info: - type: string - log: - type: string - tags: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - example: - code: 5 - data: data - log: log - gas_used: 5000 - gas_wanted: 10000 - info: info - tags: - - '' - - '' - BroadcastTxCommitResult: - type: object - properties: - check_tx: - type: object - properties: - code: - type: integer - data: + format: byte + validators_hash: type: string - gas_used: - type: integer - gas_wanted: - type: integer - info: + format: byte + title: hashes from the app output from the prev block + next_validators_hash: type: string - log: + format: byte + consensus_hash: type: string - tags: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - example: - code: 0 - data: data - log: log - gas_used: 5000 - gas_wanted: 10000 - info: info - tags: - - '' - - '' - deliver_tx: - type: object - properties: - code: - type: integer - data: + format: byte + app_hash: type: string - gas_used: - type: integer - gas_wanted: - type: integer - info: + 
format: byte + last_results_hash: type: string - log: + format: byte + evidence_hash: type: string - tags: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - example: - code: 5 - data: data - log: log - gas_used: 5000 - gas_wanted: 10000 - info: info - tags: - - '' - - '' - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - height: - type: integer - KVPair: - type: object - properties: - key: - type: string - value: - type: string - Msg: - type: string - Address: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - ValidatorAddress: - type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - Coin: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - Hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - TxQuery: - type: object - properties: - hash: - type: string - example: D085138D913993919295FF4B0A9107F1F2CDE0D37A87CE0644E217CBF3B49656 - height: - type: number - example: 368 - tx: + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + commit: type: object properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: + height: type: string - signature: + format: int64 + round: + type: integer + format: int32 + block_id: type: object properties: - signature: + hash: type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: + format: byte + part_set_header: type: object properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: + total: + type: integer + format: int64 + hash: type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - result: - type: object - properties: - log: - type: string - gas_wanted: - type: string - example: '200000' - gas_used: - type: string - example: '26354' - tags: + format: byte + title: PartsetHeader + title: BlockID + signatures: type: array items: type: object properties: - key: + block_id_flag: type: string - value: + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: BlockIdFlag indicates which BlcokID the signature is for + validator_address: type: string - PaginatedQueryTxs: - type: object - properties: - total_count: - type: number - example: 1 - count: - type: number - example: 1 - page_number: - type: number - example: 1 - page_total: - type: number - example: 1 - limit: - type: number - example: 30 - txs: - type: array - items: - type: object - properties: - hash: - type: string - example: D085138D913993919295FF4B0A9107F1F2CDE0D37A87CE0644E217CBF3B49656 - height: - type: number - example: 368 - tx: - type: object - properties: - msg: - type: array - items: - type: string - fee: - type: object - properties: - gas: - type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - memo: + 
format: byte + timestamp: type: string + format: date-time signature: - type: object - properties: - signature: - type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= - pub_key: - type: object - properties: - type: - type: string - example: tendermint/PubKeySecp256k1 - value: - type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: - type: string - example: '0' - sequence: - type: string - example: '0' - result: - type: object - properties: - log: type: string - gas_wanted: - type: string - example: '200000' - gas_used: - type: string - example: '26354' - tags: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - StdTx: + format: byte + description: CommitSig is a part of the Vote included in a Commit. + description: >- + Commit contains the evidence that a block was committed by a set of + validators. + tendermint.types.SignedMsgType: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: |- + SignedMsgType is a type of signed message in the consensus. + + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + tendermint.types.Validator: type: object properties: - msg: - type: array - items: - type: string - fee: + address: + type: string + format: byte + pub_key: type: object properties: - gas: + ed25519: type: string - amount: - type: array - items: + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use with Tendermint + Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + tendermint.types.ValidatorSet: + type: object + properties: + validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: type: object properties: - denom: + ed25519: type: string - example: stake - amount: + format: byte + secp256k1: type: string - example: '50' - memo: - type: string - signature: + format: byte + title: >- + PublicKey defines the keys available for use with Tendermint + Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + proposer: type: object properties: - signature: + address: type: string - example: >- - MEUCIQD02fsDPra8MtbRsyB1w7bqTM55Wu138zQbFcWx4+CFyAIge5WNPfKIuvzBZ69MyqHsqD8S1IwiEp+iUb6VSdtlpgY= + format: byte pub_key: type: object properties: - type: + ed25519: type: string - example: tendermint/PubKeySecp256k1 - value: + format: byte + secp256k1: type: string - example: Avz04VhtKJh8ACCVzlI8aTosGy0ikFXKIVHQ3jKMrosH - account_number: + format: byte + title: >- + PublicKey defines the keys available for use with Tendermint + Validators + voting_power: type: string - example: '0' - sequence: + format: int64 + proposer_priority: type: string - example: '0' - BlockID: - type: object - properties: - hash: + format: int64 + total_voting_power: type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - BlockHeader: + format: int64 + tendermint.types.Vote: type: object properties: - chain_id: + type: type: string - example: cosmoshub-2 + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - 
SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: |- + SignedMsgType is a type of signed message in the consensus. + + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals height: - type: number - example: 1 - time: type: string - example: '2017-12-30T05:53:09.287+01:00' - num_txs: - type: number - example: 0 - last_block_id: + format: int64 + round: + type: integer + format: int32 + block_id: type: object properties: hash: type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: + format: byte + part_set_header: type: object properties: total: - type: number - example: 0 + type: integer + format: int64 hash: type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - total_txs: - type: number - example: 35 - last_commit_hash: + format: byte + title: PartsetHeader + title: BlockID + timestamp: type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - data_hash: + format: date-time + validator_address: type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - validators_hash: + format: byte + validator_index: + type: integer + format: int32 + signature: type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - next_validators_hash: + format: byte + description: |- + Vote represents a prevote, precommit, or commit vote from validators for + consensus. + tendermint.version.Consensus: + type: object + properties: + block: type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - consensus_hash: + format: uint64 + app: type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - app_hash: + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block in the + blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. + cosmos.distribution.v1beta1.DelegationDelegatorReward: + type: object + properties: + validator_address: type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - last_results_hash: + reward: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + DecCoin defines a token with a denomination and a decimal amount. + + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. + description: |- + DelegationDelegatorReward represents the properties + of a delegator's delegation reward. + cosmos.distribution.v1beta1.Params: + type: object + properties: + community_tax: type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - evidence_hash: + base_proposer_reward: type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - proposer_address: + description: >- + Deprecated: The base_proposer_reward field is deprecated and is no + longer used + + in the x/distribution module's reward mechanism. + bonus_proposer_reward: type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - version: - type: object - properties: - block: - type: string - example: 10 - app: - type: string - example: 0 - Block: + description: >- + Deprecated: The bonus_proposer_reward field is deprecated and is no + longer used + + in the x/distribution module's reward mechanism. + withdraw_addr_enabled: + type: boolean + description: Params defines the set of params for the distribution module. 
+ cosmos.distribution.v1beta1.QueryCommunityPoolResponse: type: object properties: - header: - type: object - properties: - chain_id: - type: string - example: cosmoshub-2 - height: - type: number - example: 1 - time: - type: string - example: '2017-12-30T05:53:09.287+01:00' - num_txs: - type: number - example: 0 - last_block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: + pool: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + DecCoin defines a token with a denomination and a decimal amount. + + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. + description: pool defines community pool's coins. + description: >- + QueryCommunityPoolResponse is the response type for the + Query/CommunityPool + + RPC method. + cosmos.distribution.v1beta1.QueryDelegationRewardsResponse: + type: object + properties: + rewards: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + DecCoin defines a token with a denomination and a decimal amount. + + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. + description: rewards defines the rewards accrued by a delegation. + description: |- + QueryDelegationRewardsResponse is the response type for the + Query/DelegationRewards RPC method. + cosmos.distribution.v1beta1.QueryDelegationTotalRewardsResponse: + type: object + properties: + rewards: + type: array + items: + type: object + properties: + validator_address: + type: string + reward: + type: array + items: type: object properties: - total: - type: number - example: 0 - hash: + denom: type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - total_txs: - type: number - example: 35 - last_commit_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - data_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - validators_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - next_validators_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - consensus_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - app_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - last_results_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - evidence_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - proposer_address: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - version: - type: object - properties: - block: - type: string - example: 10 - app: - type: string - example: 0 - txs: + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + description: |- + DelegationDelegatorReward represents the properties + of a delegator's delegation reward. + description: rewards defines all the rewards accrued by a delegator. + total: type: array items: - type: string - evidence: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + DecCoin defines a token with a denomination and a decimal amount. 
+ + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. + description: total defines the sum of all the rewards. + description: |- + QueryDelegationTotalRewardsResponse is the response type for the + Query/DelegationTotalRewards RPC method. + cosmos.distribution.v1beta1.QueryDelegatorValidatorsResponse: + type: object + properties: + validators: type: array items: type: string - last_commit: - type: object - properties: - block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - precommits: - type: array - items: - type: object - properties: - validator_address: - type: string - validator_index: - type: string - example: '0' - height: - type: string - example: '0' - round: - type: string - example: '0' - timestamp: - type: string - example: '2017-12-30T05:53:09.287+01:00' - type: - type: number - example: 2 - block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - signature: - type: string - example: >- - 7uTC74QlknqYWEwg7Vn6M8Om7FuZ0EO4bjvuj6rwH1mTUJrRuMMZvAAqT9VjNgP0RA/TDp6u/92AqrZfXJSpBQ== - BlockQuery: + description: validators defines the validators a delegator is delegating for. + description: |- + QueryDelegatorValidatorsResponse is the response type for the + Query/DelegatorValidators RPC method. + cosmos.distribution.v1beta1.QueryDelegatorWithdrawAddressResponse: + type: object + properties: + withdraw_address: + type: string + description: withdraw_address defines the delegator address to query for. + description: |- + QueryDelegatorWithdrawAddressResponse is the response type for the + Query/DelegatorWithdrawAddress RPC method. + cosmos.distribution.v1beta1.QueryParamsResponse: type: object properties: - block_meta: + params: + description: params defines the parameters of the module. 
type: object properties: - header: - type: object - properties: - chain_id: - type: string - example: cosmoshub-2 - height: - type: number - example: 1 - time: - type: string - example: '2017-12-30T05:53:09.287+01:00' - num_txs: - type: number - example: 0 - last_block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - total_txs: - type: number - example: 35 - last_commit_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - data_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - validators_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - next_validators_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - consensus_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - app_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - last_results_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - evidence_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - proposer_address: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - version: - type: object - properties: - block: - type: string - example: 10 - app: - type: string - example: 0 - block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - block: + community_tax: + type: string + base_proposer_reward: + type: string + description: >- + Deprecated: The base_proposer_reward field is deprecated and is no + longer used + + in the x/distribution module's reward mechanism. + bonus_proposer_reward: + type: string + description: >- + Deprecated: The bonus_proposer_reward field is deprecated and is + no longer used + + in the x/distribution module's reward mechanism. + withdraw_addr_enabled: + type: boolean + description: QueryParamsResponse is the response type for the Query/Params RPC method. + cosmos.distribution.v1beta1.QueryValidatorCommissionResponse: + type: object + properties: + commission: + description: commission defines the commission the validator received. 
type: object properties: - header: - type: object - properties: - chain_id: - type: string - example: cosmoshub-2 - height: - type: number - example: 1 - time: - type: string - example: '2017-12-30T05:53:09.287+01:00' - num_txs: - type: number - example: 0 - last_block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - total_txs: - type: number - example: 35 - last_commit_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - data_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - validators_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - next_validators_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - consensus_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - app_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - last_results_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - evidence_hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - proposer_address: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - version: - type: object - properties: - block: - type: string - example: 10 - app: - type: string - example: 0 - txs: - type: array - items: - type: string - evidence: + commission: type: array items: - type: string - last_commit: - type: object - properties: - block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - precommits: - type: array - items: - type: object - properties: - validator_address: - type: string - validator_index: - type: string - example: '0' - height: - type: string - example: '0' - round: - type: string - example: '0' - timestamp: - type: string - example: '2017-12-30T05:53:09.287+01:00' - type: - type: number - example: 2 - block_id: - type: object - properties: - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - parts: - type: object - properties: - total: - type: number - example: 0 - hash: - type: string - example: EE5F3404034C524501629B56E0DDC38FAD651F04 - signature: - type: string - example: >- - 7uTC74QlknqYWEwg7Vn6M8Om7FuZ0EO4bjvuj6rwH1mTUJrRuMMZvAAqT9VjNgP0RA/TDp6u/92AqrZfXJSpBQ== - DelegationDelegatorReward: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + title: |- + QueryValidatorCommissionResponse is the response type for the + Query/ValidatorCommission RPC method + cosmos.distribution.v1beta1.QueryValidatorDistributionInfoResponse: type: object properties: - validator_address: + operator_address: type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - reward: + description: operator_address defines the validator operator address. 
+ self_bond_rewards: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + DecCoin defines a token with a denomination and a decimal amount. + + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. + description: self_bond_rewards defines the self delegations rewards. + commission: type: array items: type: object properties: denom: type: string - example: stake amount: type: string - example: '50' - DelegatorTotalRewards: + description: |- + DecCoin defines a token with a denomination and a decimal amount. + + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. + description: commission defines the commission the validator received. + description: >- + QueryValidatorDistributionInfoResponse is the response type for the + Query/ValidatorDistributionInfo RPC method. + cosmos.distribution.v1beta1.QueryValidatorOutstandingRewardsResponse: type: object properties: rewards: + type: object + properties: + rewards: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + DecCoin defines a token with a denomination and a decimal + amount. + + + NOTE: The amount field is an Dec which implements the custom + method + + signatures required by gogoproto. + description: >- + ValidatorOutstandingRewards represents outstanding (un-withdrawn) + rewards + + for a validator inexpensive to track, allows simple sanity checks. + description: |- + QueryValidatorOutstandingRewardsResponse is the response type for the + Query/ValidatorOutstandingRewards RPC method. + cosmos.distribution.v1beta1.QueryValidatorSlashesResponse: + type: object + properties: + slashes: type: array items: type: object properties: - validator_address: + validator_period: type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - reward: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - total: + format: uint64 + fraction: + type: string + description: |- + ValidatorSlashEvent represents a validator slash event. + Height is implicit within the store key. + This is needed to calculate appropriate amount of staking tokens + for delegations which are withdrawn after a slash has occurred. + description: slashes defines the slashes the validator received. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + QueryValidatorSlashesResponse is the response type for the + Query/ValidatorSlashes RPC method. + cosmos.distribution.v1beta1.ValidatorAccumulatedCommission: + type: object + properties: + commission: type: array items: type: object properties: denom: type: string - example: stake amount: type: string - example: '50' - BaseReq: + description: |- + DecCoin defines a token with a denomination and a decimal amount. + + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. 
+ description: |- + ValidatorAccumulatedCommission represents accumulated commission + for a validator kept as a running counter, can be withdrawn at any time. + cosmos.distribution.v1beta1.ValidatorOutstandingRewards: type: object properties: - from: - type: string - example: cosmos1g9ahr6xhht5rmqven628nklxluzyv8z9jqjcmc - description: Sender address or Keybase name to generate a transaction - memo: - type: string - example: Sent via Cosmos Voyager 🚀 - chain_id: - type: string - example: Cosmos-Hub - account_number: - type: string - example: '0' - sequence: - type: string - example: '1' - gas: - type: string - example: '200000' - gas_adjustment: - type: string - example: '1.2' - fees: + rewards: type: array items: type: object properties: denom: type: string - example: stake amount: type: string - example: '50' - simulate: - type: boolean - example: false - description: >- - Estimate gas for a transaction (cannot be used in conjunction with - generate_only) - TendermintValidator: + description: |- + DecCoin defines a token with a denomination and a decimal amount. + + NOTE: The amount field is an Dec which implements the custom method + signatures required by gogoproto. + description: |- + ValidatorOutstandingRewards represents outstanding (un-withdrawn) rewards + for a validator inexpensive to track, allows simple sanity checks. + cosmos.distribution.v1beta1.ValidatorSlashEvent: type: object properties: - address: - type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - pub_key: - type: string - example: >- - cosmosvalconspub1zcjduepq0vu2zgkgk49efa0nqwzndanq5m4c7pa3u4apz4g2r9gspqg6g9cs3k9cuf - voting_power: + validator_period: type: string - example: '1000' - proposer_priority: + format: uint64 + fraction: type: string - example: '1000' - TextProposal: + description: |- + ValidatorSlashEvent represents a validator slash event. + Height is implicit within the store key. + This is needed to calculate appropriate amount of staking tokens + for delegations which are withdrawn after a slash has occurred. + cosmos.evidence.v1beta1.QueryAllEvidenceResponse: type: object properties: - proposal_id: - type: integer - title: - type: string - description: - type: string - proposal_type: - type: string - proposal_status: - type: string - final_tally_result: + evidence: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up + a type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. 
(Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might + be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: evidence returns all evidences. + pagination: + description: pagination defines the pagination in the response. type: object properties: - 'yes': + next_key: type: string - example: '0.0000000000' - abstain: + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: type: string - example: '0.0000000000' - 'no': + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryAllEvidenceResponse is the response type for the Query/AllEvidence + RPC + + method. 
+ cosmos.evidence.v1beta1.QueryEvidenceResponse: + type: object + properties: + evidence: + type: object + properties: + type_url: type: string - example: '0.0000000000' - no_with_veto: + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up a + type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might be + + used with implementation specific semantics. + value: type: string - example: '0.0000000000' - submit_time: + format: byte + description: >- + Must be a valid serialized protocol buffer of the above specified + type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. 
Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + QueryEvidenceResponse is the response type for the Query/Evidence RPC + method. + cosmos.gov.v1beta1.Deposit: + type: object + properties: + proposal_id: type: string - total_deposit: + format: uint64 + description: proposal_id defines the unique id of the proposal. + depositor: + type: string + description: depositor defines the deposit addresses from the proposals. + amount: type: array items: type: object properties: denom: type: string - example: stake amount: type: string - example: '50' - voting_start_time: - type: string - Proposer: - type: object - properties: - proposal_id: - type: string - proposer: - type: string - Deposit: + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: amount to be deposited by depositor. + description: |- + Deposit defines an amount deposited by an account address to an active + proposal. + cosmos.gov.v1beta1.DepositParams: type: object properties: - amount: + min_deposit: type: array items: type: object properties: denom: type: string - example: stake amount: type: string - example: '50' - proposal_id: - type: string - depositor: - type: string - description: bech32 encoded address - example: cosmos1depk54cuajgkzea6zpgkq36tnjwdzv4afc3d27 - TallyResult: - type: object - properties: - 'yes': - type: string - example: '0.0000000000' - abstain: - type: string - example: '0.0000000000' - 'no': - type: string - example: '0.0000000000' - no_with_veto: + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: Minimum deposit for a proposal to enter voting period. + max_deposit_period: type: string - example: '0.0000000000' - Vote: + description: >- + Maximum period for Atom holders to deposit on a proposal. Initial + value: 2 + + months. + description: DepositParams defines the params for deposits on governance proposals. + cosmos.gov.v1beta1.Proposal: type: object properties: - voter: - type: string proposal_id: type: string - option: - type: string - Validator: - type: object - properties: - operator_address: - type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - consensus_pubkey: - type: string - example: >- - cosmosvalconspub1zcjduepq0vu2zgkgk49efa0nqwzndanq5m4c7pa3u4apz4g2r9gspqg6g9cs3k9cuf - jailed: - type: boolean - status: - type: integer - tokens: - type: string - delegator_shares: - type: string - description: + format: uint64 + description: proposal_id defines the unique id of the proposal. 
+ content: type: object properties: - moniker: - type: string - identity: - type: string - website: - type: string - security_contact: + type_url: type: string - details: + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up a + type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might be + + used with implementation specific semantics. + value: type: string - bond_height: - type: string - example: '0' - bond_intra_tx_counter: - type: integer - example: 0 - unbonding_height: - type: string - example: '0' - unbonding_time: + format: byte + description: >- + Must be a valid serialized protocol buffer of the above specified + type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. 
Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + status: + description: status defines the proposal status. type: string - example: '1970-01-01T00:00:00Z' - commission: + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_DEPOSIT_PERIOD + - PROPOSAL_STATUS_VOTING_PERIOD + - PROPOSAL_STATUS_PASSED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_FAILED + default: PROPOSAL_STATUS_UNSPECIFIED + final_tally_result: + description: |- + final_tally_result is the final tally result of the proposal. When + querying a proposal via gRPC, this field is not populated until the + proposal's voting period has ended. type: object properties: - rate: - type: string - example: '0' - max_rate: - type: string - example: '0' - max_change_rate: + 'yes': type: string - example: '0' - update_time: + description: yes is the number of yes votes on a proposal. + abstain: type: string - example: '1970-01-01T00:00:00Z' - Delegation: - type: object - properties: - delegator_address: - type: string - validator_address: - type: string - shares: - type: string - balance: - type: object - properties: - denom: + description: abstain is the number of abstain votes on a proposal. + 'no': type: string - example: stake - amount: + description: no is the number of no votes on a proposal. + no_with_veto: type: string - example: '50' - UnbondingDelegationPair: - type: object - properties: - delegator_address: - type: string - validator_address: - type: string - entries: - type: array - items: - type: object - properties: - initial_balance: - type: string - balance: - type: string - creation_height: - type: string - min_time: - type: string - UnbondingEntries: - type: object - properties: - initial_balance: - type: string - balance: - type: string - creation_height: - type: string - min_time: - type: string - UnbondingDelegation: - type: object - properties: - delegator_address: - type: string - validator_address: - type: string - initial_balance: - type: string - balance: - type: string - creation_height: - type: integer - min_time: - type: integer - Redelegation: - type: object - properties: - delegator_address: - type: string - validator_src_address: - type: string - validator_dst_address: - type: string - entries: - type: array - items: - $ref: '#/definitions/Redelegation' - RedelegationEntry: - type: object - properties: - creation_height: - type: integer - completion_time: - type: integer - initial_balance: - type: string - balance: - type: string - shares_dst: + description: no_with_veto is the number of no with veto votes on a proposal. + submit_time: type: string - ValidatorDistInfo: - type: object - properties: - operator_address: + format: date-time + description: submit_time is the time of proposal submission. 
+ deposit_end_time: type: string - description: bech32 encoded address - example: cosmosvaloper16xyempempp92x9hyzz9wrgf94r6j9h5f2w4n2l - self_bond_rewards: - type: array - items: - type: object - properties: - denom: - type: string - example: stake - amount: - type: string - example: '50' - val_commission: + format: date-time + description: deposit_end_time is the end time for deposition. + total_deposit: type: array items: type: object properties: denom: type: string - example: stake amount: type: string - example: '50' - PublicKey: - type: object - properties: - type: - type: string - value: - type: string - SigningInfo: - type: object - properties: - start_height: - type: string - index_offset: - type: string - jailed_until: + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: total_deposit is the total deposit on the proposal. + voting_start_time: type: string - missed_blocks_counter: + format: date-time + description: voting_start_time is the starting time to vote on a proposal. + voting_end_time: type: string - ParamChange: + format: date-time + description: voting_end_time is the end time of voting on a proposal. + description: Proposal defines the core field members of a governance proposal. + cosmos.gov.v1beta1.ProposalStatus: + type: string + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_DEPOSIT_PERIOD + - PROPOSAL_STATUS_VOTING_PERIOD + - PROPOSAL_STATUS_PASSED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_FAILED + default: PROPOSAL_STATUS_UNSPECIFIED + description: |- + ProposalStatus enumerates the valid statuses of a proposal. + + - PROPOSAL_STATUS_UNSPECIFIED: PROPOSAL_STATUS_UNSPECIFIED defines the default proposal status. + - PROPOSAL_STATUS_DEPOSIT_PERIOD: PROPOSAL_STATUS_DEPOSIT_PERIOD defines a proposal status during the deposit + period. + - PROPOSAL_STATUS_VOTING_PERIOD: PROPOSAL_STATUS_VOTING_PERIOD defines a proposal status during the voting + period. + - PROPOSAL_STATUS_PASSED: PROPOSAL_STATUS_PASSED defines a proposal status of a proposal that has + passed. + - PROPOSAL_STATUS_REJECTED: PROPOSAL_STATUS_REJECTED defines a proposal status of a proposal that has + been rejected. + - PROPOSAL_STATUS_FAILED: PROPOSAL_STATUS_FAILED defines a proposal status of a proposal that has + failed. + cosmos.gov.v1beta1.QueryDepositResponse: type: object properties: - subspace: - type: string - example: staking - key: - type: string - example: MaxValidators - subkey: - type: string - example: '' - value: + deposit: type: object - Supply: + properties: + proposal_id: + type: string + format: uint64 + description: proposal_id defines the unique id of the proposal. + depositor: + type: string + description: depositor defines the deposit addresses from the proposals. + amount: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: amount to be deposited by depositor. + description: |- + Deposit defines an amount deposited by an account address to an active + proposal. + description: >- + QueryDepositResponse is the response type for the Query/Deposit RPC + method. 
+ cosmos.gov.v1beta1.QueryDepositsResponse: type: object properties: - total: + deposits: type: array items: type: object properties: - denom: + proposal_id: type: string - example: stake - amount: + format: uint64 + description: proposal_id defines the unique id of the proposal. + depositor: type: string - example: '50' - cosmos.auth.v1beta1.Params: + description: depositor defines the deposit addresses from the proposals. + amount: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: amount to be deposited by depositor. + description: >- + Deposit defines an amount deposited by an account address to an + active + + proposal. + description: deposits defines the requested deposits. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryDepositsResponse is the response type for the Query/Deposits RPC + method. + cosmos.gov.v1beta1.QueryParamsResponse: type: object properties: - max_memo_characters: - type: string - format: uint64 - tx_sig_limit: - type: string - format: uint64 - tx_size_cost_per_byte: - type: string - format: uint64 - sig_verify_cost_ed25519: - type: string - format: uint64 - sig_verify_cost_secp256k1: - type: string - format: uint64 - description: Params defines the parameters for the auth module. - cosmos.auth.v1beta1.QueryAccountResponse: + voting_params: + description: voting_params defines the parameters related to voting. + type: object + properties: + voting_period: + type: string + description: Duration of the voting period. + deposit_params: + description: deposit_params defines the parameters related to deposit. + type: object + properties: + min_deposit: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: Minimum deposit for a proposal to enter voting period. + max_deposit_period: + type: string + description: >- + Maximum period for Atom holders to deposit on a proposal. Initial + value: 2 + + months. + tally_params: + description: tally_params defines the parameters related to tally. + type: object + properties: + quorum: + type: string + format: byte + description: >- + Minimum percentage of total stake needed to vote for a result to + be + + considered valid. + threshold: + type: string + format: byte + description: >- + Minimum proportion of Yes votes for proposal to pass. Default + value: 0.5. + veto_threshold: + type: string + format: byte + description: >- + Minimum value of Veto votes to Total votes ratio for proposal to + be + + vetoed. Default value: 1/3. + description: QueryParamsResponse is the response type for the Query/Params RPC method. 
+ cosmos.gov.v1beta1.QueryProposalResponse: type: object properties: - account: + proposal: type: object properties: - type_url: + proposal_id: type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized + format: uint64 + description: proposal_id defines the unique id of the proposal. + content: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - protocol buffer message. This string must contain at least + protocol buffer message. This string must contain at least - one "/" character. The last segment of the URL's path must - represent + one "/" character. The last segment of the URL's path must + represent - the fully qualified name of the type (as in + the fully qualified name of the type (as in - `path/google.protobuf.Duration`). The name should be in a - canonical form + `path/google.protobuf.Duration`). The name should be in a + canonical form - (e.g., leading "." is not accepted). + (e.g., leading "." is not accepted). - In practice, teams usually precompile into the binary all types - that they + In practice, teams usually precompile into the binary all + types that they - expect it to use in the context of Any. However, for URLs which - use the + expect it to use in the context of Any. However, for URLs + which use the - scheme `http`, `https`, or no scheme, one can optionally set up a - type + scheme `http`, `https`, or no scheme, one can optionally set + up a type - server that maps type URLs to message definitions as follows: + server that maps type URLs to message definitions as follows: - * If no scheme is provided, `https` is assumed. + * If no scheme is provided, `https` is assumed. - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - Note: this functionality is not currently available in the - official + Note: this functionality is not currently available in the + official - protobuf release, and it is not used for type URLs beginning with + protobuf release, and it is not used for type URLs beginning + with - type.googleapis.com. + type.googleapis.com. - Schemes other than `http`, `https` (or the empty scheme) might be + Schemes other than `http`, `https` (or the empty scheme) might + be - used with implementation specific semantics. - value: - type: string - format: byte + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. description: >- - Must be a valid serialized protocol buffer of the above specified + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. 
+ + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message along - with a - URL that describes the type of the serialized message. + Example 1: Pack and unpack a message in C++. - Protobuf library provides support to pack/unpack Any values in the - form + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - of utility functions or additional generated methods of the Any type. + Example 2: Pack and unpack a message in Java. + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - Example 1: Pack and unpack a message in C++. + Example 3: Pack and unpack a message in Python. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - Example 2: Pack and unpack a message in Java. + Example 4: Pack and unpack a message in Go - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - Example 3: Pack and unpack a message in Python. + The pack methods provided by protobuf library will by default use - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + 'type.googleapis.com/full.type.name' as the type URL and the + unpack - Example 4: Pack and unpack a message in Go + methods only use the fully qualified type name after the last '/' - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + in the type URL, for example "foo.bar.com/x/y.z" will yield type - The pack methods provided by protobuf library will by default use + name "y.z". - 'type.googleapis.com/full.type.name' as the type URL and the unpack - methods only use the fully qualified type name after the last '/' - in the type URL, for example "foo.bar.com/x/y.z" will yield type + JSON - name "y.z". + The JSON representation of an `Any` value uses the regular + representation of the deserialized, embedded message, with an - JSON + additional field `@type` which contains the type URL. Example: - ==== + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - The JSON representation of an `Any` value uses the regular + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - representation of the deserialized, embedded message, with an + If the embedded message type is well-known and has a custom JSON - additional field `@type` which contains the type URL. Example: + representation, that representation will be embedded adding a + field - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + `value` which holds the custom JSON in addition to the `@type` - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + field. 
Example (for message [google.protobuf.Duration][]): - If the embedded message type is well-known and has a custom JSON + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + status: + description: status defines the proposal status. + type: string + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_DEPOSIT_PERIOD + - PROPOSAL_STATUS_VOTING_PERIOD + - PROPOSAL_STATUS_PASSED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_FAILED + default: PROPOSAL_STATUS_UNSPECIFIED + final_tally_result: + description: >- + final_tally_result is the final tally result of the proposal. When - representation, that representation will be embedded adding a field + querying a proposal via gRPC, this field is not populated until + the - `value` which holds the custom JSON in addition to the `@type` + proposal's voting period has ended. + type: object + properties: + 'yes': + type: string + description: yes is the number of yes votes on a proposal. + abstain: + type: string + description: abstain is the number of abstain votes on a proposal. + 'no': + type: string + description: no is the number of no votes on a proposal. + no_with_veto: + type: string + description: >- + no_with_veto is the number of no with veto votes on a + proposal. + submit_time: + type: string + format: date-time + description: submit_time is the time of proposal submission. + deposit_end_time: + type: string + format: date-time + description: deposit_end_time is the end time for deposition. + total_deposit: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. - field. Example (for message [google.protobuf.Duration][]): - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: total_deposit is the total deposit on the proposal. + voting_start_time: + type: string + format: date-time + description: voting_start_time is the starting time to vote on a proposal. + voting_end_time: + type: string + format: date-time + description: voting_end_time is the end time of voting on a proposal. + description: Proposal defines the core field members of a governance proposal. description: >- - QueryAccountResponse is the response type for the Query/Account RPC + QueryProposalResponse is the response type for the Query/Proposal RPC method. - cosmos.auth.v1beta1.QueryAccountsResponse: + cosmos.gov.v1beta1.QueryProposalsResponse: type: object properties: - accounts: + proposals: type: array items: type: object properties: - type_url: + proposal_id: type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized + format: uint64 + description: proposal_id defines the unique id of the proposal. + content: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - protocol buffer message. This string must contain at least + protocol buffer message. This string must contain at least - one "/" character. The last segment of the URL's path must - represent + one "/" character. The last segment of the URL's path must + represent - the fully qualified name of the type (as in + the fully qualified name of the type (as in - `path/google.protobuf.Duration`). 
The name should be in a - canonical form + `path/google.protobuf.Duration`). The name should be in a + canonical form - (e.g., leading "." is not accepted). + (e.g., leading "." is not accepted). - In practice, teams usually precompile into the binary all types - that they + In practice, teams usually precompile into the binary all + types that they - expect it to use in the context of Any. However, for URLs which - use the + expect it to use in the context of Any. However, for URLs + which use the - scheme `http`, `https`, or no scheme, one can optionally set up - a type + scheme `http`, `https`, or no scheme, one can optionally set + up a type - server that maps type URLs to message definitions as follows: + server that maps type URLs to message definitions as + follows: - * If no scheme is provided, `https` is assumed. + * If no scheme is provided, `https` is assumed. - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - Note: this functionality is not currently available in the - official + Note: this functionality is not currently available in the + official - protobuf release, and it is not used for type URLs beginning - with + protobuf release, and it is not used for type URLs beginning + with - type.googleapis.com. + type.googleapis.com. - Schemes other than `http`, `https` (or the empty scheme) might - be + Schemes other than `http`, `https` (or the empty scheme) + might be - used with implementation specific semantics. - value: - type: string - format: byte + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message along - with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values in the - form - - of utility functions or additional generated methods of the Any - type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + `Any` contains an arbitrary serialized protocol buffer message + along with a - Example 3: Pack and unpack a message in Python. + URL that describes the type of the serialized message. - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... 
- Example 4: Pack and unpack a message in Go + Protobuf library provides support to pack/unpack Any values in + the form - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + of utility functions or additional generated methods of the Any + type. - The pack methods provided by protobuf library will by default use - 'type.googleapis.com/full.type.name' as the type URL and the unpack + Example 1: Pack and unpack a message in C++. - methods only use the fully qualified type name after the last '/' + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - in the type URL, for example "foo.bar.com/x/y.z" will yield type + Example 2: Pack and unpack a message in Java. - name "y.z". + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + Example 3: Pack and unpack a message in Python. + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - JSON + Example 4: Pack and unpack a message in Go - ==== + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - The JSON representation of an `Any` value uses the regular + The pack methods provided by protobuf library will by default + use - representation of the deserialized, embedded message, with an + 'type.googleapis.com/full.type.name' as the type URL and the + unpack - additional field `@type` which contains the type URL. Example: + methods only use the fully qualified type name after the last + '/' - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + in the type URL, for example "foo.bar.com/x/y.z" will yield type - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + name "y.z". - If the embedded message type is well-known and has a custom JSON - representation, that representation will be embedded adding a field - `value` which holds the custom JSON in addition to the `@type` + JSON - field. Example (for message [google.protobuf.Duration][]): - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - title: accounts are the existing accounts - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + The JSON representation of an `Any` value uses the regular - was set, its value is undefined otherwise - description: >- - QueryAccountsResponse is the response type for the Query/Accounts RPC - method. - cosmos.auth.v1beta1.QueryParamsResponse: - type: object - properties: - params: - description: params defines the parameters of the module. - type: object - properties: - max_memo_characters: - type: string - format: uint64 - tx_sig_limit: - type: string - format: uint64 - tx_size_cost_per_byte: - type: string - format: uint64 - sig_verify_cost_ed25519: - type: string - format: uint64 - sig_verify_cost_secp256k1: - type: string - format: uint64 - description: QueryParamsResponse is the response type for the Query/Params RPC method. 
- cosmos.bank.v1beta1.DenomOwner: - type: object - properties: - address: - type: string - description: address defines the address that owns a particular denomination. - balance: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - Coin defines a token with a denomination and an amount. + representation of the deserialized, embedded message, with an - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. - description: |- - DenomOwner defines structure representing an account that owns or holds a - particular denominated token. It contains the account address and account - balance of the denominated token. - cosmos.bank.v1beta1.DenomUnit: - type: object - properties: - denom: - type: string - description: denom represents the string name of the given denom unit (e.g uatom). - exponent: - type: integer - format: int64 - description: >- - exponent represents power of 10 exponent that one must + additional field `@type` which contains the type URL. Example: - raise the base_denom to in order to equal the given DenomUnit's denom + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - 1 denom = 1^exponent base_denom + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - (e.g. with a base_denom of uatom, one can create a DenomUnit of 'atom' - with + If the embedded message type is well-known and has a custom JSON - exponent = 6, thus: 1 atom = 10^6 uatom). - aliases: - type: array - items: - type: string - title: aliases is a list of string aliases for the given denom - description: |- - DenomUnit represents a struct that describes a given - denomination unit of the basic token. - cosmos.bank.v1beta1.Metadata: - type: object - properties: - description: - type: string - denom_units: - type: array - items: - type: object - properties: - denom: - type: string - description: >- - denom represents the string name of the given denom unit (e.g - uatom). - exponent: - type: integer - format: int64 - description: >- - exponent represents power of 10 exponent that one must + representation, that representation will be embedded adding a + field - raise the base_denom to in order to equal the given DenomUnit's - denom + `value` which holds the custom JSON in addition to the `@type` - 1 denom = 1^exponent base_denom + field. Example (for message [google.protobuf.Duration][]): - (e.g. with a base_denom of uatom, one can create a DenomUnit of - 'atom' with + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + status: + description: status defines the proposal status. + type: string + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_DEPOSIT_PERIOD + - PROPOSAL_STATUS_VOTING_PERIOD + - PROPOSAL_STATUS_PASSED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_FAILED + default: PROPOSAL_STATUS_UNSPECIFIED + final_tally_result: + description: >- + final_tally_result is the final tally result of the proposal. + When - exponent = 6, thus: 1 atom = 10^6 uatom). - aliases: + querying a proposal via gRPC, this field is not populated until + the + + proposal's voting period has ended. + type: object + properties: + 'yes': + type: string + description: yes is the number of yes votes on a proposal. + abstain: + type: string + description: abstain is the number of abstain votes on a proposal. + 'no': + type: string + description: no is the number of no votes on a proposal. 
+ no_with_veto: + type: string + description: >- + no_with_veto is the number of no with veto votes on a + proposal. + submit_time: + type: string + format: date-time + description: submit_time is the time of proposal submission. + deposit_end_time: + type: string + format: date-time + description: deposit_end_time is the end time for deposition. + total_deposit: type: array items: - type: string - title: aliases is a list of string aliases for the given denom - description: |- - DenomUnit represents a struct that describes a given - denomination unit of the basic token. - title: denom_units represents the list of DenomUnit's for a given coin - base: - type: string - description: >- - base represents the base denom (should be the DenomUnit with exponent - = 0). - display: - type: string - description: |- - display indicates the suggested denom that should be - displayed in clients. - name: - type: string - title: 'name defines the name of the token (eg: Cosmos Atom)' - symbol: - type: string - description: >- - symbol is the token symbol usually shown on exchanges (eg: ATOM). This - can + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. - be the same as the display. - uri: - type: string - description: >- - URI to a document (on or off-chain) that contains additional - information. Optional. - uri_hash: - type: string - description: >- - URIHash is a sha256 hash of a document pointed by URI. It's used to - verify that - the document didn't change. Optional. - description: |- - Metadata represents a struct that describes - a basic token. - cosmos.bank.v1beta1.Params: - type: object - properties: - send_enabled: - type: array - items: - type: object - properties: - denom: - type: string - enabled: - type: boolean - format: boolean - description: >- - SendEnabled maps coin denom to a send_enabled status (whether a - denom is + NOTE: The amount field is an Int which implements the custom + method - sendable). - default_send_enabled: - type: boolean - format: boolean - description: Params defines the parameters for the bank module. - cosmos.bank.v1beta1.QueryAllBalancesResponse: - type: object - properties: - balances: - type: array - items: - type: object - properties: - denom: + signatures required by gogoproto. + description: total_deposit is the total deposit on the proposal. + voting_start_time: type: string - amount: + format: date-time + description: voting_start_time is the starting time to vote on a proposal. + voting_end_time: type: string - description: |- - Coin defines a token with a denomination and an amount. - - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. - description: balances is the balances of all the coins. + format: date-time + description: voting_end_time is the end time of voting on a proposal. + description: Proposal defines the core field members of a governance proposal. + description: proposals defines all the requested governance proposals. pagination: description: pagination defines the pagination in the response. type: object @@ -33772,9 +55924,10 @@ definitions: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. 
total: type: string format: uint64 @@ -33783,248 +55936,157 @@ definitions: PageRequest.count_total was set, its value is undefined otherwise - description: >- - QueryAllBalancesResponse is the response type for the Query/AllBalances - RPC - + description: |- + QueryProposalsResponse is the response type for the Query/Proposals RPC method. - cosmos.bank.v1beta1.QueryBalanceResponse: + cosmos.gov.v1beta1.QueryTallyResultResponse: type: object properties: - balance: + tally: + description: tally defines the requested tally. type: object properties: - denom: + 'yes': type: string - amount: + description: yes is the number of yes votes on a proposal. + abstain: type: string - description: |- - Coin defines a token with a denomination and an amount. - - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. + description: abstain is the number of abstain votes on a proposal. + 'no': + type: string + description: no is the number of no votes on a proposal. + no_with_veto: + type: string + description: no_with_veto is the number of no with veto votes on a proposal. description: >- - QueryBalanceResponse is the response type for the Query/Balance RPC + QueryTallyResultResponse is the response type for the Query/Tally RPC method. - cosmos.bank.v1beta1.QueryDenomMetadataResponse: + cosmos.gov.v1beta1.QueryVoteResponse: type: object properties: - metadata: + vote: type: object properties: - description: - type: string - denom_units: - type: array - items: - type: object - properties: - denom: - type: string - description: >- - denom represents the string name of the given denom unit - (e.g uatom). - exponent: - type: integer - format: int64 - description: >- - exponent represents power of 10 exponent that one must - - raise the base_denom to in order to equal the given - DenomUnit's denom - - 1 denom = 1^exponent base_denom - - (e.g. with a base_denom of uatom, one can create a DenomUnit - of 'atom' with - - exponent = 6, thus: 1 atom = 10^6 uatom). - aliases: - type: array - items: - type: string - title: aliases is a list of string aliases for the given denom - description: |- - DenomUnit represents a struct that describes a given - denomination unit of the basic token. - title: denom_units represents the list of DenomUnit's for a given coin - base: - type: string - description: >- - base represents the base denom (should be the DenomUnit with - exponent = 0). - display: - type: string - description: |- - display indicates the suggested denom that should be - displayed in clients. - name: - type: string - title: 'name defines the name of the token (eg: Cosmos Atom)' - symbol: - type: string - description: >- - symbol is the token symbol usually shown on exchanges (eg: ATOM). - This can - - be the same as the display. - uri: + proposal_id: type: string - description: >- - URI to a document (on or off-chain) that contains additional - information. Optional. - uri_hash: + format: uint64 + description: proposal_id defines the unique id of the proposal. + voter: type: string + description: voter is the voter address of the proposal. + option: description: >- - URIHash is a sha256 hash of a document pointed by URI. It's used - to verify that + Deprecated: Prefer to use `options` instead. This field is set in + queries - the document didn't change. Optional. - description: |- - Metadata represents a struct that describes - a basic token. 
- description: >- - QueryDenomMetadataResponse is the response type for the - Query/DenomMetadata RPC + if and only if `len(options) == 1` and that option has weight 1. + In all - method. - cosmos.bank.v1beta1.QueryDenomOwnersResponse: - type: object - properties: - denom_owners: - type: array - items: - type: object - properties: - address: - type: string - description: address defines the address that owns a particular denomination. - balance: + other cases, this field will default to VOTE_OPTION_UNSPECIFIED. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + options: + type: array + items: type: object properties: - denom: + option: + description: >- + option defines the valid vote options, it must not contain + duplicate vote options. type: string - amount: + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + weight: type: string - description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the custom - method - - signatures required by gogoproto. - description: >- - DenomOwner defines structure representing an account that owns or - holds a - - particular denominated token. It contains the account address and - account + description: weight is the vote weight associated with the vote option. + description: |- + WeightedVoteOption defines a unit of vote for vote split. - balance of the denominated token. - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + Since: cosmos-sdk 0.43 + description: |- + options is the weighted vote options. - was set, its value is undefined otherwise - description: >- - QueryDenomOwnersResponse defines the RPC response of a DenomOwners RPC - query. - cosmos.bank.v1beta1.QueryDenomsMetadataResponse: + Since: cosmos-sdk 0.43 + description: |- + Vote defines a vote on a governance proposal. + A Vote consists of a proposal ID, the voter, and the vote option. + description: QueryVoteResponse is the response type for the Query/Vote RPC method. + cosmos.gov.v1beta1.QueryVotesResponse: type: object properties: - metadatas: + votes: type: array items: type: object properties: - description: - type: string - denom_units: - type: array - items: - type: object - properties: - denom: - type: string - description: >- - denom represents the string name of the given denom unit - (e.g uatom). - exponent: - type: integer - format: int64 - description: >- - exponent represents power of 10 exponent that one must - - raise the base_denom to in order to equal the given - DenomUnit's denom - - 1 denom = 1^exponent base_denom - - (e.g. with a base_denom of uatom, one can create a - DenomUnit of 'atom' with - - exponent = 6, thus: 1 atom = 10^6 uatom). - aliases: - type: array - items: - type: string - title: aliases is a list of string aliases for the given denom - description: |- - DenomUnit represents a struct that describes a given - denomination unit of the basic token. 
- title: denom_units represents the list of DenomUnit's for a given coin - base: - type: string - description: >- - base represents the base denom (should be the DenomUnit with - exponent = 0). - display: - type: string - description: |- - display indicates the suggested denom that should be - displayed in clients. - name: - type: string - title: 'name defines the name of the token (eg: Cosmos Atom)' - symbol: + proposal_id: type: string - description: >- - symbol is the token symbol usually shown on exchanges (eg: - ATOM). This can - - be the same as the display. - uri: + format: uint64 + description: proposal_id defines the unique id of the proposal. + voter: type: string + description: voter is the voter address of the proposal. + option: description: >- - URI to a document (on or off-chain) that contains additional - information. Optional. - uri_hash: + Deprecated: Prefer to use `options` instead. This field is set + in queries + + if and only if `len(options) == 1` and that option has weight 1. + In all + + other cases, this field will default to VOTE_OPTION_UNSPECIFIED. type: string - description: >- - URIHash is a sha256 hash of a document pointed by URI. It's used - to verify that + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + options: + type: array + items: + type: object + properties: + option: + description: >- + option defines the valid vote options, it must not contain + duplicate vote options. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + weight: + type: string + description: weight is the vote weight associated with the vote option. + description: |- + WeightedVoteOption defines a unit of vote for vote split. - the document didn't change. Optional. + Since: cosmos-sdk 0.43 + description: |- + options is the weighted vote options. + + Since: cosmos-sdk 0.43 description: |- - Metadata represents a struct that describes - a basic token. - description: >- - metadata provides the client information for all the registered - tokens. + Vote defines a vote on a governance proposal. + A Vote consists of a proposal ID, the voter, and the vote option. + description: votes defines the queried votes. pagination: description: pagination defines the pagination in the response. type: object @@ -34032,9 +56094,10 @@ definitions: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -34043,61 +56106,161 @@ definitions: PageRequest.count_total was set, its value is undefined otherwise - description: >- - QueryDenomsMetadataResponse is the response type for the - Query/DenomsMetadata RPC - - method. - cosmos.bank.v1beta1.QueryParamsResponse: + description: QueryVotesResponse is the response type for the Query/Votes RPC method. + cosmos.gov.v1beta1.TallyParams: type: object properties: - params: - type: object - properties: - send_enabled: - type: array - items: - type: object - properties: - denom: - type: string - enabled: - type: boolean - format: boolean + quorum: + type: string + format: byte + description: |- + Minimum percentage of total stake needed to vote for a result to be + considered valid. 
+ threshold: + type: string + format: byte + description: >- + Minimum proportion of Yes votes for proposal to pass. Default value: + 0.5. + veto_threshold: + type: string + format: byte + description: |- + Minimum value of Veto votes to Total votes ratio for proposal to be + vetoed. Default value: 1/3. + description: TallyParams defines the params for tallying votes on governance proposals. + cosmos.gov.v1beta1.TallyResult: + type: object + properties: + 'yes': + type: string + description: yes is the number of yes votes on a proposal. + abstain: + type: string + description: abstain is the number of abstain votes on a proposal. + 'no': + type: string + description: no is the number of no votes on a proposal. + no_with_veto: + type: string + description: no_with_veto is the number of no with veto votes on a proposal. + description: TallyResult defines a standard tally for a governance proposal. + cosmos.gov.v1beta1.Vote: + type: object + properties: + proposal_id: + type: string + format: uint64 + description: proposal_id defines the unique id of the proposal. + voter: + type: string + description: voter is the voter address of the proposal. + option: + description: >- + Deprecated: Prefer to use `options` instead. This field is set in + queries + + if and only if `len(options) == 1` and that option has weight 1. In + all + + other cases, this field will default to VOTE_OPTION_UNSPECIFIED. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + options: + type: array + items: + type: object + properties: + option: description: >- - SendEnabled maps coin denom to a send_enabled status (whether a - denom is + option defines the valid vote options, it must not contain + duplicate vote options. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + weight: + type: string + description: weight is the vote weight associated with the vote option. + description: |- + WeightedVoteOption defines a unit of vote for vote split. - sendable). - default_send_enabled: - type: boolean - format: boolean - description: Params defines the parameters for the bank module. + Since: cosmos-sdk 0.43 + description: |- + options is the weighted vote options. + + Since: cosmos-sdk 0.43 + description: |- + Vote defines a vote on a governance proposal. + A Vote consists of a proposal ID, the voter, and the vote option. + cosmos.gov.v1beta1.VoteOption: + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED description: >- - QueryParamsResponse defines the response type for querying x/bank - parameters. - cosmos.bank.v1beta1.QuerySupplyOfResponse: + VoteOption enumerates the valid vote options for a given governance + proposal. + + - VOTE_OPTION_UNSPECIFIED: VOTE_OPTION_UNSPECIFIED defines a no-op vote option. + - VOTE_OPTION_YES: VOTE_OPTION_YES defines a yes vote option. + - VOTE_OPTION_ABSTAIN: VOTE_OPTION_ABSTAIN defines an abstain vote option. + - VOTE_OPTION_NO: VOTE_OPTION_NO defines a no vote option. + - VOTE_OPTION_NO_WITH_VETO: VOTE_OPTION_NO_WITH_VETO defines a no with veto vote option. 
+ cosmos.gov.v1beta1.VotingParams: type: object properties: - amount: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - Coin defines a token with a denomination and an amount. + voting_period: + type: string + description: Duration of the voting period. + description: VotingParams defines the params for voting on governance proposals. + cosmos.gov.v1beta1.WeightedVoteOption: + type: object + properties: + option: + description: >- + option defines the valid vote options, it must not contain duplicate + vote options. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + weight: + type: string + description: weight is the vote weight associated with the vote option. + description: |- + WeightedVoteOption defines a unit of vote for vote split. - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. - description: >- - QuerySupplyOfResponse is the response type for the Query/SupplyOf RPC - method. - cosmos.bank.v1beta1.QueryTotalSupplyResponse: + Since: cosmos-sdk 0.43 + cosmos.gov.v1.Deposit: type: object properties: - supply: + proposal_id: + type: string + format: uint64 + description: proposal_id defines the unique id of the proposal. + depositor: + type: string + description: depositor defines the deposit addresses from the proposals. + amount: type: array items: type: object @@ -34111,1342 +56274,1707 @@ definitions: NOTE: The amount field is an Int which implements the custom method signatures required by gogoproto. - title: supply is the supply of the coins - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + description: amount to be deposited by depositor. + description: |- + Deposit defines an amount deposited by an account address to an active + proposal. + cosmos.gov.v1.DepositParams: + type: object + properties: + min_deposit: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. - was set, its value is undefined otherwise - title: >- - QueryTotalSupplyResponse is the response type for the Query/TotalSupply - RPC + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: Minimum deposit for a proposal to enter voting period. + max_deposit_period: + type: string + description: >- + Maximum period for Atom holders to deposit on a proposal. Initial + value: 2 - method - cosmos.bank.v1beta1.SendEnabled: + months. + description: DepositParams defines the params for deposits on governance proposals. + cosmos.gov.v1.Params: type: object properties: - denom: + min_deposit: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: Minimum deposit for a proposal to enter voting period. 
+ max_deposit_period: type: string - enabled: - type: boolean - format: boolean + description: >- + Maximum period for Atom holders to deposit on a proposal. Initial + value: 2 + + months. + voting_period: + type: string + description: Duration of the voting period. + quorum: + type: string + description: |- + Minimum percentage of total stake needed to vote for a result to be + considered valid. + threshold: + type: string + description: >- + Minimum proportion of Yes votes for proposal to pass. Default value: + 0.5. + veto_threshold: + type: string + description: |- + Minimum value of Veto votes to Total votes ratio for proposal to be + vetoed. Default value: 1/3. + min_initial_deposit_ratio: + type: string + description: >- + The ratio representing the proportion of the deposit value that must + be paid at proposal submission. description: |- - SendEnabled maps coin denom to a send_enabled status (whether a denom is - sendable). - cosmos.base.tendermint.v1beta1.GetBlockByHeightResponse: + Params defines the parameters for the x/gov module. + + Since: cosmos-sdk 0.47 + cosmos.gov.v1.Proposal: type: object properties: - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - block: - type: object - properties: - header: - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a block - in the blockchain, + id: + type: string + format: uint64 + description: id defines the unique id of the proposal. + messages: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - including all blockchain data structures and the rules of the - application's + protocol buffer message. This string must contain at least - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. - data: - type: object - properties: - txs: - type: array - items: - type: string - format: byte - description: >- - Txs that will be applied by state @ block.Height+1. + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). 
+ + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up + a type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might + be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - NOTE: not all txs here are valid. We're just agreeing on the - order first. + Example 2: Pack and unpack a message in Java. - This means that block.AppHash does not include these txs. - title: Data contains the set of transactions included in the block - evidence: - type: object - properties: - evidence: - type: array - items: - type: object - properties: - duplicate_vote_evidence: - type: object - properties: - vote_a: - type: object - properties: - type: - type: string - enum: - - SIGNED_MSG_TYPE_UNKNOWN - - SIGNED_MSG_TYPE_PREVOTE - - SIGNED_MSG_TYPE_PRECOMMIT - - SIGNED_MSG_TYPE_PROPOSAL - default: SIGNED_MSG_TYPE_UNKNOWN - description: >- - SignedMsgType is a type of signed message in the - consensus. + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - - SIGNED_MSG_TYPE_PREVOTE: Votes - - SIGNED_MSG_TYPE_PROPOSAL: Proposals - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - timestamp: - type: string - format: date-time - validator_address: - type: string - format: byte - validator_index: - type: integer - format: int32 - signature: - type: string - format: byte - description: >- - Vote represents a prevote, precommit, or commit vote - from validators for + Example 3: Pack and unpack a message in Python. - consensus. - vote_b: - type: object - properties: - type: - type: string - enum: - - SIGNED_MSG_TYPE_UNKNOWN - - SIGNED_MSG_TYPE_PREVOTE - - SIGNED_MSG_TYPE_PRECOMMIT - - SIGNED_MSG_TYPE_PROPOSAL - default: SIGNED_MSG_TYPE_UNKNOWN - description: >- - SignedMsgType is a type of signed message in the - consensus. + foo = Foo(...) 
+ any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - - SIGNED_MSG_TYPE_PREVOTE: Votes - - SIGNED_MSG_TYPE_PROPOSAL: Proposals - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - timestamp: - type: string - format: date-time - validator_address: - type: string - format: byte - validator_index: - type: integer - format: int32 - signature: - type: string - format: byte - description: >- - Vote represents a prevote, precommit, or commit vote - from validators for + Example 4: Pack and unpack a message in Go - consensus. - total_voting_power: - type: string - format: int64 - validator_power: - type: string - format: int64 - timestamp: - type: string - format: date-time - description: >- - DuplicateVoteEvidence contains evidence of a validator - signed two conflicting votes. - light_client_attack_evidence: - type: object - properties: - conflicting_block: - type: object - properties: - signed_header: - type: object - properties: - header: - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules - for processing a block in the - blockchain, + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - including all blockchain data structures - and the rules of the application's + The pack methods provided by protobuf library will by default use - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: >- - hashes from the app output from the prev - block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: >- - Header defines the structure of a Tendermint - block header. 
- commit: - type: object - properties: - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - signatures: - type: array - items: - type: object - properties: - block_id_flag: - type: string - enum: - - BLOCK_ID_FLAG_UNKNOWN - - BLOCK_ID_FLAG_ABSENT - - BLOCK_ID_FLAG_COMMIT - - BLOCK_ID_FLAG_NIL - default: BLOCK_ID_FLAG_UNKNOWN - title: >- - BlockIdFlag indicates which BlcokID the - signature is for - validator_address: - type: string - format: byte - timestamp: - type: string - format: date-time - signature: - type: string - format: byte - description: >- - CommitSig is a part of the Vote included - in a Commit. - description: >- - Commit contains the evidence that a block - was committed by a set of validators. - validator_set: - type: object - properties: - validators: - type: array - items: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for - use with Tendermint Validators - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - proposer: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for - use with Tendermint Validators - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - total_voting_power: - type: string - format: int64 - common_height: - type: string - format: int64 - byzantine_validators: - type: array - items: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for use - with Tendermint Validators - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - total_voting_power: - type: string - format: int64 - timestamp: - type: string - format: date-time - description: >- - LightClientAttackEvidence contains evidence of a set of - validators attempting to mislead a light client. - last_commit: - type: object - properties: - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. 
Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + messages are the arbitrary messages to be executed if the proposal + passes. + status: + description: status defines the proposal status. + type: string + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_DEPOSIT_PERIOD + - PROPOSAL_STATUS_VOTING_PERIOD + - PROPOSAL_STATUS_PASSED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_FAILED + default: PROPOSAL_STATUS_UNSPECIFIED + final_tally_result: + description: |- + final_tally_result is the final tally result of the proposal. When + querying a proposal via gRPC, this field is not populated until the + proposal's voting period has ended. + type: object + properties: + yes_count: + type: string + description: yes_count is the number of yes votes on a proposal. + abstain_count: + type: string + description: abstain_count is the number of abstain votes on a proposal. + no_count: + type: string + description: no_count is the number of no votes on a proposal. + no_with_veto_count: + type: string + description: >- + no_with_veto_count is the number of no with veto votes on a + proposal. + submit_time: + type: string + format: date-time + description: submit_time is the time of proposal submission. + deposit_end_time: + type: string + format: date-time + description: deposit_end_time is the end time for deposition. + total_deposit: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: total_deposit is the total deposit on the proposal. + voting_start_time: + type: string + format: date-time + description: voting_start_time is the starting time to vote on a proposal. + voting_end_time: + type: string + format: date-time + description: voting_end_time is the end time of voting on a proposal. + metadata: + type: string + description: metadata is any arbitrary metadata attached to the proposal. + title: + type: string + description: 'Since: cosmos-sdk 0.47' + title: title is the title of the proposal + summary: + type: string + description: 'Since: cosmos-sdk 0.47' + title: summary is a short summary of the proposal + proposer: + type: string + description: 'Since: cosmos-sdk 0.47' + title: Proposer is the address of the proposal sumbitter + description: Proposal defines the core field members of a governance proposal. + cosmos.gov.v1.ProposalStatus: + type: string + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_DEPOSIT_PERIOD + - PROPOSAL_STATUS_VOTING_PERIOD + - PROPOSAL_STATUS_PASSED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_FAILED + default: PROPOSAL_STATUS_UNSPECIFIED + description: |- + ProposalStatus enumerates the valid statuses of a proposal. + + - PROPOSAL_STATUS_UNSPECIFIED: PROPOSAL_STATUS_UNSPECIFIED defines the default proposal status. 
+ - PROPOSAL_STATUS_DEPOSIT_PERIOD: PROPOSAL_STATUS_DEPOSIT_PERIOD defines a proposal status during the deposit + period. + - PROPOSAL_STATUS_VOTING_PERIOD: PROPOSAL_STATUS_VOTING_PERIOD defines a proposal status during the voting + period. + - PROPOSAL_STATUS_PASSED: PROPOSAL_STATUS_PASSED defines a proposal status of a proposal that has + passed. + - PROPOSAL_STATUS_REJECTED: PROPOSAL_STATUS_REJECTED defines a proposal status of a proposal that has + been rejected. + - PROPOSAL_STATUS_FAILED: PROPOSAL_STATUS_FAILED defines a proposal status of a proposal that has + failed. + cosmos.gov.v1.QueryDepositResponse: + type: object + properties: + deposit: + type: object + properties: + proposal_id: + type: string + format: uint64 + description: proposal_id defines the unique id of the proposal. + depositor: + type: string + description: depositor defines the deposit addresses from the proposals. + amount: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: amount to be deposited by depositor. + description: |- + Deposit defines an amount deposited by an account address to an active + proposal. + description: >- + QueryDepositResponse is the response type for the Query/Deposit RPC + method. + cosmos.gov.v1.QueryDepositsResponse: + type: object + properties: + deposits: + type: array + items: + type: object + properties: + proposal_id: + type: string + format: uint64 + description: proposal_id defines the unique id of the proposal. + depositor: + type: string + description: depositor defines the deposit addresses from the proposals. + amount: + type: array + items: type: object properties: - hash: + denom: type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - signatures: - type: array - items: - type: object - properties: - block_id_flag: - type: string - enum: - - BLOCK_ID_FLAG_UNKNOWN - - BLOCK_ID_FLAG_ABSENT - - BLOCK_ID_FLAG_COMMIT - - BLOCK_ID_FLAG_NIL - default: BLOCK_ID_FLAG_UNKNOWN - title: BlockIdFlag indicates which BlcokID the signature is for - validator_address: - type: string - format: byte - timestamp: - type: string - format: date-time - signature: - type: string - format: byte - description: CommitSig is a part of the Vote included in a Commit. - description: >- - Commit contains the evidence that a block was committed by a set - of validators. + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: amount to be deposited by depositor. + description: >- + Deposit defines an amount deposited by an account address to an + active + + proposal. + description: deposits defines the requested deposits. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. 
+ total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise description: >- - GetBlockByHeightResponse is the response type for the - Query/GetBlockByHeight RPC method. - cosmos.base.tendermint.v1beta1.GetLatestBlockResponse: + QueryDepositsResponse is the response type for the Query/Deposits RPC + method. + cosmos.gov.v1.QueryParamsResponse: type: object properties: - block_id: + voting_params: + description: |- + Deprecated: Prefer to use `params` instead. + voting_params defines the parameters related to voting. type: object properties: - hash: + voting_period: type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - block: + description: Duration of the voting period. + deposit_params: + description: |- + Deprecated: Prefer to use `params` instead. + deposit_params defines the parameters related to deposit. type: object properties: - header: - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a block - in the blockchain, + min_deposit: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. - including all blockchain data structures and the rules of the - application's - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: Minimum deposit for a proposal to enter voting period. + max_deposit_period: + type: string + description: >- + Maximum period for Atom holders to deposit on a proposal. Initial + value: 2 + + months. + tally_params: + description: |- + Deprecated: Prefer to use `params` instead. + tally_params defines the parameters related to tally. + type: object + properties: + quorum: + type: string + description: >- + Minimum percentage of total stake needed to vote for a result to + be + + considered valid. + threshold: + type: string + description: >- + Minimum proportion of Yes votes for proposal to pass. Default + value: 0.5. + veto_threshold: + type: string + description: >- + Minimum value of Veto votes to Total votes ratio for proposal to + be + + vetoed. Default value: 1/3. + params: + description: |- + params defines all the paramaters of x/gov module. 
+ + Since: cosmos-sdk 0.47 + type: object + properties: + min_deposit: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: Minimum deposit for a proposal to enter voting period. + max_deposit_period: + type: string + description: >- + Maximum period for Atom holders to deposit on a proposal. Initial + value: 2 + + months. + voting_period: + type: string + description: Duration of the voting period. + quorum: + type: string + description: >- + Minimum percentage of total stake needed to vote for a result to + be + considered valid. + threshold: + type: string + description: >- + Minimum proportion of Yes votes for proposal to pass. Default + value: 0.5. + veto_threshold: + type: string + description: >- + Minimum value of Veto votes to Total votes ratio for proposal to + be + vetoed. Default value: 1/3. + min_initial_deposit_ratio: + type: string + description: >- + The ratio representing the proportion of the deposit value that + must be paid at proposal submission. + description: QueryParamsResponse is the response type for the Query/Params RPC method. + cosmos.gov.v1.QueryProposalResponse: + type: object + properties: + proposal: + type: object + properties: + id: + type: string + format: uint64 + description: id defines the unique id of the proposal. + messages: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally set + up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the Any + type. 
+ + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + messages are the arbitrary messages to be executed if the proposal + passes. + status: + description: status defines the proposal status. + type: string + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_DEPOSIT_PERIOD + - PROPOSAL_STATUS_VOTING_PERIOD + - PROPOSAL_STATUS_PASSED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_FAILED + default: PROPOSAL_STATUS_UNSPECIFIED + final_tally_result: + description: >- + final_tally_result is the final tally result of the proposal. When + + querying a proposal via gRPC, this field is not populated until + the + + proposal's voting period has ended. + type: object + properties: + yes_count: type: string - format: byte - last_results_hash: + description: yes_count is the number of yes votes on a proposal. + abstain_count: type: string - format: byte - evidence_hash: + description: abstain_count is the number of abstain votes on a proposal. + no_count: type: string - format: byte - title: consensus info - proposer_address: + description: no_count is the number of no votes on a proposal. + no_with_veto_count: type: string - format: byte - description: Header defines the structure of a Tendermint block header. - data: - type: object - properties: - txs: - type: array - items: - type: string - format: byte description: >- - Txs that will be applied by state @ block.Height+1. + no_with_veto_count is the number of no with veto votes on a + proposal. + submit_time: + type: string + format: date-time + description: submit_time is the time of proposal submission. + deposit_end_time: + type: string + format: date-time + description: deposit_end_time is the end time for deposition. 
+ total_deposit: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. - NOTE: not all txs here are valid. We're just agreeing on the - order first. - This means that block.AppHash does not include these txs. - title: Data contains the set of transactions included in the block - evidence: - type: object - properties: - evidence: - type: array - items: - type: object - properties: - duplicate_vote_evidence: - type: object - properties: - vote_a: - type: object - properties: - type: - type: string - enum: - - SIGNED_MSG_TYPE_UNKNOWN - - SIGNED_MSG_TYPE_PREVOTE - - SIGNED_MSG_TYPE_PRECOMMIT - - SIGNED_MSG_TYPE_PROPOSAL - default: SIGNED_MSG_TYPE_UNKNOWN - description: >- - SignedMsgType is a type of signed message in the - consensus. + NOTE: The amount field is an Int which implements the custom + method - - SIGNED_MSG_TYPE_PREVOTE: Votes - - SIGNED_MSG_TYPE_PROPOSAL: Proposals - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - timestamp: - type: string - format: date-time - validator_address: - type: string - format: byte - validator_index: - type: integer - format: int32 - signature: - type: string - format: byte - description: >- - Vote represents a prevote, precommit, or commit vote - from validators for + signatures required by gogoproto. + description: total_deposit is the total deposit on the proposal. + voting_start_time: + type: string + format: date-time + description: voting_start_time is the starting time to vote on a proposal. + voting_end_time: + type: string + format: date-time + description: voting_end_time is the end time of voting on a proposal. + metadata: + type: string + description: metadata is any arbitrary metadata attached to the proposal. + title: + type: string + description: 'Since: cosmos-sdk 0.47' + title: title is the title of the proposal + summary: + type: string + description: 'Since: cosmos-sdk 0.47' + title: summary is a short summary of the proposal + proposer: + type: string + description: 'Since: cosmos-sdk 0.47' + title: Proposer is the address of the proposal sumbitter + description: Proposal defines the core field members of a governance proposal. + description: >- + QueryProposalResponse is the response type for the Query/Proposal RPC + method. + cosmos.gov.v1.QueryProposalsResponse: + type: object + properties: + proposals: + type: array + items: + type: object + properties: + id: + type: string + format: uint64 + description: id defines the unique id of the proposal. + messages: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized - consensus. - vote_b: - type: object - properties: - type: - type: string - enum: - - SIGNED_MSG_TYPE_UNKNOWN - - SIGNED_MSG_TYPE_PREVOTE - - SIGNED_MSG_TYPE_PRECOMMIT - - SIGNED_MSG_TYPE_PROPOSAL - default: SIGNED_MSG_TYPE_UNKNOWN - description: >- - SignedMsgType is a type of signed message in the - consensus. + protocol buffer message. 
This string must contain at least - - SIGNED_MSG_TYPE_PREVOTE: Votes - - SIGNED_MSG_TYPE_PROPOSAL: Proposals - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - timestamp: - type: string - format: date-time - validator_address: - type: string - format: byte - validator_index: - type: integer - format: int32 - signature: - type: string - format: byte - description: >- - Vote represents a prevote, precommit, or commit vote - from validators for + one "/" character. The last segment of the URL's path must + represent - consensus. - total_voting_power: - type: string - format: int64 - validator_power: - type: string - format: int64 - timestamp: - type: string - format: date-time - description: >- - DuplicateVoteEvidence contains evidence of a validator - signed two conflicting votes. - light_client_attack_evidence: - type: object - properties: - conflicting_block: - type: object - properties: - signed_header: - type: object - properties: - header: - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules - for processing a block in the - blockchain, + the fully qualified name of the type (as in - including all blockchain data structures - and the rules of the application's + `path/google.protobuf.Duration`). The name should be in a + canonical form - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: >- - hashes from the app output from the prev - block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: >- - Header defines the structure of a Tendermint - block header. 
- commit: - type: object - properties: - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - signatures: - type: array - items: - type: object - properties: - block_id_flag: - type: string - enum: - - BLOCK_ID_FLAG_UNKNOWN - - BLOCK_ID_FLAG_ABSENT - - BLOCK_ID_FLAG_COMMIT - - BLOCK_ID_FLAG_NIL - default: BLOCK_ID_FLAG_UNKNOWN - title: >- - BlockIdFlag indicates which BlcokID the - signature is for - validator_address: - type: string - format: byte - timestamp: - type: string - format: date-time - signature: - type: string - format: byte - description: >- - CommitSig is a part of the Vote included - in a Commit. - description: >- - Commit contains the evidence that a block - was committed by a set of validators. - validator_set: - type: object - properties: - validators: - type: array - items: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for - use with Tendermint Validators - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - proposer: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for - use with Tendermint Validators - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - total_voting_power: - type: string - format: int64 - common_height: - type: string - format: int64 - byzantine_validators: - type: array - items: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for use - with Tendermint Validators - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - total_voting_power: - type: string - format: int64 - timestamp: - type: string - format: date-time - description: >- - LightClientAttackEvidence contains evidence of a set of - validators attempting to mislead a light client. - last_commit: - type: object - properties: - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) 
+ + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + messages are the arbitrary messages to be executed if the + proposal passes. + status: + description: status defines the proposal status. + type: string + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_DEPOSIT_PERIOD + - PROPOSAL_STATUS_VOTING_PERIOD + - PROPOSAL_STATUS_PASSED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_FAILED + default: PROPOSAL_STATUS_UNSPECIFIED + final_tally_result: + description: >- + final_tally_result is the final tally result of the proposal. + When + + querying a proposal via gRPC, this field is not populated until + the + + proposal's voting period has ended. + type: object + properties: + yes_count: + type: string + description: yes_count is the number of yes votes on a proposal. + abstain_count: + type: string + description: abstain_count is the number of abstain votes on a proposal. + no_count: + type: string + description: no_count is the number of no votes on a proposal. 
+ no_with_veto_count: + type: string + description: >- + no_with_veto_count is the number of no with veto votes on a + proposal. + submit_time: + type: string + format: date-time + description: submit_time is the time of proposal submission. + deposit_end_time: + type: string + format: date-time + description: deposit_end_time is the end time for deposition. + total_deposit: + type: array + items: type: object properties: - hash: + denom: type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - signatures: - type: array - items: - type: object - properties: - block_id_flag: - type: string - enum: - - BLOCK_ID_FLAG_UNKNOWN - - BLOCK_ID_FLAG_ABSENT - - BLOCK_ID_FLAG_COMMIT - - BLOCK_ID_FLAG_NIL - default: BLOCK_ID_FLAG_UNKNOWN - title: BlockIdFlag indicates which BlcokID the signature is for - validator_address: - type: string - format: byte - timestamp: - type: string - format: date-time - signature: - type: string - format: byte - description: CommitSig is a part of the Vote included in a Commit. + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: total_deposit is the total deposit on the proposal. + voting_start_time: + type: string + format: date-time + description: voting_start_time is the starting time to vote on a proposal. + voting_end_time: + type: string + format: date-time + description: voting_end_time is the end time of voting on a proposal. + metadata: + type: string + description: metadata is any arbitrary metadata attached to the proposal. + title: + type: string + description: 'Since: cosmos-sdk 0.47' + title: title is the title of the proposal + summary: + type: string + description: 'Since: cosmos-sdk 0.47' + title: summary is a short summary of the proposal + proposer: + type: string + description: 'Since: cosmos-sdk 0.47' + title: Proposer is the address of the proposal sumbitter + description: Proposal defines the core field members of a governance proposal. + description: proposals defines all the requested governance proposals. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + QueryProposalsResponse is the response type for the Query/Proposals RPC + method. + cosmos.gov.v1.QueryTallyResultResponse: + type: object + properties: + tally: + description: tally defines the requested tally. + type: object + properties: + yes_count: + type: string + description: yes_count is the number of yes votes on a proposal. + abstain_count: + type: string + description: abstain_count is the number of abstain votes on a proposal. + no_count: + type: string + description: no_count is the number of no votes on a proposal. + no_with_veto_count: + type: string description: >- - Commit contains the evidence that a block was committed by a set - of validators. 
+ no_with_veto_count is the number of no with veto votes on a + proposal. description: >- - GetLatestBlockResponse is the response type for the Query/GetLatestBlock - RPC method. - cosmos.base.tendermint.v1beta1.GetLatestValidatorSetResponse: + QueryTallyResultResponse is the response type for the Query/Tally RPC + method. + cosmos.gov.v1.QueryVoteResponse: type: object properties: - block_height: - type: string - format: int64 - validators: + vote: + type: object + properties: + proposal_id: + type: string + format: uint64 + description: proposal_id defines the unique id of the proposal. + voter: + type: string + description: voter is the voter address of the proposal. + options: + type: array + items: + type: object + properties: + option: + description: >- + option defines the valid vote options, it must not contain + duplicate vote options. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + weight: + type: string + description: weight is the vote weight associated with the vote option. + description: WeightedVoteOption defines a unit of vote for vote split. + description: options is the weighted vote options. + metadata: + type: string + description: metadata is any arbitrary metadata to attached to the vote. + description: |- + Vote defines a vote on a governance proposal. + A Vote consists of a proposal ID, the voter, and the vote option. + description: QueryVoteResponse is the response type for the Query/Vote RPC method. + cosmos.gov.v1.QueryVotesResponse: + type: object + properties: + votes: type: array items: type: object properties: - address: + proposal_id: type: string - pub_key: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized - - protocol buffer message. This string must contain at least - - one "/" character. The last segment of the URL's path must - represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in a - canonical form - - (e.g., leading "." is not accepted). - + format: uint64 + description: proposal_id defines the unique id of the proposal. + voter: + type: string + description: voter is the voter address of the proposal. + options: + type: array + items: + type: object + properties: + option: + description: >- + option defines the valid vote options, it must not contain + duplicate vote options. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + weight: + type: string + description: weight is the vote weight associated with the vote option. + description: WeightedVoteOption defines a unit of vote for vote split. + description: options is the weighted vote options. + metadata: + type: string + description: metadata is any arbitrary metadata to attached to the vote. + description: |- + Vote defines a vote on a governance proposal. + A Vote consists of a proposal ID, the voter, and the vote option. + description: votes defines the queried votes. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. 
+ total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - In practice, teams usually precompile into the binary all - types that they + was set, its value is undefined otherwise + description: QueryVotesResponse is the response type for the Query/Votes RPC method. + cosmos.gov.v1.TallyParams: + type: object + properties: + quorum: + type: string + description: |- + Minimum percentage of total stake needed to vote for a result to be + considered valid. + threshold: + type: string + description: >- + Minimum proportion of Yes votes for proposal to pass. Default value: + 0.5. + veto_threshold: + type: string + description: |- + Minimum value of Veto votes to Total votes ratio for proposal to be + vetoed. Default value: 1/3. + description: TallyParams defines the params for tallying votes on governance proposals. + cosmos.gov.v1.TallyResult: + type: object + properties: + yes_count: + type: string + description: yes_count is the number of yes votes on a proposal. + abstain_count: + type: string + description: abstain_count is the number of abstain votes on a proposal. + no_count: + type: string + description: no_count is the number of no votes on a proposal. + no_with_veto_count: + type: string + description: no_with_veto_count is the number of no with veto votes on a proposal. + description: TallyResult defines a standard tally for a governance proposal. + cosmos.gov.v1.Vote: + type: object + properties: + proposal_id: + type: string + format: uint64 + description: proposal_id defines the unique id of the proposal. + voter: + type: string + description: voter is the voter address of the proposal. + options: + type: array + items: + type: object + properties: + option: + description: >- + option defines the valid vote options, it must not contain + duplicate vote options. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + weight: + type: string + description: weight is the vote weight associated with the vote option. + description: WeightedVoteOption defines a unit of vote for vote split. + description: options is the weighted vote options. + metadata: + type: string + description: metadata is any arbitrary metadata to attached to the vote. + description: |- + Vote defines a vote on a governance proposal. + A Vote consists of a proposal ID, the voter, and the vote option. + cosmos.gov.v1.VoteOption: + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + description: >- + VoteOption enumerates the valid vote options for a given governance + proposal. - expect it to use in the context of Any. However, for URLs - which use the + - VOTE_OPTION_UNSPECIFIED: VOTE_OPTION_UNSPECIFIED defines a no-op vote option. + - VOTE_OPTION_YES: VOTE_OPTION_YES defines a yes vote option. + - VOTE_OPTION_ABSTAIN: VOTE_OPTION_ABSTAIN defines an abstain vote option. + - VOTE_OPTION_NO: VOTE_OPTION_NO defines a no vote option. + - VOTE_OPTION_NO_WITH_VETO: VOTE_OPTION_NO_WITH_VETO defines a no with veto vote option. + cosmos.gov.v1.VotingParams: + type: object + properties: + voting_period: + type: string + description: Duration of the voting period. + description: VotingParams defines the params for voting on governance proposals. 
+ cosmos.gov.v1.WeightedVoteOption: + type: object + properties: + option: + description: >- + option defines the valid vote options, it must not contain duplicate + vote options. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + weight: + type: string + description: weight is the vote weight associated with the vote option. + description: WeightedVoteOption defines a unit of vote for vote split. + cosmos.mint.v1beta1.Params: + type: object + properties: + mint_denom: + type: string + title: type of coin to mint + inflation_rate_change: + type: string + title: maximum annual change in inflation rate + inflation_max: + type: string + title: maximum inflation rate + inflation_min: + type: string + title: minimum inflation rate + goal_bonded: + type: string + title: goal of percent bonded atoms + blocks_per_year: + type: string + format: uint64 + title: expected blocks per year + description: Params defines the parameters for the x/mint module. + cosmos.mint.v1beta1.QueryAnnualProvisionsResponse: + type: object + properties: + annual_provisions: + type: string + format: byte + description: annual_provisions is the current minting annual provisions value. + description: |- + QueryAnnualProvisionsResponse is the response type for the + Query/AnnualProvisions RPC method. + cosmos.mint.v1beta1.QueryInflationResponse: + type: object + properties: + inflation: + type: string + format: byte + description: inflation is the current minting inflation value. + description: |- + QueryInflationResponse is the response type for the Query/Inflation RPC + method. + cosmos.mint.v1beta1.QueryParamsResponse: + type: object + properties: + params: + description: params defines the parameters of the module. + type: object + properties: + mint_denom: + type: string + title: type of coin to mint + inflation_rate_change: + type: string + title: maximum annual change in inflation rate + inflation_max: + type: string + title: maximum inflation rate + inflation_min: + type: string + title: minimum inflation rate + goal_bonded: + type: string + title: goal of percent bonded atoms + blocks_per_year: + type: string + format: uint64 + title: expected blocks per year + description: QueryParamsResponse is the response type for the Query/Params RPC method. + cosmos.params.v1beta1.ParamChange: + type: object + properties: + subspace: + type: string + key: + type: string + value: + type: string + description: |- + ParamChange defines an individual parameter change, for use in + ParameterChangeProposal. + cosmos.params.v1beta1.QueryParamsResponse: + type: object + properties: + param: + description: param defines the queried parameter. + type: object + properties: + subspace: + type: string + key: + type: string + value: + type: string + description: QueryParamsResponse is response type for the Query/Params RPC method. + cosmos.params.v1beta1.QuerySubspacesResponse: + type: object + properties: + subspaces: + type: array + items: + type: object + properties: + subspace: + type: string + keys: + type: array + items: + type: string + description: >- + Subspace defines a parameter subspace name and all the keys that + exist for - scheme `http`, `https`, or no scheme, one can optionally set - up a type + the subspace. 
- server that maps type URLs to message definitions as - follows: + Since: cosmos-sdk 0.46 + description: |- + QuerySubspacesResponse defines the response types for querying for all + registered subspaces and all keys for a subspace. - * If no scheme is provided, `https` is assumed. + Since: cosmos-sdk 0.46 + cosmos.params.v1beta1.Subspace: + type: object + properties: + subspace: + type: string + keys: + type: array + items: + type: string + description: |- + Subspace defines a parameter subspace name and all the keys that exist for + the subspace. - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on - the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + Since: cosmos-sdk 0.46 + cosmos.slashing.v1beta1.Params: + type: object + properties: + signed_blocks_window: + type: string + format: int64 + min_signed_per_window: + type: string + format: byte + downtime_jail_duration: + type: string + slash_fraction_double_sign: + type: string + format: byte + slash_fraction_downtime: + type: string + format: byte + description: Params represents the parameters used for by the slashing module. + cosmos.slashing.v1beta1.QueryParamsResponse: + type: object + properties: + params: + type: object + properties: + signed_blocks_window: + type: string + format: int64 + min_signed_per_window: + type: string + format: byte + downtime_jail_duration: + type: string + slash_fraction_double_sign: + type: string + format: byte + slash_fraction_downtime: + type: string + format: byte + description: Params represents the parameters used for by the slashing module. + title: QueryParamsResponse is the response type for the Query/Params RPC method + cosmos.slashing.v1beta1.QuerySigningInfoResponse: + type: object + properties: + val_signing_info: + type: object + properties: + address: + type: string + start_height: + type: string + format: int64 + title: Height at which validator was first a candidate OR was unjailed + index_offset: + type: string + format: int64 + description: >- + Index which is incremented each time the validator was a bonded - Note: this functionality is not currently available in the - official + in a block and may have signed a precommit or not. This in + conjunction with the - protobuf release, and it is not used for type URLs beginning - with + `SignedBlocksWindow` param determines the index in the + `MissedBlocksBitArray`. + jailed_until: + type: string + format: date-time + description: >- + Timestamp until which the validator is jailed due to liveness + downtime. + tombstoned: + type: boolean + description: >- + Whether or not a validator has been tombstoned (killed out of + validator set). It is set - type.googleapis.com. + once the validator commits an equivocation or for any other + configured misbehiavor. + missed_blocks_counter: + type: string + format: int64 + description: >- + A counter kept to avoid unnecessary array reads. + Note that `Sum(MissedBlocksBitArray)` always equals + `MissedBlocksCounter`. + description: >- + ValidatorSigningInfo defines a validator's signing info for monitoring + their - Schemes other than `http`, `https` (or the empty scheme) - might be + liveness activity. 
+ title: val_signing_info is the signing info of requested val cons address + title: >- + QuerySigningInfoResponse is the response type for the Query/SigningInfo + RPC - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. + method + cosmos.slashing.v1beta1.QuerySigningInfosResponse: + type: object + properties: + info: + type: array + items: + type: object + properties: + address: + type: string + start_height: + type: string + format: int64 + title: Height at which validator was first a candidate OR was unjailed + index_offset: + type: string + format: int64 description: >- - `Any` contains an arbitrary serialized protocol buffer message - along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values in - the form - - of utility functions or additional generated methods of the Any - type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } - - The pack methods provided by protobuf library will by default - use - - 'type.googleapis.com/full.type.name' as the type URL and the - unpack - - methods only use the fully qualified type name after the last - '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with an - - additional field `@type` which contains the type URL. Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom JSON - - representation, that representation will be embedded adding a - field - - `value` which holds the custom JSON in addition to the `@type` + Index which is incremented each time the validator was a bonded - field. Example (for message [google.protobuf.Duration][]): + in a block and may have signed a precommit or not. This in + conjunction with the - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - voting_power: + `SignedBlocksWindow` param determines the index in the + `MissedBlocksBitArray`. + jailed_until: type: string - format: int64 - proposer_priority: + format: date-time + description: >- + Timestamp until which the validator is jailed due to liveness + downtime. + tombstoned: + type: boolean + description: >- + Whether or not a validator has been tombstoned (killed out of + validator set). It is set + + once the validator commits an equivocation or for any other + configured misbehiavor. + missed_blocks_counter: type: string format: int64 - description: Validator is the type for the validator-set. 
+ description: >- + A counter kept to avoid unnecessary array reads. + + Note that `Sum(MissedBlocksBitArray)` always equals + `MissedBlocksCounter`. + description: >- + ValidatorSigningInfo defines a validator's signing info for + monitoring their + + liveness activity. + title: info is the signing info of all validators pagination: - description: pagination defines an pagination for the response. type: object properties: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -35455,106 +57983,291 @@ definitions: PageRequest.count_total was set, its value is undefined otherwise + description: |- + PageResponse is to be embedded in gRPC response messages where the + corresponding request message has used PageRequest. + + message SomeResponse { + repeated Bar results = 1; + PageResponse page = 2; + } + title: >- + QuerySigningInfosResponse is the response type for the Query/SigningInfos + RPC + + method + cosmos.slashing.v1beta1.ValidatorSigningInfo: + type: object + properties: + address: + type: string + start_height: + type: string + format: int64 + title: Height at which validator was first a candidate OR was unjailed + index_offset: + type: string + format: int64 + description: >- + Index which is incremented each time the validator was a bonded + + in a block and may have signed a precommit or not. This in conjunction + with the + + `SignedBlocksWindow` param determines the index in the + `MissedBlocksBitArray`. + jailed_until: + type: string + format: date-time + description: >- + Timestamp until which the validator is jailed due to liveness + downtime. + tombstoned: + type: boolean + description: >- + Whether or not a validator has been tombstoned (killed out of + validator set). It is set + + once the validator commits an equivocation or for any other configured + misbehiavor. + missed_blocks_counter: + type: string + format: int64 + description: >- + A counter kept to avoid unnecessary array reads. + + Note that `Sum(MissedBlocksBitArray)` always equals + `MissedBlocksCounter`. description: >- - GetLatestValidatorSetResponse is the response type for the - Query/GetValidatorSetByHeight RPC method. - cosmos.base.tendermint.v1beta1.GetNodeInfoResponse: + ValidatorSigningInfo defines a validator's signing info for monitoring + their + + liveness activity. + cosmos.staking.v1beta1.BondStatus: + type: string + enum: + - BOND_STATUS_UNSPECIFIED + - BOND_STATUS_UNBONDED + - BOND_STATUS_UNBONDING + - BOND_STATUS_BONDED + default: BOND_STATUS_UNSPECIFIED + description: |- + BondStatus is the status of a validator. + + - BOND_STATUS_UNSPECIFIED: UNSPECIFIED defines an invalid validator status. + - BOND_STATUS_UNBONDED: UNBONDED defines a validator that is not bonded. + - BOND_STATUS_UNBONDING: UNBONDING defines a validator that is unbonding. + - BOND_STATUS_BONDED: BONDED defines a validator that is bonded. + cosmos.staking.v1beta1.Commission: type: object properties: - default_node_info: + commission_rates: + description: >- + commission_rates defines the initial commission rates to be used for + creating a validator. 
type: object properties: - protocol_version: - type: object - properties: - p2p: - type: string - format: uint64 - block: - type: string - format: uint64 - app: - type: string - format: uint64 - default_node_id: + rate: type: string - listen_addr: + description: rate is the commission rate charged to delegators, as a fraction. + max_rate: type: string - network: + description: >- + max_rate defines the maximum commission rate which validator can + ever charge, as a fraction. + max_change_rate: + type: string + description: >- + max_change_rate defines the maximum daily increase of the + validator commission, as a fraction. + update_time: + type: string + format: date-time + description: update_time is the last time the commission rate was changed. + description: Commission defines commission parameters for a given validator. + cosmos.staking.v1beta1.CommissionRates: + type: object + properties: + rate: + type: string + description: rate is the commission rate charged to delegators, as a fraction. + max_rate: + type: string + description: >- + max_rate defines the maximum commission rate which validator can ever + charge, as a fraction. + max_change_rate: + type: string + description: >- + max_change_rate defines the maximum daily increase of the validator + commission, as a fraction. + description: >- + CommissionRates defines the initial commission rates to be used for + creating + + a validator. + cosmos.staking.v1beta1.Delegation: + type: object + properties: + delegator_address: + type: string + description: delegator_address is the bech32-encoded address of the delegator. + validator_address: + type: string + description: validator_address is the bech32-encoded address of the validator. + shares: + type: string + description: shares define the delegation shares received. + description: |- + Delegation represents the bond with tokens held by an account. It is + owned by one delegator, and is associated with the voting power of one + validator. + cosmos.staking.v1beta1.DelegationResponse: + type: object + properties: + delegation: + type: object + properties: + delegator_address: + type: string + description: delegator_address is the bech32-encoded address of the delegator. + validator_address: + type: string + description: validator_address is the bech32-encoded address of the validator. + shares: + type: string + description: shares define the delegation shares received. + description: |- + Delegation represents the bond with tokens held by an account. It is + owned by one delegator, and is associated with the voting power of one + validator. + balance: + type: object + properties: + denom: + type: string + amount: type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + description: |- + DelegationResponse is equivalent to Delegation except that it contains a + balance in addition to shares which is more suitable for client responses. + cosmos.staking.v1beta1.Description: + type: object + properties: + moniker: + type: string + description: moniker defines a human-readable name for the validator. + identity: + type: string + description: >- + identity defines an optional identity signature (ex. UPort or + Keybase). + website: + type: string + description: website defines an optional website link. + security_contact: + type: string + description: security_contact defines an optional email for security contact. 
+ details: + type: string + description: details define other optional details. + description: Description defines a validator description. + cosmos.staking.v1beta1.HistoricalInfo: + type: object + properties: + header: + type: object + properties: version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block in + the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. + chain_id: type: string - channels: + height: type: string - format: byte - moniker: + format: int64 + time: type: string - other: + format: date-time + last_block_id: + title: prev block info type: object properties: - tx_index: - type: string - rpc_address: + hash: type: string - application_version: - type: object - properties: - name: + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + last_commit_hash: type: string - app_name: + format: byte + title: hashes of block data + data_hash: type: string - version: + format: byte + validators_hash: type: string - git_commit: + format: byte + title: hashes from the app output from the prev block + next_validators_hash: type: string - build_tags: + format: byte + consensus_hash: type: string - go_version: + format: byte + app_hash: type: string - build_deps: - type: array - items: - type: object - properties: - path: - type: string - title: module path - version: - type: string - title: module version - sum: - type: string - title: checksum - title: Module is the type for VersionInfo - cosmos_sdk_version: + format: byte + last_results_hash: type: string - description: VersionInfo is the type for the GetNodeInfoResponse message. - description: >- - GetNodeInfoResponse is the request type for the Query/GetNodeInfo RPC - method. - cosmos.base.tendermint.v1beta1.GetSyncingResponse: - type: object - properties: - syncing: - type: boolean - format: boolean - description: >- - GetSyncingResponse is the response type for the Query/GetSyncing RPC - method. - cosmos.base.tendermint.v1beta1.GetValidatorSetByHeightResponse: - type: object - properties: - block_height: - type: string - format: int64 - validators: + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + valset: type: array items: type: object properties: - address: + operator_address: type: string - pub_key: + description: >- + operator_address defines the address of the validator's + operator; bech encoded in JSON. + consensus_pubkey: type: object properties: type_url: @@ -35653,7 +58366,7 @@ definitions: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -35663,13 +58376,16 @@ definitions: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... 
} @@ -35690,7 +58406,6 @@ definitions: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -35723,1115 +58438,1084 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - voting_power: + jailed: + type: boolean + description: >- + jailed defined whether the validator has been jailed from bonded + status or not. + status: + description: status is the validator status (bonded/unbonding/unbonded). type: string - format: int64 - proposer_priority: + enum: + - BOND_STATUS_UNSPECIFIED + - BOND_STATUS_UNBONDED + - BOND_STATUS_UNBONDING + - BOND_STATUS_BONDED + default: BOND_STATUS_UNSPECIFIED + tokens: type: string - format: int64 - description: Validator is the type for the validator-set. - pagination: - description: pagination defines an pagination for the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: >- - GetValidatorSetByHeightResponse is the response type for the - Query/GetValidatorSetByHeight RPC method. - cosmos.base.tendermint.v1beta1.Module: - type: object - properties: - path: - type: string - title: module path - version: - type: string - title: module version - sum: - type: string - title: checksum - title: Module is the type for VersionInfo - cosmos.base.tendermint.v1beta1.Validator: - type: object - properties: - address: - type: string - pub_key: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized - - protocol buffer message. This string must contain at least - - one "/" character. The last segment of the URL's path must - represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in a - canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary all types - that they - - expect it to use in the context of Any. However, for URLs which - use the - - scheme `http`, `https`, or no scheme, one can optionally set up a - type - - server that maps type URLs to message definitions as follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in the - official - - protobuf release, and it is not used for type URLs beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty scheme) might be - - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above specified - type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message along - with a - - URL that describes the type of the serialized message. 
- - - Protobuf library provides support to pack/unpack Any values in the - form - - of utility functions or additional generated methods of the Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } - - The pack methods provided by protobuf library will by default use - - 'type.googleapis.com/full.type.name' as the type URL and the unpack - - methods only use the fully qualified type name after the last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield type - - name "y.z". - - - - JSON - - ==== - - The JSON representation of an `Any` value uses the regular - - representation of the deserialized, embedded message, with an - - additional field `@type` which contains the type URL. Example: - - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } - - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom JSON - - representation, that representation will be embedded adding a field - - `value` which holds the custom JSON in addition to the `@type` - - field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - description: Validator is the type for the validator-set. - cosmos.base.tendermint.v1beta1.VersionInfo: - type: object - properties: - name: - type: string - app_name: - type: string - version: - type: string - git_commit: - type: string - build_tags: - type: string - go_version: - type: string - build_deps: - type: array - items: - type: object - properties: - path: + description: tokens define the delegated tokens (incl. self-delegation). + delegator_shares: type: string - title: module path - version: + description: >- + delegator_shares defines total shares issued to a validator's + delegators. + description: + description: description defines the description terms for the validator. + type: object + properties: + moniker: + type: string + description: moniker defines a human-readable name for the validator. + identity: + type: string + description: >- + identity defines an optional identity signature (ex. UPort + or Keybase). + website: + type: string + description: website defines an optional website link. + security_contact: + type: string + description: >- + security_contact defines an optional email for security + contact. + details: + type: string + description: details define other optional details. + unbonding_height: type: string - title: module version - sum: + format: int64 + description: >- + unbonding_height defines, if unbonding, the height at which this + validator has begun unbonding. 
+ unbonding_time: type: string - title: checksum - title: Module is the type for VersionInfo - cosmos_sdk_version: - type: string - description: VersionInfo is the type for the GetNodeInfoResponse message. - tendermint.crypto.PublicKey: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: PublicKey defines the keys available for use with Tendermint Validators - tendermint.p2p.DefaultNodeInfo: - type: object - properties: - protocol_version: - type: object - properties: - p2p: - type: string - format: uint64 - block: - type: string - format: uint64 - app: - type: string - format: uint64 - default_node_id: - type: string - listen_addr: - type: string - network: - type: string - version: - type: string - channels: - type: string - format: byte - moniker: - type: string - other: - type: object - properties: - tx_index: - type: string - rpc_address: - type: string - tendermint.p2p.DefaultNodeInfoOther: - type: object - properties: - tx_index: - type: string - rpc_address: - type: string - tendermint.p2p.ProtocolVersion: - type: object - properties: - p2p: - type: string - format: uint64 - block: - type: string - format: uint64 - app: - type: string - format: uint64 - tendermint.types.Block: - type: object - properties: - header: - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: + format: date-time + description: >- + unbonding_time defines, if unbonding, the min time for the + validator to complete unbonding. + commission: + description: commission defines the commission parameters. + type: object + properties: + commission_rates: + description: >- + commission_rates defines the initial commission rates to be + used for creating a validator. + type: object + properties: + rate: + type: string + description: >- + rate is the commission rate charged to delegators, as a + fraction. + max_rate: + type: string + description: >- + max_rate defines the maximum commission rate which + validator can ever charge, as a fraction. + max_change_rate: + type: string + description: >- + max_change_rate defines the maximum daily increase of + the validator commission, as a fraction. + update_time: + type: string + format: date-time + description: >- + update_time is the last time the commission rate was + changed. + min_self_delegation: + type: string + description: >- + min_self_delegation is the validator's self declared minimum + self delegation. + + + Since: cosmos-sdk 0.46 + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + strictly positive if this validator's unbonding has been stopped + by external modules + unbonding_ids: + type: array + items: type: string format: uint64 - description: >- - Consensus captures the consensus rules for processing a block in - the blockchain, + title: >- + list of unbonding ids, each uniquely identifing an unbonding of + this validator + description: >- + Validator defines a validator, together with the total amount of the - including all blockchain data structures and the rules of the - application's + Validator's bond shares and their exchange rate to coins. Slashing + results in - state transition machine. 
- chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. - data: - type: object - properties: - txs: - type: array - items: - type: string - format: byte - description: >- - Txs that will be applied by state @ block.Height+1. + a decrease in the exchange rate, allowing correct calculation of + future - NOTE: not all txs here are valid. We're just agreeing on the - order first. + undelegations without iterating over delegators. When coins are + delegated to - This means that block.AppHash does not include these txs. - title: Data contains the set of transactions included in the block - evidence: - type: object - properties: - evidence: - type: array - items: - type: object - properties: - duplicate_vote_evidence: - type: object - properties: - vote_a: - type: object - properties: - type: - type: string - enum: - - SIGNED_MSG_TYPE_UNKNOWN - - SIGNED_MSG_TYPE_PREVOTE - - SIGNED_MSG_TYPE_PRECOMMIT - - SIGNED_MSG_TYPE_PROPOSAL - default: SIGNED_MSG_TYPE_UNKNOWN - description: >- - SignedMsgType is a type of signed message in the - consensus. + this validator, the validator is credited with a delegation whose + number of - - SIGNED_MSG_TYPE_PREVOTE: Votes - - SIGNED_MSG_TYPE_PROPOSAL: Proposals - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - timestamp: - type: string - format: date-time - validator_address: - type: string - format: byte - validator_index: - type: integer - format: int32 - signature: - type: string - format: byte - description: >- - Vote represents a prevote, precommit, or commit vote - from validators for + bond shares is based on the amount of coins delegated divided by the + current - consensus. - vote_b: - type: object - properties: - type: - type: string - enum: - - SIGNED_MSG_TYPE_UNKNOWN - - SIGNED_MSG_TYPE_PREVOTE - - SIGNED_MSG_TYPE_PRECOMMIT - - SIGNED_MSG_TYPE_PROPOSAL - default: SIGNED_MSG_TYPE_UNKNOWN - description: >- - SignedMsgType is a type of signed message in the - consensus. + exchange rate. 
Voting power can be calculated as total bonded shares - - SIGNED_MSG_TYPE_PREVOTE: Votes - - SIGNED_MSG_TYPE_PROPOSAL: Proposals - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - timestamp: - type: string - format: date-time - validator_address: - type: string - format: byte - validator_index: - type: integer - format: int32 - signature: - type: string - format: byte - description: >- - Vote represents a prevote, precommit, or commit vote - from validators for + multiplied by exchange rate. + description: >- + HistoricalInfo contains header and validator information for a given + block. - consensus. - total_voting_power: - type: string - format: int64 - validator_power: - type: string - format: int64 - timestamp: - type: string - format: date-time - description: >- - DuplicateVoteEvidence contains evidence of a validator - signed two conflicting votes. - light_client_attack_evidence: - type: object - properties: - conflicting_block: - type: object - properties: - signed_header: - type: object - properties: - header: - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for - processing a block in the blockchain, + It is stored as part of staking module's state, which persists the `n` + most - including all blockchain data structures and - the rules of the application's + recent HistoricalInfo - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: >- - hashes from the app output from the prev - block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: >- - Header defines the structure of a Tendermint - block header. 
- commit: - type: object - properties: - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - signatures: - type: array - items: - type: object - properties: - block_id_flag: - type: string - enum: - - BLOCK_ID_FLAG_UNKNOWN - - BLOCK_ID_FLAG_ABSENT - - BLOCK_ID_FLAG_COMMIT - - BLOCK_ID_FLAG_NIL - default: BLOCK_ID_FLAG_UNKNOWN - title: >- - BlockIdFlag indicates which BlcokID the - signature is for - validator_address: - type: string - format: byte - timestamp: - type: string - format: date-time - signature: - type: string - format: byte - description: >- - CommitSig is a part of the Vote included - in a Commit. - description: >- - Commit contains the evidence that a block was - committed by a set of validators. - validator_set: - type: object - properties: - validators: - type: array - items: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for - use with Tendermint Validators - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - proposer: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for use - with Tendermint Validators - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - total_voting_power: - type: string - format: int64 - common_height: - type: string - format: int64 - byzantine_validators: - type: array - items: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for use with - Tendermint Validators - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - total_voting_power: - type: string - format: int64 - timestamp: - type: string - format: date-time - description: >- - LightClientAttackEvidence contains evidence of a set of - validators attempting to mislead a light client. - last_commit: + (`n` is set by the staking module's `historical_entries` parameter). + cosmos.staking.v1beta1.Params: + type: object + properties: + unbonding_time: + type: string + description: unbonding_time is the time duration of unbonding. + max_validators: + type: integer + format: int64 + description: max_validators is the maximum number of validators. + max_entries: + type: integer + format: int64 + description: >- + max_entries is the max entries for either unbonding delegation or + redelegation (per pair/trio). + historical_entries: + type: integer + format: int64 + description: historical_entries is the number of historical entries to persist. + bond_denom: + type: string + description: bond_denom defines the bondable coin denomination. 
+ min_commission_rate: + type: string + title: >- + min_commission_rate is the chain-wide minimum commission rate that a + validator can charge their delegators + description: Params defines the parameters for the x/staking module. + cosmos.staking.v1beta1.Pool: + type: object + properties: + not_bonded_tokens: + type: string + bonded_tokens: + type: string + description: |- + Pool is used for tracking bonded and not-bonded token supply of the bond + denomination. + cosmos.staking.v1beta1.QueryDelegationResponse: + type: object + properties: + delegation_response: type: object properties: - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: + delegation: type: object properties: - hash: + delegator_address: type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - signatures: - type: array - items: + description: >- + delegator_address is the bech32-encoded address of the + delegator. + validator_address: + type: string + description: >- + validator_address is the bech32-encoded address of the + validator. + shares: + type: string + description: shares define the delegation shares received. + description: >- + Delegation represents the bond with tokens held by an account. It + is + + owned by one delegator, and is associated with the voting power of + one + + validator. + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: >- + DelegationResponse is equivalent to Delegation except that it contains + a + + balance in addition to shares which is more suitable for client + responses. + description: >- + QueryDelegationResponse is response type for the Query/Delegation RPC + method. + cosmos.staking.v1beta1.QueryDelegatorDelegationsResponse: + type: object + properties: + delegation_responses: + type: array + items: + type: object + properties: + delegation: type: object properties: - block_id_flag: + delegator_address: type: string - enum: - - BLOCK_ID_FLAG_UNKNOWN - - BLOCK_ID_FLAG_ABSENT - - BLOCK_ID_FLAG_COMMIT - - BLOCK_ID_FLAG_NIL - default: BLOCK_ID_FLAG_UNKNOWN - title: BlockIdFlag indicates which BlcokID the signature is for + description: >- + delegator_address is the bech32-encoded address of the + delegator. validator_address: type: string - format: byte - timestamp: + description: >- + validator_address is the bech32-encoded address of the + validator. + shares: type: string - format: date-time - signature: + description: shares define the delegation shares received. + description: >- + Delegation represents the bond with tokens held by an account. + It is + + owned by one delegator, and is associated with the voting power + of one + + validator. + balance: + type: object + properties: + denom: type: string - format: byte - description: CommitSig is a part of the Vote included in a Commit. - description: >- - Commit contains the evidence that a block was committed by a set of - validators. - tendermint.types.BlockID: + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. 
+ description: >- + DelegationResponse is equivalent to Delegation except that it + contains a + + balance in addition to shares which is more suitable for client + responses. + description: delegation_responses defines all the delegations' info of a delegator. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + QueryDelegatorDelegationsResponse is response type for the + Query/DelegatorDelegations RPC method. + cosmos.staking.v1beta1.QueryDelegatorUnbondingDelegationsResponse: type: object properties: - hash: - type: string - format: byte - part_set_header: + unbonding_responses: + type: array + items: + type: object + properties: + delegator_address: + type: string + description: >- + delegator_address is the bech32-encoded address of the + delegator. + validator_address: + type: string + description: >- + validator_address is the bech32-encoded address of the + validator. + entries: + type: array + items: + type: object + properties: + creation_height: + type: string + format: int64 + description: >- + creation_height is the height which the unbonding took + place. + completion_time: + type: string + format: date-time + description: completion_time is the unix time for unbonding completion. + initial_balance: + type: string + description: >- + initial_balance defines the tokens initially scheduled to + receive at completion. + balance: + type: string + description: balance defines the tokens to receive at completion. + unbonding_id: + type: string + format: uint64 + title: Incrementing id that uniquely identifies this entry + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + Strictly positive if this entry's unbonding has been + stopped by external modules + description: >- + UnbondingDelegationEntry defines an unbonding object with + relevant metadata. + description: entries are the unbonding delegation entries. + description: >- + UnbondingDelegation stores all of a single delegator's unbonding + bonds + + for a single validator in an time-ordered list. + pagination: + description: pagination defines the pagination in the response. type: object properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. total: - type: integer + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + QueryUnbondingDelegatorDelegationsResponse is response type for the + Query/UnbondingDelegatorDelegations RPC method. + cosmos.staking.v1beta1.QueryDelegatorValidatorResponse: + type: object + properties: + validator: + type: object + properties: + operator_address: + type: string + description: >- + operator_address defines the address of the validator's operator; + bech encoded in JSON. 
+ consensus_pubkey: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally set + up a type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might + be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. 
Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + jailed: + type: boolean + description: >- + jailed defined whether the validator has been jailed from bonded + status or not. + status: + description: status is the validator status (bonded/unbonding/unbonded). + type: string + enum: + - BOND_STATUS_UNSPECIFIED + - BOND_STATUS_UNBONDED + - BOND_STATUS_UNBONDING + - BOND_STATUS_BONDED + default: BOND_STATUS_UNSPECIFIED + tokens: + type: string + description: tokens define the delegated tokens (incl. self-delegation). + delegator_shares: + type: string + description: >- + delegator_shares defines total shares issued to a validator's + delegators. + description: + description: description defines the description terms for the validator. + type: object + properties: + moniker: + type: string + description: moniker defines a human-readable name for the validator. + identity: + type: string + description: >- + identity defines an optional identity signature (ex. UPort or + Keybase). + website: + type: string + description: website defines an optional website link. + security_contact: + type: string + description: >- + security_contact defines an optional email for security + contact. + details: + type: string + description: details define other optional details. + unbonding_height: + type: string format: int64 - hash: + description: >- + unbonding_height defines, if unbonding, the height at which this + validator has begun unbonding. + unbonding_time: type: string - format: byte - title: PartsetHeader - title: BlockID - tendermint.types.BlockIDFlag: - type: string - enum: - - BLOCK_ID_FLAG_UNKNOWN - - BLOCK_ID_FLAG_ABSENT - - BLOCK_ID_FLAG_COMMIT - - BLOCK_ID_FLAG_NIL - default: BLOCK_ID_FLAG_UNKNOWN - title: BlockIdFlag indicates which BlcokID the signature is for - tendermint.types.Commit: + format: date-time + description: >- + unbonding_time defines, if unbonding, the min time for the + validator to complete unbonding. + commission: + description: commission defines the commission parameters. + type: object + properties: + commission_rates: + description: >- + commission_rates defines the initial commission rates to be + used for creating a validator. + type: object + properties: + rate: + type: string + description: >- + rate is the commission rate charged to delegators, as a + fraction. + max_rate: + type: string + description: >- + max_rate defines the maximum commission rate which + validator can ever charge, as a fraction. + max_change_rate: + type: string + description: >- + max_change_rate defines the maximum daily increase of the + validator commission, as a fraction. + update_time: + type: string + format: date-time + description: update_time is the last time the commission rate was changed. + min_self_delegation: + type: string + description: >- + min_self_delegation is the validator's self declared minimum self + delegation. 
+ + + Since: cosmos-sdk 0.46 + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + strictly positive if this validator's unbonding has been stopped + by external modules + unbonding_ids: + type: array + items: + type: string + format: uint64 + title: >- + list of unbonding ids, each uniquely identifing an unbonding of + this validator + description: >- + Validator defines a validator, together with the total amount of the + + Validator's bond shares and their exchange rate to coins. Slashing + results in + + a decrease in the exchange rate, allowing correct calculation of + future + + undelegations without iterating over delegators. When coins are + delegated to + + this validator, the validator is credited with a delegation whose + number of + + bond shares is based on the amount of coins delegated divided by the + current + + exchange rate. Voting power can be calculated as total bonded shares + + multiplied by exchange rate. + description: |- + QueryDelegatorValidatorResponse response type for the + Query/DelegatorValidator RPC method. + cosmos.staking.v1beta1.QueryDelegatorValidatorsResponse: type: object properties: - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - signatures: + validators: type: array items: type: object properties: - block_id_flag: + operator_address: + type: string + description: >- + operator_address defines the address of the validator's + operator; bech encoded in JSON. + consensus_pubkey: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally set + up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. 
+ + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + jailed: + type: boolean + description: >- + jailed defined whether the validator has been jailed from bonded + status or not. + status: + description: status is the validator status (bonded/unbonding/unbonded). type: string enum: - - BLOCK_ID_FLAG_UNKNOWN - - BLOCK_ID_FLAG_ABSENT - - BLOCK_ID_FLAG_COMMIT - - BLOCK_ID_FLAG_NIL - default: BLOCK_ID_FLAG_UNKNOWN - title: BlockIdFlag indicates which BlcokID the signature is for - validator_address: + - BOND_STATUS_UNSPECIFIED + - BOND_STATUS_UNBONDED + - BOND_STATUS_UNBONDING + - BOND_STATUS_BONDED + default: BOND_STATUS_UNSPECIFIED + tokens: type: string - format: byte - timestamp: + description: tokens define the delegated tokens (incl. self-delegation). + delegator_shares: + type: string + description: >- + delegator_shares defines total shares issued to a validator's + delegators. + description: + description: description defines the description terms for the validator. + type: object + properties: + moniker: + type: string + description: moniker defines a human-readable name for the validator. + identity: + type: string + description: >- + identity defines an optional identity signature (ex. UPort + or Keybase). + website: + type: string + description: website defines an optional website link. + security_contact: + type: string + description: >- + security_contact defines an optional email for security + contact. + details: + type: string + description: details define other optional details. + unbonding_height: + type: string + format: int64 + description: >- + unbonding_height defines, if unbonding, the height at which this + validator has begun unbonding. 
+ unbonding_time: type: string format: date-time - signature: + description: >- + unbonding_time defines, if unbonding, the min time for the + validator to complete unbonding. + commission: + description: commission defines the commission parameters. + type: object + properties: + commission_rates: + description: >- + commission_rates defines the initial commission rates to be + used for creating a validator. + type: object + properties: + rate: + type: string + description: >- + rate is the commission rate charged to delegators, as a + fraction. + max_rate: + type: string + description: >- + max_rate defines the maximum commission rate which + validator can ever charge, as a fraction. + max_change_rate: + type: string + description: >- + max_change_rate defines the maximum daily increase of + the validator commission, as a fraction. + update_time: + type: string + format: date-time + description: >- + update_time is the last time the commission rate was + changed. + min_self_delegation: type: string - format: byte - description: CommitSig is a part of the Vote included in a Commit. - description: >- - Commit contains the evidence that a block was committed by a set of - validators. - tendermint.types.CommitSig: - type: object - properties: - block_id_flag: - type: string - enum: - - BLOCK_ID_FLAG_UNKNOWN - - BLOCK_ID_FLAG_ABSENT - - BLOCK_ID_FLAG_COMMIT - - BLOCK_ID_FLAG_NIL - default: BLOCK_ID_FLAG_UNKNOWN - title: BlockIdFlag indicates which BlcokID the signature is for - validator_address: - type: string - format: byte - timestamp: - type: string - format: date-time - signature: - type: string - format: byte - description: CommitSig is a part of the Vote included in a Commit. - tendermint.types.Data: - type: object - properties: - txs: - type: array - items: - type: string - format: byte - description: >- - Txs that will be applied by state @ block.Height+1. - - NOTE: not all txs here are valid. We're just agreeing on the order - first. + description: >- + min_self_delegation is the validator's self declared minimum + self delegation. - This means that block.AppHash does not include these txs. - title: Data contains the set of transactions included in the block - tendermint.types.DuplicateVoteEvidence: - type: object - properties: - vote_a: - type: object - properties: - type: - type: string - enum: - - SIGNED_MSG_TYPE_UNKNOWN - - SIGNED_MSG_TYPE_PREVOTE - - SIGNED_MSG_TYPE_PRECOMMIT - - SIGNED_MSG_TYPE_PROPOSAL - default: SIGNED_MSG_TYPE_UNKNOWN - description: |- - SignedMsgType is a type of signed message in the consensus. 
- - SIGNED_MSG_TYPE_PREVOTE: Votes - - SIGNED_MSG_TYPE_PROPOSAL: Proposals - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: + Since: cosmos-sdk 0.46 + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + strictly positive if this validator's unbonding has been stopped + by external modules + unbonding_ids: + type: array + items: type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - timestamp: - type: string - format: date-time - validator_address: - type: string - format: byte - validator_index: - type: integer - format: int32 - signature: - type: string - format: byte - description: >- - Vote represents a prevote, precommit, or commit vote from validators - for + format: uint64 + title: >- + list of unbonding ids, each uniquely identifing an unbonding of + this validator + description: >- + Validator defines a validator, together with the total amount of the - consensus. - vote_b: + Validator's bond shares and their exchange rate to coins. Slashing + results in + + a decrease in the exchange rate, allowing correct calculation of + future + + undelegations without iterating over delegators. When coins are + delegated to + + this validator, the validator is credited with a delegation whose + number of + + bond shares is based on the amount of coins delegated divided by the + current + + exchange rate. Voting power can be calculated as total bonded shares + + multiplied by exchange rate. + description: validators defines the validators' info of a delegator. + pagination: + description: pagination defines the pagination in the response. type: object properties: - type: - type: string - enum: - - SIGNED_MSG_TYPE_UNKNOWN - - SIGNED_MSG_TYPE_PREVOTE - - SIGNED_MSG_TYPE_PRECOMMIT - - SIGNED_MSG_TYPE_PROPOSAL - default: SIGNED_MSG_TYPE_UNKNOWN - description: |- - SignedMsgType is a type of signed message in the consensus. - - - SIGNED_MSG_TYPE_PREVOTE: Votes - - SIGNED_MSG_TYPE_PROPOSAL: Proposals - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - timestamp: - type: string - format: date-time - validator_address: + next_key: type: string format: byte - validator_index: - type: integer - format: int32 - signature: + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: type: string - format: byte - description: >- - Vote represents a prevote, precommit, or commit vote from validators - for + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - consensus. - total_voting_power: - type: string - format: int64 - validator_power: - type: string - format: int64 - timestamp: - type: string - format: date-time - description: >- - DuplicateVoteEvidence contains evidence of a validator signed two - conflicting votes. - tendermint.types.Evidence: + was set, its value is undefined otherwise + description: |- + QueryDelegatorValidatorsResponse is response type for the + Query/DelegatorValidators RPC method. 
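The pagination object documented for QueryDelegatorValidatorsResponse (next_key / total) follows the usual cosmos-sdk PageRequest/PageResponse convention: a client echoes the returned next_key back as pagination.key to fetch the next page, and stops when the key comes back empty. The Go sketch below is a minimal, non-authoritative illustration of paging through the corresponding REST endpoint; the endpoint path, query parameter names, node URL, and addresses are conventional assumptions for illustration and are not defined by this generated spec.

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
        "net/url"
    )

    // delegatorValidatorsPage mirrors only the fields of
    // QueryDelegatorValidatorsResponse that this sketch reads.
    type delegatorValidatorsPage struct {
        Validators []struct {
            OperatorAddress string `json:"operator_address"`
            Status          string `json:"status"`
            Tokens          string `json:"tokens"`
        } `json:"validators"`
        Pagination struct {
            NextKey string `json:"next_key"` // base64-encoded; empty on the last page
            Total   string `json:"total"`
        } `json:"pagination"`
    }

    // listDelegatorValidators walks every page by feeding next_key back in
    // as pagination.key until the server returns an empty key.
    func listDelegatorValidators(baseURL, delegator string) error {
        nextKey := ""
        for {
            q := url.Values{}
            q.Set("pagination.limit", "100")
            if nextKey != "" {
                q.Set("pagination.key", nextKey)
            }
            resp, err := http.Get(fmt.Sprintf(
                "%s/cosmos/staking/v1beta1/delegators/%s/validators?%s",
                baseURL, delegator, q.Encode()))
            if err != nil {
                return err
            }
            var page delegatorValidatorsPage
            err = json.NewDecoder(resp.Body).Decode(&page)
            resp.Body.Close()
            if err != nil {
                return err
            }
            for _, v := range page.Validators {
                fmt.Println(v.OperatorAddress, v.Status, v.Tokens)
            }
            if page.Pagination.NextKey == "" {
                return nil
            }
            nextKey = page.Pagination.NextKey
        }
    }

    func main() {
        // Placeholder node URL and delegator address; both are assumptions.
        if err := listDelegatorValidators("http://localhost:1317", "akash1delegator..."); err != nil {
            fmt.Println("query failed:", err)
        }
    }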
+ cosmos.staking.v1beta1.QueryHistoricalInfoResponse: type: object properties: - duplicate_vote_evidence: + hist: + description: hist defines the historical info at the given height. type: object properties: - vote_a: + header: type: object properties: - type: - type: string - enum: - - SIGNED_MSG_TYPE_UNKNOWN - - SIGNED_MSG_TYPE_PREVOTE - - SIGNED_MSG_TYPE_PRECOMMIT - - SIGNED_MSG_TYPE_PROPOSAL - default: SIGNED_MSG_TYPE_UNKNOWN - description: |- - SignedMsgType is a type of signed message in the consensus. + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block + in the blockchain, - - SIGNED_MSG_TYPE_PREVOTE: Votes - - SIGNED_MSG_TYPE_PROPOSAL: Proposals + including all blockchain data structures and the rules of the + application's + + state transition machine. + chain_id: + type: string height: type: string format: int64 - round: - type: integer - format: int32 - block_id: + time: + type: string + format: date-time + last_block_id: + title: prev block info type: object properties: hash: @@ -36847,1839 +59531,2178 @@ definitions: type: string format: byte title: PartsetHeader - title: BlockID - timestamp: + last_commit_hash: type: string - format: date-time - validator_address: + format: byte + title: hashes of block data + data_hash: type: string format: byte - validator_index: - type: integer - format: int32 - signature: + validators_hash: type: string format: byte - description: >- - Vote represents a prevote, precommit, or commit vote from - validators for - - consensus. - vote_b: - type: object - properties: - type: + title: hashes from the app output from the prev block + next_validators_hash: type: string - enum: - - SIGNED_MSG_TYPE_UNKNOWN - - SIGNED_MSG_TYPE_PREVOTE - - SIGNED_MSG_TYPE_PRECOMMIT - - SIGNED_MSG_TYPE_PROPOSAL - default: SIGNED_MSG_TYPE_UNKNOWN - description: |- - SignedMsgType is a type of signed message in the consensus. - - - SIGNED_MSG_TYPE_PREVOTE: Votes - - SIGNED_MSG_TYPE_PROPOSAL: Proposals - height: + format: byte + consensus_hash: type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - timestamp: + format: byte + app_hash: type: string - format: date-time - validator_address: + format: byte + last_results_hash: type: string format: byte - validator_index: - type: integer - format: int32 - signature: + evidence_hash: type: string format: byte - description: >- - Vote represents a prevote, precommit, or commit vote from - validators for - - consensus. - total_voting_power: - type: string - format: int64 - validator_power: - type: string - format: int64 - timestamp: - type: string - format: date-time - description: >- - DuplicateVoteEvidence contains evidence of a validator signed two - conflicting votes. 
- light_client_attack_evidence: - type: object - properties: - conflicting_block: - type: object - properties: - signed_header: - type: object - properties: - header: - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing - a block in the blockchain, - - including all blockchain data structures and the rules - of the application's - - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. - commit: - type: object - properties: - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - signatures: - type: array - items: - type: object - properties: - block_id_flag: - type: string - enum: - - BLOCK_ID_FLAG_UNKNOWN - - BLOCK_ID_FLAG_ABSENT - - BLOCK_ID_FLAG_COMMIT - - BLOCK_ID_FLAG_NIL - default: BLOCK_ID_FLAG_UNKNOWN - title: >- - BlockIdFlag indicates which BlcokID the - signature is for - validator_address: - type: string - format: byte - timestamp: - type: string - format: date-time - signature: - type: string - format: byte - description: >- - CommitSig is a part of the Vote included in a - Commit. - description: >- - Commit contains the evidence that a block was committed by - a set of validators. 
- validator_set: - type: object - properties: - validators: - type: array - items: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for use with - Tendermint Validators - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - proposer: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for use with - Tendermint Validators - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - total_voting_power: - type: string - format: int64 - common_height: - type: string - format: int64 - byzantine_validators: + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + valset: type: array items: type: object properties: - address: + operator_address: + type: string + description: >- + operator_address defines the address of the validator's + operator; bech encoded in JSON. + consensus_pubkey: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at + least + + one "/" character. The last segment of the URL's path + must represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in + a canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary + all types that they + + expect it to use in the context of Any. However, for + URLs which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in + the official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer + message along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values + in the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... 
+ if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by + default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the + last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield + type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with + an + + additional field `@type` which contains the type URL. + Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom + JSON + + representation, that representation will be embedded adding + a field + + `value` which holds the custom JSON in addition to the + `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + jailed: + type: boolean + description: >- + jailed defined whether the validator has been jailed from + bonded status or not. + status: + description: status is the validator status (bonded/unbonding/unbonded). type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for use with Tendermint - Validators - voting_power: + enum: + - BOND_STATUS_UNSPECIFIED + - BOND_STATUS_UNBONDED + - BOND_STATUS_UNBONDING + - BOND_STATUS_BONDED + default: BOND_STATUS_UNSPECIFIED + tokens: type: string - format: int64 - proposer_priority: + description: tokens define the delegated tokens (incl. self-delegation). + delegator_shares: type: string - format: int64 - total_voting_power: - type: string - format: int64 - timestamp: - type: string - format: date-time - description: >- - LightClientAttackEvidence contains evidence of a set of validators - attempting to mislead a light client. - tendermint.types.EvidenceList: - type: object - properties: - evidence: - type: array - items: - type: object - properties: - duplicate_vote_evidence: - type: object - properties: - vote_a: + description: >- + delegator_shares defines total shares issued to a + validator's delegators. + description: + description: description defines the description terms for the validator. type: object properties: - type: + moniker: type: string - enum: - - SIGNED_MSG_TYPE_UNKNOWN - - SIGNED_MSG_TYPE_PREVOTE - - SIGNED_MSG_TYPE_PRECOMMIT - - SIGNED_MSG_TYPE_PROPOSAL - default: SIGNED_MSG_TYPE_UNKNOWN - description: >- - SignedMsgType is a type of signed message in the - consensus. - - - SIGNED_MSG_TYPE_PREVOTE: Votes - - SIGNED_MSG_TYPE_PROPOSAL: Proposals - height: + description: moniker defines a human-readable name for the validator. 
+ identity: type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - timestamp: + description: >- + identity defines an optional identity signature (ex. + UPort or Keybase). + website: type: string - format: date-time - validator_address: + description: website defines an optional website link. + security_contact: type: string - format: byte - validator_index: - type: integer - format: int32 - signature: + description: >- + security_contact defines an optional email for security + contact. + details: type: string - format: byte + description: details define other optional details. + unbonding_height: + type: string + format: int64 description: >- - Vote represents a prevote, precommit, or commit vote from - validators for - - consensus. - vote_b: + unbonding_height defines, if unbonding, the height at which + this validator has begun unbonding. + unbonding_time: + type: string + format: date-time + description: >- + unbonding_time defines, if unbonding, the min time for the + validator to complete unbonding. + commission: + description: commission defines the commission parameters. type: object properties: - type: - type: string - enum: - - SIGNED_MSG_TYPE_UNKNOWN - - SIGNED_MSG_TYPE_PREVOTE - - SIGNED_MSG_TYPE_PRECOMMIT - - SIGNED_MSG_TYPE_PROPOSAL - default: SIGNED_MSG_TYPE_UNKNOWN + commission_rates: description: >- - SignedMsgType is a type of signed message in the - consensus. - - - SIGNED_MSG_TYPE_PREVOTE: Votes - - SIGNED_MSG_TYPE_PROPOSAL: Proposals - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: + commission_rates defines the initial commission rates to + be used for creating a validator. type: object properties: - hash: + rate: type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - timestamp: + description: >- + rate is the commission rate charged to delegators, + as a fraction. + max_rate: + type: string + description: >- + max_rate defines the maximum commission rate which + validator can ever charge, as a fraction. + max_change_rate: + type: string + description: >- + max_change_rate defines the maximum daily increase + of the validator commission, as a fraction. + update_time: type: string format: date-time - validator_address: - type: string - format: byte - validator_index: - type: integer - format: int32 - signature: - type: string - format: byte + description: >- + update_time is the last time the commission rate was + changed. + min_self_delegation: + type: string description: >- - Vote represents a prevote, precommit, or commit vote from - validators for + min_self_delegation is the validator's self declared minimum + self delegation. - consensus. 
- total_voting_power: - type: string - format: int64 - validator_power: + + Since: cosmos-sdk 0.46 + unbonding_on_hold_ref_count: type: string format: int64 - timestamp: - type: string - format: date-time + title: >- + strictly positive if this validator's unbonding has been + stopped by external modules + unbonding_ids: + type: array + items: + type: string + format: uint64 + title: >- + list of unbonding ids, each uniquely identifing an unbonding + of this validator description: >- - DuplicateVoteEvidence contains evidence of a validator signed - two conflicting votes. - light_client_attack_evidence: - type: object - properties: - conflicting_block: - type: object - properties: - signed_header: - type: object - properties: - header: - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for - processing a block in the blockchain, + Validator defines a validator, together with the total amount of + the - including all blockchain data structures and the - rules of the application's + Validator's bond shares and their exchange rate to coins. + Slashing results in - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: >- - Header defines the structure of a Tendermint block - header. - commit: - type: object - properties: - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - signatures: - type: array - items: - type: object - properties: - block_id_flag: - type: string - enum: - - BLOCK_ID_FLAG_UNKNOWN - - BLOCK_ID_FLAG_ABSENT - - BLOCK_ID_FLAG_COMMIT - - BLOCK_ID_FLAG_NIL - default: BLOCK_ID_FLAG_UNKNOWN - title: >- - BlockIdFlag indicates which BlcokID the - signature is for - validator_address: - type: string - format: byte - timestamp: - type: string - format: date-time - signature: - type: string - format: byte - description: >- - CommitSig is a part of the Vote included in a - Commit. - description: >- - Commit contains the evidence that a block was - committed by a set of validators. 
- validator_set: - type: object - properties: - validators: - type: array - items: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for use - with Tendermint Validators - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - proposer: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for use - with Tendermint Validators - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - total_voting_power: - type: string - format: int64 - common_height: + a decrease in the exchange rate, allowing correct calculation of + future + + undelegations without iterating over delegators. When coins are + delegated to + + this validator, the validator is credited with a delegation + whose number of + + bond shares is based on the amount of coins delegated divided by + the current + + exchange rate. Voting power can be calculated as total bonded + shares + + multiplied by exchange rate. + description: >- + QueryHistoricalInfoResponse is response type for the Query/HistoricalInfo + RPC + + method. + cosmos.staking.v1beta1.QueryParamsResponse: + type: object + properties: + params: + description: params holds all the parameters of this module. + type: object + properties: + unbonding_time: + type: string + description: unbonding_time is the time duration of unbonding. + max_validators: + type: integer + format: int64 + description: max_validators is the maximum number of validators. + max_entries: + type: integer + format: int64 + description: >- + max_entries is the max entries for either unbonding delegation or + redelegation (per pair/trio). + historical_entries: + type: integer + format: int64 + description: historical_entries is the number of historical entries to persist. + bond_denom: + type: string + description: bond_denom defines the bondable coin denomination. + min_commission_rate: + type: string + title: >- + min_commission_rate is the chain-wide minimum commission rate that + a validator can charge their delegators + description: QueryParamsResponse is response type for the Query/Params RPC method. + cosmos.staking.v1beta1.QueryPoolResponse: + type: object + properties: + pool: + description: pool defines the pool info. + type: object + properties: + not_bonded_tokens: + type: string + bonded_tokens: + type: string + description: QueryPoolResponse is response type for the Query/Pool RPC method. + cosmos.staking.v1beta1.QueryRedelegationsResponse: + type: object + properties: + redelegation_responses: + type: array + items: + type: object + properties: + redelegation: + type: object + properties: + delegator_address: type: string - format: int64 - byzantine_validators: + description: >- + delegator_address is the bech32-encoded address of the + delegator. + validator_src_address: + type: string + description: >- + validator_src_address is the validator redelegation source + operator address. + validator_dst_address: + type: string + description: >- + validator_dst_address is the validator redelegation + destination operator address. 
+ entries: type: array items: type: object properties: - address: + creation_height: type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte + format: int64 + description: >- + creation_height defines the height which the + redelegation took place. + completion_time: + type: string + format: date-time + description: >- + completion_time defines the unix time for redelegation + completion. + initial_balance: + type: string + description: >- + initial_balance defines the initial balance when + redelegation started. + shares_dst: + type: string + description: >- + shares_dst is the amount of destination-validator + shares created by redelegation. + unbonding_id: + type: string + format: uint64 + title: Incrementing id that uniquely identifies this entry + unbonding_on_hold_ref_count: + type: string + format: int64 title: >- - PublicKey defines the keys available for use with - Tendermint Validators - voting_power: + Strictly positive if this entry's unbonding has been + stopped by external modules + description: >- + RedelegationEntry defines a redelegation object with + relevant metadata. + description: entries are the redelegation entries. + description: >- + Redelegation contains the list of a particular delegator's + redelegating bonds + + from a particular source validator to a particular destination + validator. + entries: + type: array + items: + type: object + properties: + redelegation_entry: + type: object + properties: + creation_height: type: string format: int64 - proposer_priority: + description: >- + creation_height defines the height which the + redelegation took place. + completion_time: + type: string + format: date-time + description: >- + completion_time defines the unix time for redelegation + completion. + initial_balance: + type: string + description: >- + initial_balance defines the initial balance when + redelegation started. + shares_dst: + type: string + description: >- + shares_dst is the amount of destination-validator + shares created by redelegation. + unbonding_id: + type: string + format: uint64 + title: Incrementing id that uniquely identifies this entry + unbonding_on_hold_ref_count: type: string format: int64 - total_voting_power: + title: >- + Strictly positive if this entry's unbonding has been + stopped by external modules + description: >- + RedelegationEntry defines a redelegation object with + relevant metadata. + balance: + type: string + description: >- + RedelegationEntryResponse is equivalent to a RedelegationEntry + except that it + + contains a balance in addition to shares which is more + suitable for client + + responses. + description: >- + RedelegationResponse is equivalent to a Redelegation except that its + entries + + contain a balance in addition to shares which is more suitable for + client + + responses. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryRedelegationsResponse is response type for the Query/Redelegations + RPC + + method. 
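Each element of redelegation_responses pairs the on-chain RedelegationEntry (nested under redelegation_entry) with a client-facing balance, as described above. The following Go sketch decodes JSON of that shape into matching structs; it is only an illustration of the documented layout, and the sample addresses and amounts are invented placeholders.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // Structs trimmed to the fields this example uses, mirroring the
    // RedelegationResponse shape documented above.
    type redelegationEntry struct {
        CreationHeight string `json:"creation_height"`
        CompletionTime string `json:"completion_time"`
        InitialBalance string `json:"initial_balance"`
        SharesDst      string `json:"shares_dst"`
    }

    type redelegationResponse struct {
        Redelegation struct {
            DelegatorAddress    string `json:"delegator_address"`
            ValidatorSrcAddress string `json:"validator_src_address"`
            ValidatorDstAddress string `json:"validator_dst_address"`
        } `json:"redelegation"`
        Entries []struct {
            RedelegationEntry redelegationEntry `json:"redelegation_entry"`
            Balance           string            `json:"balance"`
        } `json:"entries"`
    }

    func main() {
        // Invented sample payload in the documented shape.
        raw := []byte(`{
          "redelegation": {
            "delegator_address": "akash1delegator...",
            "validator_src_address": "akashvaloper1src...",
            "validator_dst_address": "akashvaloper1dst..."
          },
          "entries": [
            {
              "redelegation_entry": {
                "creation_height": "100",
                "completion_time": "2024-01-01T00:00:00Z",
                "initial_balance": "1000",
                "shares_dst": "1000.000000000000000000"
              },
              "balance": "1000"
            }
          ]
        }`)

        var r redelegationResponse
        if err := json.Unmarshal(raw, &r); err != nil {
            panic(err)
        }
        for _, e := range r.Entries {
            fmt.Printf("%s -> %s: balance %s, completes %s\n",
                r.Redelegation.ValidatorSrcAddress,
                r.Redelegation.ValidatorDstAddress,
                e.Balance, e.RedelegationEntry.CompletionTime)
        }
    }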
+ cosmos.staking.v1beta1.QueryUnbondingDelegationResponse: + type: object + properties: + unbond: + type: object + properties: + delegator_address: + type: string + description: delegator_address is the bech32-encoded address of the delegator. + validator_address: + type: string + description: validator_address is the bech32-encoded address of the validator. + entries: + type: array + items: + type: object + properties: + creation_height: type: string format: int64 - timestamp: + description: >- + creation_height is the height which the unbonding took + place. + completion_time: type: string format: date-time + description: completion_time is the unix time for unbonding completion. + initial_balance: + type: string + description: >- + initial_balance defines the tokens initially scheduled to + receive at completion. + balance: + type: string + description: balance defines the tokens to receive at completion. + unbonding_id: + type: string + format: uint64 + title: Incrementing id that uniquely identifies this entry + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + Strictly positive if this entry's unbonding has been stopped + by external modules description: >- - LightClientAttackEvidence contains evidence of a set of - validators attempting to mislead a light client. - tendermint.types.Header: + UnbondingDelegationEntry defines an unbonding object with + relevant metadata. + description: entries are the unbonding delegation entries. + description: |- + UnbondingDelegation stores all of a single delegator's unbonding bonds + for a single validator in an time-ordered list. + description: |- + QueryDelegationResponse is response type for the Query/UnbondingDelegation + RPC method. + cosmos.staking.v1beta1.QueryValidatorDelegationsResponse: type: object properties: - version: - title: basic block info + delegation_responses: + type: array + items: + type: object + properties: + delegation: + type: object + properties: + delegator_address: + type: string + description: >- + delegator_address is the bech32-encoded address of the + delegator. + validator_address: + type: string + description: >- + validator_address is the bech32-encoded address of the + validator. + shares: + type: string + description: shares define the delegation shares received. + description: >- + Delegation represents the bond with tokens held by an account. + It is + + owned by one delegator, and is associated with the voting power + of one + + validator. + balance: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + description: >- + DelegationResponse is equivalent to Delegation except that it + contains a + + balance in addition to shares which is more suitable for client + responses. + pagination: + description: pagination defines the pagination in the response. type: object properties: - block: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. 
+ total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + title: |- + QueryValidatorDelegationsResponse is response type for the + Query/ValidatorDelegations RPC method + cosmos.staking.v1beta1.QueryValidatorResponse: + type: object + properties: + validator: + type: object + properties: + operator_address: + type: string + description: >- + operator_address defines the address of the validator's operator; + bech encoded in JSON. + consensus_pubkey: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally set + up a type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might + be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". 
+ + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + jailed: + type: boolean + description: >- + jailed defined whether the validator has been jailed from bonded + status or not. + status: + description: status is the validator status (bonded/unbonding/unbonded). + type: string + enum: + - BOND_STATUS_UNSPECIFIED + - BOND_STATUS_UNBONDED + - BOND_STATUS_UNBONDING + - BOND_STATUS_BONDED + default: BOND_STATUS_UNSPECIFIED + tokens: + type: string + description: tokens define the delegated tokens (incl. self-delegation). + delegator_shares: + type: string + description: >- + delegator_shares defines total shares issued to a validator's + delegators. + description: + description: description defines the description terms for the validator. + type: object + properties: + moniker: + type: string + description: moniker defines a human-readable name for the validator. + identity: + type: string + description: >- + identity defines an optional identity signature (ex. UPort or + Keybase). + website: + type: string + description: website defines an optional website link. + security_contact: + type: string + description: >- + security_contact defines an optional email for security + contact. + details: + type: string + description: details define other optional details. + unbonding_height: + type: string + format: int64 + description: >- + unbonding_height defines, if unbonding, the height at which this + validator has begun unbonding. + unbonding_time: + type: string + format: date-time + description: >- + unbonding_time defines, if unbonding, the min time for the + validator to complete unbonding. + commission: + description: commission defines the commission parameters. + type: object + properties: + commission_rates: + description: >- + commission_rates defines the initial commission rates to be + used for creating a validator. + type: object + properties: + rate: + type: string + description: >- + rate is the commission rate charged to delegators, as a + fraction. + max_rate: + type: string + description: >- + max_rate defines the maximum commission rate which + validator can ever charge, as a fraction. + max_change_rate: + type: string + description: >- + max_change_rate defines the maximum daily increase of the + validator commission, as a fraction. + update_time: + type: string + format: date-time + description: update_time is the last time the commission rate was changed. + min_self_delegation: type: string - format: uint64 - app: + description: >- + min_self_delegation is the validator's self declared minimum self + delegation. 
+ + + Since: cosmos-sdk 0.46 + unbonding_on_hold_ref_count: type: string - format: uint64 + format: int64 + title: >- + strictly positive if this validator's unbonding has been stopped + by external modules + unbonding_ids: + type: array + items: + type: string + format: uint64 + title: >- + list of unbonding ids, each uniquely identifing an unbonding of + this validator description: >- - Consensus captures the consensus rules for processing a block in the - blockchain, + Validator defines a validator, together with the total amount of the - including all blockchain data structures and the rules of the - application's + Validator's bond shares and their exchange rate to coins. Slashing + results in - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. - tendermint.types.LightBlock: + a decrease in the exchange rate, allowing correct calculation of + future + + undelegations without iterating over delegators. When coins are + delegated to + + this validator, the validator is credited with a delegation whose + number of + + bond shares is based on the amount of coins delegated divided by the + current + + exchange rate. Voting power can be calculated as total bonded shares + + multiplied by exchange rate. + title: QueryValidatorResponse is response type for the Query/Validator RPC method + cosmos.staking.v1beta1.QueryValidatorUnbondingDelegationsResponse: type: object properties: - signed_header: - type: object - properties: - header: - type: object - properties: - version: - title: basic block info + unbonding_responses: + type: array + items: + type: object + properties: + delegator_address: + type: string + description: >- + delegator_address is the bech32-encoded address of the + delegator. + validator_address: + type: string + description: >- + validator_address is the bech32-encoded address of the + validator. + entries: + type: array + items: type: object properties: - block: + creation_height: type: string - format: uint64 - app: + format: int64 + description: >- + creation_height is the height which the unbonding took + place. + completion_time: + type: string + format: date-time + description: completion_time is the unix time for unbonding completion. + initial_balance: + type: string + description: >- + initial_balance defines the tokens initially scheduled to + receive at completion. + balance: + type: string + description: balance defines the tokens to receive at completion. 
+ unbonding_id: type: string format: uint64 + title: Incrementing id that uniquely identifies this entry + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + Strictly positive if this entry's unbonding has been + stopped by external modules description: >- - Consensus captures the consensus rules for processing a block - in the blockchain, - - including all blockchain data structures and the rules of the - application's + UnbondingDelegationEntry defines an unbonding object with + relevant metadata. + description: entries are the unbonding delegation entries. + description: >- + UnbondingDelegation stores all of a single delegator's unbonding + bonds - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. - commit: - type: object - properties: - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - signatures: - type: array - items: - type: object - properties: - block_id_flag: - type: string - enum: - - BLOCK_ID_FLAG_UNKNOWN - - BLOCK_ID_FLAG_ABSENT - - BLOCK_ID_FLAG_COMMIT - - BLOCK_ID_FLAG_NIL - default: BLOCK_ID_FLAG_UNKNOWN - title: BlockIdFlag indicates which BlcokID the signature is for - validator_address: - type: string - format: byte - timestamp: - type: string - format: date-time - signature: - type: string - format: byte - description: CommitSig is a part of the Vote included in a Commit. - description: >- - Commit contains the evidence that a block was committed by a set - of validators. - validator_set: + for a single validator in an time-ordered list. + pagination: + description: pagination defines the pagination in the response. type: object properties: - validators: - type: array - items: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: |- + QueryValidatorUnbondingDelegationsResponse is response type for the + Query/ValidatorUnbondingDelegations RPC method. 
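The unbonding entries above carry amounts as decimal strings (initial_balance, balance) rather than JSON numbers, so clients typically parse them with arbitrary-precision integers. The Go sketch below is a non-authoritative illustration that sums the remaining balance across unbonding delegations shaped like QueryValidatorUnbondingDelegationsResponse; the sample data is invented.

    package main

    import (
        "fmt"
        "math/big"
    )

    // unbondingEntry keeps only the balance field used by this sketch;
    // amounts are integer strings in the base denomination.
    type unbondingEntry struct {
        Balance string `json:"balance"`
    }

    type unbondingDelegation struct {
        DelegatorAddress string           `json:"delegator_address"`
        Entries          []unbondingEntry `json:"entries"`
    }

    // totalUnbonding sums entry balances across all unbonding delegations.
    func totalUnbonding(unbonds []unbondingDelegation) (*big.Int, error) {
        total := new(big.Int)
        for _, u := range unbonds {
            for _, e := range u.Entries {
                amt, ok := new(big.Int).SetString(e.Balance, 10)
                if !ok {
                    return nil, fmt.Errorf("bad balance %q for %s", e.Balance, u.DelegatorAddress)
                }
                total.Add(total, amt)
            }
        }
        return total, nil
    }

    func main() {
        // Invented sample data in the documented shape.
        unbonds := []unbondingDelegation{
            {DelegatorAddress: "akash1delegator...", Entries: []unbondingEntry{
                {Balance: "2500000"}, {Balance: "750000"},
            }},
        }
        total, err := totalUnbonding(unbonds)
        if err != nil {
            panic(err)
        }
        fmt.Println("still unbonding:", total.String()) // prints 3250000
    }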
+ cosmos.staking.v1beta1.QueryValidatorsResponse: + type: object + properties: + validators: + type: array + items: + type: object + properties: + operator_address: + type: string + description: >- + operator_address defines the address of the validator's + operator; bech encoded in JSON. + consensus_pubkey: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally set + up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. 
Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + jailed: + type: boolean + description: >- + jailed defined whether the validator has been jailed from bonded + status or not. + status: + description: status is the validator status (bonded/unbonding/unbonded). + type: string + enum: + - BOND_STATUS_UNSPECIFIED + - BOND_STATUS_UNBONDED + - BOND_STATUS_UNBONDING + - BOND_STATUS_BONDED + default: BOND_STATUS_UNSPECIFIED + tokens: + type: string + description: tokens define the delegated tokens (incl. self-delegation). + delegator_shares: + type: string + description: >- + delegator_shares defines total shares issued to a validator's + delegators. + description: + description: description defines the description terms for the validator. type: object properties: - address: + moniker: type: string - format: byte - pub_key: + description: moniker defines a human-readable name for the validator. + identity: + type: string + description: >- + identity defines an optional identity signature (ex. UPort + or Keybase). + website: + type: string + description: website defines an optional website link. + security_contact: + type: string + description: >- + security_contact defines an optional email for security + contact. + details: + type: string + description: details define other optional details. + unbonding_height: + type: string + format: int64 + description: >- + unbonding_height defines, if unbonding, the height at which this + validator has begun unbonding. + unbonding_time: + type: string + format: date-time + description: >- + unbonding_time defines, if unbonding, the min time for the + validator to complete unbonding. + commission: + description: commission defines the commission parameters. + type: object + properties: + commission_rates: + description: >- + commission_rates defines the initial commission rates to be + used for creating a validator. type: object properties: - ed25519: + rate: type: string - format: byte - secp256k1: + description: >- + rate is the commission rate charged to delegators, as a + fraction. + max_rate: type: string - format: byte - title: >- - PublicKey defines the keys available for use with Tendermint - Validators - voting_power: - type: string - format: int64 - proposer_priority: + description: >- + max_rate defines the maximum commission rate which + validator can ever charge, as a fraction. + max_change_rate: + type: string + description: >- + max_change_rate defines the maximum daily increase of + the validator commission, as a fraction. + update_time: type: string - format: int64 - proposer: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for use with Tendermint - Validators - voting_power: - type: string - format: int64 - proposer_priority: + format: date-time + description: >- + update_time is the last time the commission rate was + changed. 
+ min_self_delegation: + type: string + description: >- + min_self_delegation is the validator's self declared minimum + self delegation. + + + Since: cosmos-sdk 0.46 + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + strictly positive if this validator's unbonding has been stopped + by external modules + unbonding_ids: + type: array + items: type: string - format: int64 - total_voting_power: - type: string - format: int64 - tendermint.types.LightClientAttackEvidence: - type: object - properties: - conflicting_block: + format: uint64 + title: >- + list of unbonding ids, each uniquely identifing an unbonding of + this validator + description: >- + Validator defines a validator, together with the total amount of the + + Validator's bond shares and their exchange rate to coins. Slashing + results in + + a decrease in the exchange rate, allowing correct calculation of + future + + undelegations without iterating over delegators. When coins are + delegated to + + this validator, the validator is credited with a delegation whose + number of + + bond shares is based on the amount of coins delegated divided by the + current + + exchange rate. Voting power can be calculated as total bonded shares + + multiplied by exchange rate. + description: validators contains all the queried validators. + pagination: + description: pagination defines the pagination in the response. type: object properties: - signed_header: - type: object - properties: - header: - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a - block in the blockchain, - - including all blockchain data structures and the rules of - the application's + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. 
- commit: - type: object - properties: - height: - type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - signatures: - type: array - items: - type: object - properties: - block_id_flag: - type: string - enum: - - BLOCK_ID_FLAG_UNKNOWN - - BLOCK_ID_FLAG_ABSENT - - BLOCK_ID_FLAG_COMMIT - - BLOCK_ID_FLAG_NIL - default: BLOCK_ID_FLAG_UNKNOWN - title: >- - BlockIdFlag indicates which BlcokID the signature is - for - validator_address: - type: string - format: byte - timestamp: - type: string - format: date-time - signature: - type: string - format: byte - description: CommitSig is a part of the Vote included in a Commit. - description: >- - Commit contains the evidence that a block was committed by a - set of validators. - validator_set: - type: object - properties: - validators: - type: array - items: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for use with - Tendermint Validators - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - proposer: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for use with - Tendermint Validators - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - total_voting_power: - type: string - format: int64 - common_height: + was set, its value is undefined otherwise + title: >- + QueryValidatorsResponse is response type for the Query/Validators RPC + method + cosmos.staking.v1beta1.Redelegation: + type: object + properties: + delegator_address: type: string - format: int64 - byzantine_validators: + description: delegator_address is the bech32-encoded address of the delegator. + validator_src_address: + type: string + description: >- + validator_src_address is the validator redelegation source operator + address. + validator_dst_address: + type: string + description: >- + validator_dst_address is the validator redelegation destination + operator address. + entries: type: array items: type: object properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for use with Tendermint - Validators - voting_power: + creation_height: type: string format: int64 - proposer_priority: + description: >- + creation_height defines the height which the redelegation took + place. + completion_time: + type: string + format: date-time + description: >- + completion_time defines the unix time for redelegation + completion. + initial_balance: + type: string + description: >- + initial_balance defines the initial balance when redelegation + started. + shares_dst: + type: string + description: >- + shares_dst is the amount of destination-validator shares created + by redelegation. 
+ unbonding_id: + type: string + format: uint64 + title: Incrementing id that uniquely identifies this entry + unbonding_on_hold_ref_count: type: string format: int64 - total_voting_power: - type: string - format: int64 - timestamp: - type: string - format: date-time + title: >- + Strictly positive if this entry's unbonding has been stopped by + external modules + description: >- + RedelegationEntry defines a redelegation object with relevant + metadata. + description: entries are the redelegation entries. description: >- - LightClientAttackEvidence contains evidence of a set of validators - attempting to mislead a light client. - tendermint.types.PartSetHeader: + Redelegation contains the list of a particular delegator's redelegating + bonds + + from a particular source validator to a particular destination validator. + cosmos.staking.v1beta1.RedelegationEntry: type: object properties: - total: - type: integer + creation_height: + type: string format: int64 - hash: + description: creation_height defines the height which the redelegation took place. + completion_time: type: string - format: byte - title: PartsetHeader - tendermint.types.SignedHeader: - type: object - properties: - header: - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a block in - the blockchain, - - including all blockchain data structures and the rules of the - application's - - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: + format: date-time + description: completion_time defines the unix time for redelegation completion. + initial_balance: + type: string + description: initial_balance defines the initial balance when redelegation started. + shares_dst: + type: string + description: >- + shares_dst is the amount of destination-validator shares created by + redelegation. + unbonding_id: + type: string + format: uint64 + title: Incrementing id that uniquely identifies this entry + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + Strictly positive if this entry's unbonding has been stopped by + external modules + description: RedelegationEntry defines a redelegation object with relevant metadata. + cosmos.staking.v1beta1.RedelegationEntryResponse: + type: object + properties: + redelegation_entry: + type: object + properties: + creation_height: type: string - format: byte - consensus_hash: + format: int64 + description: >- + creation_height defines the height which the redelegation took + place. + completion_time: type: string - format: byte - app_hash: + format: date-time + description: completion_time defines the unix time for redelegation completion. + initial_balance: type: string - format: byte - last_results_hash: + description: >- + initial_balance defines the initial balance when redelegation + started. 
+ shares_dst: type: string - format: byte - evidence_hash: + description: >- + shares_dst is the amount of destination-validator shares created + by redelegation. + unbonding_id: type: string - format: byte - title: consensus info - proposer_address: + format: uint64 + title: Incrementing id that uniquely identifies this entry + unbonding_on_hold_ref_count: type: string - format: byte - description: Header defines the structure of a Tendermint block header. - commit: + format: int64 + title: >- + Strictly positive if this entry's unbonding has been stopped by + external modules + description: >- + RedelegationEntry defines a redelegation object with relevant + metadata. + balance: + type: string + description: >- + RedelegationEntryResponse is equivalent to a RedelegationEntry except that + it + + contains a balance in addition to shares which is more suitable for client + + responses. + cosmos.staking.v1beta1.RedelegationResponse: + type: object + properties: + redelegation: type: object properties: - height: + delegator_address: type: string - format: int64 - round: - type: integer - format: int32 - block_id: - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - title: BlockID - signatures: + description: delegator_address is the bech32-encoded address of the delegator. + validator_src_address: + type: string + description: >- + validator_src_address is the validator redelegation source + operator address. + validator_dst_address: + type: string + description: >- + validator_dst_address is the validator redelegation destination + operator address. + entries: type: array items: type: object properties: - block_id_flag: - type: string - enum: - - BLOCK_ID_FLAG_UNKNOWN - - BLOCK_ID_FLAG_ABSENT - - BLOCK_ID_FLAG_COMMIT - - BLOCK_ID_FLAG_NIL - default: BLOCK_ID_FLAG_UNKNOWN - title: BlockIdFlag indicates which BlcokID the signature is for - validator_address: + creation_height: type: string - format: byte - timestamp: + format: int64 + description: >- + creation_height defines the height which the redelegation + took place. + completion_time: type: string format: date-time - signature: + description: >- + completion_time defines the unix time for redelegation + completion. + initial_balance: type: string - format: byte - description: CommitSig is a part of the Vote included in a Commit. + description: >- + initial_balance defines the initial balance when + redelegation started. + shares_dst: + type: string + description: >- + shares_dst is the amount of destination-validator shares + created by redelegation. + unbonding_id: + type: string + format: uint64 + title: Incrementing id that uniquely identifies this entry + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + Strictly positive if this entry's unbonding has been stopped + by external modules + description: >- + RedelegationEntry defines a redelegation object with relevant + metadata. + description: entries are the redelegation entries. description: >- - Commit contains the evidence that a block was committed by a set of - validators. - tendermint.types.SignedMsgType: - type: string - enum: - - SIGNED_MSG_TYPE_UNKNOWN - - SIGNED_MSG_TYPE_PREVOTE - - SIGNED_MSG_TYPE_PRECOMMIT - - SIGNED_MSG_TYPE_PROPOSAL - default: SIGNED_MSG_TYPE_UNKNOWN - description: |- - SignedMsgType is a type of signed message in the consensus. 
+ Redelegation contains the list of a particular delegator's + redelegating bonds - - SIGNED_MSG_TYPE_PREVOTE: Votes - - SIGNED_MSG_TYPE_PROPOSAL: Proposals - tendermint.types.Validator: - type: object - properties: - address: - type: string - format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for use with Tendermint - Validators - voting_power: - type: string - format: int64 - proposer_priority: - type: string - format: int64 - tendermint.types.ValidatorSet: - type: object - properties: - validators: + from a particular source validator to a particular destination + validator. + entries: type: array items: type: object properties: - address: - type: string - format: byte - pub_key: + redelegation_entry: type: object properties: - ed25519: + creation_height: type: string - format: byte - secp256k1: + format: int64 + description: >- + creation_height defines the height which the redelegation + took place. + completion_time: type: string - format: byte - title: >- - PublicKey defines the keys available for use with Tendermint - Validators - voting_power: + format: date-time + description: >- + completion_time defines the unix time for redelegation + completion. + initial_balance: + type: string + description: >- + initial_balance defines the initial balance when + redelegation started. + shares_dst: + type: string + description: >- + shares_dst is the amount of destination-validator shares + created by redelegation. + unbonding_id: + type: string + format: uint64 + title: Incrementing id that uniquely identifies this entry + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + Strictly positive if this entry's unbonding has been stopped + by external modules + description: >- + RedelegationEntry defines a redelegation object with relevant + metadata. + balance: + type: string + description: >- + RedelegationEntryResponse is equivalent to a RedelegationEntry + except that it + + contains a balance in addition to shares which is more suitable for + client + + responses. + description: >- + RedelegationResponse is equivalent to a Redelegation except that its + entries + + contain a balance in addition to shares which is more suitable for client + + responses. + cosmos.staking.v1beta1.UnbondingDelegation: + type: object + properties: + delegator_address: + type: string + description: delegator_address is the bech32-encoded address of the delegator. + validator_address: + type: string + description: validator_address is the bech32-encoded address of the validator. + entries: + type: array + items: + type: object + properties: + creation_height: type: string format: int64 - proposer_priority: + description: creation_height is the height which the unbonding took place. + completion_time: + type: string + format: date-time + description: completion_time is the unix time for unbonding completion. + initial_balance: + type: string + description: >- + initial_balance defines the tokens initially scheduled to + receive at completion. + balance: + type: string + description: balance defines the tokens to receive at completion. 
+ unbonding_id: + type: string + format: uint64 + title: Incrementing id that uniquely identifies this entry + unbonding_on_hold_ref_count: type: string format: int64 - proposer: + title: >- + Strictly positive if this entry's unbonding has been stopped by + external modules + description: >- + UnbondingDelegationEntry defines an unbonding object with relevant + metadata. + description: entries are the unbonding delegation entries. + description: |- + UnbondingDelegation stores all of a single delegator's unbonding bonds + for a single validator in an time-ordered list. + cosmos.staking.v1beta1.UnbondingDelegationEntry: + type: object + properties: + creation_height: + type: string + format: int64 + description: creation_height is the height which the unbonding took place. + completion_time: + type: string + format: date-time + description: completion_time is the unix time for unbonding completion. + initial_balance: + type: string + description: >- + initial_balance defines the tokens initially scheduled to receive at + completion. + balance: + type: string + description: balance defines the tokens to receive at completion. + unbonding_id: + type: string + format: uint64 + title: Incrementing id that uniquely identifies this entry + unbonding_on_hold_ref_count: + type: string + format: int64 + title: >- + Strictly positive if this entry's unbonding has been stopped by + external modules + description: >- + UnbondingDelegationEntry defines an unbonding object with relevant + metadata. + cosmos.staking.v1beta1.Validator: + type: object + properties: + operator_address: + type: string + description: >- + operator_address defines the address of the validator's operator; bech + encoded in JSON. + consensus_pubkey: type: object properties: - address: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up a + type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might be + + used with implementation specific semantics. 
+ value: type: string format: byte - pub_key: - type: object - properties: - ed25519: - type: string - format: byte - secp256k1: - type: string - format: byte - title: >- - PublicKey defines the keys available for use with Tendermint - Validators - voting_power: + description: >- + Must be a valid serialized protocol buffer of the above specified + type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + jailed: + type: boolean + description: >- + jailed defined whether the validator has been jailed from bonded + status or not. + status: + description: status is the validator status (bonded/unbonding/unbonded). + type: string + enum: + - BOND_STATUS_UNSPECIFIED + - BOND_STATUS_UNBONDED + - BOND_STATUS_UNBONDING + - BOND_STATUS_BONDED + default: BOND_STATUS_UNSPECIFIED + tokens: + type: string + description: tokens define the delegated tokens (incl. self-delegation). + delegator_shares: + type: string + description: >- + delegator_shares defines total shares issued to a validator's + delegators. + description: + description: description defines the description terms for the validator. + type: object + properties: + moniker: + type: string + description: moniker defines a human-readable name for the validator. + identity: type: string - format: int64 - proposer_priority: + description: >- + identity defines an optional identity signature (ex. UPort or + Keybase). + website: type: string - format: int64 - total_voting_power: + description: website defines an optional website link. 
+ security_contact: + type: string + description: security_contact defines an optional email for security contact. + details: + type: string + description: details define other optional details. + unbonding_height: type: string format: int64 - tendermint.types.Vote: - type: object - properties: - type: - type: string - enum: - - SIGNED_MSG_TYPE_UNKNOWN - - SIGNED_MSG_TYPE_PREVOTE - - SIGNED_MSG_TYPE_PRECOMMIT - - SIGNED_MSG_TYPE_PROPOSAL - default: SIGNED_MSG_TYPE_UNKNOWN - description: |- - SignedMsgType is a type of signed message in the consensus. - - - SIGNED_MSG_TYPE_PREVOTE: Votes - - SIGNED_MSG_TYPE_PROPOSAL: Proposals - height: + description: >- + unbonding_height defines, if unbonding, the height at which this + validator has begun unbonding. + unbonding_time: type: string - format: int64 - round: - type: integer - format: int32 - block_id: + format: date-time + description: >- + unbonding_time defines, if unbonding, the min time for the validator + to complete unbonding. + commission: + description: commission defines the commission parameters. type: object properties: - hash: - type: string - format: byte - part_set_header: + commission_rates: + description: >- + commission_rates defines the initial commission rates to be used + for creating a validator. type: object properties: - total: - type: integer - format: int64 - hash: + rate: type: string - format: byte - title: PartsetHeader - title: BlockID - timestamp: - type: string - format: date-time - validator_address: - type: string - format: byte - validator_index: - type: integer - format: int32 - signature: - type: string - format: byte - description: |- - Vote represents a prevote, precommit, or commit vote from validators for - consensus. - tendermint.version.Consensus: - type: object - properties: - block: - type: string - format: uint64 - app: + description: >- + rate is the commission rate charged to delegators, as a + fraction. + max_rate: + type: string + description: >- + max_rate defines the maximum commission rate which validator + can ever charge, as a fraction. + max_change_rate: + type: string + description: >- + max_change_rate defines the maximum daily increase of the + validator commission, as a fraction. + update_time: + type: string + format: date-time + description: update_time is the last time the commission rate was changed. + min_self_delegation: type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a block in the - blockchain, + description: >- + min_self_delegation is the validator's self declared minimum self + delegation. - including all blockchain data structures and the rules of the - application's - state transition machine. - cosmos.distribution.v1beta1.DelegationDelegatorReward: - type: object - properties: - validator_address: + Since: cosmos-sdk 0.46 + unbonding_on_hold_ref_count: type: string - reward: + format: int64 + title: >- + strictly positive if this validator's unbonding has been stopped by + external modules + unbonding_ids: type: array items: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - DecCoin defines a token with a denomination and a decimal amount. + type: string + format: uint64 + title: >- + list of unbonding ids, each uniquely identifing an unbonding of this + validator + description: >- + Validator defines a validator, together with the total amount of the - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. 
- description: |- - DelegationDelegatorReward represents the properties - of a delegator's delegation reward. - cosmos.distribution.v1beta1.Params: - type: object - properties: - community_tax: - type: string - base_proposer_reward: - type: string - bonus_proposer_reward: - type: string - withdraw_addr_enabled: - type: boolean - format: boolean - description: Params defines the set of params for the distribution module. - cosmos.distribution.v1beta1.QueryCommunityPoolResponse: - type: object - properties: - pool: - type: array - items: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - DecCoin defines a token with a denomination and a decimal amount. + Validator's bond shares and their exchange rate to coins. Slashing results + in - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. - description: pool defines community pool's coins. - description: >- - QueryCommunityPoolResponse is the response type for the - Query/CommunityPool + a decrease in the exchange rate, allowing correct calculation of future - RPC method. - cosmos.distribution.v1beta1.QueryDelegationRewardsResponse: - type: object - properties: - rewards: - type: array - items: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - DecCoin defines a token with a denomination and a decimal amount. + undelegations without iterating over delegators. When coins are delegated + to - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. - description: rewards defines the rewards accrued by a delegation. - description: |- - QueryDelegationRewardsResponse is the response type for the - Query/DelegationRewards RPC method. - cosmos.distribution.v1beta1.QueryDelegationTotalRewardsResponse: + this validator, the validator is credited with a delegation whose number + of + + bond shares is based on the amount of coins delegated divided by the + current + + exchange rate. Voting power can be calculated as total bonded shares + + multiplied by exchange rate. + cosmos.base.abci.v1beta1.ABCIMessageLog: type: object properties: - rewards: + msg_index: + type: integer + format: int64 + log: + type: string + events: type: array items: type: object properties: - validator_address: + type: type: string - reward: + attributes: type: array items: type: object properties: - denom: + key: type: string - amount: + value: type: string description: >- - DecCoin defines a token with a denomination and a decimal - amount. - - - NOTE: The amount field is an Dec which implements the custom - method + Attribute defines an attribute wrapper where the key and value + are - signatures required by gogoproto. - description: |- - DelegationDelegatorReward represents the properties - of a delegator's delegation reward. - description: rewards defines all the rewards accrued by a delegator. - total: - type: array - items: - type: object - properties: - denom: - type: string - amount: - type: string + strings instead of raw bytes. description: |- - DecCoin defines a token with a denomination and a decimal amount. - - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. - description: total defines the sum of all the rewards. - description: |- - QueryDelegationTotalRewardsResponse is the response type for the - Query/DelegationTotalRewards RPC method. 
- cosmos.distribution.v1beta1.QueryDelegatorValidatorsResponse: - type: object - properties: - validators: - type: array - items: - type: string - description: validators defines the validators a delegator is delegating for. - description: |- - QueryDelegatorValidatorsResponse is the response type for the - Query/DelegatorValidators RPC method. - cosmos.distribution.v1beta1.QueryDelegatorWithdrawAddressResponse: + StringEvent defines en Event object wrapper where all the attributes + contain key/value pairs that are strings instead of raw bytes. + description: |- + Events contains a slice of Event objects that were emitted during some + execution. + description: >- + ABCIMessageLog defines a structure containing an indexed tx ABCI message + log. + cosmos.base.abci.v1beta1.Attribute: type: object properties: - withdraw_address: + key: + type: string + value: type: string - description: withdraw_address defines the delegator address to query for. description: |- - QueryDelegatorWithdrawAddressResponse is the response type for the - Query/DelegatorWithdrawAddress RPC method. - cosmos.distribution.v1beta1.QueryParamsResponse: - type: object - properties: - params: - description: params defines the parameters of the module. - type: object - properties: - community_tax: - type: string - base_proposer_reward: - type: string - bonus_proposer_reward: - type: string - withdraw_addr_enabled: - type: boolean - format: boolean - description: QueryParamsResponse is the response type for the Query/Params RPC method. - cosmos.distribution.v1beta1.QueryValidatorCommissionResponse: + Attribute defines an attribute wrapper where the key and value are + strings instead of raw bytes. + cosmos.base.abci.v1beta1.GasInfo: type: object properties: - commission: - description: commission defines the commision the validator received. - type: object - properties: - commission: - type: array - items: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a decimal - amount. - - - NOTE: The amount field is an Dec which implements the custom - method - - signatures required by gogoproto. - title: |- - QueryValidatorCommissionResponse is the response type for the - Query/ValidatorCommission RPC method - cosmos.distribution.v1beta1.QueryValidatorOutstandingRewardsResponse: + gas_wanted: + type: string + format: uint64 + description: GasWanted is the maximum units of work we allow this tx to perform. + gas_used: + type: string + format: uint64 + description: GasUsed is the amount of gas actually consumed. + description: GasInfo defines tx execution gas context. + cosmos.base.abci.v1beta1.Result: type: object properties: - rewards: - type: object - properties: - rewards: - type: array - items: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - DecCoin defines a token with a denomination and a decimal - amount. - + data: + type: string + format: byte + description: >- + Data is any data returned from message or handler execution. It MUST + be - NOTE: The amount field is an Dec which implements the custom - method + length prefixed in order to separate data from multiple message + executions. - signatures required by gogoproto. - description: >- - ValidatorOutstandingRewards represents outstanding (un-withdrawn) - rewards + Deprecated. This field is still populated, but prefer msg_response + instead - for a validator inexpensive to track, allows simple sanity checks. 
- description: |- - QueryValidatorOutstandingRewardsResponse is the response type for the - Query/ValidatorOutstandingRewards RPC method. - cosmos.distribution.v1beta1.QueryValidatorSlashesResponse: - type: object - properties: - slashes: + because it also contains the Msg response typeURL. + log: + type: string + description: Log contains the log information from message or handler execution. + events: type: array items: type: object properties: - validator_period: - type: string - format: uint64 - fraction: + type: type: string - description: |- - ValidatorSlashEvent represents a validator slash event. - Height is implicit within the store key. - This is needed to calculate appropriate amount of staking tokens - for delegations which are withdrawn after a slash has occurred. - description: slashes defines the slashes the validator received. - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + index: + type: boolean + description: >- + EventAttribute is a single key-value pair, associated with an + event. + description: >- + Event allows application developers to attach additional information + to - was set, its value is undefined otherwise - description: |- - QueryValidatorSlashesResponse is the response type for the - Query/ValidatorSlashes RPC method. - cosmos.distribution.v1beta1.ValidatorAccumulatedCommission: - type: object - properties: - commission: - type: array - items: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - DecCoin defines a token with a denomination and a decimal amount. + ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and + ResponseDeliverTx. - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. - description: |- - ValidatorAccumulatedCommission represents accumulated commission - for a validator kept as a running counter, can be withdrawn at any time. - cosmos.distribution.v1beta1.ValidatorOutstandingRewards: - type: object - properties: - rewards: - type: array - items: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - DecCoin defines a token with a denomination and a decimal amount. + Later, transactions may be queried using these events. + description: >- + Events contains a slice of Event objects that were emitted during + message - NOTE: The amount field is an Dec which implements the custom method - signatures required by gogoproto. - description: |- - ValidatorOutstandingRewards represents outstanding (un-withdrawn) rewards - for a validator inexpensive to track, allows simple sanity checks. - cosmos.distribution.v1beta1.ValidatorSlashEvent: - type: object - properties: - validator_period: - type: string - format: uint64 - fraction: - type: string - description: |- - ValidatorSlashEvent represents a validator slash event. - Height is implicit within the store key. - This is needed to calculate appropriate amount of staking tokens - for delegations which are withdrawn after a slash has occurred. 
- cosmos.evidence.v1beta1.QueryAllEvidenceResponse: - type: object - properties: - evidence: + or handler execution. + msg_responses: type: array items: type: object @@ -38777,7 +61800,7 @@ definitions: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -38787,13 +61810,16 @@ definitions: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -38811,7 +61837,6 @@ definitions: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -38837,40 +61862,121 @@ definitions: `value` which holds the custom JSON in addition to the `@type` - field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - description: evidence returns all evidences. - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + field. Example (for message [google.protobuf.Duration][]): - was set, its value is undefined otherwise - description: >- - QueryAllEvidenceResponse is the response type for the Query/AllEvidence - RPC + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: |- + msg_responses contains the Msg handler responses type packed in Anys. - method. - cosmos.evidence.v1beta1.QueryEvidenceResponse: + Since: cosmos-sdk 0.46 + description: Result is the union of ResponseFormat and ResponseCheckTx. + cosmos.base.abci.v1beta1.StringEvent: type: object properties: - evidence: + type: + type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + description: |- + Attribute defines an attribute wrapper where the key and value are + strings instead of raw bytes. + description: |- + StringEvent defines en Event object wrapper where all the attributes + contain key/value pairs that are strings instead of raw bytes. + cosmos.base.abci.v1beta1.TxResponse: + type: object + properties: + height: + type: string + format: int64 + title: The block height + txhash: + type: string + description: The transaction hash. + codespace: + type: string + title: Namespace for the Code + code: + type: integer + format: int64 + description: Response code. + data: + type: string + description: Result bytes, if any. + raw_log: + type: string + description: |- + The output of the application's logger (raw string). May be + non-deterministic. + logs: + type: array + items: + type: object + properties: + msg_index: + type: integer + format: int64 + log: + type: string + events: + type: array + items: + type: object + properties: + type: + type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + description: >- + Attribute defines an attribute wrapper where the key and + value are + + strings instead of raw bytes. 
+ description: >- + StringEvent defines en Event object wrapper where all the + attributes + + contain key/value pairs that are strings instead of raw bytes. + description: >- + Events contains a slice of Event objects that were emitted + during some + + execution. + description: >- + ABCIMessageLog defines a structure containing an indexed tx ABCI + message log. + description: >- + The output of the application's logger (typed). May be + non-deterministic. + info: + type: string + description: Additional information. May be non-deterministic. + gas_wanted: + type: string + format: int64 + description: Amount of gas requested for transaction. + gas_used: + type: string + format: int64 + description: Amount of gas consumed by transaction. + tx: type: object properties: type_url: @@ -38963,7 +62069,7 @@ definitions: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -38973,13 +62079,16 @@ definitions: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -38997,7 +62106,6 @@ definitions: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -39029,267 +62137,605 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - description: >- - QueryEvidenceResponse is the response type for the Query/Evidence RPC - method. - cosmos.gov.v1beta1.Deposit: - type: object - properties: - proposal_id: - type: string - format: uint64 - depositor: + timestamp: type: string - amount: + description: >- + Time of the previous block. For heights > 1, it's the weighted median + of + + the timestamps of the valid votes in the block.LastCommit. For height + == 1, + + it's genesis time. + events: type: array items: type: object properties: - denom: - type: string - amount: + type: type: string - description: |- - Coin defines a token with a denomination and an amount. + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + index: + type: boolean + description: >- + EventAttribute is a single key-value pair, associated with an + event. + description: >- + Event allows application developers to attach additional information + to - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. + ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and + ResponseDeliverTx. + + Later, transactions may be queried using these events. + description: >- + Events defines all the events emitted by processing a transaction. + Note, + + these events include those emitted by processing all the messages and + those + + emitted from the ante. Whereas Logs contains the events, with + + additional metadata, emitted only by processing the messages. + + + Since: cosmos-sdk 0.42.11, 0.44.5, 0.45 + description: >- + TxResponse defines a structure containing relevant tx data and metadata. + The + + tags are stringified and the log is JSON decoded. 
+ cosmos.crypto.multisig.v1beta1.CompactBitArray: + type: object + properties: + extra_bits_stored: + type: integer + format: int64 + elems: + type: string + format: byte description: |- - Deposit defines an amount deposited by an account address to an active - proposal. - cosmos.gov.v1beta1.DepositParams: + CompactBitArray is an implementation of a space efficient bit array. + This is used to ensure that the encoded data takes up a minimal amount of + space after proto encoding. + This is not thread safe, and is not intended for concurrent usage. + cosmos.tx.signing.v1beta1.SignMode: + type: string + enum: + - SIGN_MODE_UNSPECIFIED + - SIGN_MODE_DIRECT + - SIGN_MODE_TEXTUAL + - SIGN_MODE_DIRECT_AUX + - SIGN_MODE_LEGACY_AMINO_JSON + - SIGN_MODE_EIP_191 + default: SIGN_MODE_UNSPECIFIED + description: |- + SignMode represents a signing mode with its own security guarantees. + + This enum should be considered a registry of all known sign modes + in the Cosmos ecosystem. Apps are not expected to support all known + sign modes. Apps that would like to support custom sign modes are + encouraged to open a small PR against this file to add a new case + to this SignMode enum describing their sign mode so that different + apps have a consistent version of this enum. + + - SIGN_MODE_UNSPECIFIED: SIGN_MODE_UNSPECIFIED specifies an unknown signing mode and will be + rejected. + - SIGN_MODE_DIRECT: SIGN_MODE_DIRECT specifies a signing mode which uses SignDoc and is + verified with raw bytes from Tx. + - SIGN_MODE_TEXTUAL: SIGN_MODE_TEXTUAL is a future signing mode that will verify some + human-readable textual representation on top of the binary representation + from SIGN_MODE_DIRECT. It is currently not supported. + - SIGN_MODE_DIRECT_AUX: SIGN_MODE_DIRECT_AUX specifies a signing mode which uses + SignDocDirectAux. As opposed to SIGN_MODE_DIRECT, this sign mode does not + require signers signing over other signers' `signer_info`. It also allows + for adding Tips in transactions. + + Since: cosmos-sdk 0.46 + - SIGN_MODE_LEGACY_AMINO_JSON: SIGN_MODE_LEGACY_AMINO_JSON is a backwards compatibility mode which uses + Amino JSON and will be removed in the future. + - SIGN_MODE_EIP_191: SIGN_MODE_EIP_191 specifies the sign mode for EIP 191 signing on the Cosmos + SDK. Ref: https://eips.ethereum.org/EIPS/eip-191 + + Currently, SIGN_MODE_EIP_191 is registered as a SignMode enum variant, + but is not implemented on the SDK by default. To enable EIP-191, you need + to pass a custom `TxConfig` that has an implementation of + `SignModeHandler` for EIP-191. The SDK may decide to fully support + EIP-191 in the future. + + Since: cosmos-sdk 0.45.2 + cosmos.tx.v1beta1.AuthInfo: type: object properties: - min_deposit: + signer_infos: type: array items: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - Coin defines a token with a denomination and an amount. + $ref: '#/definitions/cosmos.tx.v1beta1.SignerInfo' + description: >- + signer_infos defines the signing modes for the required signers. The + number - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. - description: Minimum deposit for a proposal to enter voting period. - max_deposit_period: - type: string + and order of elements must match the required signers from TxBody's + + messages. The first element is the primary signer and the one which + pays + + the fee. + fee: description: >- - Maximum period for Atom holders to deposit on a proposal. 
Initial - value: 2 - months. - description: DepositParams defines the params for deposits on governance proposals. - cosmos.gov.v1beta1.Proposal: + Fee is the fee and gas limit for the transaction. The first signer is + the + + primary signer and the one which pays the fee. The fee can be + calculated + + based on the cost of evaluating the body and doing signature + verification + + of the signers. This can be estimated via simulation. + type: object + properties: + amount: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + title: amount is the amount of coins to be paid as a fee + gas_limit: + type: string + format: uint64 + title: >- + gas_limit is the maximum gas that can be used in transaction + processing + + before an out of gas error occurs + payer: + type: string + description: >- + if unset, the first signer is responsible for paying the fees. If + set, the specified account must pay the fees. + + the payer must be a tx signer (and thus have signed this field in + AuthInfo). + + setting this field does *not* change the ordering of required + signers for the transaction. + granter: + type: string + title: >- + if set, the fee payer (either the first signer or the value of the + payer field) requests that a fee grant be used + + to pay fees instead of the fee payer's own balance. If an + appropriate fee grant does not exist or the chain does + + not support fee grants, this will fail + tip: + description: >- + Tip is the optional tip used for transactions fees paid in another + denom. + + + This field is ignored if the chain didn't enable tips, i.e. didn't add + the + + `TipDecorator` in its posthandler. + + + Since: cosmos-sdk 0.46 + type: object + properties: + amount: + type: array + items: + type: object + properties: + denom: + type: string + amount: + type: string + description: >- + Coin defines a token with a denomination and an amount. + + + NOTE: The amount field is an Int which implements the custom + method + + signatures required by gogoproto. + title: amount is the amount of the tip + tipper: + type: string + title: tipper is the address of the account paying for the tip + description: |- + AuthInfo describes the fee and signer modes that are used to sign a + transaction. + cosmos.tx.v1beta1.BroadcastMode: + type: string + enum: + - BROADCAST_MODE_UNSPECIFIED + - BROADCAST_MODE_BLOCK + - BROADCAST_MODE_SYNC + - BROADCAST_MODE_ASYNC + default: BROADCAST_MODE_UNSPECIFIED + description: >- + BroadcastMode specifies the broadcast mode for the TxService.Broadcast RPC + method. + + - BROADCAST_MODE_UNSPECIFIED: zero-value for mode ordering + - BROADCAST_MODE_BLOCK: DEPRECATED: use BROADCAST_MODE_SYNC instead, + BROADCAST_MODE_BLOCK is not supported by the SDK from v0.47.x onwards. + - BROADCAST_MODE_SYNC: BROADCAST_MODE_SYNC defines a tx broadcasting mode where the client waits for + a CheckTx execution response only. + - BROADCAST_MODE_ASYNC: BROADCAST_MODE_ASYNC defines a tx broadcasting mode where the client returns + immediately. + cosmos.tx.v1beta1.BroadcastTxRequest: type: object properties: - proposal_id: + tx_bytes: type: string - format: uint64 - content: + format: byte + description: tx_bytes is the raw transaction. 
+ mode: + type: string + enum: + - BROADCAST_MODE_UNSPECIFIED + - BROADCAST_MODE_BLOCK + - BROADCAST_MODE_SYNC + - BROADCAST_MODE_ASYNC + default: BROADCAST_MODE_UNSPECIFIED + description: >- + BroadcastMode specifies the broadcast mode for the TxService.Broadcast + RPC method. + + - BROADCAST_MODE_UNSPECIFIED: zero-value for mode ordering + - BROADCAST_MODE_BLOCK: DEPRECATED: use BROADCAST_MODE_SYNC instead, + BROADCAST_MODE_BLOCK is not supported by the SDK from v0.47.x onwards. + - BROADCAST_MODE_SYNC: BROADCAST_MODE_SYNC defines a tx broadcasting mode where the client waits for + a CheckTx execution response only. + - BROADCAST_MODE_ASYNC: BROADCAST_MODE_ASYNC defines a tx broadcasting mode where the client returns + immediately. + description: |- + BroadcastTxRequest is the request type for the Service.BroadcastTxRequest + RPC method. + cosmos.tx.v1beta1.BroadcastTxResponse: + type: object + properties: + tx_response: type: object properties: - type_url: + height: + type: string + format: int64 + title: The block height + txhash: + type: string + description: The transaction hash. + codespace: + type: string + title: Namespace for the Code + code: + type: integer + format: int64 + description: Response code. + data: + type: string + description: Result bytes, if any. + raw_log: type: string + description: |- + The output of the application's logger (raw string). May be + non-deterministic. + logs: + type: array + items: + type: object + properties: + msg_index: + type: integer + format: int64 + log: + type: string + events: + type: array + items: + type: object + properties: + type: + type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + description: >- + Attribute defines an attribute wrapper where the key + and value are + + strings instead of raw bytes. + description: >- + StringEvent defines en Event object wrapper where all the + attributes + + contain key/value pairs that are strings instead of raw + bytes. + description: >- + Events contains a slice of Event objects that were emitted + during some + + execution. + description: >- + ABCIMessageLog defines a structure containing an indexed tx ABCI + message log. description: >- - A URL/resource name that uniquely identifies the type of the - serialized + The output of the application's logger (typed). May be + non-deterministic. + info: + type: string + description: Additional information. May be non-deterministic. + gas_wanted: + type: string + format: int64 + description: Amount of gas requested for transaction. + gas_used: + type: string + format: int64 + description: Amount of gas consumed by transaction. + tx: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - protocol buffer message. This string must contain at least + protocol buffer message. This string must contain at least - one "/" character. The last segment of the URL's path must - represent + one "/" character. The last segment of the URL's path must + represent - the fully qualified name of the type (as in + the fully qualified name of the type (as in - `path/google.protobuf.Duration`). The name should be in a - canonical form + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. 
However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally set + up a type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official - (e.g., leading "." is not accepted). + protobuf release, and it is not used for type URLs beginning + with + type.googleapis.com. - In practice, teams usually precompile into the binary all types - that they - expect it to use in the context of Any. However, for URLs which - use the + Schemes other than `http`, `https` (or the empty scheme) might + be - scheme `http`, `https`, or no scheme, one can optionally set up a - type + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a - server that maps type URLs to message definitions as follows: + URL that describes the type of the serialized message. - * If no scheme is provided, `https` is assumed. + Protobuf library provides support to pack/unpack Any values in the + form - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + of utility functions or additional generated methods of the Any + type. - Note: this functionality is not currently available in the - official - protobuf release, and it is not used for type URLs beginning with + Example 1: Pack and unpack a message in C++. - type.googleapis.com. + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + Example 2: Pack and unpack a message in Java. - Schemes other than `http`, `https` (or the empty scheme) might be + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above specified - type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message along - with a + Example 3: Pack and unpack a message in Python. - URL that describes the type of the serialized message. + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + Example 4: Pack and unpack a message in Go - Protobuf library provides support to pack/unpack Any values in the - form + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - of utility functions or additional generated methods of the Any type. 
+ The pack methods provided by protobuf library will by default use + 'type.googleapis.com/full.type.name' as the type URL and the + unpack - Example 1: Pack and unpack a message in C++. + methods only use the fully qualified type name after the last '/' - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + in the type URL, for example "foo.bar.com/x/y.z" will yield type - Example 2: Pack and unpack a message in Java. + name "y.z". - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - Example 3: Pack and unpack a message in Python. - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + JSON - Example 4: Pack and unpack a message in Go - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + The JSON representation of an `Any` value uses the regular - The pack methods provided by protobuf library will by default use + representation of the deserialized, embedded message, with an - 'type.googleapis.com/full.type.name' as the type URL and the unpack + additional field `@type` which contains the type URL. Example: - methods only use the fully qualified type name after the last '/' + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - in the type URL, for example "foo.bar.com/x/y.z" will yield type + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - name "y.z". + If the embedded message type is well-known and has a custom JSON + representation, that representation will be embedded adding a + field + `value` which holds the custom JSON in addition to the `@type` - JSON + field. Example (for message [google.protobuf.Duration][]): - ==== + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + timestamp: + type: string + description: >- + Time of the previous block. For heights > 1, it's the weighted + median of - The JSON representation of an `Any` value uses the regular + the timestamps of the valid votes in the block.LastCommit. For + height == 1, - representation of the deserialized, embedded message, with an + it's genesis time. + events: + type: array + items: + type: object + properties: + type: + type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + index: + type: boolean + description: >- + EventAttribute is a single key-value pair, associated with + an event. + description: >- + Event allows application developers to attach additional + information to - additional field `@type` which contains the type URL. Example: + ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and + ResponseDeliverTx. - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + Later, transactions may be queried using these events. + description: >- + Events defines all the events emitted by processing a transaction. + Note, - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + these events include those emitted by processing all the messages + and those - If the embedded message type is well-known and has a custom JSON + emitted from the ante. 
Whereas Logs contains the events, with - representation, that representation will be embedded adding a field + additional metadata, emitted only by processing the messages. - `value` which holds the custom JSON in addition to the `@type` - field. Example (for message [google.protobuf.Duration][]): + Since: cosmos-sdk 0.42.11, 0.44.5, 0.45 + description: >- + TxResponse defines a structure containing relevant tx data and + metadata. The - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - status: - type: string - enum: - - PROPOSAL_STATUS_UNSPECIFIED - - PROPOSAL_STATUS_DEPOSIT_PERIOD - - PROPOSAL_STATUS_VOTING_PERIOD - - PROPOSAL_STATUS_PASSED - - PROPOSAL_STATUS_REJECTED - - PROPOSAL_STATUS_FAILED - default: PROPOSAL_STATUS_UNSPECIFIED - description: |- - ProposalStatus enumerates the valid statuses of a proposal. - - - PROPOSAL_STATUS_UNSPECIFIED: PROPOSAL_STATUS_UNSPECIFIED defines the default propopsal status. - - PROPOSAL_STATUS_DEPOSIT_PERIOD: PROPOSAL_STATUS_DEPOSIT_PERIOD defines a proposal status during the deposit - period. - - PROPOSAL_STATUS_VOTING_PERIOD: PROPOSAL_STATUS_VOTING_PERIOD defines a proposal status during the voting - period. - - PROPOSAL_STATUS_PASSED: PROPOSAL_STATUS_PASSED defines a proposal status of a proposal that has - passed. - - PROPOSAL_STATUS_REJECTED: PROPOSAL_STATUS_REJECTED defines a proposal status of a proposal that has - been rejected. - - PROPOSAL_STATUS_FAILED: PROPOSAL_STATUS_FAILED defines a proposal status of a proposal that has - failed. - final_tally_result: - type: object - properties: - 'yes': - type: string - abstain: - type: string - 'no': - type: string - no_with_veto: - type: string - description: TallyResult defines a standard tally for a governance proposal. - submit_time: - type: string - format: date-time - deposit_end_time: - type: string - format: date-time - total_deposit: + tags are stringified and the log is JSON decoded. + description: |- + BroadcastTxResponse is the response type for the + Service.BroadcastTx method. + cosmos.tx.v1beta1.Fee: + type: object + properties: + amount: type: array items: type: object @@ -39303,116 +62749,607 @@ definitions: NOTE: The amount field is an Int which implements the custom method signatures required by gogoproto. - voting_start_time: + title: amount is the amount of coins to be paid as a fee + gas_limit: type: string - format: date-time - voting_end_time: + format: uint64 + title: >- + gas_limit is the maximum gas that can be used in transaction + processing + + before an out of gas error occurs + payer: type: string - format: date-time - description: Proposal defines the core field members of a governance proposal. - cosmos.gov.v1beta1.ProposalStatus: - type: string - enum: - - PROPOSAL_STATUS_UNSPECIFIED - - PROPOSAL_STATUS_DEPOSIT_PERIOD - - PROPOSAL_STATUS_VOTING_PERIOD - - PROPOSAL_STATUS_PASSED - - PROPOSAL_STATUS_REJECTED - - PROPOSAL_STATUS_FAILED - default: PROPOSAL_STATUS_UNSPECIFIED - description: |- - ProposalStatus enumerates the valid statuses of a proposal. + description: >- + if unset, the first signer is responsible for paying the fees. If set, + the specified account must pay the fees. - - PROPOSAL_STATUS_UNSPECIFIED: PROPOSAL_STATUS_UNSPECIFIED defines the default propopsal status. - - PROPOSAL_STATUS_DEPOSIT_PERIOD: PROPOSAL_STATUS_DEPOSIT_PERIOD defines a proposal status during the deposit - period. - - PROPOSAL_STATUS_VOTING_PERIOD: PROPOSAL_STATUS_VOTING_PERIOD defines a proposal status during the voting - period. 
- - PROPOSAL_STATUS_PASSED: PROPOSAL_STATUS_PASSED defines a proposal status of a proposal that has - passed. - - PROPOSAL_STATUS_REJECTED: PROPOSAL_STATUS_REJECTED defines a proposal status of a proposal that has - been rejected. - - PROPOSAL_STATUS_FAILED: PROPOSAL_STATUS_FAILED defines a proposal status of a proposal that has - failed. - cosmos.gov.v1beta1.QueryDepositResponse: + the payer must be a tx signer (and thus have signed this field in + AuthInfo). + + setting this field does *not* change the ordering of required signers + for the transaction. + granter: + type: string + title: >- + if set, the fee payer (either the first signer or the value of the + payer field) requests that a fee grant be used + + to pay fees instead of the fee payer's own balance. If an appropriate + fee grant does not exist or the chain does + + not support fee grants, this will fail + description: >- + Fee includes the amount of coins paid in fees and the maximum + + gas to be used by the transaction. The ratio yields an effective + "gasprice", + + which must be above some miminum to be accepted into the mempool. + cosmos.tx.v1beta1.GetBlockWithTxsResponse: type: object properties: - deposit: + txs: + type: array + items: + $ref: '#/definitions/cosmos.tx.v1beta1.Tx' + description: txs are the transactions in the block. + block_id: type: object properties: - proposal_id: - type: string - format: uint64 - depositor: + hash: type: string - amount: - type: array - items: - type: object - properties: - denom: - type: string - amount: + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + block: + type: object + properties: + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules for processing a block + in the blockchain, + + including all blockchain data structures and the rules of the + application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: hashes from the app output from the prev block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: Header defines the structure of a Tendermint block header. + data: + type: object + properties: + txs: + type: array + items: type: string - description: >- - Coin defines a token with a denomination and an amount. + format: byte + description: >- + Txs that will be applied by state @ block.Height+1. + + NOTE: not all txs here are valid. We're just agreeing on the + order first. + + This means that block.AppHash does not include these txs. 
+ title: Data contains the set of transactions included in the block + evidence: + type: object + properties: + evidence: + type: array + items: + type: object + properties: + duplicate_vote_evidence: + type: object + properties: + vote_a: + type: object + properties: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed message in the + consensus. + + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote + from validators for + + consensus. + vote_b: + type: object + properties: + type: + type: string + enum: + - SIGNED_MSG_TYPE_UNKNOWN + - SIGNED_MSG_TYPE_PREVOTE + - SIGNED_MSG_TYPE_PRECOMMIT + - SIGNED_MSG_TYPE_PROPOSAL + default: SIGNED_MSG_TYPE_UNKNOWN + description: >- + SignedMsgType is a type of signed message in the + consensus. + - SIGNED_MSG_TYPE_PREVOTE: Votes + - SIGNED_MSG_TYPE_PROPOSAL: Proposals + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + timestamp: + type: string + format: date-time + validator_address: + type: string + format: byte + validator_index: + type: integer + format: int32 + signature: + type: string + format: byte + description: >- + Vote represents a prevote, precommit, or commit vote + from validators for - NOTE: The amount field is an Int which implements the custom - method + consensus. + total_voting_power: + type: string + format: int64 + validator_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + DuplicateVoteEvidence contains evidence of a validator + signed two conflicting votes. + light_client_attack_evidence: + type: object + properties: + conflicting_block: + type: object + properties: + signed_header: + type: object + properties: + header: + type: object + properties: + version: + title: basic block info + type: object + properties: + block: + type: string + format: uint64 + app: + type: string + format: uint64 + description: >- + Consensus captures the consensus rules + for processing a block in the + blockchain, - signatures required by gogoproto. - description: |- - Deposit defines an amount deposited by an account address to an active - proposal. - description: >- - QueryDepositResponse is the response type for the Query/Deposit RPC - method. 
- cosmos.gov.v1beta1.QueryDepositsResponse: - type: object - properties: - deposits: - type: array - items: - type: object - properties: - proposal_id: - type: string - format: uint64 - depositor: - type: string - amount: - type: array - items: + including all blockchain data structures + and the rules of the application's + + state transition machine. + chain_id: + type: string + height: + type: string + format: int64 + time: + type: string + format: date-time + last_block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + last_commit_hash: + type: string + format: byte + title: hashes of block data + data_hash: + type: string + format: byte + validators_hash: + type: string + format: byte + title: >- + hashes from the app output from the prev + block + next_validators_hash: + type: string + format: byte + consensus_hash: + type: string + format: byte + app_hash: + type: string + format: byte + last_results_hash: + type: string + format: byte + evidence_hash: + type: string + format: byte + title: consensus info + proposer_address: + type: string + format: byte + description: >- + Header defines the structure of a Tendermint + block header. + commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: + type: object + properties: + hash: + type: string + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: >- + BlockIdFlag indicates which BlcokID the + signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: >- + CommitSig is a part of the Vote included + in a Commit. + description: >- + Commit contains the evidence that a block + was committed by a set of validators. 
+ validator_set: + type: object + properties: + validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + proposer: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for + use with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + common_height: + type: string + format: int64 + byzantine_validators: + type: array + items: + type: object + properties: + address: + type: string + format: byte + pub_key: + type: object + properties: + ed25519: + type: string + format: byte + secp256k1: + type: string + format: byte + title: >- + PublicKey defines the keys available for use + with Tendermint Validators + voting_power: + type: string + format: int64 + proposer_priority: + type: string + format: int64 + total_voting_power: + type: string + format: int64 + timestamp: + type: string + format: date-time + description: >- + LightClientAttackEvidence contains evidence of a set of + validators attempting to mislead a light client. + last_commit: + type: object + properties: + height: + type: string + format: int64 + round: + type: integer + format: int32 + block_id: type: object properties: - denom: - type: string - amount: + hash: type: string - description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the custom - method - - signatures required by gogoproto. - description: >- - Deposit defines an amount deposited by an account address to an - active - - proposal. + format: byte + part_set_header: + type: object + properties: + total: + type: integer + format: int64 + hash: + type: string + format: byte + title: PartsetHeader + title: BlockID + signatures: + type: array + items: + type: object + properties: + block_id_flag: + type: string + enum: + - BLOCK_ID_FLAG_UNKNOWN + - BLOCK_ID_FLAG_ABSENT + - BLOCK_ID_FLAG_COMMIT + - BLOCK_ID_FLAG_NIL + default: BLOCK_ID_FLAG_UNKNOWN + title: BlockIdFlag indicates which BlcokID the signature is for + validator_address: + type: string + format: byte + timestamp: + type: string + format: date-time + signature: + type: string + format: byte + description: CommitSig is a part of the Vote included in a Commit. + description: >- + Commit contains the evidence that a block was committed by a set + of validators. pagination: - description: pagination defines the pagination in the response. + description: pagination defines a pagination for the response. type: object properties: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. 
total: type: string format: uint64 @@ -39422,81 +63359,102 @@ definitions: was set, its value is undefined otherwise description: >- - QueryDepositsResponse is the response type for the Query/Deposits RPC - method. - cosmos.gov.v1beta1.QueryParamsResponse: + GetBlockWithTxsResponse is the response type for the + Service.GetBlockWithTxs method. + + + Since: cosmos-sdk 0.45.2 + cosmos.tx.v1beta1.GetTxResponse: type: object properties: - voting_params: - description: voting_params defines the parameters related to voting. + tx: + $ref: '#/definitions/cosmos.tx.v1beta1.Tx' + description: tx is the queried transaction. + tx_response: type: object properties: - voting_period: + height: type: string - description: Length of the voting period. - deposit_params: - description: deposit_params defines the parameters related to deposit. - type: object - properties: - min_deposit: + format: int64 + title: The block height + txhash: + type: string + description: The transaction hash. + codespace: + type: string + title: Namespace for the Code + code: + type: integer + format: int64 + description: Response code. + data: + type: string + description: Result bytes, if any. + raw_log: + type: string + description: |- + The output of the application's logger (raw string). May be + non-deterministic. + logs: type: array items: type: object properties: - denom: - type: string - amount: + msg_index: + type: integer + format: int64 + log: type: string - description: >- - Coin defines a token with a denomination and an amount. + events: + type: array + items: + type: object + properties: + type: + type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + description: >- + Attribute defines an attribute wrapper where the key + and value are + strings instead of raw bytes. + description: >- + StringEvent defines en Event object wrapper where all the + attributes - NOTE: The amount field is an Int which implements the custom - method + contain key/value pairs that are strings instead of raw + bytes. + description: >- + Events contains a slice of Event objects that were emitted + during some - signatures required by gogoproto. - description: Minimum deposit for a proposal to enter voting period. - max_deposit_period: - type: string - description: >- - Maximum period for Atom holders to deposit on a proposal. Initial - value: 2 - months. - tally_params: - description: tally_params defines the parameters related to tally. - type: object - properties: - quorum: - type: string - format: byte - description: >- - Minimum percentage of total stake needed to vote for a result to - be - considered valid. - threshold: - type: string - format: byte - description: >- - Minimum proportion of Yes votes for proposal to pass. Default - value: 0.5. - veto_threshold: - type: string - format: byte - description: >- - Minimum value of Veto votes to Total votes ratio for proposal to - be - vetoed. Default value: 1/3. - description: QueryParamsResponse is the response type for the Query/Params RPC method. - cosmos.gov.v1beta1.QueryProposalResponse: - type: object - properties: - proposal: - type: object - properties: - proposal_id: + execution. + description: >- + ABCIMessageLog defines a structure containing an indexed tx ABCI + message log. + description: >- + The output of the application's logger (typed). May be + non-deterministic. + info: type: string - format: uint64 - content: + description: Additional information. May be non-deterministic. 
+ gas_wanted: + type: string + format: int64 + description: Amount of gas requested for transaction. + gas_used: + type: string + format: int64 + description: Amount of gas consumed by transaction. + tx: type: object properties: type_url: @@ -39593,7 +63551,7 @@ definitions: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -39603,13 +63561,16 @@ definitions: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -39628,7 +63589,6 @@ definitions: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -39661,87 +63621,159 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - status: - type: string - enum: - - PROPOSAL_STATUS_UNSPECIFIED - - PROPOSAL_STATUS_DEPOSIT_PERIOD - - PROPOSAL_STATUS_VOTING_PERIOD - - PROPOSAL_STATUS_PASSED - - PROPOSAL_STATUS_REJECTED - - PROPOSAL_STATUS_FAILED - default: PROPOSAL_STATUS_UNSPECIFIED - description: |- - ProposalStatus enumerates the valid statuses of a proposal. - - - PROPOSAL_STATUS_UNSPECIFIED: PROPOSAL_STATUS_UNSPECIFIED defines the default propopsal status. - - PROPOSAL_STATUS_DEPOSIT_PERIOD: PROPOSAL_STATUS_DEPOSIT_PERIOD defines a proposal status during the deposit - period. - - PROPOSAL_STATUS_VOTING_PERIOD: PROPOSAL_STATUS_VOTING_PERIOD defines a proposal status during the voting - period. - - PROPOSAL_STATUS_PASSED: PROPOSAL_STATUS_PASSED defines a proposal status of a proposal that has - passed. - - PROPOSAL_STATUS_REJECTED: PROPOSAL_STATUS_REJECTED defines a proposal status of a proposal that has - been rejected. - - PROPOSAL_STATUS_FAILED: PROPOSAL_STATUS_FAILED defines a proposal status of a proposal that has - failed. - final_tally_result: - type: object - properties: - 'yes': - type: string - abstain: - type: string - 'no': - type: string - no_with_veto: - type: string - description: TallyResult defines a standard tally for a governance proposal. - submit_time: - type: string - format: date-time - deposit_end_time: + timestamp: type: string - format: date-time - total_deposit: + description: >- + Time of the previous block. For heights > 1, it's the weighted + median of + + the timestamps of the valid votes in the block.LastCommit. For + height == 1, + + it's genesis time. + events: type: array items: type: object properties: - denom: - type: string - amount: + type: type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + index: + type: boolean + description: >- + EventAttribute is a single key-value pair, associated with + an event. description: >- - Coin defines a token with a denomination and an amount. + Event allows application developers to attach additional + information to + ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and + ResponseDeliverTx. - NOTE: The amount field is an Int which implements the custom - method + Later, transactions may be queried using these events. + description: >- + Events defines all the events emitted by processing a transaction. + Note, - signatures required by gogoproto. 
- voting_start_time: - type: string - format: date-time - voting_end_time: - type: string - format: date-time - description: Proposal defines the core field members of a governance proposal. - description: >- - QueryProposalResponse is the response type for the Query/Proposal RPC - method. - cosmos.gov.v1beta1.QueryProposalsResponse: + these events include those emitted by processing all the messages + and those + + emitted from the ante. Whereas Logs contains the events, with + + additional metadata, emitted only by processing the messages. + + + Since: cosmos-sdk 0.42.11, 0.44.5, 0.45 + description: >- + TxResponse defines a structure containing relevant tx data and + metadata. The + + tags are stringified and the log is JSON decoded. + description: GetTxResponse is the response type for the Service.GetTx method. + cosmos.tx.v1beta1.GetTxsEventResponse: type: object properties: - proposals: + txs: + type: array + items: + $ref: '#/definitions/cosmos.tx.v1beta1.Tx' + description: txs is the list of queried transactions. + tx_responses: type: array items: type: object properties: - proposal_id: + height: type: string - format: uint64 - content: + format: int64 + title: The block height + txhash: + type: string + description: The transaction hash. + codespace: + type: string + title: Namespace for the Code + code: + type: integer + format: int64 + description: Response code. + data: + type: string + description: Result bytes, if any. + raw_log: + type: string + description: |- + The output of the application's logger (raw string). May be + non-deterministic. + logs: + type: array + items: + type: object + properties: + msg_index: + type: integer + format: int64 + log: + type: string + events: + type: array + items: + type: object + properties: + type: + type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + description: >- + Attribute defines an attribute wrapper where the + key and value are + + strings instead of raw bytes. + description: >- + StringEvent defines en Event object wrapper where all + the attributes + + contain key/value pairs that are strings instead of raw + bytes. + description: >- + Events contains a slice of Event objects that were emitted + during some + + execution. + description: >- + ABCIMessageLog defines a structure containing an indexed tx + ABCI message log. + description: >- + The output of the application's logger (typed). May be + non-deterministic. + info: + type: string + description: Additional information. May be non-deterministic. + gas_wanted: + type: string + format: int64 + description: Amount of gas requested for transaction. + gas_used: + type: string + format: int64 + description: Amount of gas consumed by transaction. + tx: type: object properties: type_url: @@ -39840,7 +63872,7 @@ definitions: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -39850,13 +63882,16 @@ definitions: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... 
} @@ -39877,7 +63912,6 @@ definitions: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -39910,935 +63944,779 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - status: - type: string - enum: - - PROPOSAL_STATUS_UNSPECIFIED - - PROPOSAL_STATUS_DEPOSIT_PERIOD - - PROPOSAL_STATUS_VOTING_PERIOD - - PROPOSAL_STATUS_PASSED - - PROPOSAL_STATUS_REJECTED - - PROPOSAL_STATUS_FAILED - default: PROPOSAL_STATUS_UNSPECIFIED - description: |- - ProposalStatus enumerates the valid statuses of a proposal. - - - PROPOSAL_STATUS_UNSPECIFIED: PROPOSAL_STATUS_UNSPECIFIED defines the default propopsal status. - - PROPOSAL_STATUS_DEPOSIT_PERIOD: PROPOSAL_STATUS_DEPOSIT_PERIOD defines a proposal status during the deposit - period. - - PROPOSAL_STATUS_VOTING_PERIOD: PROPOSAL_STATUS_VOTING_PERIOD defines a proposal status during the voting - period. - - PROPOSAL_STATUS_PASSED: PROPOSAL_STATUS_PASSED defines a proposal status of a proposal that has - passed. - - PROPOSAL_STATUS_REJECTED: PROPOSAL_STATUS_REJECTED defines a proposal status of a proposal that has - been rejected. - - PROPOSAL_STATUS_FAILED: PROPOSAL_STATUS_FAILED defines a proposal status of a proposal that has - failed. - final_tally_result: - type: object - properties: - 'yes': - type: string - abstain: - type: string - 'no': - type: string - no_with_veto: - type: string - description: TallyResult defines a standard tally for a governance proposal. - submit_time: - type: string - format: date-time - deposit_end_time: - type: string - format: date-time - total_deposit: - type: array - items: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the custom - method - - signatures required by gogoproto. - voting_start_time: - type: string - format: date-time - voting_end_time: - type: string - format: date-time - description: Proposal defines the core field members of a governance proposal. - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: |- - QueryProposalsResponse is the response type for the Query/Proposals RPC - method. - cosmos.gov.v1beta1.QueryTallyResultResponse: - type: object - properties: - tally: - type: object - properties: - 'yes': - type: string - abstain: - type: string - 'no': - type: string - no_with_veto: - type: string - description: TallyResult defines a standard tally for a governance proposal. - description: >- - QueryTallyResultResponse is the response type for the Query/Tally RPC - method. - cosmos.gov.v1beta1.QueryVoteResponse: - type: object - properties: - vote: - type: object - properties: - proposal_id: - type: string - format: uint64 - voter: - type: string - option: - description: >- - Deprecated: Prefer to use `options` instead. This field is set in - queries - - if and only if `len(options) == 1` and that option has weight 1. - In all - - other cases, this field will default to VOTE_OPTION_UNSPECIFIED. 
- type: string - enum: - - VOTE_OPTION_UNSPECIFIED - - VOTE_OPTION_YES - - VOTE_OPTION_ABSTAIN - - VOTE_OPTION_NO - - VOTE_OPTION_NO_WITH_VETO - default: VOTE_OPTION_UNSPECIFIED - options: - type: array - items: - type: object - properties: - option: - type: string - enum: - - VOTE_OPTION_UNSPECIFIED - - VOTE_OPTION_YES - - VOTE_OPTION_ABSTAIN - - VOTE_OPTION_NO - - VOTE_OPTION_NO_WITH_VETO - default: VOTE_OPTION_UNSPECIFIED - description: >- - VoteOption enumerates the valid vote options for a given - governance proposal. - - - VOTE_OPTION_UNSPECIFIED: VOTE_OPTION_UNSPECIFIED defines a no-op vote option. - - VOTE_OPTION_YES: VOTE_OPTION_YES defines a yes vote option. - - VOTE_OPTION_ABSTAIN: VOTE_OPTION_ABSTAIN defines an abstain vote option. - - VOTE_OPTION_NO: VOTE_OPTION_NO defines a no vote option. - - VOTE_OPTION_NO_WITH_VETO: VOTE_OPTION_NO_WITH_VETO defines a no with veto vote option. - weight: - type: string - description: WeightedVoteOption defines a unit of vote for vote split. - description: |- - Vote defines a vote on a governance proposal. - A Vote consists of a proposal ID, the voter, and the vote option. - description: QueryVoteResponse is the response type for the Query/Vote RPC method. - cosmos.gov.v1beta1.QueryVotesResponse: - type: object - properties: - votes: - type: array - items: - type: object - properties: - proposal_id: - type: string - format: uint64 - voter: + timestamp: type: string - option: description: >- - Deprecated: Prefer to use `options` instead. This field is set - in queries + Time of the previous block. For heights > 1, it's the weighted + median of - if and only if `len(options) == 1` and that option has weight 1. - In all + the timestamps of the valid votes in the block.LastCommit. For + height == 1, - other cases, this field will default to VOTE_OPTION_UNSPECIFIED. - type: string - enum: - - VOTE_OPTION_UNSPECIFIED - - VOTE_OPTION_YES - - VOTE_OPTION_ABSTAIN - - VOTE_OPTION_NO - - VOTE_OPTION_NO_WITH_VETO - default: VOTE_OPTION_UNSPECIFIED - options: + it's genesis time. + events: type: array items: type: object properties: - option: - type: string - enum: - - VOTE_OPTION_UNSPECIFIED - - VOTE_OPTION_YES - - VOTE_OPTION_ABSTAIN - - VOTE_OPTION_NO - - VOTE_OPTION_NO_WITH_VETO - default: VOTE_OPTION_UNSPECIFIED - description: >- - VoteOption enumerates the valid vote options for a given - governance proposal. - - - VOTE_OPTION_UNSPECIFIED: VOTE_OPTION_UNSPECIFIED defines a no-op vote option. - - VOTE_OPTION_YES: VOTE_OPTION_YES defines a yes vote option. - - VOTE_OPTION_ABSTAIN: VOTE_OPTION_ABSTAIN defines an abstain vote option. - - VOTE_OPTION_NO: VOTE_OPTION_NO defines a no vote option. - - VOTE_OPTION_NO_WITH_VETO: VOTE_OPTION_NO_WITH_VETO defines a no with veto vote option. - weight: + type: type: string - description: WeightedVoteOption defines a unit of vote for vote split. - description: |- - Vote defines a vote on a governance proposal. - A Vote consists of a proposal ID, the voter, and the vote option. - description: votes defined the queried votes. - pagination: - description: pagination defines the pagination in the response. 
- type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: QueryVotesResponse is the response type for the Query/Votes RPC method. - cosmos.gov.v1beta1.TallyParams: - type: object - properties: - quorum: - type: string - format: byte - description: |- - Minimum percentage of total stake needed to vote for a result to be - considered valid. - threshold: - type: string - format: byte - description: >- - Minimum proportion of Yes votes for proposal to pass. Default value: - 0.5. - veto_threshold: - type: string - format: byte - description: |- - Minimum value of Veto votes to Total votes ratio for proposal to be - vetoed. Default value: 1/3. - description: TallyParams defines the params for tallying votes on governance proposals. - cosmos.gov.v1beta1.TallyResult: - type: object - properties: - 'yes': - type: string - abstain: - type: string - 'no': - type: string - no_with_veto: - type: string - description: TallyResult defines a standard tally for a governance proposal. - cosmos.gov.v1beta1.Vote: - type: object - properties: - proposal_id: - type: string - format: uint64 - voter: - type: string - option: - description: >- - Deprecated: Prefer to use `options` instead. This field is set in - queries - - if and only if `len(options) == 1` and that option has weight 1. In - all + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + index: + type: boolean + description: >- + EventAttribute is a single key-value pair, associated + with an event. + description: >- + Event allows application developers to attach additional + information to - other cases, this field will default to VOTE_OPTION_UNSPECIFIED. - type: string - enum: - - VOTE_OPTION_UNSPECIFIED - - VOTE_OPTION_YES - - VOTE_OPTION_ABSTAIN - - VOTE_OPTION_NO - - VOTE_OPTION_NO_WITH_VETO - default: VOTE_OPTION_UNSPECIFIED - options: - type: array - items: - type: object - properties: - option: - type: string - enum: - - VOTE_OPTION_UNSPECIFIED - - VOTE_OPTION_YES - - VOTE_OPTION_ABSTAIN - - VOTE_OPTION_NO - - VOTE_OPTION_NO_WITH_VETO - default: VOTE_OPTION_UNSPECIFIED - description: >- - VoteOption enumerates the valid vote options for a given - governance proposal. - - - VOTE_OPTION_UNSPECIFIED: VOTE_OPTION_UNSPECIFIED defines a no-op vote option. - - VOTE_OPTION_YES: VOTE_OPTION_YES defines a yes vote option. - - VOTE_OPTION_ABSTAIN: VOTE_OPTION_ABSTAIN defines an abstain vote option. - - VOTE_OPTION_NO: VOTE_OPTION_NO defines a no vote option. - - VOTE_OPTION_NO_WITH_VETO: VOTE_OPTION_NO_WITH_VETO defines a no with veto vote option. - weight: - type: string - description: WeightedVoteOption defines a unit of vote for vote split. - description: |- - Vote defines a vote on a governance proposal. - A Vote consists of a proposal ID, the voter, and the vote option. - cosmos.gov.v1beta1.VoteOption: - type: string - enum: - - VOTE_OPTION_UNSPECIFIED - - VOTE_OPTION_YES - - VOTE_OPTION_ABSTAIN - - VOTE_OPTION_NO - - VOTE_OPTION_NO_WITH_VETO - default: VOTE_OPTION_UNSPECIFIED - description: >- - VoteOption enumerates the valid vote options for a given governance - proposal. + ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and + ResponseDeliverTx. 
- - VOTE_OPTION_UNSPECIFIED: VOTE_OPTION_UNSPECIFIED defines a no-op vote option. - - VOTE_OPTION_YES: VOTE_OPTION_YES defines a yes vote option. - - VOTE_OPTION_ABSTAIN: VOTE_OPTION_ABSTAIN defines an abstain vote option. - - VOTE_OPTION_NO: VOTE_OPTION_NO defines a no vote option. - - VOTE_OPTION_NO_WITH_VETO: VOTE_OPTION_NO_WITH_VETO defines a no with veto vote option. - cosmos.gov.v1beta1.VotingParams: - type: object - properties: - voting_period: - type: string - description: Length of the voting period. - description: VotingParams defines the params for voting on governance proposals. - cosmos.gov.v1beta1.WeightedVoteOption: - type: object - properties: - option: - type: string - enum: - - VOTE_OPTION_UNSPECIFIED - - VOTE_OPTION_YES - - VOTE_OPTION_ABSTAIN - - VOTE_OPTION_NO - - VOTE_OPTION_NO_WITH_VETO - default: VOTE_OPTION_UNSPECIFIED - description: >- - VoteOption enumerates the valid vote options for a given governance - proposal. + Later, transactions may be queried using these events. + description: >- + Events defines all the events emitted by processing a + transaction. Note, - - VOTE_OPTION_UNSPECIFIED: VOTE_OPTION_UNSPECIFIED defines a no-op vote option. - - VOTE_OPTION_YES: VOTE_OPTION_YES defines a yes vote option. - - VOTE_OPTION_ABSTAIN: VOTE_OPTION_ABSTAIN defines an abstain vote option. - - VOTE_OPTION_NO: VOTE_OPTION_NO defines a no vote option. - - VOTE_OPTION_NO_WITH_VETO: VOTE_OPTION_NO_WITH_VETO defines a no with veto vote option. - weight: - type: string - description: WeightedVoteOption defines a unit of vote for vote split. - cosmos.mint.v1beta1.Params: - type: object - properties: - mint_denom: - type: string - title: type of coin to mint - inflation_rate_change: - type: string - title: maximum annual change in inflation rate - inflation_max: - type: string - title: maximum inflation rate - inflation_min: - type: string - title: minimum inflation rate - goal_bonded: - type: string - title: goal of percent bonded atoms - blocks_per_year: - type: string - format: uint64 - title: expected blocks per year - description: Params holds parameters for the mint module. - cosmos.mint.v1beta1.QueryAnnualProvisionsResponse: - type: object - properties: - annual_provisions: - type: string - format: byte - description: annual_provisions is the current minting annual provisions value. - description: |- - QueryAnnualProvisionsResponse is the response type for the - Query/AnnualProvisions RPC method. - cosmos.mint.v1beta1.QueryInflationResponse: - type: object - properties: - inflation: - type: string - format: byte - description: inflation is the current minting inflation value. - description: |- - QueryInflationResponse is the response type for the Query/Inflation RPC - method. - cosmos.mint.v1beta1.QueryParamsResponse: - type: object - properties: - params: - description: params defines the parameters of the module. + these events include those emitted by processing all the + messages and those + + emitted from the ante. Whereas Logs contains the events, with + + additional metadata, emitted only by processing the messages. + + + Since: cosmos-sdk 0.42.11, 0.44.5, 0.45 + description: >- + TxResponse defines a structure containing relevant tx data and + metadata. The + + tags are stringified and the log is JSON decoded. + description: tx_responses is the list of queried TxResponses. + pagination: + description: |- + pagination defines a pagination for the response. + Deprecated post v0.46.x: use total instead. 
type: object properties: - mint_denom: - type: string - title: type of coin to mint - inflation_rate_change: - type: string - title: maximum annual change in inflation rate - inflation_max: - type: string - title: maximum inflation rate - inflation_min: - type: string - title: minimum inflation rate - goal_bonded: + next_key: type: string - title: goal of percent bonded atoms - blocks_per_year: + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: type: string format: uint64 - title: expected blocks per year - description: QueryParamsResponse is the response type for the Query/Params RPC method. - cosmos.params.v1beta1.ParamChange: - type: object - properties: - subspace: - type: string - key: - type: string - value: + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + total: type: string + format: uint64 + title: total is total number of results available description: |- - ParamChange defines an individual parameter change, for use in - ParameterChangeProposal. - cosmos.params.v1beta1.QueryParamsResponse: + GetTxsEventResponse is the response type for the Service.TxsByEvents + RPC method. + cosmos.tx.v1beta1.ModeInfo: type: object properties: - param: - description: param defines the queried parameter. + single: + title: single represents a single signer type: object properties: - subspace: - type: string - key: - type: string - value: + mode: + title: mode is the signing mode of the single signer type: string - description: QueryParamsResponse is response type for the Query/Params RPC method. - cosmos.slashing.v1beta1.Params: - type: object - properties: - signed_blocks_window: - type: string - format: int64 - min_signed_per_window: - type: string - format: byte - downtime_jail_duration: - type: string - slash_fraction_double_sign: - type: string - format: byte - slash_fraction_downtime: - type: string - format: byte - description: Params represents the parameters used for by the slashing module. - cosmos.slashing.v1beta1.QueryParamsResponse: + enum: + - SIGN_MODE_UNSPECIFIED + - SIGN_MODE_DIRECT + - SIGN_MODE_TEXTUAL + - SIGN_MODE_DIRECT_AUX + - SIGN_MODE_LEGACY_AMINO_JSON + - SIGN_MODE_EIP_191 + default: SIGN_MODE_UNSPECIFIED + description: >- + SignMode represents a signing mode with its own security + guarantees. + + + This enum should be considered a registry of all known sign modes + + in the Cosmos ecosystem. Apps are not expected to support all + known + + sign modes. Apps that would like to support custom sign modes are + + encouraged to open a small PR against this file to add a new case + + to this SignMode enum describing their sign mode so that different + + apps have a consistent version of this enum. + + - SIGN_MODE_UNSPECIFIED: SIGN_MODE_UNSPECIFIED specifies an unknown signing mode and will be + rejected. + - SIGN_MODE_DIRECT: SIGN_MODE_DIRECT specifies a signing mode which uses SignDoc and is + verified with raw bytes from Tx. + - SIGN_MODE_TEXTUAL: SIGN_MODE_TEXTUAL is a future signing mode that will verify some + human-readable textual representation on top of the binary + representation + + from SIGN_MODE_DIRECT. It is currently not supported. + - SIGN_MODE_DIRECT_AUX: SIGN_MODE_DIRECT_AUX specifies a signing mode which uses + SignDocDirectAux. 
As opposed to SIGN_MODE_DIRECT, this sign mode + does not + + require signers signing over other signers' `signer_info`. It also + allows + + for adding Tips in transactions. + + + Since: cosmos-sdk 0.46 + - SIGN_MODE_LEGACY_AMINO_JSON: SIGN_MODE_LEGACY_AMINO_JSON is a backwards compatibility mode which uses + Amino JSON and will be removed in the future. + - SIGN_MODE_EIP_191: SIGN_MODE_EIP_191 specifies the sign mode for EIP 191 signing on the Cosmos + SDK. Ref: https://eips.ethereum.org/EIPS/eip-191 + + + Currently, SIGN_MODE_EIP_191 is registered as a SignMode enum + variant, + + but is not implemented on the SDK by default. To enable EIP-191, + you need + + to pass a custom `TxConfig` that has an implementation of + + `SignModeHandler` for EIP-191. The SDK may decide to fully support + + EIP-191 in the future. + + + Since: cosmos-sdk 0.45.2 + multi: + $ref: '#/definitions/cosmos.tx.v1beta1.ModeInfo.Multi' + title: multi represents a nested multisig signer + description: ModeInfo describes the signing mode of a single or nested multisig signer. + cosmos.tx.v1beta1.ModeInfo.Multi: type: object properties: - params: + bitarray: + title: bitarray specifies which keys within the multisig are signing type: object properties: - signed_blocks_window: - type: string + extra_bits_stored: + type: integer format: int64 - min_signed_per_window: - type: string - format: byte - downtime_jail_duration: - type: string - slash_fraction_double_sign: - type: string - format: byte - slash_fraction_downtime: + elems: type: string format: byte - description: Params represents the parameters used for by the slashing module. - title: QueryParamsResponse is the response type for the Query/Params RPC method - cosmos.slashing.v1beta1.QuerySigningInfoResponse: + description: >- + CompactBitArray is an implementation of a space efficient bit array. + + This is used to ensure that the encoded data takes up a minimal amount + of + + space after proto encoding. + + This is not thread safe, and is not intended for concurrent usage. + mode_infos: + type: array + items: + $ref: '#/definitions/cosmos.tx.v1beta1.ModeInfo' + title: |- + mode_infos is the corresponding modes of the signers of the multisig + which could include nested multisig public keys + title: Multi is the mode info for a multisig public key + cosmos.tx.v1beta1.ModeInfo.Single: type: object properties: - val_signing_info: + mode: + title: mode is the signing mode of the single signer + type: string + enum: + - SIGN_MODE_UNSPECIFIED + - SIGN_MODE_DIRECT + - SIGN_MODE_TEXTUAL + - SIGN_MODE_DIRECT_AUX + - SIGN_MODE_LEGACY_AMINO_JSON + - SIGN_MODE_EIP_191 + default: SIGN_MODE_UNSPECIFIED + description: >- + SignMode represents a signing mode with its own security guarantees. + + + This enum should be considered a registry of all known sign modes + + in the Cosmos ecosystem. Apps are not expected to support all known + + sign modes. Apps that would like to support custom sign modes are + + encouraged to open a small PR against this file to add a new case + + to this SignMode enum describing their sign mode so that different + + apps have a consistent version of this enum. + + - SIGN_MODE_UNSPECIFIED: SIGN_MODE_UNSPECIFIED specifies an unknown signing mode and will be + rejected. + - SIGN_MODE_DIRECT: SIGN_MODE_DIRECT specifies a signing mode which uses SignDoc and is + verified with raw bytes from Tx. 
+ - SIGN_MODE_TEXTUAL: SIGN_MODE_TEXTUAL is a future signing mode that will verify some + human-readable textual representation on top of the binary + representation + + from SIGN_MODE_DIRECT. It is currently not supported. + - SIGN_MODE_DIRECT_AUX: SIGN_MODE_DIRECT_AUX specifies a signing mode which uses + SignDocDirectAux. As opposed to SIGN_MODE_DIRECT, this sign mode does + not + + require signers signing over other signers' `signer_info`. It also + allows + + for adding Tips in transactions. + + + Since: cosmos-sdk 0.46 + - SIGN_MODE_LEGACY_AMINO_JSON: SIGN_MODE_LEGACY_AMINO_JSON is a backwards compatibility mode which uses + Amino JSON and will be removed in the future. + - SIGN_MODE_EIP_191: SIGN_MODE_EIP_191 specifies the sign mode for EIP 191 signing on the Cosmos + SDK. Ref: https://eips.ethereum.org/EIPS/eip-191 + + + Currently, SIGN_MODE_EIP_191 is registered as a SignMode enum variant, + + but is not implemented on the SDK by default. To enable EIP-191, you + need + + to pass a custom `TxConfig` that has an implementation of + + `SignModeHandler` for EIP-191. The SDK may decide to fully support + + EIP-191 in the future. + + + Since: cosmos-sdk 0.45.2 + title: |- + Single is the mode info for a single signer. It is structured as a message + to allow for additional fields such as locale for SIGN_MODE_TEXTUAL in the + future + cosmos.tx.v1beta1.OrderBy: + type: string + enum: + - ORDER_BY_UNSPECIFIED + - ORDER_BY_ASC + - ORDER_BY_DESC + default: ORDER_BY_UNSPECIFIED + description: >- + - ORDER_BY_UNSPECIFIED: ORDER_BY_UNSPECIFIED specifies an unknown sorting + order. OrderBy defaults to ASC in this case. + - ORDER_BY_ASC: ORDER_BY_ASC defines ascending order + - ORDER_BY_DESC: ORDER_BY_DESC defines descending order + title: OrderBy defines the sorting order + cosmos.tx.v1beta1.SignerInfo: + type: object + properties: + public_key: type: object properties: - address: - type: string - start_height: - type: string - format: int64 - title: Height at which validator was first a candidate OR was unjailed - index_offset: + type_url: type: string - format: int64 description: >- - Index which is incremented each time the validator was a bonded + A URL/resource name that uniquely identifies the type of the + serialized - in a block and may have signed a precommit or not. This in - conjunction with the + protocol buffer message. This string must contain at least - `SignedBlocksWindow` param determines the index in the - `MissedBlocksBitArray`. - jailed_until: - type: string - format: date-time - description: >- - Timestamp until which the validator is jailed due to liveness - downtime. - tombstoned: - type: boolean - format: boolean - description: >- - Whether or not a validator has been tombstoned (killed out of - validator set). It is set + one "/" character. The last segment of the URL's path must + represent - once the validator commits an equivocation or for any other - configured misbehiavor. - missed_blocks_counter: - type: string - format: int64 - description: >- - A counter kept to avoid unnecessary array reads. + the fully qualified name of the type (as in - Note that `Sum(MissedBlocksBitArray)` always equals - `MissedBlocksCounter`. - description: >- - ValidatorSigningInfo defines a validator's signing info for monitoring - their + `path/google.protobuf.Duration`). The name should be in a + canonical form - liveness activity. 
- title: val_signing_info is the signing info of requested val cons address - title: >- - QuerySigningInfoResponse is the response type for the Query/SigningInfo - RPC + (e.g., leading "." is not accepted). - method - cosmos.slashing.v1beta1.QuerySigningInfosResponse: - type: object - properties: - info: - type: array - items: - type: object - properties: - address: - type: string - start_height: - type: string - format: int64 - title: Height at which validator was first a candidate OR was unjailed - index_offset: - type: string - format: int64 - description: >- - Index which is incremented each time the validator was a bonded - in a block and may have signed a precommit or not. This in - conjunction with the + In practice, teams usually precompile into the binary all types + that they - `SignedBlocksWindow` param determines the index in the - `MissedBlocksBitArray`. - jailed_until: - type: string - format: date-time - description: >- - Timestamp until which the validator is jailed due to liveness - downtime. - tombstoned: - type: boolean - format: boolean - description: >- - Whether or not a validator has been tombstoned (killed out of - validator set). It is set + expect it to use in the context of Any. However, for URLs which + use the - once the validator commits an equivocation or for any other - configured misbehiavor. - missed_blocks_counter: - type: string - format: int64 - description: >- - A counter kept to avoid unnecessary array reads. + scheme `http`, `https`, or no scheme, one can optionally set up a + type - Note that `Sum(MissedBlocksBitArray)` always equals - `MissedBlocksCounter`. - description: >- - ValidatorSigningInfo defines a validator's signing info for - monitoring their + server that maps type URLs to message definitions as follows: - liveness activity. - title: info is the signing info of all validators - pagination: - type: object - properties: - next_key: + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might be + + used with implementation specific semantics. + value: type: string format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + description: >- + Must be a valid serialized protocol buffer of the above specified + type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. 
+ + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular - was set, its value is undefined otherwise - description: |- - PageResponse is to be embedded in gRPC response messages where the - corresponding request message has used PageRequest. + representation of the deserialized, embedded message, with an - message SomeResponse { - repeated Bar results = 1; - PageResponse page = 2; - } - title: >- - QuerySigningInfosResponse is the response type for the Query/SigningInfos - RPC + additional field `@type` which contains the type URL. Example: - method - cosmos.slashing.v1beta1.ValidatorSigningInfo: - type: object - properties: - address: - type: string - start_height: - type: string - format: int64 - title: Height at which validator was first a candidate OR was unjailed - index_offset: - type: string - format: int64 - description: >- - Index which is incremented each time the validator was a bonded + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - in a block and may have signed a precommit or not. This in conjunction - with the + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - `SignedBlocksWindow` param determines the index in the - `MissedBlocksBitArray`. - jailed_until: - type: string - format: date-time - description: >- - Timestamp until which the validator is jailed due to liveness - downtime. - tombstoned: - type: boolean - format: boolean - description: >- - Whether or not a validator has been tombstoned (killed out of - validator set). It is set + If the embedded message type is well-known and has a custom JSON - once the validator commits an equivocation or for any other configured - misbehiavor. - missed_blocks_counter: - type: string - format: int64 - description: >- - A counter kept to avoid unnecessary array reads. + representation, that representation will be embedded adding a field - Note that `Sum(MissedBlocksBitArray)` always equals - `MissedBlocksCounter`. - description: >- - ValidatorSigningInfo defines a validator's signing info for monitoring - their + `value` which holds the custom JSON in addition to the `@type` - liveness activity. - cosmos.staking.v1beta1.BondStatus: - type: string - enum: - - BOND_STATUS_UNSPECIFIED - - BOND_STATUS_UNBONDED - - BOND_STATUS_UNBONDING - - BOND_STATUS_BONDED - default: BOND_STATUS_UNSPECIFIED - description: |- - BondStatus is the status of a validator. + field. Example (for message [google.protobuf.Duration][]): - - BOND_STATUS_UNSPECIFIED: UNSPECIFIED defines an invalid validator status. - - BOND_STATUS_UNBONDED: UNBONDED defines a validator that is not bonded. - - BOND_STATUS_UNBONDING: UNBONDING defines a validator that is unbonding. 
- - BOND_STATUS_BONDED: BONDED defines a validator that is bonded. - cosmos.staking.v1beta1.Commission: - type: object - properties: - commission_rates: - description: >- - commission_rates defines the initial commission rates to be used for - creating a validator. - type: object - properties: - rate: - type: string - description: rate is the commission rate charged to delegators, as a fraction. - max_rate: - type: string - description: >- - max_rate defines the maximum commission rate which validator can - ever charge, as a fraction. - max_change_rate: - type: string - description: >- - max_change_rate defines the maximum daily increase of the - validator commission, as a fraction. - update_time: - type: string - format: date-time - description: update_time is the last time the commission rate was changed. - description: Commission defines commission parameters for a given validator. - cosmos.staking.v1beta1.CommissionRates: - type: object - properties: - rate: - type: string - description: rate is the commission rate charged to delegators, as a fraction. - max_rate: - type: string - description: >- - max_rate defines the maximum commission rate which validator can ever - charge, as a fraction. - max_change_rate: + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + mode_info: + $ref: '#/definitions/cosmos.tx.v1beta1.ModeInfo' + title: |- + mode_info describes the signing mode of the signer and is a nested + structure to support nested multisig pubkey's + sequence: type: string + format: uint64 description: >- - max_change_rate defines the maximum daily increase of the validator - commission, as a fraction. - description: >- - CommissionRates defines the initial commission rates to be used for - creating + sequence is the sequence of the account, which describes the - a validator. - cosmos.staking.v1beta1.Delegation: + number of committed transactions signed by a given address. It is used + to + + prevent replay attacks. + description: |- + SignerInfo describes the public key and signing mode of a single top-level + signer. + cosmos.tx.v1beta1.SimulateRequest: type: object properties: - delegator_address: - type: string - description: delegator_address is the bech32-encoded address of the delegator. - validator_address: - type: string - description: validator_address is the bech32-encoded address of the validator. - shares: + tx: + $ref: '#/definitions/cosmos.tx.v1beta1.Tx' + description: |- + tx is the transaction to simulate. + Deprecated. Send raw tx bytes instead. + tx_bytes: type: string - description: shares define the delegation shares received. + format: byte + description: |- + tx_bytes is the raw transaction. + + Since: cosmos-sdk 0.43 description: |- - Delegation represents the bond with tokens held by an account. It is - owned by one delegator, and is associated with the voting power of one - validator. - cosmos.staking.v1beta1.DelegationResponse: + SimulateRequest is the request type for the Service.Simulate + RPC method. + cosmos.tx.v1beta1.SimulateResponse: type: object properties: - delegation: + gas_info: + description: gas_info is the information about gas used in the simulation. type: object properties: - delegator_address: - type: string - description: delegator_address is the bech32-encoded address of the delegator. - validator_address: + gas_wanted: type: string - description: validator_address is the bech32-encoded address of the validator. 
- shares: + format: uint64 + description: >- + GasWanted is the maximum units of work we allow this tx to + perform. + gas_used: type: string - description: shares define the delegation shares received. - description: |- - Delegation represents the bond with tokens held by an account. It is - owned by one delegator, and is associated with the voting power of one - validator. - balance: + format: uint64 + description: GasUsed is the amount of gas actually consumed. + result: + description: result is the result of the simulation. type: object properties: - denom: + data: type: string - amount: + format: byte + description: >- + Data is any data returned from message or handler execution. It + MUST be + + length prefixed in order to separate data from multiple message + executions. + + Deprecated. This field is still populated, but prefer msg_response + instead + + because it also contains the Msg response typeURL. + log: type: string - description: |- - Coin defines a token with a denomination and an amount. + description: >- + Log contains the log information from message or handler + execution. + events: + type: array + items: + type: object + properties: + type: + type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + index: + type: boolean + description: >- + EventAttribute is a single key-value pair, associated with + an event. + description: >- + Event allows application developers to attach additional + information to - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. - description: |- - DelegationResponse is equivalent to Delegation except that it contains a - balance in addition to shares which is more suitable for client responses. - cosmos.staking.v1beta1.Description: - type: object - properties: - moniker: - type: string - description: moniker defines a human-readable name for the validator. - identity: - type: string - description: >- - identity defines an optional identity signature (ex. UPort or - Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: security_contact defines an optional email for security contact. - details: - type: string - description: details define other optional details. - description: Description defines a validator description. - cosmos.staking.v1beta1.HistoricalInfo: - type: object - properties: - header: - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 + ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and + ResponseDeliverTx. + + Later, transactions may be queried using these events. description: >- - Consensus captures the consensus rules for processing a block in - the blockchain, + Events contains a slice of Event objects that were emitted during + message - including all blockchain data structures and the rules of the - application's + or handler execution. + msg_responses: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - state transition machine. 
- chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. - valset: + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally set + up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... 
+ } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + msg_responses contains the Msg handler responses type packed in + Anys. + + + Since: cosmos-sdk 0.46 + description: |- + SimulateResponse is the response type for the + Service.SimulateRPC method. + cosmos.tx.v1beta1.Tip: + type: object + properties: + amount: type: array items: type: object properties: - operator_address: + denom: type: string - description: >- - operator_address defines the address of the validator's - operator; bech encoded in JSON. - consensus_pubkey: + amount: + type: string + description: |- + Coin defines a token with a denomination and an amount. + + NOTE: The amount field is an Int which implements the custom method + signatures required by gogoproto. + title: amount is the amount of the tip + tipper: + type: string + title: tipper is the address of the account paying for the tip + description: |- + Tip is the tip used for meta-transactions. + + Since: cosmos-sdk 0.46 + cosmos.tx.v1beta1.Tx: + type: object + properties: + body: + title: body is the processable content of the transaction + type: object + properties: + messages: + type: array + items: type: object properties: type_url: @@ -40937,7 +64815,7 @@ definitions: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -40947,13 +64825,16 @@ definitions: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -40974,7 +64855,6 @@ definitions: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -41007,671 +64887,225 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - jailed: - type: boolean - format: boolean - description: >- - jailed defined whether the validator has been jailed from bonded - status or not. - status: - description: status is the validator status (bonded/unbonding/unbonded). - type: string - enum: - - BOND_STATUS_UNSPECIFIED - - BOND_STATUS_UNBONDED - - BOND_STATUS_UNBONDING - - BOND_STATUS_BONDED - default: BOND_STATUS_UNSPECIFIED - tokens: - type: string - description: tokens define the delegated tokens (incl. self-delegation). 
- delegator_shares: - type: string - description: >- - delegator_shares defines total shares issued to a validator's - delegators. - description: - description: description defines the description terms for the validator. - type: object - properties: - moniker: - type: string - description: moniker defines a human-readable name for the validator. - identity: - type: string - description: >- - identity defines an optional identity signature (ex. UPort - or Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: >- - security_contact defines an optional email for security - contact. - details: - type: string - description: details define other optional details. - unbonding_height: - type: string - format: int64 - description: >- - unbonding_height defines, if unbonding, the height at which this - validator has begun unbonding. - unbonding_time: - type: string - format: date-time - description: >- - unbonding_time defines, if unbonding, the min time for the - validator to complete unbonding. - commission: - description: commission defines the commission parameters. - type: object - properties: - commission_rates: - description: >- - commission_rates defines the initial commission rates to be - used for creating a validator. - type: object - properties: - rate: - type: string - description: >- - rate is the commission rate charged to delegators, as a - fraction. - max_rate: - type: string - description: >- - max_rate defines the maximum commission rate which - validator can ever charge, as a fraction. - max_change_rate: - type: string - description: >- - max_change_rate defines the maximum daily increase of - the validator commission, as a fraction. - update_time: - type: string - format: date-time - description: >- - update_time is the last time the commission rate was - changed. - min_self_delegation: - type: string - description: >- - min_self_delegation is the validator's self declared minimum - self delegation. - description: >- - Validator defines a validator, together with the total amount of the - - Validator's bond shares and their exchange rate to coins. Slashing - results in - - a decrease in the exchange rate, allowing correct calculation of - future - - undelegations without iterating over delegators. When coins are - delegated to - - this validator, the validator is credited with a delegation whose - number of - - bond shares is based on the amount of coins delegated divided by the - current - - exchange rate. Voting power can be calculated as total bonded shares + description: >- + messages is a list of messages to be executed. The required + signers of - multiplied by exchange rate. - description: >- - HistoricalInfo contains header and validator information for a given - block. + those messages define the number and order of elements in + AuthInfo's - It is stored as part of staking module's state, which persists the `n` - most + signer_infos and Tx's signatures. Each required signer address is + added to - recent HistoricalInfo + the list only the first time it occurs. - (`n` is set by the staking module's `historical_entries` parameter). - cosmos.staking.v1beta1.Params: - type: object - properties: - unbonding_time: - type: string - description: unbonding_time is the time duration of unbonding. - max_validators: - type: integer - format: int64 - description: max_validators is the maximum number of validators. 
- max_entries: - type: integer - format: int64 - description: >- - max_entries is the max entries for either unbonding delegation or - redelegation (per pair/trio). - historical_entries: - type: integer - format: int64 - description: historical_entries is the number of historical entries to persist. - bond_denom: - type: string - description: bond_denom defines the bondable coin denomination. - description: Params defines the parameters for the staking module. - cosmos.staking.v1beta1.Pool: - type: object - properties: - not_bonded_tokens: - type: string - bonded_tokens: - type: string - description: |- - Pool is used for tracking bonded and not-bonded token supply of the bond - denomination. - cosmos.staking.v1beta1.QueryDelegationResponse: - type: object - properties: - delegation_response: - type: object - properties: - delegation: - type: object - properties: - delegator_address: - type: string - description: >- - delegator_address is the bech32-encoded address of the - delegator. - validator_address: - type: string - description: >- - validator_address is the bech32-encoded address of the - validator. - shares: - type: string - description: shares define the delegation shares received. - description: >- - Delegation represents the bond with tokens held by an account. It - is + By convention, the first required signer (usually from the first + message) - owned by one delegator, and is associated with the voting power of - one + is referred to as the primary signer and pays the fee for the + whole - validator. - balance: - type: object - properties: - denom: - type: string - amount: - type: string + transaction. + memo: + type: string description: >- - Coin defines a token with a denomination and an amount. - - - NOTE: The amount field is an Int which implements the custom - method + memo is any arbitrary note/comment to be added to the transaction. - signatures required by gogoproto. - description: >- - DelegationResponse is equivalent to Delegation except that it contains - a + WARNING: in clients, any publicly exposed text should not be + called memo, - balance in addition to shares which is more suitable for client - responses. - description: >- - QueryDelegationResponse is response type for the Query/Delegation RPC - method. - cosmos.staking.v1beta1.QueryDelegatorDelegationsResponse: - type: object - properties: - delegation_responses: - type: array - items: - type: object - properties: - delegation: + but should be called `note` instead (see + https://github.com/cosmos/cosmos-sdk/issues/9122). + timeout_height: + type: string + format: uint64 + title: |- + timeout is the block height after which this transaction will not + be processed by the chain + extension_options: + type: array + items: type: object properties: - delegator_address: - type: string - description: >- - delegator_address is the bech32-encoded address of the - delegator. - validator_address: + type_url: type: string description: >- - validator_address is the bech32-encoded address of the - validator. - shares: - type: string - description: shares define the delegation shares received. - description: >- - Delegation represents the bond with tokens held by an account. - It is - - owned by one delegator, and is associated with the voting power - of one - - validator. - balance: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. 
- - - NOTE: The amount field is an Int which implements the custom - method - - signatures required by gogoproto. - description: >- - DelegationResponse is equivalent to Delegation except that it - contains a - - balance in addition to shares which is more suitable for client - responses. - description: delegation_responses defines all the delegations' info of a delegator. - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: |- - QueryDelegatorDelegationsResponse is response type for the - Query/DelegatorDelegations RPC method. - cosmos.staking.v1beta1.QueryDelegatorUnbondingDelegationsResponse: - type: object - properties: - unbonding_responses: - type: array - items: - type: object - properties: - delegator_address: - type: string - description: >- - delegator_address is the bech32-encoded address of the - delegator. - validator_address: - type: string - description: >- - validator_address is the bech32-encoded address of the - validator. - entries: - type: array - items: - type: object - properties: - creation_height: - type: string - format: int64 - description: >- - creation_height is the height which the unbonding took - place. - completion_time: - type: string - format: date-time - description: completion_time is the unix time for unbonding completion. - initial_balance: - type: string - description: >- - initial_balance defines the tokens initially scheduled to - receive at completion. - balance: - type: string - description: balance defines the tokens to receive at completion. - description: >- - UnbondingDelegationEntry defines an unbonding object with - relevant metadata. - description: entries are the unbonding delegation entries. - description: >- - UnbondingDelegation stores all of a single delegator's unbonding - bonds - - for a single validator in an time-ordered list. - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - - was set, its value is undefined otherwise - description: |- - QueryUnbondingDelegatorDelegationsResponse is response type for the - Query/UnbondingDelegatorDelegations RPC method. - cosmos.staking.v1beta1.QueryDelegatorValidatorResponse: - type: object - properties: - validator: - type: object - properties: - operator_address: - type: string - description: >- - operator_address defines the address of the validator's operator; - bech encoded in JSON. - consensus_pubkey: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized - - protocol buffer message. This string must contain at least - - one "/" character. The last segment of the URL's path must - represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). 
The name should be in a - canonical form + A URL/resource name that uniquely identifies the type of the + serialized - (e.g., leading "." is not accepted). + protocol buffer message. This string must contain at least + one "/" character. The last segment of the URL's path must + represent - In practice, teams usually precompile into the binary all - types that they + the fully qualified name of the type (as in - expect it to use in the context of Any. However, for URLs - which use the + `path/google.protobuf.Duration`). The name should be in a + canonical form - scheme `http`, `https`, or no scheme, one can optionally set - up a type + (e.g., leading "." is not accepted). - server that maps type URLs to message definitions as follows: + In practice, teams usually precompile into the binary all + types that they - * If no scheme is provided, `https` is assumed. + expect it to use in the context of Any. However, for URLs + which use the - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on - the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + scheme `http`, `https`, or no scheme, one can optionally set + up a type - Note: this functionality is not currently available in the - official + server that maps type URLs to message definitions as + follows: - protobuf release, and it is not used for type URLs beginning - with - type.googleapis.com. + * If no scheme is provided, `https` is assumed. + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - Schemes other than `http`, `https` (or the empty scheme) might - be + Note: this functionality is not currently available in the + official - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message - along with a + protobuf release, and it is not used for type URLs beginning + with - URL that describes the type of the serialized message. + type.googleapis.com. - Protobuf library provides support to pack/unpack Any values in the - form + Schemes other than `http`, `https` (or the empty scheme) + might be - of utility functions or additional generated methods of the Any - type. + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + URL that describes the type of the serialized message. - Example 1: Pack and unpack a message in C++. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + Protobuf library provides support to pack/unpack Any values in + the form - Example 2: Pack and unpack a message in Java. + of utility functions or additional generated methods of the Any + type. 
- Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - Example 3: Pack and unpack a message in Python. + Example 1: Pack and unpack a message in C++. - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) + Foo foo = ...; + Any any; + any.PackFrom(foo); ... + if (any.UnpackTo(&foo)) { + ... + } - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + Example 2: Pack and unpack a message in Java. - The pack methods provided by protobuf library will by default use + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + Example 3: Pack and unpack a message in Python. - methods only use the fully qualified type name after the last '/' + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - in the type URL, for example "foo.bar.com/x/y.z" will yield type + Example 4: Pack and unpack a message in Go - name "y.z". + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + The pack methods provided by protobuf library will by default + use + 'type.googleapis.com/full.type.name' as the type URL and the + unpack - JSON + methods only use the fully qualified type name after the last + '/' - ==== + in the type URL, for example "foo.bar.com/x/y.z" will yield type - The JSON representation of an `Any` value uses the regular + name "y.z". - representation of the deserialized, embedded message, with an - additional field `@type` which contains the type URL. Example: - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + JSON - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - If the embedded message type is well-known and has a custom JSON + The JSON representation of an `Any` value uses the regular - representation, that representation will be embedded adding a - field + representation of the deserialized, embedded message, with an - `value` which holds the custom JSON in addition to the `@type` + additional field `@type` which contains the type URL. Example: - field. Example (for message [google.protobuf.Duration][]): + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - jailed: - type: boolean - format: boolean - description: >- - jailed defined whether the validator has been jailed from bonded - status or not. - status: - description: status is the validator status (bonded/unbonding/unbonded). - type: string - enum: - - BOND_STATUS_UNSPECIFIED - - BOND_STATUS_UNBONDED - - BOND_STATUS_UNBONDING - - BOND_STATUS_BONDED - default: BOND_STATUS_UNSPECIFIED - tokens: - type: string - description: tokens define the delegated tokens (incl. self-delegation). - delegator_shares: - type: string - description: >- - delegator_shares defines total shares issued to a validator's - delegators. - description: - description: description defines the description terms for the validator. 
- type: object - properties: - moniker: - type: string - description: moniker defines a human-readable name for the validator. - identity: - type: string - description: >- - identity defines an optional identity signature (ex. UPort or - Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: >- - security_contact defines an optional email for security - contact. - details: - type: string - description: details define other optional details. - unbonding_height: - type: string - format: int64 - description: >- - unbonding_height defines, if unbonding, the height at which this - validator has begun unbonding. - unbonding_time: - type: string - format: date-time - description: >- - unbonding_time defines, if unbonding, the min time for the - validator to complete unbonding. - commission: - description: commission defines the commission parameters. - type: object - properties: - commission_rates: - description: >- - commission_rates defines the initial commission rates to be - used for creating a validator. - type: object - properties: - rate: - type: string - description: >- - rate is the commission rate charged to delegators, as a - fraction. - max_rate: - type: string - description: >- - max_rate defines the maximum commission rate which - validator can ever charge, as a fraction. - max_change_rate: - type: string - description: >- - max_change_rate defines the maximum daily increase of the - validator commission, as a fraction. - update_time: - type: string - format: date-time - description: update_time is the last time the commission rate was changed. - min_self_delegation: - type: string - description: >- - min_self_delegation is the validator's self declared minimum self - delegation. - description: >- - Validator defines a validator, together with the total amount of the + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - Validator's bond shares and their exchange rate to coins. Slashing - results in + If the embedded message type is well-known and has a custom JSON - a decrease in the exchange rate, allowing correct calculation of - future + representation, that representation will be embedded adding a + field - undelegations without iterating over delegators. When coins are - delegated to + `value` which holds the custom JSON in addition to the `@type` - this validator, the validator is credited with a delegation whose - number of + field. Example (for message [google.protobuf.Duration][]): - bond shares is based on the amount of coins delegated divided by the - current + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + title: >- + extension_options are arbitrary options that can be added by + chains - exchange rate. Voting power can be calculated as total bonded shares + when the default options are not sufficient. If any of these are + present - multiplied by exchange rate. - description: |- - QueryDelegatorValidatorResponse response type for the - Query/DelegatorValidator RPC method. - cosmos.staking.v1beta1.QueryDelegatorValidatorsResponse: - type: object - properties: - validators: - type: array - items: - type: object - properties: - operator_address: - type: string - description: >- - operator_address defines the address of the validator's - operator; bech encoded in JSON. 
- consensus_pubkey: + and can't be handled, the transaction will be rejected + non_critical_extension_options: + type: array + items: type: object properties: type_url: @@ -41741,1893 +65175,1312 @@ definitions: `Any` contains an arbitrary serialized protocol buffer message along with a - URL that describes the type of the serialized message. + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + title: >- + extension_options are arbitrary options that can be added by + chains + + when the default options are not sufficient. If any of these are + present + + and can't be handled, they will be ignored + description: TxBody is the body of a transaction that all signers sign over. + auth_info: + $ref: '#/definitions/cosmos.tx.v1beta1.AuthInfo' + title: |- + auth_info is the authorization related content of the transaction, + specifically signers, signer modes and fee + signatures: + type: array + items: + type: string + format: byte + description: >- + signatures is a list of signatures that matches the length and order + of + + AuthInfo's signer_infos to allow connecting signature meta information + like + + public key and signing mode by position. + description: Tx is the standard type used for broadcasting transactions. + cosmos.tx.v1beta1.TxBody: + type: object + properties: + messages: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. 
The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types + that they + + expect it to use in the context of Any. However, for URLs which + use the + + scheme `http`, `https`, or no scheme, one can optionally set up + a type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might + be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the unpack + methods only use the fully qualified type name after the last '/' - Protobuf library provides support to pack/unpack Any values in - the form + in the type URL, for example "foo.bar.com/x/y.z" will yield type - of utility functions or additional generated methods of the Any - type. + name "y.z". - Example 1: Pack and unpack a message in C++. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + JSON - Example 2: Pack and unpack a message in Java. - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + The JSON representation of an `Any` value uses the regular - Example 3: Pack and unpack a message in Python. + representation of the deserialized, embedded message, with an - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + additional field `@type` which contains the type URL. 
Example: - Example 4: Pack and unpack a message in Go + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - The pack methods provided by protobuf library will by default - use + If the embedded message type is well-known and has a custom JSON - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + representation, that representation will be embedded adding a field - methods only use the fully qualified type name after the last - '/' + `value` which holds the custom JSON in addition to the `@type` - in the type URL, for example "foo.bar.com/x/y.z" will yield type + field. Example (for message [google.protobuf.Duration][]): - name "y.z". + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + messages is a list of messages to be executed. The required signers of + those messages define the number and order of elements in AuthInfo's + signer_infos and Tx's signatures. Each required signer address is + added to - JSON + the list only the first time it occurs. - ==== + By convention, the first required signer (usually from the first + message) - The JSON representation of an `Any` value uses the regular + is referred to as the primary signer and pays the fee for the whole - representation of the deserialized, embedded message, with an + transaction. + memo: + type: string + description: >- + memo is any arbitrary note/comment to be added to the transaction. - additional field `@type` which contains the type URL. Example: + WARNING: in clients, any publicly exposed text should not be called + memo, - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + but should be called `note` instead (see + https://github.com/cosmos/cosmos-sdk/issues/9122). + timeout_height: + type: string + format: uint64 + title: |- + timeout is the block height after which this transaction will not + be processed by the chain + extension_options: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + protocol buffer message. This string must contain at least - If the embedded message type is well-known and has a custom JSON + one "/" character. The last segment of the URL's path must + represent - representation, that representation will be embedded adding a - field + the fully qualified name of the type (as in - `value` which holds the custom JSON in addition to the `@type` + `path/google.protobuf.Duration`). The name should be in a + canonical form - field. Example (for message [google.protobuf.Duration][]): + (e.g., leading "." is not accepted). - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - jailed: - type: boolean - format: boolean - description: >- - jailed defined whether the validator has been jailed from bonded - status or not. - status: - description: status is the validator status (bonded/unbonding/unbonded). 
- type: string - enum: - - BOND_STATUS_UNSPECIFIED - - BOND_STATUS_UNBONDED - - BOND_STATUS_UNBONDING - - BOND_STATUS_BONDED - default: BOND_STATUS_UNSPECIFIED - tokens: - type: string - description: tokens define the delegated tokens (incl. self-delegation). - delegator_shares: - type: string - description: >- - delegator_shares defines total shares issued to a validator's - delegators. - description: - description: description defines the description terms for the validator. - type: object - properties: - moniker: - type: string - description: moniker defines a human-readable name for the validator. - identity: - type: string - description: >- - identity defines an optional identity signature (ex. UPort - or Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: >- - security_contact defines an optional email for security - contact. - details: - type: string - description: details define other optional details. - unbonding_height: - type: string - format: int64 - description: >- - unbonding_height defines, if unbonding, the height at which this - validator has begun unbonding. - unbonding_time: - type: string - format: date-time - description: >- - unbonding_time defines, if unbonding, the min time for the - validator to complete unbonding. - commission: - description: commission defines the commission parameters. - type: object - properties: - commission_rates: - description: >- - commission_rates defines the initial commission rates to be - used for creating a validator. - type: object - properties: - rate: - type: string - description: >- - rate is the commission rate charged to delegators, as a - fraction. - max_rate: - type: string - description: >- - max_rate defines the maximum commission rate which - validator can ever charge, as a fraction. - max_change_rate: - type: string - description: >- - max_change_rate defines the maximum daily increase of - the validator commission, as a fraction. - update_time: - type: string - format: date-time - description: >- - update_time is the last time the commission rate was - changed. - min_self_delegation: - type: string - description: >- - min_self_delegation is the validator's self declared minimum - self delegation. - description: >- - Validator defines a validator, together with the total amount of the - Validator's bond shares and their exchange rate to coins. Slashing - results in + In practice, teams usually precompile into the binary all types + that they - a decrease in the exchange rate, allowing correct calculation of - future + expect it to use in the context of Any. However, for URLs which + use the - undelegations without iterating over delegators. When coins are - delegated to + scheme `http`, `https`, or no scheme, one can optionally set up + a type - this validator, the validator is credited with a delegation whose - number of + server that maps type URLs to message definitions as follows: - bond shares is based on the amount of coins delegated divided by the - current - exchange rate. Voting power can be calculated as total bonded shares + * If no scheme is provided, `https` is assumed. - multiplied by exchange rate. - description: validators defines the the validators' info of a delegator. - pagination: - description: pagination defines the pagination in the response. 
- type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - was set, its value is undefined otherwise - description: |- - QueryDelegatorValidatorsResponse is response type for the - Query/DelegatorValidators RPC method. - cosmos.staking.v1beta1.QueryHistoricalInfoResponse: - type: object - properties: - hist: - description: hist defines the historical info at the given height. - type: object - properties: - header: - type: object - properties: - version: - title: basic block info - type: object - properties: - block: - type: string - format: uint64 - app: - type: string - format: uint64 - description: >- - Consensus captures the consensus rules for processing a block - in the blockchain, + Note: this functionality is not currently available in the + official - including all blockchain data structures and the rules of the - application's + protobuf release, and it is not used for type URLs beginning + with - state transition machine. - chain_id: - type: string - height: - type: string - format: int64 - time: - type: string - format: date-time - last_block_id: - title: prev block info - type: object - properties: - hash: - type: string - format: byte - part_set_header: - type: object - properties: - total: - type: integer - format: int64 - hash: - type: string - format: byte - title: PartsetHeader - last_commit_hash: - type: string - format: byte - title: hashes of block data - data_hash: - type: string - format: byte - validators_hash: - type: string - format: byte - title: hashes from the app output from the prev block - next_validators_hash: - type: string - format: byte - consensus_hash: - type: string - format: byte - app_hash: - type: string - format: byte - last_results_hash: - type: string - format: byte - evidence_hash: - type: string - format: byte - title: consensus info - proposer_address: - type: string - format: byte - description: Header defines the structure of a Tendermint block header. - valset: - type: array - items: - type: object - properties: - operator_address: - type: string - description: >- - operator_address defines the address of the validator's - operator; bech encoded in JSON. - consensus_pubkey: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of - the serialized + type.googleapis.com. - protocol buffer message. This string must contain at - least - one "/" character. The last segment of the URL's path - must represent + Schemes other than `http`, `https` (or the empty scheme) might + be - the fully qualified name of the type (as in + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a - `path/google.protobuf.Duration`). 
The name should be in - a canonical form + URL that describes the type of the serialized message. - (e.g., leading "." is not accepted). + Protobuf library provides support to pack/unpack Any values in the + form - In practice, teams usually precompile into the binary - all types that they + of utility functions or additional generated methods of the Any + type. - expect it to use in the context of Any. However, for - URLs which use the - scheme `http`, `https`, or no scheme, one can optionally - set up a type + Example 1: Pack and unpack a message in C++. - server that maps type URLs to message definitions as - follows: + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + Example 2: Pack and unpack a message in Java. - * If no scheme is provided, `https` is assumed. + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based - on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + Example 3: Pack and unpack a message in Python. - Note: this functionality is not currently available in - the official + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - protobuf release, and it is not used for type URLs - beginning with + Example 4: Pack and unpack a message in Go - type.googleapis.com. + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + The pack methods provided by protobuf library will by default use - Schemes other than `http`, `https` (or the empty scheme) - might be + 'type.googleapis.com/full.type.name' as the type URL and the unpack - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer - message along with a + methods only use the fully qualified type name after the last '/' - URL that describes the type of the serialized message. + in the type URL, for example "foo.bar.com/x/y.z" will yield type + name "y.z". - Protobuf library provides support to pack/unpack Any values - in the form - of utility functions or additional generated methods of the - Any type. + JSON - Example 1: Pack and unpack a message in C++. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + The JSON representation of an `Any` value uses the regular - Example 2: Pack and unpack a message in Java. + representation of the deserialized, embedded message, with an - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + additional field `@type` which contains the type URL. 
Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a field + + `value` which holds the custom JSON in addition to the `@type` - Example 3: Pack and unpack a message in Python. + field. Example (for message [google.protobuf.Duration][]): - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + title: >- + extension_options are arbitrary options that can be added by chains - Example 4: Pack and unpack a message in Go + when the default options are not sufficient. If any of these are + present - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + and can't be handled, the transaction will be rejected + non_critical_extension_options: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - The pack methods provided by protobuf library will by - default use + protocol buffer message. This string must contain at least - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + one "/" character. The last segment of the URL's path must + represent - methods only use the fully qualified type name after the - last '/' + the fully qualified name of the type (as in - in the type URL, for example "foo.bar.com/x/y.z" will yield - type + `path/google.protobuf.Duration`). The name should be in a + canonical form - name "y.z". + (e.g., leading "." is not accepted). + In practice, teams usually precompile into the binary all types + that they - JSON + expect it to use in the context of Any. However, for URLs which + use the - ==== + scheme `http`, `https`, or no scheme, one can optionally set up + a type - The JSON representation of an `Any` value uses the regular + server that maps type URLs to message definitions as follows: - representation of the deserialized, embedded message, with - an - additional field `@type` which contains the type URL. - Example: + * If no scheme is provided, `https` is assumed. - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + Note: this functionality is not currently available in the + official - If the embedded message type is well-known and has a custom - JSON + protobuf release, and it is not used for type URLs beginning + with - representation, that representation will be embedded adding - a field + type.googleapis.com. - `value` which holds the custom JSON in addition to the - `@type` - field. 
Example (for message [google.protobuf.Duration][]): + Schemes other than `http`, `https` (or the empty scheme) might + be - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - jailed: - type: boolean - format: boolean - description: >- - jailed defined whether the validator has been jailed from - bonded status or not. - status: - description: status is the validator status (bonded/unbonding/unbonded). - type: string - enum: - - BOND_STATUS_UNSPECIFIED - - BOND_STATUS_UNBONDED - - BOND_STATUS_UNBONDING - - BOND_STATUS_BONDED - default: BOND_STATUS_UNSPECIFIED - tokens: - type: string - description: tokens define the delegated tokens (incl. self-delegation). - delegator_shares: - type: string - description: >- - delegator_shares defines total shares issued to a - validator's delegators. - description: - description: description defines the description terms for the validator. - type: object - properties: - moniker: - type: string - description: moniker defines a human-readable name for the validator. - identity: - type: string - description: >- - identity defines an optional identity signature (ex. - UPort or Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: >- - security_contact defines an optional email for security - contact. - details: - type: string - description: details define other optional details. - unbonding_height: - type: string - format: int64 - description: >- - unbonding_height defines, if unbonding, the height at which - this validator has begun unbonding. - unbonding_time: - type: string - format: date-time - description: >- - unbonding_time defines, if unbonding, the min time for the - validator to complete unbonding. - commission: - description: commission defines the commission parameters. - type: object - properties: - commission_rates: - description: >- - commission_rates defines the initial commission rates to - be used for creating a validator. - type: object - properties: - rate: - type: string - description: >- - rate is the commission rate charged to delegators, - as a fraction. - max_rate: - type: string - description: >- - max_rate defines the maximum commission rate which - validator can ever charge, as a fraction. - max_change_rate: - type: string - description: >- - max_change_rate defines the maximum daily increase - of the validator commission, as a fraction. - update_time: - type: string - format: date-time - description: >- - update_time is the last time the commission rate was - changed. - min_self_delegation: - type: string - description: >- - min_self_delegation is the validator's self declared minimum - self delegation. + used with implementation specific semantics. + value: + type: string + format: byte description: >- - Validator defines a validator, together with the total amount of - the - - Validator's bond shares and their exchange rate to coins. - Slashing results in + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a - a decrease in the exchange rate, allowing correct calculation of - future + URL that describes the type of the serialized message. - undelegations without iterating over delegators. 
When coins are - delegated to - this validator, the validator is credited with a delegation - whose number of + Protobuf library provides support to pack/unpack Any values in the + form - bond shares is based on the amount of coins delegated divided by - the current + of utility functions or additional generated methods of the Any + type. - exchange rate. Voting power can be calculated as total bonded - shares - multiplied by exchange rate. - description: >- - QueryHistoricalInfoResponse is response type for the Query/HistoricalInfo - RPC + Example 1: Pack and unpack a message in C++. - method. - cosmos.staking.v1beta1.QueryParamsResponse: - type: object - properties: - params: - description: params holds all the parameters of this module. - type: object - properties: - unbonding_time: - type: string - description: unbonding_time is the time duration of unbonding. - max_validators: - type: integer - format: int64 - description: max_validators is the maximum number of validators. - max_entries: - type: integer - format: int64 - description: >- - max_entries is the max entries for either unbonding delegation or - redelegation (per pair/trio). - historical_entries: - type: integer - format: int64 - description: historical_entries is the number of historical entries to persist. - bond_denom: - type: string - description: bond_denom defines the bondable coin denomination. - description: QueryParamsResponse is response type for the Query/Params RPC method. - cosmos.staking.v1beta1.QueryPoolResponse: - type: object - properties: - pool: - description: pool defines the pool info. - type: object - properties: - not_bonded_tokens: - type: string - bonded_tokens: - type: string - description: QueryPoolResponse is response type for the Query/Pool RPC method. - cosmos.staking.v1beta1.QueryRedelegationsResponse: - type: object - properties: - redelegation_responses: - type: array - items: - type: object - properties: - redelegation: - type: object - properties: - delegator_address: - type: string - description: >- - delegator_address is the bech32-encoded address of the - delegator. - validator_src_address: - type: string - description: >- - validator_src_address is the validator redelegation source - operator address. - validator_dst_address: - type: string - description: >- - validator_dst_address is the validator redelegation - destination operator address. - entries: - type: array - items: - type: object - properties: - creation_height: - type: string - format: int64 - description: >- - creation_height defines the height which the - redelegation took place. - completion_time: - type: string - format: date-time - description: >- - completion_time defines the unix time for redelegation - completion. - initial_balance: - type: string - description: >- - initial_balance defines the initial balance when - redelegation started. - shares_dst: - type: string - description: >- - shares_dst is the amount of destination-validator - shares created by redelegation. - description: >- - RedelegationEntry defines a redelegation object with - relevant metadata. - description: entries are the redelegation entries. - description: >- - Redelegation contains the list of a particular delegator's - redelegating bonds + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - from a particular source validator to a particular destination - validator. 
- entries: - type: array - items: - type: object - properties: - redelegation_entry: - type: object - properties: - creation_height: - type: string - format: int64 - description: >- - creation_height defines the height which the - redelegation took place. - completion_time: - type: string - format: date-time - description: >- - completion_time defines the unix time for redelegation - completion. - initial_balance: - type: string - description: >- - initial_balance defines the initial balance when - redelegation started. - shares_dst: - type: string - description: >- - shares_dst is the amount of destination-validator - shares created by redelegation. - description: >- - RedelegationEntry defines a redelegation object with - relevant metadata. - balance: - type: string - description: >- - RedelegationEntryResponse is equivalent to a RedelegationEntry - except that it + Example 2: Pack and unpack a message in Java. - contains a balance in addition to shares which is more - suitable for client + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - responses. - description: >- - RedelegationResponse is equivalent to a Redelegation except that its - entries + Example 3: Pack and unpack a message in Python. - contain a balance in addition to shares which is more suitable for - client + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - responses. - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + Example 4: Pack and unpack a message in Go - was set, its value is undefined otherwise - description: >- - QueryRedelegationsResponse is response type for the Query/Redelegations - RPC + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - method. - cosmos.staking.v1beta1.QueryUnbondingDelegationResponse: - type: object - properties: - unbond: - type: object - properties: - delegator_address: - type: string - description: delegator_address is the bech32-encoded address of the delegator. - validator_address: - type: string - description: validator_address is the bech32-encoded address of the validator. - entries: - type: array - items: - type: object - properties: - creation_height: - type: string - format: int64 - description: >- - creation_height is the height which the unbonding took - place. - completion_time: - type: string - format: date-time - description: completion_time is the unix time for unbonding completion. - initial_balance: - type: string - description: >- - initial_balance defines the tokens initially scheduled to - receive at completion. - balance: - type: string - description: balance defines the tokens to receive at completion. - description: >- - UnbondingDelegationEntry defines an unbonding object with - relevant metadata. - description: entries are the unbonding delegation entries. - description: |- - UnbondingDelegation stores all of a single delegator's unbonding bonds - for a single validator in an time-ordered list. - description: |- - QueryDelegationResponse is response type for the Query/UnbondingDelegation - RPC method. 
- cosmos.staking.v1beta1.QueryValidatorDelegationsResponse: - type: object - properties: - delegation_responses: - type: array - items: - type: object - properties: - delegation: - type: object - properties: - delegator_address: - type: string - description: >- - delegator_address is the bech32-encoded address of the - delegator. - validator_address: - type: string - description: >- - validator_address is the bech32-encoded address of the - validator. - shares: - type: string - description: shares define the delegation shares received. - description: >- - Delegation represents the bond with tokens held by an account. - It is + The pack methods provided by protobuf library will by default use - owned by one delegator, and is associated with the voting power - of one + 'type.googleapis.com/full.type.name' as the type URL and the unpack - validator. - balance: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. + methods only use the fully qualified type name after the last '/' + in the type URL, for example "foo.bar.com/x/y.z" will yield type - NOTE: The amount field is an Int which implements the custom - method + name "y.z". - signatures required by gogoproto. - description: >- - DelegationResponse is equivalent to Delegation except that it - contains a - balance in addition to shares which is more suitable for client - responses. - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total - was set, its value is undefined otherwise - title: |- - QueryValidatorDelegationsResponse is response type for the - Query/ValidatorDelegations RPC method - cosmos.staking.v1beta1.QueryValidatorResponse: - type: object - properties: - validator: - type: object - properties: - operator_address: - type: string - description: >- - operator_address defines the address of the validator's operator; - bech encoded in JSON. - consensus_pubkey: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized + JSON - protocol buffer message. This string must contain at least - one "/" character. The last segment of the URL's path must - represent + The JSON representation of an `Any` value uses the regular - the fully qualified name of the type (as in + representation of the deserialized, embedded message, with an - `path/google.protobuf.Duration`). The name should be in a - canonical form + additional field `@type` which contains the type URL. Example: - (e.g., leading "." is not accepted). + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - In practice, teams usually precompile into the binary all - types that they + If the embedded message type is well-known and has a custom JSON - expect it to use in the context of Any. 
However, for URLs - which use the + representation, that representation will be embedded adding a field - scheme `http`, `https`, or no scheme, one can optionally set - up a type + `value` which holds the custom JSON in addition to the `@type` - server that maps type URLs to message definitions as follows: + field. Example (for message [google.protobuf.Duration][]): + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + title: >- + extension_options are arbitrary options that can be added by chains - * If no scheme is provided, `https` is assumed. + when the default options are not sufficient. If any of these are + present - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on - the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + and can't be handled, they will be ignored + description: TxBody is the body of a transaction that all signers sign over. + cosmos.tx.v1beta1.TxDecodeAminoRequest: + type: object + properties: + amino_binary: + type: string + format: byte + description: |- + TxDecodeAminoRequest is the request type for the Service.TxDecodeAmino + RPC method. - Note: this functionality is not currently available in the - official + Since: cosmos-sdk 0.47 + cosmos.tx.v1beta1.TxDecodeAminoResponse: + type: object + properties: + amino_json: + type: string + description: |- + TxDecodeAminoResponse is the response type for the Service.TxDecodeAmino + RPC method. - protobuf release, and it is not used for type URLs beginning - with + Since: cosmos-sdk 0.47 + cosmos.tx.v1beta1.TxDecodeRequest: + type: object + properties: + tx_bytes: + type: string + format: byte + description: tx_bytes is the raw transaction. + description: |- + TxDecodeRequest is the request type for the Service.TxDecode + RPC method. - type.googleapis.com. + Since: cosmos-sdk 0.47 + cosmos.tx.v1beta1.TxDecodeResponse: + type: object + properties: + tx: + $ref: '#/definitions/cosmos.tx.v1beta1.Tx' + description: tx is the decoded transaction. + description: |- + TxDecodeResponse is the response type for the + Service.TxDecode method. + Since: cosmos-sdk 0.47 + cosmos.tx.v1beta1.TxEncodeAminoRequest: + type: object + properties: + amino_json: + type: string + description: |- + TxEncodeAminoRequest is the request type for the Service.TxEncodeAmino + RPC method. - Schemes other than `http`, `https` (or the empty scheme) might - be + Since: cosmos-sdk 0.47 + cosmos.tx.v1beta1.TxEncodeAminoResponse: + type: object + properties: + amino_binary: + type: string + format: byte + description: |- + TxEncodeAminoResponse is the response type for the Service.TxEncodeAmino + RPC method. - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message - along with a + Since: cosmos-sdk 0.47 + cosmos.tx.v1beta1.TxEncodeRequest: + type: object + properties: + tx: + $ref: '#/definitions/cosmos.tx.v1beta1.Tx' + description: tx is the transaction to encode. + description: |- + TxEncodeRequest is the request type for the Service.TxEncode + RPC method. - URL that describes the type of the serialized message. 
+ Since: cosmos-sdk 0.47 + cosmos.tx.v1beta1.TxEncodeResponse: + type: object + properties: + tx_bytes: + type: string + format: byte + description: tx_bytes is the encoded transaction bytes. + description: |- + TxEncodeResponse is the response type for the + Service.TxEncode method. + Since: cosmos-sdk 0.47 + tendermint.abci.Event: + type: object + properties: + type: + type: string + attributes: + type: array + items: + type: object + properties: + key: + type: string + value: + type: string + index: + type: boolean + description: EventAttribute is a single key-value pair, associated with an event. + description: >- + Event allows application developers to attach additional information to - Protobuf library provides support to pack/unpack Any values in the - form + ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and + ResponseDeliverTx. - of utility functions or additional generated methods of the Any - type. + Later, transactions may be queried using these events. + tendermint.abci.EventAttribute: + type: object + properties: + key: + type: string + value: + type: string + index: + type: boolean + description: EventAttribute is a single key-value pair, associated with an event. + cosmos.upgrade.v1beta1.ModuleVersion: + type: object + properties: + name: + type: string + title: name of the app module + version: + type: string + format: uint64 + title: consensus version of the app module + description: |- + ModuleVersion specifies a module and its consensus version. + Since: cosmos-sdk 0.43 + cosmos.upgrade.v1beta1.Plan: + type: object + properties: + name: + type: string + description: >- + Sets the name for the upgrade. This name will be used by the upgraded - Example 1: Pack and unpack a message in C++. + version of the software to apply any special "on-upgrade" commands + during - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + the first BeginBlock method after the upgrade is applied. It is also + used - Example 2: Pack and unpack a message in Java. + to detect whether a software version can handle a given upgrade. If no - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + upgrade handler with this name has been set in the software, it will + be - Example 3: Pack and unpack a message in Python. + assumed that the software is out-of-date when the upgrade Time or + Height is - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + reached and the software will exit. + time: + type: string + format: date-time + description: >- + Deprecated: Time based upgrades have been deprecated. Time based + upgrade logic - Example 4: Pack and unpack a message in Go + has been removed from the SDK. - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + If this field is not empty, an error will be thrown. + height: + type: string + format: int64 + description: The height at which the upgrade must be performed. + info: + type: string + title: |- + Any application specific upgrade info to be included on-chain + such as a git commit that validators could automatically upgrade to + upgraded_client_state: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - The pack methods provided by protobuf library will by default use + protocol buffer message. 
This string must contain at least - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + one "/" character. The last segment of the URL's path must + represent - methods only use the fully qualified type name after the last '/' + the fully qualified name of the type (as in - in the type URL, for example "foo.bar.com/x/y.z" will yield type + `path/google.protobuf.Duration`). The name should be in a + canonical form - name "y.z". + (e.g., leading "." is not accepted). + In practice, teams usually precompile into the binary all types + that they - JSON + expect it to use in the context of Any. However, for URLs which + use the - ==== + scheme `http`, `https`, or no scheme, one can optionally set up a + type - The JSON representation of an `Any` value uses the regular + server that maps type URLs to message definitions as follows: - representation of the deserialized, embedded message, with an - additional field `@type` which contains the type URL. Example: + * If no scheme is provided, `https` is assumed. - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + Note: this functionality is not currently available in the + official - If the embedded message type is well-known and has a custom JSON + protobuf release, and it is not used for type URLs beginning with - representation, that representation will be embedded adding a - field + type.googleapis.com. - `value` which holds the custom JSON in addition to the `@type` - field. Example (for message [google.protobuf.Duration][]): + Schemes other than `http`, `https` (or the empty scheme) might be - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - jailed: - type: boolean - format: boolean - description: >- - jailed defined whether the validator has been jailed from bonded - status or not. - status: - description: status is the validator status (bonded/unbonding/unbonded). - type: string - enum: - - BOND_STATUS_UNSPECIFIED - - BOND_STATUS_UNBONDED - - BOND_STATUS_UNBONDING - - BOND_STATUS_BONDED - default: BOND_STATUS_UNSPECIFIED - tokens: - type: string - description: tokens define the delegated tokens (incl. self-delegation). - delegator_shares: - type: string - description: >- - delegator_shares defines total shares issued to a validator's - delegators. - description: - description: description defines the description terms for the validator. - type: object - properties: - moniker: - type: string - description: moniker defines a human-readable name for the validator. - identity: - type: string - description: >- - identity defines an optional identity signature (ex. UPort or - Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: >- - security_contact defines an optional email for security - contact. - details: - type: string - description: details define other optional details. 
- unbonding_height: - type: string - format: int64 - description: >- - unbonding_height defines, if unbonding, the height at which this - validator has begun unbonding. - unbonding_time: - type: string - format: date-time - description: >- - unbonding_time defines, if unbonding, the min time for the - validator to complete unbonding. - commission: - description: commission defines the commission parameters. - type: object - properties: - commission_rates: - description: >- - commission_rates defines the initial commission rates to be - used for creating a validator. - type: object - properties: - rate: - type: string - description: >- - rate is the commission rate charged to delegators, as a - fraction. - max_rate: - type: string - description: >- - max_rate defines the maximum commission rate which - validator can ever charge, as a fraction. - max_change_rate: - type: string - description: >- - max_change_rate defines the maximum daily increase of the - validator commission, as a fraction. - update_time: - type: string - format: date-time - description: update_time is the last time the commission rate was changed. - min_self_delegation: + used with implementation specific semantics. + value: type: string + format: byte description: >- - min_self_delegation is the validator's self declared minimum self - delegation. + Must be a valid serialized protocol buffer of the above specified + type. description: >- - Validator defines a validator, together with the total amount of the + `Any` contains an arbitrary serialized protocol buffer message along + with a - Validator's bond shares and their exchange rate to coins. Slashing - results in + URL that describes the type of the serialized message. - a decrease in the exchange rate, allowing correct calculation of - future - undelegations without iterating over delegators. When coins are - delegated to + Protobuf library provides support to pack/unpack Any values in the + form - this validator, the validator is credited with a delegation whose - number of + of utility functions or additional generated methods of the Any type. - bond shares is based on the amount of coins delegated divided by the - current - exchange rate. Voting power can be calculated as total bonded shares + Example 1: Pack and unpack a message in C++. - multiplied by exchange rate. - title: QueryValidatorResponse is response type for the Query/Validator RPC method - cosmos.staking.v1beta1.QueryValidatorUnbondingDelegationsResponse: - type: object - properties: - unbonding_responses: - type: array - items: - type: object - properties: - delegator_address: - type: string - description: >- - delegator_address is the bech32-encoded address of the - delegator. - validator_address: - type: string - description: >- - validator_address is the bech32-encoded address of the - validator. - entries: - type: array - items: - type: object - properties: - creation_height: - type: string - format: int64 - description: >- - creation_height is the height which the unbonding took - place. - completion_time: - type: string - format: date-time - description: completion_time is the unix time for unbonding completion. - initial_balance: - type: string - description: >- - initial_balance defines the tokens initially scheduled to - receive at completion. - balance: - type: string - description: balance defines the tokens to receive at completion. - description: >- - UnbondingDelegationEntry defines an unbonding object with - relevant metadata. 
- description: entries are the unbonding delegation entries. - description: >- - UnbondingDelegation stores all of a single delegator's unbonding - bonds + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. - for a single validator in an time-ordered list. - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - was set, its value is undefined otherwise - description: |- - QueryValidatorUnbondingDelegationsResponse is response type for the - Query/ValidatorUnbondingDelegations RPC method. - cosmos.staking.v1beta1.QueryValidatorsResponse: - type: object - properties: - validators: - type: array - items: - type: object - properties: - operator_address: - type: string - description: >- - operator_address defines the address of the validator's - operator; bech encoded in JSON. - consensus_pubkey: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized + Example 4: Pack and unpack a message in Go - protocol buffer message. This string must contain at least + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - one "/" character. The last segment of the URL's path must - represent + The pack methods provided by protobuf library will by default use - the fully qualified name of the type (as in + 'type.googleapis.com/full.type.name' as the type URL and the unpack - `path/google.protobuf.Duration`). The name should be in a - canonical form + methods only use the fully qualified type name after the last '/' - (e.g., leading "." is not accepted). + in the type URL, for example "foo.bar.com/x/y.z" will yield type + name "y.z". - In practice, teams usually precompile into the binary all - types that they - expect it to use in the context of Any. However, for URLs - which use the - scheme `http`, `https`, or no scheme, one can optionally set - up a type + JSON - server that maps type URLs to message definitions as - follows: + The JSON representation of an `Any` value uses the regular - * If no scheme is provided, `https` is assumed. + representation of the deserialized, embedded message, with an - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on - the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + additional field `@type` which contains the type URL. 
Example: - Note: this functionality is not currently available in the - official + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - protobuf release, and it is not used for type URLs beginning - with + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - type.googleapis.com. + If the embedded message type is well-known and has a custom JSON + representation, that representation will be embedded adding a field - Schemes other than `http`, `https` (or the empty scheme) - might be + `value` which holds the custom JSON in addition to the `@type` - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message - along with a + field. Example (for message [google.protobuf.Duration][]): - URL that describes the type of the serialized message. + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + Plan specifies information about a planned upgrade and when it should + occur. + cosmos.upgrade.v1beta1.QueryAppliedPlanResponse: + type: object + properties: + height: + type: string + format: int64 + description: height is the block height at which the plan was applied. + description: >- + QueryAppliedPlanResponse is the response type for the Query/AppliedPlan + RPC + method. + cosmos.upgrade.v1beta1.QueryAuthorityResponse: + type: object + properties: + address: + type: string + description: 'Since: cosmos-sdk 0.46' + title: QueryAuthorityResponse is the response type for Query/Authority + cosmos.upgrade.v1beta1.QueryCurrentPlanResponse: + type: object + properties: + plan: + description: plan is the current upgrade plan. + type: object + properties: + name: + type: string + description: >- + Sets the name for the upgrade. This name will be used by the + upgraded - Protobuf library provides support to pack/unpack Any values in - the form + version of the software to apply any special "on-upgrade" commands + during - of utility functions or additional generated methods of the Any - type. + the first BeginBlock method after the upgrade is applied. It is + also used + to detect whether a software version can handle a given upgrade. + If no - Example 1: Pack and unpack a message in C++. + upgrade handler with this name has been set in the software, it + will be - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + assumed that the software is out-of-date when the upgrade Time or + Height is - Example 2: Pack and unpack a message in Java. + reached and the software will exit. + time: + type: string + format: date-time + description: >- + Deprecated: Time based upgrades have been deprecated. Time based + upgrade logic - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + has been removed from the SDK. - Example 3: Pack and unpack a message in Python. + If this field is not empty, an error will be thrown. + height: + type: string + format: int64 + description: The height at which the upgrade must be performed. + info: + type: string + title: >- + Any application specific upgrade info to be included on-chain - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... 
+ such as a git commit that validators could automatically upgrade + to + upgraded_client_state: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - Example 4: Pack and unpack a message in Go + protocol buffer message. This string must contain at least - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + one "/" character. The last segment of the URL's path must + represent - The pack methods provided by protobuf library will by default - use + the fully qualified name of the type (as in - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + `path/google.protobuf.Duration`). The name should be in a + canonical form - methods only use the fully qualified type name after the last - '/' + (e.g., leading "." is not accepted). - in the type URL, for example "foo.bar.com/x/y.z" will yield type - name "y.z". + In practice, teams usually precompile into the binary all + types that they + expect it to use in the context of Any. However, for URLs + which use the + scheme `http`, `https`, or no scheme, one can optionally set + up a type - JSON + server that maps type URLs to message definitions as follows: - ==== - The JSON representation of an `Any` value uses the regular + * If no scheme is provided, `https` is assumed. - representation of the deserialized, embedded message, with an + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - additional field `@type` which contains the type URL. Example: + Note: this functionality is not currently available in the + official - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + protobuf release, and it is not used for type URLs beginning + with - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + type.googleapis.com. - If the embedded message type is well-known and has a custom JSON - representation, that representation will be embedded adding a - field + Schemes other than `http`, `https` (or the empty scheme) might + be - `value` which holds the custom JSON in addition to the `@type` + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a - field. Example (for message [google.protobuf.Duration][]): + URL that describes the type of the serialized message. - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - jailed: - type: boolean - format: boolean - description: >- - jailed defined whether the validator has been jailed from bonded - status or not. - status: - description: status is the validator status (bonded/unbonding/unbonded). - type: string - enum: - - BOND_STATUS_UNSPECIFIED - - BOND_STATUS_UNBONDED - - BOND_STATUS_UNBONDING - - BOND_STATUS_BONDED - default: BOND_STATUS_UNSPECIFIED - tokens: - type: string - description: tokens define the delegated tokens (incl. 
self-delegation). - delegator_shares: - type: string - description: >- - delegator_shares defines total shares issued to a validator's - delegators. - description: - description: description defines the description terms for the validator. - type: object - properties: - moniker: - type: string - description: moniker defines a human-readable name for the validator. - identity: - type: string - description: >- - identity defines an optional identity signature (ex. UPort - or Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: >- - security_contact defines an optional email for security - contact. - details: - type: string - description: details define other optional details. - unbonding_height: - type: string - format: int64 - description: >- - unbonding_height defines, if unbonding, the height at which this - validator has begun unbonding. - unbonding_time: - type: string - format: date-time - description: >- - unbonding_time defines, if unbonding, the min time for the - validator to complete unbonding. - commission: - description: commission defines the commission parameters. - type: object - properties: - commission_rates: - description: >- - commission_rates defines the initial commission rates to be - used for creating a validator. - type: object - properties: - rate: - type: string - description: >- - rate is the commission rate charged to delegators, as a - fraction. - max_rate: - type: string - description: >- - max_rate defines the maximum commission rate which - validator can ever charge, as a fraction. - max_change_rate: - type: string - description: >- - max_change_rate defines the maximum daily increase of - the validator commission, as a fraction. - update_time: - type: string - format: date-time - description: >- - update_time is the last time the commission rate was - changed. - min_self_delegation: - type: string - description: >- - min_self_delegation is the validator's self declared minimum - self delegation. - description: >- - Validator defines a validator, together with the total amount of the - Validator's bond shares and their exchange rate to coins. Slashing - results in + Protobuf library provides support to pack/unpack Any values in the + form - a decrease in the exchange rate, allowing correct calculation of - future + of utility functions or additional generated methods of the Any + type. - undelegations without iterating over delegators. When coins are - delegated to - this validator, the validator is credited with a delegation whose - number of + Example 1: Pack and unpack a message in C++. - bond shares is based on the amount of coins delegated divided by the - current + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - exchange rate. Voting power can be calculated as total bonded shares + Example 2: Pack and unpack a message in Java. - multiplied by exchange rate. - description: validators contains all the queried validators. - pagination: - description: pagination defines the pagination in the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + Foo foo = ...; + Any any = Any.pack(foo); + ... 
+ if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - was set, its value is undefined otherwise - title: >- - QueryValidatorsResponse is response type for the Query/Validators RPC - method - cosmos.staking.v1beta1.Redelegation: - type: object - properties: - delegator_address: - type: string - description: delegator_address is the bech32-encoded address of the delegator. - validator_src_address: - type: string - description: >- - validator_src_address is the validator redelegation source operator - address. - validator_dst_address: - type: string - description: >- - validator_dst_address is the validator redelegation destination - operator address. - entries: - type: array - items: - type: object - properties: - creation_height: - type: string - format: int64 - description: >- - creation_height defines the height which the redelegation took - place. - completion_time: - type: string - format: date-time - description: >- - completion_time defines the unix time for redelegation - completion. - initial_balance: - type: string - description: >- - initial_balance defines the initial balance when redelegation - started. - shares_dst: - type: string - description: >- - shares_dst is the amount of destination-validator shares created - by redelegation. - description: >- - RedelegationEntry defines a redelegation object with relevant - metadata. - description: entries are the redelegation entries. - description: >- - Redelegation contains the list of a particular delegator's redelegating - bonds + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack - from a particular source validator to a particular destination validator. - cosmos.staking.v1beta1.RedelegationEntry: - type: object - properties: - creation_height: - type: string - format: int64 - description: creation_height defines the height which the redelegation took place. - completion_time: - type: string - format: date-time - description: completion_time defines the unix time for redelegation completion. - initial_balance: - type: string - description: initial_balance defines the initial balance when redelegation started. - shares_dst: - type: string - description: >- - shares_dst is the amount of destination-validator shares created by - redelegation. - description: RedelegationEntry defines a redelegation object with relevant metadata. - cosmos.staking.v1beta1.RedelegationEntryResponse: - type: object - properties: - redelegation_entry: - type: object - properties: - creation_height: - type: string - format: int64 - description: >- - creation_height defines the height which the redelegation took - place. - completion_time: - type: string - format: date-time - description: completion_time defines the unix time for redelegation completion. - initial_balance: - type: string - description: >- - initial_balance defines the initial balance when redelegation - started. - shares_dst: - type: string - description: >- - shares_dst is the amount of destination-validator shares created - by redelegation. 
- description: >- - RedelegationEntry defines a redelegation object with relevant - metadata. - balance: - type: string - description: >- - RedelegationEntryResponse is equivalent to a RedelegationEntry except that - it + methods only use the fully qualified type name after the last '/' - contains a balance in addition to shares which is more suitable for client + in the type URL, for example "foo.bar.com/x/y.z" will yield type - responses. - cosmos.staking.v1beta1.RedelegationResponse: - type: object - properties: - redelegation: - type: object - properties: - delegator_address: - type: string - description: delegator_address is the bech32-encoded address of the delegator. - validator_src_address: - type: string - description: >- - validator_src_address is the validator redelegation source - operator address. - validator_dst_address: - type: string - description: >- - validator_dst_address is the validator redelegation destination - operator address. - entries: - type: array - items: - type: object - properties: - creation_height: - type: string - format: int64 - description: >- - creation_height defines the height which the redelegation - took place. - completion_time: - type: string - format: date-time - description: >- - completion_time defines the unix time for redelegation - completion. - initial_balance: - type: string - description: >- - initial_balance defines the initial balance when - redelegation started. - shares_dst: - type: string - description: >- - shares_dst is the amount of destination-validator shares - created by redelegation. - description: >- - RedelegationEntry defines a redelegation object with relevant - metadata. - description: entries are the redelegation entries. - description: >- - Redelegation contains the list of a particular delegator's - redelegating bonds + name "y.z". - from a particular source validator to a particular destination - validator. - entries: - type: array - items: - type: object - properties: - redelegation_entry: - type: object - properties: - creation_height: - type: string - format: int64 - description: >- - creation_height defines the height which the redelegation - took place. - completion_time: - type: string - format: date-time - description: >- - completion_time defines the unix time for redelegation - completion. - initial_balance: - type: string - description: >- - initial_balance defines the initial balance when - redelegation started. - shares_dst: - type: string - description: >- - shares_dst is the amount of destination-validator shares - created by redelegation. - description: >- - RedelegationEntry defines a redelegation object with relevant - metadata. - balance: - type: string - description: >- - RedelegationEntryResponse is equivalent to a RedelegationEntry - except that it - contains a balance in addition to shares which is more suitable for - client - responses. - description: >- - RedelegationResponse is equivalent to a Redelegation except that its - entries + JSON - contain a balance in addition to shares which is more suitable for client - responses. - cosmos.staking.v1beta1.UnbondingDelegation: + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. 
Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + QueryCurrentPlanResponse is the response type for the Query/CurrentPlan + RPC + + method. + cosmos.upgrade.v1beta1.QueryModuleVersionsResponse: type: object properties: - delegator_address: - type: string - description: delegator_address is the bech32-encoded address of the delegator. - validator_address: - type: string - description: validator_address is the bech32-encoded address of the validator. - entries: + module_versions: type: array items: type: object properties: - creation_height: - type: string - format: int64 - description: creation_height is the height which the unbonding took place. - completion_time: - type: string - format: date-time - description: completion_time is the unix time for unbonding completion. - initial_balance: + name: type: string - description: >- - initial_balance defines the tokens initially scheduled to - receive at completion. - balance: + title: name of the app module + version: type: string - description: balance defines the tokens to receive at completion. - description: >- - UnbondingDelegationEntry defines an unbonding object with relevant - metadata. - description: entries are the unbonding delegation entries. - description: |- - UnbondingDelegation stores all of a single delegator's unbonding bonds - for a single validator in an time-ordered list. - cosmos.staking.v1beta1.UnbondingDelegationEntry: + format: uint64 + title: consensus version of the app module + description: |- + ModuleVersion specifies a module and its consensus version. + + Since: cosmos-sdk 0.43 + description: >- + module_versions is a list of module names with their consensus + versions. + description: >- + QueryModuleVersionsResponse is the response type for the + Query/ModuleVersions + + RPC method. + + + Since: cosmos-sdk 0.43 + cosmos.upgrade.v1beta1.QueryUpgradedConsensusStateResponse: type: object properties: - creation_height: - type: string - format: int64 - description: creation_height is the height which the unbonding took place. - completion_time: - type: string - format: date-time - description: completion_time is the unix time for unbonding completion. - initial_balance: - type: string - description: >- - initial_balance defines the tokens initially scheduled to receive at - completion. - balance: + upgraded_consensus_state: type: string - description: balance defines the tokens to receive at completion. + format: byte + title: 'Since: cosmos-sdk 0.43' description: >- - UnbondingDelegationEntry defines an unbonding object with relevant - metadata. - cosmos.staking.v1beta1.Validator: + QueryUpgradedConsensusStateResponse is the response type for the + Query/UpgradedConsensusState + + RPC method. + cosmos.authz.v1beta1.Grant: type: object properties: - operator_address: - type: string - description: >- - operator_address defines the address of the validator's operator; bech - encoded in JSON. 
- consensus_pubkey: + authorization: type: object properties: type_url: @@ -43720,7 +66573,7 @@ definitions: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -43730,13 +66583,16 @@ definitions: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -43754,7 +66610,6 @@ definitions: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -43768,351 +66623,45 @@ definitions: string last_name = 2; } - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } - - If the embedded message type is well-known and has a custom JSON - - representation, that representation will be embedded adding a field - - `value` which holds the custom JSON in addition to the `@type` - - field. Example (for message [google.protobuf.Duration][]): - - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - jailed: - type: boolean - format: boolean - description: >- - jailed defined whether the validator has been jailed from bonded - status or not. - status: - description: status is the validator status (bonded/unbonding/unbonded). - type: string - enum: - - BOND_STATUS_UNSPECIFIED - - BOND_STATUS_UNBONDED - - BOND_STATUS_UNBONDING - - BOND_STATUS_BONDED - default: BOND_STATUS_UNSPECIFIED - tokens: - type: string - description: tokens define the delegated tokens (incl. self-delegation). - delegator_shares: - type: string - description: >- - delegator_shares defines total shares issued to a validator's - delegators. - description: - description: description defines the description terms for the validator. - type: object - properties: - moniker: - type: string - description: moniker defines a human-readable name for the validator. - identity: - type: string - description: >- - identity defines an optional identity signature (ex. UPort or - Keybase). - website: - type: string - description: website defines an optional website link. - security_contact: - type: string - description: security_contact defines an optional email for security contact. - details: - type: string - description: details define other optional details. - unbonding_height: - type: string - format: int64 - description: >- - unbonding_height defines, if unbonding, the height at which this - validator has begun unbonding. - unbonding_time: - type: string - format: date-time - description: >- - unbonding_time defines, if unbonding, the min time for the validator - to complete unbonding. - commission: - description: commission defines the commission parameters. - type: object - properties: - commission_rates: - description: >- - commission_rates defines the initial commission rates to be used - for creating a validator. - type: object - properties: - rate: - type: string - description: >- - rate is the commission rate charged to delegators, as a - fraction. - max_rate: - type: string - description: >- - max_rate defines the maximum commission rate which validator - can ever charge, as a fraction. - max_change_rate: - type: string - description: >- - max_change_rate defines the maximum daily increase of the - validator commission, as a fraction. 
- update_time: - type: string - format: date-time - description: update_time is the last time the commission rate was changed. - min_self_delegation: - type: string - description: >- - min_self_delegation is the validator's self declared minimum self - delegation. - description: >- - Validator defines a validator, together with the total amount of the - - Validator's bond shares and their exchange rate to coins. Slashing results - in - - a decrease in the exchange rate, allowing correct calculation of future - - undelegations without iterating over delegators. When coins are delegated - to - - this validator, the validator is credited with a delegation whose number - of - - bond shares is based on the amount of coins delegated divided by the - current - - exchange rate. Voting power can be calculated as total bonded shares - - multiplied by exchange rate. - cosmos.base.abci.v1beta1.ABCIMessageLog: - type: object - properties: - msg_index: - type: integer - format: int64 - log: - type: string - events: - type: array - items: - type: object - properties: - type: - type: string - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - description: >- - Attribute defines an attribute wrapper where the key and value - are - - strings instead of raw bytes. - description: |- - StringEvent defines en Event object wrapper where all the attributes - contain key/value pairs that are strings instead of raw bytes. - description: |- - Events contains a slice of Event objects that were emitted during some - execution. - description: >- - ABCIMessageLog defines a structure containing an indexed tx ABCI message - log. - cosmos.base.abci.v1beta1.Attribute: - type: object - properties: - key: - type: string - value: - type: string - description: |- - Attribute defines an attribute wrapper where the key and value are - strings instead of raw bytes. - cosmos.base.abci.v1beta1.GasInfo: - type: object - properties: - gas_wanted: - type: string - format: uint64 - description: GasWanted is the maximum units of work we allow this tx to perform. - gas_used: - type: string - format: uint64 - description: GasUsed is the amount of gas actually consumed. - description: GasInfo defines tx execution gas context. - cosmos.base.abci.v1beta1.Result: - type: object - properties: - data: - type: string - format: byte - description: >- - Data is any data returned from message or handler execution. It MUST - be - - length prefixed in order to separate data from multiple message - executions. - log: - type: string - description: Log contains the log information from message or handler execution. - events: - type: array - items: - type: object - properties: - type: - type: string - attributes: - type: array - items: - type: object - properties: - key: - type: string - format: byte - value: - type: string - format: byte - index: - type: boolean - format: boolean - description: >- - EventAttribute is a single key-value pair, associated with an - event. - description: >- - Event allows application developers to attach additional information - to - - ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and - ResponseDeliverTx. + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - Later, transactions may be queried using these events. - description: >- - Events contains a slice of Event objects that were emitted during - message + If the embedded message type is well-known and has a custom JSON - or handler execution. 
- description: Result is the union of ResponseFormat and ResponseCheckTx. - cosmos.base.abci.v1beta1.StringEvent: - type: object - properties: - type: - type: string - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - description: |- - Attribute defines an attribute wrapper where the key and value are - strings instead of raw bytes. - description: |- - StringEvent defines en Event object wrapper where all the attributes - contain key/value pairs that are strings instead of raw bytes. - cosmos.base.abci.v1beta1.TxResponse: - type: object - properties: - height: - type: string - format: int64 - title: The block height - txhash: - type: string - description: The transaction hash. - codespace: - type: string - title: Namespace for the Code - code: - type: integer - format: int64 - description: Response code. - data: - type: string - description: Result bytes, if any. - raw_log: - type: string - description: |- - The output of the application's logger (raw string). May be - non-deterministic. - logs: - type: array - items: - type: object - properties: - msg_index: - type: integer - format: int64 - log: - type: string - events: - type: array - items: - type: object - properties: - type: - type: string - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - description: >- - Attribute defines an attribute wrapper where the key and - value are + representation, that representation will be embedded adding a field - strings instead of raw bytes. - description: >- - StringEvent defines en Event object wrapper where all the - attributes + `value` which holds the custom JSON in addition to the `@type` - contain key/value pairs that are strings instead of raw bytes. - description: >- - Events contains a slice of Event objects that were emitted - during some + field. Example (for message [google.protobuf.Duration][]): - execution. - description: >- - ABCIMessageLog defines a structure containing an indexed tx ABCI - message log. - description: >- - The output of the application's logger (typed). May be - non-deterministic. - info: + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + expiration: type: string - description: Additional information. May be non-deterministic. - gas_wanted: + format: date-time + title: >- + time when the grant will expire and will be pruned. If null, then the + grant + + doesn't have a time expiration (other conditions in `authorization` + + may apply to invalidate the grant) + description: |- + Grant gives permissions to execute + the provide method with expiration time. + cosmos.authz.v1beta1.GrantAuthorization: + type: object + properties: + granter: type: string - format: int64 - description: Amount of gas requested for transaction. - gas_used: + grantee: type: string - format: int64 - description: Amount of gas consumed by transaction. - tx: + authorization: type: object properties: type_url: @@ -44205,7 +66754,7 @@ definitions: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -44215,13 +66764,16 @@ definitions: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... 
foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -44239,7 +66791,6 @@ definitions: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -44271,599 +66822,753 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - timestamp: + expiration: type: string - description: >- - Time of the previous block. For heights > 1, it's the weighted median - of - - the timestamps of the valid votes in the block.LastCommit. For height - == 1, - - it's genesis time. - description: >- - TxResponse defines a structure containing relevant tx data and metadata. - The + format: date-time + title: >- + GrantAuthorization extends a grant with both the addresses of the grantee + and granter. - tags are stringified and the log is JSON decoded. - cosmos.crypto.multisig.v1beta1.CompactBitArray: + It is used in genesis.proto and query.proto + cosmos.authz.v1beta1.QueryGranteeGrantsResponse: type: object properties: - extra_bits_stored: - type: integer - format: int64 - elems: - type: string - format: byte - description: |- - CompactBitArray is an implementation of a space efficient bit array. - This is used to ensure that the encoded data takes up a minimal amount of - space after proto encoding. - This is not thread safe, and is not intended for concurrent usage. - cosmos.tx.signing.v1beta1.SignMode: - type: string - enum: - - SIGN_MODE_UNSPECIFIED - - SIGN_MODE_DIRECT - - SIGN_MODE_TEXTUAL - - SIGN_MODE_LEGACY_AMINO_JSON - default: SIGN_MODE_UNSPECIFIED - description: |- - SignMode represents a signing mode with its own security guarantees. + grants: + type: array + items: + type: object + properties: + granter: + type: string + grantee: + type: string + authorization: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - - SIGN_MODE_UNSPECIFIED: SIGN_MODE_UNSPECIFIED specifies an unknown signing mode and will be - rejected - - SIGN_MODE_DIRECT: SIGN_MODE_DIRECT specifies a signing mode which uses SignDoc and is - verified with raw bytes from Tx - - SIGN_MODE_TEXTUAL: SIGN_MODE_TEXTUAL is a future signing mode that will verify some - human-readable textual representation on top of the binary representation - from SIGN_MODE_DIRECT - - SIGN_MODE_LEGACY_AMINO_JSON: SIGN_MODE_LEGACY_AMINO_JSON is a backwards compatibility mode which uses - Amino JSON and will be removed in the future - cosmos.tx.v1beta1.AuthInfo: + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally set + up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. 
(Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + expiration: + type: string + format: date-time + title: >- + GrantAuthorization extends a grant with both the addresses of the + grantee and granter. + + It is used in genesis.proto and query.proto + description: grants is a list of grants granted to the grantee. + pagination: + description: pagination defines an pagination for the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryGranteeGrantsResponse is the response type for the + Query/GranteeGrants RPC method. 
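As a minimal sketch of how a client might walk the pagination fields described in QueryGranteeGrantsResponse above (feeding pagination.next_key back in as pagination.key until it comes back empty), the snippet below assumes the standard Cosmos SDK gRPC-gateway route /cosmos/authz/v1beta1/grants/grantee/{grantee} and a local REST endpoint on port 1317; neither the route, the node URL, nor the sample grantee address appears in this diff.

    // Illustrative only: pages through grantee grants using next_key as the cursor.
    package main

    import (
    	"encoding/json"
    	"fmt"
    	"net/http"
    	"net/url"
    )

    type granteeGrantsPage struct {
    	Grants []struct {
    		Granter       string          `json:"granter"`
    		Grantee       string          `json:"grantee"`
    		Authorization json.RawMessage `json:"authorization"` // Any: {"@type": "...", ...}
    		Expiration    string          `json:"expiration"`
    	} `json:"grants"`
    	Pagination struct {
    		NextKey string `json:"next_key"` // base64-encoded; empty when there are no more results
    		Total   string `json:"total"`
    	} `json:"pagination"`
    }

    func main() {
    	base := "http://localhost:1317"                        // assumed REST endpoint
    	grantee := "akash1examplegranteeaddr0000000000000000"  // hypothetical bech32 address
    	nextKey := ""

    	for {
    		u := fmt.Sprintf("%s/cosmos/authz/v1beta1/grants/grantee/%s", base, grantee)
    		if nextKey != "" {
    			u += "?pagination.key=" + url.QueryEscape(nextKey)
    		}
    		resp, err := http.Get(u)
    		if err != nil {
    			panic(err)
    		}
    		var page granteeGrantsPage
    		if err := json.NewDecoder(resp.Body).Decode(&page); err != nil {
    			resp.Body.Close()
    			panic(err)
    		}
    		resp.Body.Close()

    		for _, g := range page.Grants {
    			fmt.Printf("granter=%s grantee=%s\n", g.Granter, g.Grantee)
    		}
    		if page.Pagination.NextKey == "" {
    			break // next_key is empty once the result set is exhausted
    		}
    		nextKey = page.Pagination.NextKey
    	}
    }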
+ cosmos.authz.v1beta1.QueryGranterGrantsResponse: type: object properties: - signer_infos: + grants: type: array items: - $ref: '#/definitions/cosmos.tx.v1beta1.SignerInfo' - description: >- - signer_infos defines the signing modes for the required signers. The - number + type: object + properties: + granter: + type: string + grantee: + type: string + authorization: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally set + up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' - and order of elements must match the required signers from TxBody's + in the type URL, for example "foo.bar.com/x/y.z" will yield type - messages. The first element is the primary signer and the one which - pays + name "y.z". - the fee. - fee: - description: >- - Fee is the fee and gas limit for the transaction. 
The first signer is - the - primary signer and the one which pays the fee. The fee can be - calculated - based on the cost of evaluating the body and doing signature - verification + JSON - of the signers. This can be estimated via simulation. - type: object - properties: - amount: - type: array - items: - type: object - properties: - denom: - type: string - amount: - type: string - description: >- - Coin defines a token with a denomination and an amount. + The JSON representation of an `Any` value uses the regular - NOTE: The amount field is an Int which implements the custom - method + representation of the deserialized, embedded message, with an - signatures required by gogoproto. - title: amount is the amount of coins to be paid as a fee - gas_limit: - type: string - format: uint64 - title: >- - gas_limit is the maximum gas that can be used in transaction - processing + additional field `@type` which contains the type URL. Example: - before an out of gas error occurs - payer: - type: string - description: >- - if unset, the first signer is responsible for paying the fees. If - set, the specified account must pay the fees. + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - the payer must be a tx signer (and thus have signed this field in - AuthInfo). + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - setting this field does *not* change the ordering of required - signers for the transaction. - granter: - type: string - title: >- - if set, the fee payer (either the first signer or the value of the - payer field) requests that a fee grant be used + If the embedded message type is well-known and has a custom JSON - to pay fees instead of the fee payer's own balance. If an - appropriate fee grant does not exist or the chain does + representation, that representation will be embedded adding a + field - not support fee grants, this will fail - description: |- - AuthInfo describes the fee and signer modes that are used to sign a - transaction. - cosmos.tx.v1beta1.BroadcastMode: - type: string - enum: - - BROADCAST_MODE_UNSPECIFIED - - BROADCAST_MODE_BLOCK - - BROADCAST_MODE_SYNC - - BROADCAST_MODE_ASYNC - default: BROADCAST_MODE_UNSPECIFIED - description: >- - BroadcastMode specifies the broadcast mode for the TxService.Broadcast RPC - method. + `value` which holds the custom JSON in addition to the `@type` - - BROADCAST_MODE_UNSPECIFIED: zero-value for mode ordering - - BROADCAST_MODE_BLOCK: BROADCAST_MODE_BLOCK defines a tx broadcasting mode where the client waits for - the tx to be committed in a block. - - BROADCAST_MODE_SYNC: BROADCAST_MODE_SYNC defines a tx broadcasting mode where the client waits for - a CheckTx execution response only. - - BROADCAST_MODE_ASYNC: BROADCAST_MODE_ASYNC defines a tx broadcasting mode where the client returns - immediately. - cosmos.tx.v1beta1.BroadcastTxRequest: - type: object - properties: - tx_bytes: - type: string - format: byte - description: tx_bytes is the raw transaction. - mode: - type: string - enum: - - BROADCAST_MODE_UNSPECIFIED - - BROADCAST_MODE_BLOCK - - BROADCAST_MODE_SYNC - - BROADCAST_MODE_ASYNC - default: BROADCAST_MODE_UNSPECIFIED - description: >- - BroadcastMode specifies the broadcast mode for the TxService.Broadcast - RPC method. + field. 
Example (for message [google.protobuf.Duration][]): - - BROADCAST_MODE_UNSPECIFIED: zero-value for mode ordering - - BROADCAST_MODE_BLOCK: BROADCAST_MODE_BLOCK defines a tx broadcasting mode where the client waits for - the tx to be committed in a block. - - BROADCAST_MODE_SYNC: BROADCAST_MODE_SYNC defines a tx broadcasting mode where the client waits for - a CheckTx execution response only. - - BROADCAST_MODE_ASYNC: BROADCAST_MODE_ASYNC defines a tx broadcasting mode where the client returns - immediately. - description: |- - BroadcastTxRequest is the request type for the Service.BroadcastTxRequest - RPC method. - cosmos.tx.v1beta1.BroadcastTxResponse: - type: object - properties: - tx_response: + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + expiration: + type: string + format: date-time + title: >- + GrantAuthorization extends a grant with both the addresses of the + grantee and granter. + + It is used in genesis.proto and query.proto + description: grants is a list of grants granted by the granter. + pagination: + description: pagination defines an pagination for the response. type: object properties: - height: - type: string - format: int64 - title: The block height - txhash: - type: string - description: The transaction hash. - codespace: - type: string - title: Namespace for the Code - code: - type: integer - format: int64 - description: Response code. - data: - type: string - description: Result bytes, if any. - raw_log: + next_key: type: string + format: byte description: |- - The output of the application's logger (raw string). May be - non-deterministic. - logs: - type: array - items: + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryGranterGrantsResponse is the response type for the + Query/GranterGrants RPC method. + cosmos.authz.v1beta1.QueryGrantsResponse: + type: object + properties: + grants: + type: array + items: + type: object + properties: + authorization: type: object properties: - msg_index: - type: integer - format: int64 - log: + type_url: type: string - events: - type: array - items: - type: object - properties: - type: - type: string - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - description: >- - Attribute defines an attribute wrapper where the key - and value are - - strings instead of raw bytes. - description: >- - StringEvent defines en Event object wrapper where all the - attributes - - contain key/value pairs that are strings instead of raw - bytes. description: >- - Events contains a slice of Event objects that were emitted - during some - - execution. - description: >- - ABCIMessageLog defines a structure containing an indexed tx ABCI - message log. - description: >- - The output of the application's logger (typed). May be - non-deterministic. - info: - type: string - description: Additional information. May be non-deterministic. - gas_wanted: - type: string - format: int64 - description: Amount of gas requested for transaction. - gas_used: - type: string - format: int64 - description: Amount of gas consumed by transaction. 
- tx: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized + A URL/resource name that uniquely identifies the type of the + serialized - protocol buffer message. This string must contain at least + protocol buffer message. This string must contain at least - one "/" character. The last segment of the URL's path must - represent + one "/" character. The last segment of the URL's path must + represent - the fully qualified name of the type (as in + the fully qualified name of the type (as in - `path/google.protobuf.Duration`). The name should be in a - canonical form + `path/google.protobuf.Duration`). The name should be in a + canonical form - (e.g., leading "." is not accepted). + (e.g., leading "." is not accepted). - In practice, teams usually precompile into the binary all - types that they + In practice, teams usually precompile into the binary all + types that they - expect it to use in the context of Any. However, for URLs - which use the + expect it to use in the context of Any. However, for URLs + which use the - scheme `http`, `https`, or no scheme, one can optionally set - up a type + scheme `http`, `https`, or no scheme, one can optionally set + up a type - server that maps type URLs to message definitions as follows: + server that maps type URLs to message definitions as + follows: - * If no scheme is provided, `https` is assumed. + * If no scheme is provided, `https` is assumed. - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on - the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - Note: this functionality is not currently available in the - official + Note: this functionality is not currently available in the + official - protobuf release, and it is not used for type URLs beginning - with + protobuf release, and it is not used for type URLs beginning + with - type.googleapis.com. + type.googleapis.com. - Schemes other than `http`, `https` (or the empty scheme) might - be + Schemes other than `http`, `https` (or the empty scheme) + might be - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message - along with a + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a - URL that describes the type of the serialized message. + URL that describes the type of the serialized message. 
- Protobuf library provides support to pack/unpack Any values in the - form + Protobuf library provides support to pack/unpack Any values in + the form - of utility functions or additional generated methods of the Any - type. + of utility functions or additional generated methods of the Any + type. - Example 1: Pack and unpack a message in C++. + Example 1: Pack and unpack a message in C++. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { + Foo foo = ...; + Any any; + any.PackFrom(foo); ... - } + if (any.UnpackTo(&foo)) { + ... + } - Example 2: Pack and unpack a message in Java. + Example 2: Pack and unpack a message in Java. - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) + foo = Foo(...) + any = Any() + any.Pack(foo) ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } ... - } + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - The pack methods provided by protobuf library will by default use + The pack methods provided by protobuf library will by default + use - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + 'type.googleapis.com/full.type.name' as the type URL and the + unpack - methods only use the fully qualified type name after the last '/' + methods only use the fully qualified type name after the last + '/' - in the type URL, for example "foo.bar.com/x/y.z" will yield type + in the type URL, for example "foo.bar.com/x/y.z" will yield type - name "y.z". + name "y.z". - JSON + JSON - ==== - The JSON representation of an `Any` value uses the regular + The JSON representation of an `Any` value uses the regular - representation of the deserialized, embedded message, with an + representation of the deserialized, embedded message, with an - additional field `@type` which contains the type URL. Example: + additional field `@type` which contains the type URL. Example: - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - If the embedded message type is well-known and has a custom JSON + If the embedded message type is well-known and has a custom JSON - representation, that representation will be embedded adding a - field + representation, that representation will be embedded adding a + field - `value` which holds the custom JSON in addition to the `@type` + `value` which holds the custom JSON in addition to the `@type` - field. Example (for message [google.protobuf.Duration][]): + field. 
Example (for message [google.protobuf.Duration][]): - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - timestamp: + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + expiration: + type: string + format: date-time + title: >- + time when the grant will expire and will be pruned. If null, + then the grant + + doesn't have a time expiration (other conditions in + `authorization` + + may apply to invalidate the grant) + description: |- + Grant gives permissions to execute + the provide method with expiration time. + description: authorizations is a list of grants granted for grantee by granter. + pagination: + description: pagination defines an pagination for the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryGrantsResponse is the response type for the Query/Authorizations RPC + method. + cosmos.feegrant.v1beta1.Grant: + type: object + properties: + granter: + type: string + description: >- + granter is the address of the user granting an allowance of their + funds. + grantee: + type: string + description: >- + grantee is the address of the user being granted an allowance of + another user's funds. + allowance: + description: allowance can be any of basic, periodic, allowed fee allowance. + type: object + properties: + type_url: type: string description: >- - Time of the previous block. For heights > 1, it's the weighted - median of + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all types + that they - the timestamps of the valid votes in the block.LastCommit. For - height == 1, + expect it to use in the context of Any. However, for URLs which + use the - it's genesis time. - description: >- - TxResponse defines a structure containing relevant tx data and - metadata. The + scheme `http`, `https`, or no scheme, one can optionally set up a + type - tags are stringified and the log is JSON decoded. - description: |- - BroadcastTxResponse is the response type for the - Service.BroadcastTx method. - cosmos.tx.v1beta1.Fee: - type: object - properties: - amount: - type: array - items: - type: object - properties: - denom: - type: string - amount: - type: string - description: |- - Coin defines a token with a denomination and an amount. + server that maps type URLs to message definitions as follows: - NOTE: The amount field is an Int which implements the custom method - signatures required by gogoproto. - title: amount is the amount of coins to be paid as a fee - gas_limit: - type: string - format: uint64 - title: >- - gas_limit is the maximum gas that can be used in transaction - processing - before an out of gas error occurs - payer: - type: string - description: >- - if unset, the first signer is responsible for paying the fees. 
If set, - the specified account must pay the fees. + * If no scheme is provided, `https` is assumed. - the payer must be a tx signer (and thus have signed this field in - AuthInfo). + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - setting this field does *not* change the ordering of required signers - for the transaction. - granter: - type: string - title: >- - if set, the fee payer (either the first signer or the value of the - payer field) requests that a fee grant be used + Note: this functionality is not currently available in the + official - to pay fees instead of the fee payer's own balance. If an appropriate - fee grant does not exist or the chain does + protobuf release, and it is not used for type URLs beginning with - not support fee grants, this will fail - description: >- - Fee includes the amount of coins paid in fees and the maximum + type.googleapis.com. - gas to be used by the transaction. The ratio yields an effective - "gasprice", - which must be above some miminum to be accepted into the mempool. - cosmos.tx.v1beta1.GetTxResponse: + Schemes other than `http`, `https` (or the empty scheme) might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above specified + type. + title: Grant is stored in the KVStore to record a grant with full context + cosmos.feegrant.v1beta1.QueryAllowanceResponse: type: object properties: - tx: - $ref: '#/definitions/cosmos.tx.v1beta1.Tx' - description: tx is the queried transaction. - tx_response: + allowance: + description: allowance is a allowance granted for grantee by granter. type: object properties: - height: - type: string - format: int64 - title: The block height - txhash: - type: string - description: The transaction hash. - codespace: - type: string - title: Namespace for the Code - code: - type: integer - format: int64 - description: Response code. - data: - type: string - description: Result bytes, if any. - raw_log: + granter: type: string - description: |- - The output of the application's logger (raw string). May be - non-deterministic. - logs: - type: array - items: - type: object - properties: - msg_index: - type: integer - format: int64 - log: - type: string - events: - type: array - items: - type: object - properties: - type: - type: string - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - description: >- - Attribute defines an attribute wrapper where the key - and value are - - strings instead of raw bytes. - description: >- - StringEvent defines en Event object wrapper where all the - attributes - - contain key/value pairs that are strings instead of raw - bytes. - description: >- - Events contains a slice of Event objects that were emitted - during some - - execution. - description: >- - ABCIMessageLog defines a structure containing an indexed tx ABCI - message log. description: >- - The output of the application's logger (typed). May be - non-deterministic. - info: - type: string - description: Additional information. May be non-deterministic. 
- gas_wanted: - type: string - format: int64 - description: Amount of gas requested for transaction. - gas_used: + granter is the address of the user granting an allowance of their + funds. + grantee: type: string - format: int64 - description: Amount of gas consumed by transaction. - tx: + description: >- + grantee is the address of the user being granted an allowance of + another user's funds. + allowance: + description: allowance can be any of basic, periodic, allowed fee allowance. type: object properties: type_url: @@ -44927,218 +67632,142 @@ definitions: description: >- Must be a valid serialized protocol buffer of the above specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message - along with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values in the - form - - of utility functions or additional generated methods of the Any - type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + title: Grant is stored in the KVStore to record a grant with full context + description: >- + QueryAllowanceResponse is the response type for the Query/Allowance RPC + method. + cosmos.feegrant.v1beta1.QueryAllowancesByGranterResponse: + type: object + properties: + allowances: + type: array + items: + type: object + properties: + granter: + type: string + description: >- + granter is the address of the user granting an allowance of + their funds. + grantee: + type: string + description: >- + grantee is the address of the user being granted an allowance of + another user's funds. + allowance: + description: allowance can be any of basic, periodic, allowed fee allowance. + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - The pack methods provided by protobuf library will by default use + protocol buffer message. This string must contain at least - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + one "/" character. The last segment of the URL's path must + represent - methods only use the fully qualified type name after the last '/' + the fully qualified name of the type (as in - in the type URL, for example "foo.bar.com/x/y.z" will yield type + `path/google.protobuf.Duration`). The name should be in a + canonical form - name "y.z". + (e.g., leading "." is not accepted). + In practice, teams usually precompile into the binary all + types that they - JSON + expect it to use in the context of Any. However, for URLs + which use the - ==== + scheme `http`, `https`, or no scheme, one can optionally set + up a type - The JSON representation of an `Any` value uses the regular + server that maps type URLs to message definitions as + follows: - representation of the deserialized, embedded message, with an - additional field `@type` which contains the type URL. 
Example: + * If no scheme is provided, `https` is assumed. - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + Note: this functionality is not currently available in the + official - If the embedded message type is well-known and has a custom JSON + protobuf release, and it is not used for type URLs beginning + with - representation, that representation will be embedded adding a - field + type.googleapis.com. - `value` which holds the custom JSON in addition to the `@type` - field. Example (for message [google.protobuf.Duration][]): + Schemes other than `http`, `https` (or the empty scheme) + might be - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - timestamp: + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + title: Grant is stored in the KVStore to record a grant with full context + description: allowances that have been issued by the granter. + pagination: + description: pagination defines an pagination for the response. + type: object + properties: + next_key: type: string - description: >- - Time of the previous block. For heights > 1, it's the weighted - median of + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - the timestamps of the valid votes in the block.LastCommit. For - height == 1, + was set, its value is undefined otherwise + description: >- + QueryAllowancesByGranterResponse is the response type for the + Query/AllowancesByGranter RPC method. - it's genesis time. - description: >- - TxResponse defines a structure containing relevant tx data and - metadata. The - tags are stringified and the log is JSON decoded. - description: GetTxResponse is the response type for the Service.GetTx method. - cosmos.tx.v1beta1.GetTxsEventResponse: + Since: cosmos-sdk 0.46 + cosmos.feegrant.v1beta1.QueryAllowancesResponse: type: object properties: - txs: - type: array - items: - $ref: '#/definitions/cosmos.tx.v1beta1.Tx' - description: txs is the list of queried transactions. - tx_responses: + allowances: type: array items: type: object properties: - height: - type: string - format: int64 - title: The block height - txhash: - type: string - description: The transaction hash. - codespace: - type: string - title: Namespace for the Code - code: - type: integer - format: int64 - description: Response code. - data: - type: string - description: Result bytes, if any. - raw_log: + granter: type: string - description: |- - The output of the application's logger (raw string). May be - non-deterministic. 
- logs: - type: array - items: - type: object - properties: - msg_index: - type: integer - format: int64 - log: - type: string - events: - type: array - items: - type: object - properties: - type: - type: string - attributes: - type: array - items: - type: object - properties: - key: - type: string - value: - type: string - description: >- - Attribute defines an attribute wrapper where the - key and value are - - strings instead of raw bytes. - description: >- - StringEvent defines en Event object wrapper where all - the attributes - - contain key/value pairs that are strings instead of raw - bytes. - description: >- - Events contains a slice of Event objects that were emitted - during some - - execution. - description: >- - ABCIMessageLog defines a structure containing an indexed tx - ABCI message log. description: >- - The output of the application's logger (typed). May be - non-deterministic. - info: - type: string - description: Additional information. May be non-deterministic. - gas_wanted: - type: string - format: int64 - description: Amount of gas requested for transaction. - gas_used: + granter is the address of the user granting an allowance of + their funds. + grantee: type: string - format: int64 - description: Amount of gas consumed by transaction. - tx: + description: >- + grantee is the address of the user being granted an allowance of + another user's funds. + allowance: + description: allowance can be any of basic, periodic, allowed fee allowance. type: object properties: type_url: @@ -45169,293 +67798,273 @@ definitions: scheme `http`, `https`, or no scheme, one can optionally set up a type - server that maps type URLs to message definitions as - follows: + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) + might be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + title: Grant is stored in the KVStore to record a grant with full context + description: allowances are allowance's granted for grantee by granter. + pagination: + description: pagination defines an pagination for the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: >- + QueryAllowancesResponse is the response type for the Query/Allowances RPC + method. 
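Because the feegrant `allowance` field is an `Any`, its JSON form carries the `@type` discriminator described above, and a client has to branch on it before interpreting the payload. The sketch below assumes the usual feegrant type URLs (/cosmos.feegrant.v1beta1.BasicAllowance and PeriodicAllowance) and made-up sample addresses and amounts; none of those concrete values are defined in this hunk.

    // Minimal sketch: dispatching on the Any's @type in a QueryAllowanceResponse.
    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    type queryAllowanceResponse struct {
    	Allowance struct {
    		Granter   string          `json:"granter"`
    		Grantee   string          `json:"grantee"`
    		Allowance json.RawMessage `json:"allowance"` // Any in JSON form
    	} `json:"allowance"`
    }

    func main() {
    	// Hypothetical response body for illustration only.
    	raw := []byte(`{
    	  "allowance": {
    	    "granter": "akash1granterexample0000000000000000000",
    	    "grantee": "akash1granteeexample0000000000000000000",
    	    "allowance": {
    	      "@type": "/cosmos.feegrant.v1beta1.BasicAllowance",
    	      "spend_limit": [{"denom": "uakt", "amount": "1000000"}]
    	    }
    	  }
    	}`)

    	var resp queryAllowanceResponse
    	if err := json.Unmarshal(raw, &resp); err != nil {
    		panic(err)
    	}

    	// Peek at the Any's @type before deciding how to interpret the rest.
    	var head struct {
    		Type string `json:"@type"`
    	}
    	if err := json.Unmarshal(resp.Allowance.Allowance, &head); err != nil {
    		panic(err)
    	}

    	switch head.Type {
    	case "/cosmos.feegrant.v1beta1.BasicAllowance":
    		fmt.Println("basic allowance")
    	case "/cosmos.feegrant.v1beta1.PeriodicAllowance":
    		fmt.Println("periodic allowance")
    	default:
    		fmt.Println("unknown allowance type:", head.Type)
    	}
    }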
+ cosmos.nft.v1beta1.Class: + type: object + properties: + id: + type: string + title: >- + id defines the unique identifier of the NFT classification, similar to + the contract address of ERC721 + name: + type: string + title: >- + name defines the human-readable name of the NFT classification. + Optional + symbol: + type: string + title: symbol is an abbreviated name for nft classification. Optional + description: + type: string + title: description is a brief description of nft classification. Optional + uri: + type: string + title: >- + uri for the class metadata stored off chain. It can define schema for + Class and NFT `Data` attributes. Optional + uri_hash: + type: string + title: uri_hash is a hash of the document pointed by uri. Optional + data: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + one "/" character. The last segment of the URL's path must + represent - * If no scheme is provided, `https` is assumed. + the fully qualified name of the type (as in - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on - the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + `path/google.protobuf.Duration`). The name should be in a + canonical form - Note: this functionality is not currently available in the - official + (e.g., leading "." is not accepted). - protobuf release, and it is not used for type URLs beginning - with - type.googleapis.com. + In practice, teams usually precompile into the binary all types + that they + expect it to use in the context of Any. However, for URLs which + use the - Schemes other than `http`, `https` (or the empty scheme) - might be + scheme `http`, `https`, or no scheme, one can optionally set up a + type - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message - along with a + server that maps type URLs to message definitions as follows: - URL that describes the type of the serialized message. + * If no scheme is provided, `https` is assumed. - Protobuf library provides support to pack/unpack Any values in - the form + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - of utility functions or additional generated methods of the Any - type. + Note: this functionality is not currently available in the + official + protobuf release, and it is not used for type URLs beginning with - Example 1: Pack and unpack a message in C++. + type.googleapis.com. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - Example 2: Pack and unpack a message in Java. 
+ Schemes other than `http`, `https` (or the empty scheme) might be - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above specified + type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a - Example 3: Pack and unpack a message in Python. + URL that describes the type of the serialized message. - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - Example 4: Pack and unpack a message in Go + Protobuf library provides support to pack/unpack Any values in the + form - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + of utility functions or additional generated methods of the Any type. - The pack methods provided by protobuf library will by default - use - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + Example 1: Pack and unpack a message in C++. - methods only use the fully qualified type name after the last - '/' + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - in the type URL, for example "foo.bar.com/x/y.z" will yield type + Example 2: Pack and unpack a message in Java. - name "y.z". + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + Example 3: Pack and unpack a message in Python. + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - JSON + Example 4: Pack and unpack a message in Go - ==== + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - The JSON representation of an `Any` value uses the regular + The pack methods provided by protobuf library will by default use - representation of the deserialized, embedded message, with an + 'type.googleapis.com/full.type.name' as the type URL and the unpack - additional field `@type` which contains the type URL. Example: + methods only use the fully qualified type name after the last '/' - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + in the type URL, for example "foo.bar.com/x/y.z" will yield type - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + name "y.z". - If the embedded message type is well-known and has a custom JSON - representation, that representation will be embedded adding a - field - `value` which holds the custom JSON in addition to the `@type` + JSON - field. Example (for message [google.protobuf.Duration][]): - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - timestamp: - type: string - description: >- - Time of the previous block. For heights > 1, it's the weighted - median of + The JSON representation of an `Any` value uses the regular - the timestamps of the valid votes in the block.LastCommit. For - height == 1, + representation of the deserialized, embedded message, with an - it's genesis time. - description: >- - TxResponse defines a structure containing relevant tx data and - metadata. The + additional field `@type` which contains the type URL. 
Example: - tags are stringified and the log is JSON decoded. - description: tx_responses is the list of queried TxResponses. - pagination: - description: pagination defines an pagination for the response. - type: object - properties: - next_key: - type: string - format: byte - title: |- - next_key is the key to be passed to PageRequest.key to - query the next page most efficiently - total: - type: string - format: uint64 - title: >- - total is total number of results available if - PageRequest.count_total + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - was set, its value is undefined otherwise - description: |- - GetTxsEventResponse is the response type for the Service.TxsByEvents - RPC method. - cosmos.tx.v1beta1.ModeInfo: - type: object - properties: - single: - title: single represents a single signer - type: object - properties: - mode: - title: mode is the signing mode of the single signer - type: string - enum: - - SIGN_MODE_UNSPECIFIED - - SIGN_MODE_DIRECT - - SIGN_MODE_TEXTUAL - - SIGN_MODE_LEGACY_AMINO_JSON - default: SIGN_MODE_UNSPECIFIED - description: >- - SignMode represents a signing mode with its own security - guarantees. + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - - SIGN_MODE_UNSPECIFIED: SIGN_MODE_UNSPECIFIED specifies an unknown signing mode and will be - rejected - - SIGN_MODE_DIRECT: SIGN_MODE_DIRECT specifies a signing mode which uses SignDoc and is - verified with raw bytes from Tx - - SIGN_MODE_TEXTUAL: SIGN_MODE_TEXTUAL is a future signing mode that will verify some - human-readable textual representation on top of the binary - representation + If the embedded message type is well-known and has a custom JSON - from SIGN_MODE_DIRECT - - SIGN_MODE_LEGACY_AMINO_JSON: SIGN_MODE_LEGACY_AMINO_JSON is a backwards compatibility mode which uses - Amino JSON and will be removed in the future - multi: - $ref: '#/definitions/cosmos.tx.v1beta1.ModeInfo.Multi' - title: multi represents a nested multisig signer - description: ModeInfo describes the signing mode of a single or nested multisig signer. - cosmos.tx.v1beta1.ModeInfo.Multi: - type: object - properties: - bitarray: - title: bitarray specifies which keys within the multisig are signing - type: object - properties: - extra_bits_stored: - type: integer - format: int64 - elems: - type: string - format: byte - description: >- - CompactBitArray is an implementation of a space efficient bit array. + representation, that representation will be embedded adding a field - This is used to ensure that the encoded data takes up a minimal amount - of + `value` which holds the custom JSON in addition to the `@type` - space after proto encoding. + field. Example (for message [google.protobuf.Duration][]): - This is not thread safe, and is not intended for concurrent usage. - mode_infos: - type: array - items: - $ref: '#/definitions/cosmos.tx.v1beta1.ModeInfo' - title: |- - mode_infos is the corresponding modes of the signers of the multisig - which could include nested multisig public keys - title: Multi is the mode info for a multisig public key - cosmos.tx.v1beta1.ModeInfo.Single: + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + title: data is the app specific metadata of the NFT class. Optional + description: Class defines the class of the nft type. 
+ cosmos.nft.v1beta1.NFT: type: object properties: - mode: - title: mode is the signing mode of the single signer + class_id: type: string - enum: - - SIGN_MODE_UNSPECIFIED - - SIGN_MODE_DIRECT - - SIGN_MODE_TEXTUAL - - SIGN_MODE_LEGACY_AMINO_JSON - default: SIGN_MODE_UNSPECIFIED - description: >- - SignMode represents a signing mode with its own security guarantees. - - - SIGN_MODE_UNSPECIFIED: SIGN_MODE_UNSPECIFIED specifies an unknown signing mode and will be - rejected - - SIGN_MODE_DIRECT: SIGN_MODE_DIRECT specifies a signing mode which uses SignDoc and is - verified with raw bytes from Tx - - SIGN_MODE_TEXTUAL: SIGN_MODE_TEXTUAL is a future signing mode that will verify some - human-readable textual representation on top of the binary - representation - - from SIGN_MODE_DIRECT - - SIGN_MODE_LEGACY_AMINO_JSON: SIGN_MODE_LEGACY_AMINO_JSON is a backwards compatibility mode which uses - Amino JSON and will be removed in the future - title: |- - Single is the mode info for a single signer. It is structured as a message - to allow for additional fields such as locale for SIGN_MODE_TEXTUAL in the - future - cosmos.tx.v1beta1.OrderBy: - type: string - enum: - - ORDER_BY_UNSPECIFIED - - ORDER_BY_ASC - - ORDER_BY_DESC - default: ORDER_BY_UNSPECIFIED - description: >- - - ORDER_BY_UNSPECIFIED: ORDER_BY_UNSPECIFIED specifies an unknown sorting - order. OrderBy defaults to ASC in this case. - - ORDER_BY_ASC: ORDER_BY_ASC defines ascending order - - ORDER_BY_DESC: ORDER_BY_DESC defines descending order - title: OrderBy defines the sorting order - cosmos.tx.v1beta1.SignerInfo: - type: object - properties: - public_key: + title: >- + class_id associated with the NFT, similar to the contract address of + ERC721 + id: + type: string + title: id is a unique identifier of the NFT + uri: + type: string + title: uri for the NFT metadata stored off chain + uri_hash: + type: string + title: uri_hash is a hash of the document pointed by uri + data: type: object properties: type_url: @@ -45548,7 +68157,7 @@ definitions: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -45558,13 +68167,16 @@ definitions: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -45582,7 +68194,6 @@ definitions: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -45614,124 +68225,251 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - mode_info: - $ref: '#/definitions/cosmos.tx.v1beta1.ModeInfo' - title: |- - mode_info describes the signing mode of the signer and is a nested - structure to support nested multisig pubkey's - sequence: + title: data is an app specific data of the NFT. Optional + description: NFT defines the NFT. 
+ cosmos.nft.v1beta1.QueryBalanceResponse: + type: object + properties: + amount: type: string format: uint64 - description: >- - sequence is the sequence of the account, which describes the + title: amount is the number of all NFTs of a given class owned by the owner + title: QueryBalanceResponse is the response type for the Query/Balance RPC method + cosmos.nft.v1beta1.QueryClassResponse: + type: object + properties: + class: + type: object + properties: + id: + type: string + title: >- + id defines the unique identifier of the NFT classification, + similar to the contract address of ERC721 + name: + type: string + title: >- + name defines the human-readable name of the NFT classification. + Optional + symbol: + type: string + title: symbol is an abbreviated name for nft classification. Optional + description: + type: string + title: description is a brief description of nft classification. Optional + uri: + type: string + title: >- + uri for the class metadata stored off chain. It can define schema + for Class and NFT `Data` attributes. Optional + uri_hash: + type: string + title: uri_hash is a hash of the document pointed by uri. Optional + data: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally set + up a type + + server that maps type URLs to message definitions as follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs beginning + with + + type.googleapis.com. + + + Schemes other than `http`, `https` (or the empty scheme) might + be + + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in the + form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... 
+ if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: - number of committed transactions signed by a given address. It is used - to + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - prevent replay attacks. - description: |- - SignerInfo describes the public key and signing mode of a single top-level - signer. - cosmos.tx.v1beta1.SimulateRequest: - type: object - properties: - tx: - $ref: '#/definitions/cosmos.tx.v1beta1.Tx' - description: |- - tx is the transaction to simulate. - Deprecated. Send raw tx bytes instead. - tx_bytes: - type: string - format: byte - description: tx_bytes is the raw transaction. - description: |- - SimulateRequest is the request type for the Service.Simulate - RPC method. - cosmos.tx.v1beta1.SimulateResponse: - type: object - properties: - gas_info: - description: gas_info is the information about gas used in the simulation. - type: object - properties: - gas_wanted: - type: string - format: uint64 - description: >- - GasWanted is the maximum units of work we allow this tx to - perform. - gas_used: - type: string - format: uint64 - description: GasUsed is the amount of gas actually consumed. - result: - description: result is the result of the simulation. - type: object - properties: - data: - type: string - format: byte - description: >- - Data is any data returned from message or handler execution. It - MUST be + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - length prefixed in order to separate data from multiple message - executions. - log: - type: string - description: >- - Log contains the log information from message or handler - execution. - events: - type: array - items: - type: object - properties: - type: - type: string - attributes: - type: array - items: - type: object - properties: - key: - type: string - format: byte - value: - type: string - format: byte - index: - type: boolean - format: boolean - description: >- - EventAttribute is a single key-value pair, associated with - an event. - description: >- - Event allows application developers to attach additional - information to + If the embedded message type is well-known and has a custom JSON - ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and - ResponseDeliverTx. + representation, that representation will be embedded adding a + field - Later, transactions may be queried using these events. - description: >- - Events contains a slice of Event objects that were emitted during - message + `value` which holds the custom JSON in addition to the `@type` - or handler execution. - description: |- - SimulateResponse is the response type for the - Service.SimulateRPC method. - cosmos.tx.v1beta1.Tx: + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + title: data is the app specific metadata of the NFT class. Optional + description: Class defines the class of the nft type. + title: QueryClassResponse is the response type for the Query/Class RPC method + cosmos.nft.v1beta1.QueryClassesResponse: type: object properties: - body: - title: body is the processable content of the transaction - type: object - properties: - messages: - type: array - items: + classes: + type: array + items: + type: object + properties: + id: + type: string + title: >- + id defines the unique identifier of the NFT classification, + similar to the contract address of ERC721 + name: + type: string + title: >- + name defines the human-readable name of the NFT classification. + Optional + symbol: + type: string + title: symbol is an abbreviated name for nft classification. Optional + description: + type: string + title: >- + description is a brief description of nft classification. + Optional + uri: + type: string + title: >- + uri for the class metadata stored off chain. It can define + schema for Class and NFT `Data` attributes. Optional + uri_hash: + type: string + title: uri_hash is a hash of the document pointed by uri. Optional + data: type: object properties: type_url: @@ -45830,7 +68568,7 @@ definitions: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -45840,13 +68578,16 @@ definitions: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -45867,7 +68608,6 @@ definitions: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -45900,223 +68640,243 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - description: >- - messages is a list of messages to be executed. The required - signers of - - those messages define the number and order of elements in - AuthInfo's - - signer_infos and Tx's signatures. Each required signer address is - added to - - the list only the first time it occurs. - - By convention, the first required signer (usually from the first - message) - - is referred to as the primary signer and pays the fee for the - whole - - transaction. - memo: + title: data is the app specific metadata of the NFT class. Optional + description: Class defines the class of the nft type. + description: class defines the class of the nft type. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: type: string - description: >- - memo is any arbitrary note/comment to be added to the transaction. - - WARNING: in clients, any publicly exposed text should not be - called memo, - - but should be called `note` instead (see - https://github.com/cosmos/cosmos-sdk/issues/9122). - timeout_height: + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. 
+ total: type: string format: uint64 - title: |- - timeout is the block height after which this transaction will not - be processed by the chain - extension_options: - type: array - items: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized + title: >- + total is total number of results available if + PageRequest.count_total - protocol buffer message. This string must contain at least + was set, its value is undefined otherwise + title: QueryClassesResponse is the response type for the Query/Classes RPC method + cosmos.nft.v1beta1.QueryNFTResponse: + type: object + properties: + nft: + type: object + properties: + class_id: + type: string + title: >- + class_id associated with the NFT, similar to the contract address + of ERC721 + id: + type: string + title: id is a unique identifier of the NFT + uri: + type: string + title: uri for the NFT metadata stored off chain + uri_hash: + type: string + title: uri_hash is a hash of the document pointed by uri + data: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - one "/" character. The last segment of the URL's path must - represent + protocol buffer message. This string must contain at least - the fully qualified name of the type (as in + one "/" character. The last segment of the URL's path must + represent - `path/google.protobuf.Duration`). The name should be in a - canonical form + the fully qualified name of the type (as in - (e.g., leading "." is not accepted). + `path/google.protobuf.Duration`). The name should be in a + canonical form + (e.g., leading "." is not accepted). - In practice, teams usually precompile into the binary all - types that they - expect it to use in the context of Any. However, for URLs - which use the + In practice, teams usually precompile into the binary all + types that they - scheme `http`, `https`, or no scheme, one can optionally set - up a type + expect it to use in the context of Any. However, for URLs + which use the - server that maps type URLs to message definitions as - follows: + scheme `http`, `https`, or no scheme, one can optionally set + up a type + server that maps type URLs to message definitions as follows: - * If no scheme is provided, `https` is assumed. - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on - the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + * If no scheme is provided, `https` is assumed. - Note: this functionality is not currently available in the - official + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - protobuf release, and it is not used for type URLs beginning - with + Note: this functionality is not currently available in the + official - type.googleapis.com. + protobuf release, and it is not used for type URLs beginning + with + type.googleapis.com. 
- Schemes other than `http`, `https` (or the empty scheme) - might be - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message - along with a + Schemes other than `http`, `https` (or the empty scheme) might + be - URL that describes the type of the serialized message. + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + URL that describes the type of the serialized message. - Protobuf library provides support to pack/unpack Any values in - the form - of utility functions or additional generated methods of the Any - type. + Protobuf library provides support to pack/unpack Any values in the + form + of utility functions or additional generated methods of the Any + type. - Example 1: Pack and unpack a message in C++. - Foo foo = ...; - Any any; - any.PackFrom(foo); + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { ... - if (any.UnpackTo(&foo)) { - ... - } + } - Example 2: Pack and unpack a message in Java. + Example 2: Pack and unpack a message in Java. - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. - foo = Foo(...) - any = Any() - any.Pack(foo) + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } - - The pack methods provided by protobuf library will by default - use - - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - methods only use the fully qualified type name after the last - '/' + The pack methods provided by protobuf library will by default use - in the type URL, for example "foo.bar.com/x/y.z" will yield type + 'type.googleapis.com/full.type.name' as the type URL and the + unpack - name "y.z". + methods only use the fully qualified type name after the last '/' + in the type URL, for example "foo.bar.com/x/y.z" will yield type + name "y.z". - JSON - ==== - The JSON representation of an `Any` value uses the regular + JSON - representation of the deserialized, embedded message, with an - additional field `@type` which contains the type URL. 
Example: + The JSON representation of an `Any` value uses the regular - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + representation of the deserialized, embedded message, with an - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + additional field `@type` which contains the type URL. Example: - If the embedded message type is well-known and has a custom JSON + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - representation, that representation will be embedded adding a - field + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - `value` which holds the custom JSON in addition to the `@type` + If the embedded message type is well-known and has a custom JSON - field. Example (for message [google.protobuf.Duration][]): + representation, that representation will be embedded adding a + field - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - title: >- - extension_options are arbitrary options that can be added by - chains + `value` which holds the custom JSON in addition to the `@type` - when the default options are not sufficient. If any of these are - present + field. Example (for message [google.protobuf.Duration][]): - and can't be handled, the transaction will be rejected - non_critical_extension_options: - type: array - items: + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + title: data is an app specific data of the NFT. Optional + description: NFT defines the NFT. + title: owner is the owner address of the nft + title: QueryNFTResponse is the response type for the Query/NFT RPC method + cosmos.nft.v1beta1.QueryNFTsResponse: + type: object + properties: + nfts: + type: array + items: + type: object + properties: + class_id: + type: string + title: >- + class_id associated with the NFT, similar to the contract + address of ERC721 + id: + type: string + title: id is a unique identifier of the NFT + uri: + type: string + title: uri for the NFT metadata stored off chain + uri_hash: + type: string + title: uri_hash is a hash of the document pointed by uri + data: type: object properties: type_url: @@ -46215,7 +68975,7 @@ definitions: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -46225,13 +68985,16 @@ definitions: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -46252,7 +69015,6 @@ definitions: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -46285,234 +69047,421 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } + title: data is an app specific data of the NFT. Optional + description: NFT defines the NFT. + title: NFT defines the NFT + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. 
+ total: + type: string + format: uint64 title: >- - extension_options are arbitrary options that can be added by - chains + total is total number of results available if + PageRequest.count_total - when the default options are not sufficient. If any of these are - present + was set, its value is undefined otherwise + title: QueryNFTsResponse is the response type for the Query/NFTs RPC methods + cosmos.nft.v1beta1.QueryOwnerResponse: + type: object + properties: + owner: + type: string + title: owner is the owner address of the nft + title: QueryOwnerResponse is the response type for the Query/Owner RPC method + cosmos.nft.v1beta1.QuerySupplyResponse: + type: object + properties: + amount: + type: string + format: uint64 + title: amount is the number of all NFTs from the given class + title: QuerySupplyResponse is the response type for the Query/Supply RPC method + cosmos.group.v1.GroupInfo: + type: object + properties: + id: + type: string + format: uint64 + description: id is the unique ID of the group. + admin: + type: string + description: admin is the account address of the group's admin. + metadata: + type: string + description: metadata is any arbitrary metadata to attached to the group. + version: + type: string + format: uint64 + title: >- + version is used to track changes to a group's membership structure + that - and can't be handled, they will be ignored - description: TxBody is the body of a transaction that all signers sign over. - auth_info: - $ref: '#/definitions/cosmos.tx.v1beta1.AuthInfo' - title: |- - auth_info is the authorization related content of the transaction, - specifically signers, signer modes and fee - signatures: - type: array - items: - type: string - format: byte - description: >- - signatures is a list of signatures that matches the length and order - of + would break existing proposals. Whenever any members weight is + changed, - AuthInfo's signer_infos to allow connecting signature meta information - like + or any member is added or removed this version is incremented and will - public key and signing mode by position. - description: Tx is the standard type used for broadcasting transactions. - cosmos.tx.v1beta1.TxBody: + cause proposals based on older versions of this group to fail + total_weight: + type: string + description: total_weight is the sum of the group members' weights. + created_at: + type: string + format: date-time + description: created_at is a timestamp specifying when a group was created. + description: GroupInfo represents the high-level on-chain information for a group. + cosmos.group.v1.GroupMember: + type: object + properties: + group_id: + type: string + format: uint64 + description: group_id is the unique ID of the group. + member: + description: member is the member data. + type: object + properties: + address: + type: string + description: address is the member's account address. + weight: + type: string + description: >- + weight is the member's voting weight that should be greater than + 0. + metadata: + type: string + description: metadata is any arbitrary metadata attached to the member. + added_at: + type: string + format: date-time + description: added_at is a timestamp specifying when a member was added. + description: GroupMember represents the relationship between a group and a member. 
+ cosmos.group.v1.GroupPolicyInfo: type: object properties: - messages: - type: array - items: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized + address: + type: string + description: address is the account address of group policy. + group_id: + type: string + format: uint64 + description: group_id is the unique ID of the group. + admin: + type: string + description: admin is the account address of the group admin. + metadata: + type: string + description: metadata is any arbitrary metadata attached to the group policy. + version: + type: string + format: uint64 + description: >- + version is used to track changes to a group's GroupPolicyInfo + structure that - protocol buffer message. This string must contain at least + would create a different result on a running proposal. + decision_policy: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - one "/" character. The last segment of the URL's path must - represent + protocol buffer message. This string must contain at least - the fully qualified name of the type (as in + one "/" character. The last segment of the URL's path must + represent - `path/google.protobuf.Duration`). The name should be in a - canonical form + the fully qualified name of the type (as in - (e.g., leading "." is not accepted). + `path/google.protobuf.Duration`). The name should be in a + canonical form + (e.g., leading "." is not accepted). - In practice, teams usually precompile into the binary all types - that they - expect it to use in the context of Any. However, for URLs which - use the + In practice, teams usually precompile into the binary all types + that they - scheme `http`, `https`, or no scheme, one can optionally set up - a type + expect it to use in the context of Any. However, for URLs which + use the - server that maps type URLs to message definitions as follows: + scheme `http`, `https`, or no scheme, one can optionally set up a + type + server that maps type URLs to message definitions as follows: - * If no scheme is provided, `https` is assumed. - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + * If no scheme is provided, `https` is assumed. - Note: this functionality is not currently available in the - official + * An HTTP GET on the URL must yield a [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - protobuf release, and it is not used for type URLs beginning - with + Note: this functionality is not currently available in the + official - type.googleapis.com. + protobuf release, and it is not used for type URLs beginning with + type.googleapis.com. - Schemes other than `http`, `https` (or the empty scheme) might - be - used with implementation specific semantics. 
- value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message along - with a + Schemes other than `http`, `https` (or the empty scheme) might be - URL that describes the type of the serialized message. + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above specified + type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message along + with a + URL that describes the type of the serialized message. - Protobuf library provides support to pack/unpack Any values in the - form - of utility functions or additional generated methods of the Any - type. + Protobuf library provides support to pack/unpack Any values in the + form + of utility functions or additional generated methods of the Any type. - Example 1: Pack and unpack a message in C++. - Foo foo = ...; - Any any; - any.PackFrom(foo); + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { ... - if (any.UnpackTo(&foo)) { - ... - } + } - Example 2: Pack and unpack a message in Java. + Example 2: Pack and unpack a message in Java. - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. - foo = Foo(...) - any = Any() - any.Pack(foo) + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - The pack methods provided by protobuf library will by default use + The pack methods provided by protobuf library will by default use - 'type.googleapis.com/full.type.name' as the type URL and the unpack + 'type.googleapis.com/full.type.name' as the type URL and the unpack - methods only use the fully qualified type name after the last '/' + methods only use the fully qualified type name after the last '/' - in the type URL, for example "foo.bar.com/x/y.z" will yield type + in the type URL, for example "foo.bar.com/x/y.z" will yield type - name "y.z". + name "y.z". - JSON + JSON - ==== - The JSON representation of an `Any` value uses the regular + The JSON representation of an `Any` value uses the regular - representation of the deserialized, embedded message, with an + representation of the deserialized, embedded message, with an - additional field `@type` which contains the type URL. Example: + additional field `@type` which contains the type URL. 
Example: - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - If the embedded message type is well-known and has a custom JSON + If the embedded message type is well-known and has a custom JSON - representation, that representation will be embedded adding a field + representation, that representation will be embedded adding a field - `value` which holds the custom JSON in addition to the `@type` + `value` which holds the custom JSON in addition to the `@type` - field. Example (for message [google.protobuf.Duration][]): + field. Example (for message [google.protobuf.Duration][]): - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + created_at: + type: string + format: date-time + description: created_at is a timestamp specifying when a group policy was created. + description: >- + GroupPolicyInfo represents the high-level on-chain information for a group + policy. + cosmos.group.v1.Member: + type: object + properties: + address: + type: string + description: address is the member's account address. + weight: + type: string + description: weight is the member's voting weight that should be greater than 0. + metadata: + type: string + description: metadata is any arbitrary metadata attached to the member. + added_at: + type: string + format: date-time + description: added_at is a timestamp specifying when a member was added. + description: |- + Member represents a group member with an account address, + non-zero weight, metadata and added_at timestamp. + cosmos.group.v1.Proposal: + type: object + properties: + id: + type: string + format: uint64 + description: id is the unique id of the proposal. + group_policy_address: + type: string + description: group_policy_address is the account address of group policy. + metadata: + type: string + description: metadata is any arbitrary metadata attached to the proposal. + proposers: + type: array + items: + type: string + description: proposers are the account addresses of the proposers. + submit_time: + type: string + format: date-time + description: submit_time is a timestamp specifying when a proposal was submitted. + group_version: + type: string + format: uint64 + description: |- + group_version tracks the version of the group at proposal submission. + This field is here for informational purposes only. + group_policy_version: + type: string + format: uint64 description: >- - messages is a list of messages to be executed. The required signers of + group_policy_version tracks the version of the group policy at + proposal submission. - those messages define the number and order of elements in AuthInfo's + When a decision policy is changed, existing proposals from previous + policy - signer_infos and Tx's signatures. Each required signer address is - added to + versions will become invalid with the `ABORTED` status. - the list only the first time it occurs. + This field is here for informational purposes only. + status: + description: >- + status represents the high level position in the life cycle of the + proposal. Initial value is Submitted. 
+ type: string + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_SUBMITTED + - PROPOSAL_STATUS_ACCEPTED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_ABORTED + - PROPOSAL_STATUS_WITHDRAWN + default: PROPOSAL_STATUS_UNSPECIFIED + final_tally_result: + description: >- + final_tally_result contains the sums of all weighted votes for this - By convention, the first required signer (usually from the first - message) + proposal for each vote option. It is empty at submission, and only - is referred to as the primary signer and pays the fee for the whole + populated after tallying, at voting period end or at proposal + execution, - transaction. - memo: + whichever happens first. + type: object + properties: + yes_count: + type: string + description: yes_count is the weighted sum of yes votes. + abstain_count: + type: string + description: abstain_count is the weighted sum of abstainers. + no_count: + type: string + description: no_count is the weighted sum of no votes. + no_with_veto_count: + type: string + description: no_with_veto_count is the weighted sum of veto. + voting_period_end: type: string + format: date-time description: >- - memo is any arbitrary note/comment to be added to the transaction. + voting_period_end is the timestamp before which voting must be done. - WARNING: in clients, any publicly exposed text should not be called - memo, + Unless a successful MsgExec is called before (to execute a proposal + whose - but should be called `note` instead (see - https://github.com/cosmos/cosmos-sdk/issues/9122). - timeout_height: + tally is successful before the voting period ends), tallying will be + done + + at this point, and the `final_tally_result`and `status` fields will be + + accordingly updated. + executor_result: + description: >- + executor_result is the final result of the proposal execution. Initial + value is NotRun. type: string - format: uint64 - title: |- - timeout is the block height after which this transaction will not - be processed by the chain - extension_options: + enum: + - PROPOSAL_EXECUTOR_RESULT_UNSPECIFIED + - PROPOSAL_EXECUTOR_RESULT_NOT_RUN + - PROPOSAL_EXECUTOR_RESULT_SUCCESS + - PROPOSAL_EXECUTOR_RESULT_FAILURE + default: PROPOSAL_EXECUTOR_RESULT_UNSPECIFIED + messages: type: array items: type: object @@ -46610,7 +69559,7 @@ definitions: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -46620,13 +69569,16 @@ definitions: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... } @@ -46644,7 +69596,6 @@ definitions: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -46676,507 +69627,653 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - title: >- - extension_options are arbitrary options that can be added by chains + description: >- + messages is a list of `sdk.Msg`s that will be executed if the proposal + passes. 
+ title: + type: string + description: 'Since: cosmos-sdk 0.47' + title: title is the title of the proposal + summary: + type: string + description: 'Since: cosmos-sdk 0.47' + title: summary is a short summary of the proposal + description: >- + Proposal defines a group proposal. Any member of a group can submit a + proposal - when the default options are not sufficient. If any of these are - present + for a group policy to decide upon. - and can't be handled, the transaction will be rejected - non_critical_extension_options: + A proposal consists of a set of `sdk.Msg`s that will be executed if the + proposal + + passes as well as some optional metadata associated with the proposal. + cosmos.group.v1.ProposalExecutorResult: + type: string + enum: + - PROPOSAL_EXECUTOR_RESULT_UNSPECIFIED + - PROPOSAL_EXECUTOR_RESULT_NOT_RUN + - PROPOSAL_EXECUTOR_RESULT_SUCCESS + - PROPOSAL_EXECUTOR_RESULT_FAILURE + default: PROPOSAL_EXECUTOR_RESULT_UNSPECIFIED + description: |- + ProposalExecutorResult defines types of proposal executor results. + + - PROPOSAL_EXECUTOR_RESULT_UNSPECIFIED: An empty value is not allowed. + - PROPOSAL_EXECUTOR_RESULT_NOT_RUN: We have not yet run the executor. + - PROPOSAL_EXECUTOR_RESULT_SUCCESS: The executor was successful and proposed action updated state. + - PROPOSAL_EXECUTOR_RESULT_FAILURE: The executor returned an error and proposed action didn't update state. + cosmos.group.v1.ProposalStatus: + type: string + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_SUBMITTED + - PROPOSAL_STATUS_ACCEPTED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_ABORTED + - PROPOSAL_STATUS_WITHDRAWN + default: PROPOSAL_STATUS_UNSPECIFIED + description: |- + ProposalStatus defines proposal statuses. + + - PROPOSAL_STATUS_UNSPECIFIED: An empty value is invalid and not allowed. + - PROPOSAL_STATUS_SUBMITTED: Initial status of a proposal when submitted. + - PROPOSAL_STATUS_ACCEPTED: Final status of a proposal when the final tally is done and the outcome + passes the group policy's decision policy. + - PROPOSAL_STATUS_REJECTED: Final status of a proposal when the final tally is done and the outcome + is rejected by the group policy's decision policy. + - PROPOSAL_STATUS_ABORTED: Final status of a proposal when the group policy is modified before the + final tally. + - PROPOSAL_STATUS_WITHDRAWN: A proposal can be withdrawn before the voting start time by the owner. + When this happens the final status is Withdrawn. + cosmos.group.v1.QueryGroupInfoResponse: + type: object + properties: + info: + description: info is the GroupInfo of the group. + type: object + properties: + id: + type: string + format: uint64 + description: id is the unique ID of the group. + admin: + type: string + description: admin is the account address of the group's admin. + metadata: + type: string + description: metadata is any arbitrary metadata to attached to the group. + version: + type: string + format: uint64 + title: >- + version is used to track changes to a group's membership structure + that + + would break existing proposals. Whenever any members weight is + changed, + + or any member is added or removed this version is incremented and + will + + cause proposals based on older versions of this group to fail + total_weight: + type: string + description: total_weight is the sum of the group members' weights. + created_at: + type: string + format: date-time + description: created_at is a timestamp specifying when a group was created. 
+ description: QueryGroupInfoResponse is the Query/GroupInfo response type. + cosmos.group.v1.QueryGroupMembersResponse: + type: object + properties: + members: type: array items: type: object properties: - type_url: + group_id: + type: string + format: uint64 + description: group_id is the unique ID of the group. + member: + description: member is the member data. + type: object + properties: + address: + type: string + description: address is the member's account address. + weight: + type: string + description: >- + weight is the member's voting weight that should be greater + than 0. + metadata: + type: string + description: metadata is any arbitrary metadata attached to the member. + added_at: + type: string + format: date-time + description: added_at is a timestamp specifying when a member was added. + description: >- + GroupMember represents the relationship between a group and a + member. + description: members are the members of the group with given group_id. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total + + was set, its value is undefined otherwise + description: QueryGroupMembersResponse is the Query/GroupMembersResponse response type. + cosmos.group.v1.QueryGroupPoliciesByAdminResponse: + type: object + properties: + group_policies: + type: array + items: + type: object + properties: + address: + type: string + description: address is the account address of group policy. + group_id: + type: string + format: uint64 + description: group_id is the unique ID of the group. + admin: + type: string + description: admin is the account address of the group admin. + metadata: + type: string + description: metadata is any arbitrary metadata attached to the group policy. + version: type: string + format: uint64 description: >- - A URL/resource name that uniquely identifies the type of the - serialized + version is used to track changes to a group's GroupPolicyInfo + structure that - protocol buffer message. This string must contain at least + would create a different result on a running proposal. + decision_policy: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - one "/" character. The last segment of the URL's path must - represent + protocol buffer message. This string must contain at least - the fully qualified name of the type (as in + one "/" character. The last segment of the URL's path must + represent - `path/google.protobuf.Duration`). The name should be in a - canonical form + the fully qualified name of the type (as in - (e.g., leading "." is not accepted). + `path/google.protobuf.Duration`). The name should be in a + canonical form + (e.g., leading "." is not accepted). - In practice, teams usually precompile into the binary all types - that they - expect it to use in the context of Any. However, for URLs which - use the + In practice, teams usually precompile into the binary all + types that they - scheme `http`, `https`, or no scheme, one can optionally set up - a type + expect it to use in the context of Any. 
However, for URLs + which use the - server that maps type URLs to message definitions as follows: + scheme `http`, `https`, or no scheme, one can optionally set + up a type + server that maps type URLs to message definitions as + follows: - * If no scheme is provided, `https` is assumed. - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + * If no scheme is provided, `https` is assumed. - Note: this functionality is not currently available in the - official + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - protobuf release, and it is not used for type URLs beginning - with + Note: this functionality is not currently available in the + official - type.googleapis.com. + protobuf release, and it is not used for type URLs beginning + with + type.googleapis.com. - Schemes other than `http`, `https` (or the empty scheme) might - be - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message along - with a + Schemes other than `http`, `https` (or the empty scheme) + might be - URL that describes the type of the serialized message. + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + URL that describes the type of the serialized message. - Protobuf library provides support to pack/unpack Any values in the - form - of utility functions or additional generated methods of the Any - type. + Protobuf library provides support to pack/unpack Any values in + the form + of utility functions or additional generated methods of the Any + type. - Example 1: Pack and unpack a message in C++. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + Example 1: Pack and unpack a message in C++. - Example 2: Pack and unpack a message in Java. + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + Example 2: Pack and unpack a message in Java. - Example 3: Pack and unpack a message in Python. + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + Example 3: Pack and unpack a message in Python. - Example 4: Pack and unpack a message in Go + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... 
- foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + Example 4: Pack and unpack a message in Go - The pack methods provided by protobuf library will by default use + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - 'type.googleapis.com/full.type.name' as the type URL and the unpack + The pack methods provided by protobuf library will by default + use - methods only use the fully qualified type name after the last '/' + 'type.googleapis.com/full.type.name' as the type URL and the + unpack - in the type URL, for example "foo.bar.com/x/y.z" will yield type + methods only use the fully qualified type name after the last + '/' - name "y.z". + in the type URL, for example "foo.bar.com/x/y.z" will yield type + name "y.z". - JSON - ==== + JSON - The JSON representation of an `Any` value uses the regular - representation of the deserialized, embedded message, with an + The JSON representation of an `Any` value uses the regular - additional field `@type` which contains the type URL. Example: + representation of the deserialized, embedded message, with an - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + additional field `@type` which contains the type URL. Example: - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - If the embedded message type is well-known and has a custom JSON + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - representation, that representation will be embedded adding a field + If the embedded message type is well-known and has a custom JSON - `value` which holds the custom JSON in addition to the `@type` + representation, that representation will be embedded adding a + field - field. Example (for message [google.protobuf.Duration][]): + `value` which holds the custom JSON in addition to the `@type` - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - title: >- - extension_options are arbitrary options that can be added by chains + field. Example (for message [google.protobuf.Duration][]): - when the default options are not sufficient. If any of these are - present + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + created_at: + type: string + format: date-time + description: >- + created_at is a timestamp specifying when a group policy was + created. + description: >- + GroupPolicyInfo represents the high-level on-chain information for a + group policy. + description: group_policies are the group policies info with provided admin. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - and can't be handled, they will be ignored - description: TxBody is the body of a transaction that all signers sign over. 
- tendermint.abci.Event: + was set, its value is undefined otherwise + description: >- + QueryGroupPoliciesByAdminResponse is the Query/GroupPoliciesByAdmin + response type. + cosmos.group.v1.QueryGroupPoliciesByGroupResponse: type: object properties: - type: - type: string - attributes: + group_policies: type: array items: type: object properties: - key: + address: type: string - format: byte - value: + description: address is the account address of group policy. + group_id: type: string - format: byte - index: - type: boolean - format: boolean - description: EventAttribute is a single key-value pair, associated with an event. - description: >- - Event allows application developers to attach additional information to - - ResponseBeginBlock, ResponseEndBlock, ResponseCheckTx and - ResponseDeliverTx. - - Later, transactions may be queried using these events. - tendermint.abci.EventAttribute: - type: object - properties: - key: - type: string - format: byte - value: - type: string - format: byte - index: - type: boolean - format: boolean - description: EventAttribute is a single key-value pair, associated with an event. - cosmos.upgrade.v1beta1.ModuleVersion: - type: object - properties: - name: - type: string - title: name of the app module - version: - type: string - format: uint64 - title: consensus version of the app module - description: ModuleVersion specifies a module and its consensus version. - cosmos.upgrade.v1beta1.Plan: - type: object - properties: - name: - type: string - description: >- - Sets the name for the upgrade. This name will be used by the upgraded - - version of the software to apply any special "on-upgrade" commands - during - - the first BeginBlock method after the upgrade is applied. It is also - used - - to detect whether a software version can handle a given upgrade. If no - - upgrade handler with this name has been set in the software, it will - be - - assumed that the software is out-of-date when the upgrade Time or - Height is - - reached and the software will exit. - time: - type: string - format: date-time - description: >- - Deprecated: Time based upgrades have been deprecated. Time based - upgrade logic + format: uint64 + description: group_id is the unique ID of the group. + admin: + type: string + description: admin is the account address of the group admin. + metadata: + type: string + description: metadata is any arbitrary metadata attached to the group policy. + version: + type: string + format: uint64 + description: >- + version is used to track changes to a group's GroupPolicyInfo + structure that - has been removed from the SDK. + would create a different result on a running proposal. + decision_policy: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of the + serialized - If this field is not empty, an error will be thrown. - height: - type: string - format: int64 - description: |- - The height at which the upgrade must be performed. - Only used if Time is not set. - info: - type: string - title: |- - Any application specific upgrade info to be included on-chain - such as a git commit that validators could automatically upgrade to - upgraded_client_state: - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized + protocol buffer message. This string must contain at least - protocol buffer message. This string must contain at least + one "/" character. 
The last segment of the URL's path must + represent - one "/" character. The last segment of the URL's path must - represent + the fully qualified name of the type (as in - the fully qualified name of the type (as in + `path/google.protobuf.Duration`). The name should be in a + canonical form - `path/google.protobuf.Duration`). The name should be in a - canonical form + (e.g., leading "." is not accepted). - (e.g., leading "." is not accepted). + In practice, teams usually precompile into the binary all + types that they - In practice, teams usually precompile into the binary all types - that they + expect it to use in the context of Any. However, for URLs + which use the - expect it to use in the context of Any. However, for URLs which - use the + scheme `http`, `https`, or no scheme, one can optionally set + up a type - scheme `http`, `https`, or no scheme, one can optionally set up a - type + server that maps type URLs to message definitions as + follows: - server that maps type URLs to message definitions as follows: + * If no scheme is provided, `https` is assumed. - * If no scheme is provided, `https` is assumed. + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based on + the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) + Note: this functionality is not currently available in the + official - Note: this functionality is not currently available in the - official + protobuf release, and it is not used for type URLs beginning + with - protobuf release, and it is not used for type URLs beginning with + type.googleapis.com. - type.googleapis.com. + Schemes other than `http`, `https` (or the empty scheme) + might be - Schemes other than `http`, `https` (or the empty scheme) might be + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above specified - type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message along - with a + URL that describes the type of the serialized message. - URL that describes the type of the serialized message. + Protobuf library provides support to pack/unpack Any values in + the form - Protobuf library provides support to pack/unpack Any values in the - form + of utility functions or additional generated methods of the Any + type. - of utility functions or additional generated methods of the Any type. + Example 1: Pack and unpack a message in C++. - Example 1: Pack and unpack a message in C++. + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... 
+ } - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } + Example 2: Pack and unpack a message in Java. - Example 2: Pack and unpack a message in Java. + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + Example 3: Pack and unpack a message in Python. - Example 3: Pack and unpack a message in Python. + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... + Example 4: Pack and unpack a message in Go - Example 4: Pack and unpack a message in Go + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... - } + The pack methods provided by protobuf library will by default + use - The pack methods provided by protobuf library will by default use + 'type.googleapis.com/full.type.name' as the type URL and the + unpack - 'type.googleapis.com/full.type.name' as the type URL and the unpack + methods only use the fully qualified type name after the last + '/' - methods only use the fully qualified type name after the last '/' + in the type URL, for example "foo.bar.com/x/y.z" will yield type - in the type URL, for example "foo.bar.com/x/y.z" will yield type + name "y.z". - name "y.z". + JSON - JSON - ==== + The JSON representation of an `Any` value uses the regular - The JSON representation of an `Any` value uses the regular + representation of the deserialized, embedded message, with an - representation of the deserialized, embedded message, with an + additional field `@type` which contains the type URL. Example: - additional field `@type` which contains the type URL. Example: + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + If the embedded message type is well-known and has a custom JSON - If the embedded message type is well-known and has a custom JSON + representation, that representation will be embedded adding a + field - representation, that representation will be embedded adding a field + `value` which holds the custom JSON in addition to the `@type` - `value` which holds the custom JSON in addition to the `@type` + field. Example (for message [google.protobuf.Duration][]): - field. Example (for message [google.protobuf.Duration][]): + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + created_at: + type: string + format: date-time + description: >- + created_at is a timestamp specifying when a group policy was + created. + description: >- + GroupPolicyInfo represents the high-level on-chain information for a + group policy. + description: >- + group_policies are the group policies info associated with the + provided group. + pagination: + description: pagination defines the pagination in the response. 
+ type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - description: >- - Plan specifies information about a planned upgrade and when it should - occur. - cosmos.upgrade.v1beta1.QueryAppliedPlanResponse: - type: object - properties: - height: - type: string - format: int64 - description: height is the block height at which the plan was applied. + was set, its value is undefined otherwise description: >- - QueryAppliedPlanResponse is the response type for the Query/AppliedPlan - RPC - - method. - cosmos.upgrade.v1beta1.QueryCurrentPlanResponse: + QueryGroupPoliciesByGroupResponse is the Query/GroupPoliciesByGroup + response type. + cosmos.group.v1.QueryGroupPolicyInfoResponse: type: object properties: - plan: - description: plan is the current upgrade plan. + info: type: object properties: - name: + address: type: string - description: >- - Sets the name for the upgrade. This name will be used by the - upgraded - - version of the software to apply any special "on-upgrade" commands - during - - the first BeginBlock method after the upgrade is applied. It is - also used - - to detect whether a software version can handle a given upgrade. - If no - - upgrade handler with this name has been set in the software, it - will be - - assumed that the software is out-of-date when the upgrade Time or - Height is - - reached and the software will exit. - time: + description: address is the account address of group policy. + group_id: type: string - format: date-time - description: >- - Deprecated: Time based upgrades have been deprecated. Time based - upgrade logic - - has been removed from the SDK. - - If this field is not empty, an error will be thrown. - height: + format: uint64 + description: group_id is the unique ID of the group. + admin: + type: string + description: admin is the account address of the group admin. + metadata: type: string - format: int64 - description: |- - The height at which the upgrade must be performed. - Only used if Time is not set. - info: + description: metadata is any arbitrary metadata attached to the group policy. + version: type: string - title: >- - Any application specific upgrade info to be included on-chain + format: uint64 + description: >- + version is used to track changes to a group's GroupPolicyInfo + structure that - such as a git commit that validators could automatically upgrade - to - upgraded_client_state: + would create a different result on a running proposal. + decision_policy: type: object properties: type_url: @@ -47273,7 +70370,7 @@ definitions: foo = any.unpack(Foo.class); } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. foo = Foo(...) any = Any() @@ -47283,13 +70380,16 @@ definitions: any.Unpack(foo) ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) + any, err := anypb.New(foo) + if err != nil { + ... + } ... foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + if err := any.UnmarshalTo(foo); err != nil { ... 
} @@ -47308,7 +70408,6 @@ definitions: JSON - ==== The JSON representation of an `Any` value uses the regular @@ -47341,223 +70440,262 @@ definitions: "@type": "type.googleapis.com/google.protobuf.Duration", "value": "1.212s" } - description: >- - QueryCurrentPlanResponse is the response type for the Query/CurrentPlan - RPC - - method. - cosmos.upgrade.v1beta1.QueryModuleVersionsResponse: + created_at: + type: string + format: date-time + description: >- + created_at is a timestamp specifying when a group policy was + created. + description: >- + GroupPolicyInfo represents the high-level on-chain information for a + group policy. + description: QueryGroupPolicyInfoResponse is the Query/GroupPolicyInfo response type. + cosmos.group.v1.QueryGroupsByAdminResponse: type: object properties: - module_versions: + groups: type: array items: type: object properties: - name: + id: type: string - title: name of the app module + format: uint64 + description: id is the unique ID of the group. + admin: + type: string + description: admin is the account address of the group's admin. + metadata: + type: string + description: metadata is any arbitrary metadata to attached to the group. version: type: string format: uint64 - title: consensus version of the app module - description: ModuleVersion specifies a module and its consensus version. - description: >- - module_versions is a list of module names with their consensus - versions. - description: >- - QueryModuleVersionsResponse is the response type for the - Query/ModuleVersions + title: >- + version is used to track changes to a group's membership + structure that - RPC method. - cosmos.upgrade.v1beta1.QueryUpgradedConsensusStateResponse: - type: object - properties: - upgraded_consensus_state: - type: string - format: byte - description: >- - QueryUpgradedConsensusStateResponse is the response type for the - Query/UpgradedConsensusState + would break existing proposals. Whenever any members weight is + changed, - RPC method. - cosmos.authz.v1beta1.Grant: - type: object - properties: - authorization: + or any member is added or removed this version is incremented + and will + + cause proposals based on older versions of this group to fail + total_weight: + type: string + description: total_weight is the sum of the group members' weights. + created_at: + type: string + format: date-time + description: created_at is a timestamp specifying when a group was created. + description: >- + GroupInfo represents the high-level on-chain information for a + group. + description: groups are the groups info with the provided admin. + pagination: + description: pagination defines the pagination in the response. type: object properties: - type_url: + next_key: type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized - - protocol buffer message. This string must contain at least - - one "/" character. The last segment of the URL's path must - represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in a - canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary all types - that they - - expect it to use in the context of Any. However, for URLs which - use the - - scheme `http`, `https`, or no scheme, one can optionally set up a - type - - server that maps type URLs to message definitions as follows: - - - * If no scheme is provided, `https` is assumed. 
- - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in the - official - - protobuf release, and it is not used for type URLs beginning with + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - type.googleapis.com. + was set, its value is undefined otherwise + description: >- + QueryGroupsByAdminResponse is the Query/GroupsByAdminResponse response + type. + cosmos.group.v1.QueryGroupsByMemberResponse: + type: object + properties: + groups: + type: array + items: + type: object + properties: + id: + type: string + format: uint64 + description: id is the unique ID of the group. + admin: + type: string + description: admin is the account address of the group's admin. + metadata: + type: string + description: metadata is any arbitrary metadata to attached to the group. + version: + type: string + format: uint64 + title: >- + version is used to track changes to a group's membership + structure that + would break existing proposals. Whenever any members weight is + changed, - Schemes other than `http`, `https` (or the empty scheme) might be + or any member is added or removed this version is incremented + and will - used with implementation specific semantics. - value: + cause proposals based on older versions of this group to fail + total_weight: + type: string + description: total_weight is the sum of the group members' weights. + created_at: + type: string + format: date-time + description: created_at is a timestamp specifying when a group was created. + description: >- + GroupInfo represents the high-level on-chain information for a + group. + description: groups are the groups info with the provided group member. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: type: string format: byte - description: >- - Must be a valid serialized protocol buffer of the above specified - type. - description: >- - `Any` contains an arbitrary serialized protocol buffer message along - with a - - URL that describes the type of the serialized message. - - - Protobuf library provides support to pack/unpack Any values in the - form - - of utility functions or additional generated methods of the Any type. - - - Example 1: Pack and unpack a message in C++. - - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { - ... - } - - Example 2: Pack and unpack a message in Java. - - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } - - Example 3: Pack and unpack a message in Python. - - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) - ... - - Example 4: Pack and unpack a message in Go - - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { - ... 
- } - - The pack methods provided by protobuf library will by default use - - 'type.googleapis.com/full.type.name' as the type URL and the unpack - - methods only use the fully qualified type name after the last '/' - - in the type URL, for example "foo.bar.com/x/y.z" will yield type - - name "y.z". - - + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - JSON + was set, its value is undefined otherwise + description: QueryGroupsByMemberResponse is the Query/GroupsByMember response type. + cosmos.group.v1.QueryProposalResponse: + type: object + properties: + proposal: + description: proposal is the proposal info. + type: object + properties: + id: + type: string + format: uint64 + description: id is the unique id of the proposal. + group_policy_address: + type: string + description: group_policy_address is the account address of group policy. + metadata: + type: string + description: metadata is any arbitrary metadata attached to the proposal. + proposers: + type: array + items: + type: string + description: proposers are the account addresses of the proposers. + submit_time: + type: string + format: date-time + description: >- + submit_time is a timestamp specifying when a proposal was + submitted. + group_version: + type: string + format: uint64 + description: >- + group_version tracks the version of the group at proposal + submission. - ==== + This field is here for informational purposes only. + group_policy_version: + type: string + format: uint64 + description: >- + group_policy_version tracks the version of the group policy at + proposal submission. - The JSON representation of an `Any` value uses the regular + When a decision policy is changed, existing proposals from + previous policy - representation of the deserialized, embedded message, with an + versions will become invalid with the `ABORTED` status. - additional field `@type` which contains the type URL. Example: + This field is here for informational purposes only. + status: + description: >- + status represents the high level position in the life cycle of the + proposal. Initial value is Submitted. + type: string + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_SUBMITTED + - PROPOSAL_STATUS_ACCEPTED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_ABORTED + - PROPOSAL_STATUS_WITHDRAWN + default: PROPOSAL_STATUS_UNSPECIFIED + final_tally_result: + description: >- + final_tally_result contains the sums of all weighted votes for + this - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + proposal for each vote option. It is empty at submission, and only - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + populated after tallying, at voting period end or at proposal + execution, - If the embedded message type is well-known and has a custom JSON + whichever happens first. + type: object + properties: + yes_count: + type: string + description: yes_count is the weighted sum of yes votes. + abstain_count: + type: string + description: abstain_count is the weighted sum of abstainers. + no_count: + type: string + description: no_count is the weighted sum of no votes. + no_with_veto_count: + type: string + description: no_with_veto_count is the weighted sum of veto. 
+ voting_period_end: + type: string + format: date-time + description: >- + voting_period_end is the timestamp before which voting must be + done. - representation, that representation will be embedded adding a field + Unless a successful MsgExec is called before (to execute a + proposal whose - `value` which holds the custom JSON in addition to the `@type` + tally is successful before the voting period ends), tallying will + be done - field. Example (for message [google.protobuf.Duration][]): + at this point, and the `final_tally_result`and `status` fields + will be - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - expiration: - type: string - format: date-time - description: |- - Grant gives permissions to execute - the provide method with expiration time. - cosmos.authz.v1beta1.QueryGrantsResponse: - type: object - properties: - grants: - type: array - items: - type: object - properties: - authorization: + accordingly updated. + executor_result: + description: >- + executor_result is the final result of the proposal execution. + Initial value is NotRun. + type: string + enum: + - PROPOSAL_EXECUTOR_RESULT_UNSPECIFIED + - PROPOSAL_EXECUTOR_RESULT_NOT_RUN + - PROPOSAL_EXECUTOR_RESULT_SUCCESS + - PROPOSAL_EXECUTOR_RESULT_FAILURE + default: PROPOSAL_EXECUTOR_RESULT_UNSPECIFIED + messages: + type: array + items: type: object properties: type_url: @@ -47627,122 +70765,445 @@ definitions: `Any` contains an arbitrary serialized protocol buffer message along with a - URL that describes the type of the serialized message. + URL that describes the type of the serialized message. + + + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the Any + type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); + ... + if (any.UnpackTo(&foo)) { + ... + } + + Example 2: Pack and unpack a message in Java. + + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } + + Example 3: Pack and unpack a message in Python. + + foo = Foo(...) + any = Any() + any.Pack(foo) + ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... + + Example 4: Pack and unpack a message in Go + + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } + ... + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } + + The pack methods provided by protobuf library will by default + use + + 'type.googleapis.com/full.type.name' as the type URL and the + unpack + + methods only use the fully qualified type name after the last + '/' + + in the type URL, for example "foo.bar.com/x/y.z" will yield type + + name "y.z". + + + + JSON + + + The JSON representation of an `Any` value uses the regular + + representation of the deserialized, embedded message, with an + + additional field `@type` which contains the type URL. Example: + + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } + + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } + + If the embedded message type is well-known and has a custom JSON + + representation, that representation will be embedded adding a + field + + `value` which holds the custom JSON in addition to the `@type` + + field. 
Example (for message [google.protobuf.Duration][]): + + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + messages is a list of `sdk.Msg`s that will be executed if the + proposal passes. + title: + type: string + description: 'Since: cosmos-sdk 0.47' + title: title is the title of the proposal + summary: + type: string + description: 'Since: cosmos-sdk 0.47' + title: summary is a short summary of the proposal + description: QueryProposalResponse is the Query/Proposal response type. + cosmos.group.v1.QueryProposalsByGroupPolicyResponse: + type: object + properties: + proposals: + type: array + items: + type: object + properties: + id: + type: string + format: uint64 + description: id is the unique id of the proposal. + group_policy_address: + type: string + description: group_policy_address is the account address of group policy. + metadata: + type: string + description: metadata is any arbitrary metadata attached to the proposal. + proposers: + type: array + items: + type: string + description: proposers are the account addresses of the proposers. + submit_time: + type: string + format: date-time + description: >- + submit_time is a timestamp specifying when a proposal was + submitted. + group_version: + type: string + format: uint64 + description: >- + group_version tracks the version of the group at proposal + submission. + + This field is here for informational purposes only. + group_policy_version: + type: string + format: uint64 + description: >- + group_policy_version tracks the version of the group policy at + proposal submission. + + When a decision policy is changed, existing proposals from + previous policy + + versions will become invalid with the `ABORTED` status. + + This field is here for informational purposes only. + status: + description: >- + status represents the high level position in the life cycle of + the proposal. Initial value is Submitted. + type: string + enum: + - PROPOSAL_STATUS_UNSPECIFIED + - PROPOSAL_STATUS_SUBMITTED + - PROPOSAL_STATUS_ACCEPTED + - PROPOSAL_STATUS_REJECTED + - PROPOSAL_STATUS_ABORTED + - PROPOSAL_STATUS_WITHDRAWN + default: PROPOSAL_STATUS_UNSPECIFIED + final_tally_result: + description: >- + final_tally_result contains the sums of all weighted votes for + this + + proposal for each vote option. It is empty at submission, and + only + + populated after tallying, at voting period end or at proposal + execution, + + whichever happens first. + type: object + properties: + yes_count: + type: string + description: yes_count is the weighted sum of yes votes. + abstain_count: + type: string + description: abstain_count is the weighted sum of abstainers. + no_count: + type: string + description: no_count is the weighted sum of no votes. + no_with_veto_count: + type: string + description: no_with_veto_count is the weighted sum of veto. + voting_period_end: + type: string + format: date-time + description: >- + voting_period_end is the timestamp before which voting must be + done. + + Unless a successful MsgExec is called before (to execute a + proposal whose + + tally is successful before the voting period ends), tallying + will be done + + at this point, and the `final_tally_result`and `status` fields + will be + + accordingly updated. + executor_result: + description: >- + executor_result is the final result of the proposal execution. + Initial value is NotRun. 
+ type: string + enum: + - PROPOSAL_EXECUTOR_RESULT_UNSPECIFIED + - PROPOSAL_EXECUTOR_RESULT_NOT_RUN + - PROPOSAL_EXECUTOR_RESULT_SUCCESS + - PROPOSAL_EXECUTOR_RESULT_FAILURE + default: PROPOSAL_EXECUTOR_RESULT_UNSPECIFIED + messages: + type: array + items: + type: object + properties: + type_url: + type: string + description: >- + A URL/resource name that uniquely identifies the type of + the serialized + + protocol buffer message. This string must contain at least + + one "/" character. The last segment of the URL's path must + represent + + the fully qualified name of the type (as in + + `path/google.protobuf.Duration`). The name should be in a + canonical form + + (e.g., leading "." is not accepted). + + + In practice, teams usually precompile into the binary all + types that they + + expect it to use in the context of Any. However, for URLs + which use the + + scheme `http`, `https`, or no scheme, one can optionally + set up a type + + server that maps type URLs to message definitions as + follows: + + + * If no scheme is provided, `https` is assumed. + + * An HTTP GET on the URL must yield a + [google.protobuf.Type][] + value in binary format, or produce an error. + * Applications are allowed to cache lookup results based + on the + URL, or have them precompiled into a binary to avoid any + lookup. Therefore, binary compatibility needs to be preserved + on changes to types. (Use versioned type names to manage + breaking changes.) + + Note: this functionality is not currently available in the + official + + protobuf release, and it is not used for type URLs + beginning with + + type.googleapis.com. - Protobuf library provides support to pack/unpack Any values in - the form + Schemes other than `http`, `https` (or the empty scheme) + might be - of utility functions or additional generated methods of the Any - type. + used with implementation specific semantics. + value: + type: string + format: byte + description: >- + Must be a valid serialized protocol buffer of the above + specified type. + description: >- + `Any` contains an arbitrary serialized protocol buffer message + along with a + URL that describes the type of the serialized message. - Example 1: Pack and unpack a message in C++. - Foo foo = ...; - Any any; - any.PackFrom(foo); - ... - if (any.UnpackTo(&foo)) { + Protobuf library provides support to pack/unpack Any values in + the form + + of utility functions or additional generated methods of the + Any type. + + + Example 1: Pack and unpack a message in C++. + + Foo foo = ...; + Any any; + any.PackFrom(foo); ... - } + if (any.UnpackTo(&foo)) { + ... + } - Example 2: Pack and unpack a message in Java. + Example 2: Pack and unpack a message in Java. - Foo foo = ...; - Any any = Any.pack(foo); - ... - if (any.is(Foo.class)) { - foo = any.unpack(Foo.class); - } + Foo foo = ...; + Any any = Any.pack(foo); + ... + if (any.is(Foo.class)) { + foo = any.unpack(Foo.class); + } - Example 3: Pack and unpack a message in Python. + Example 3: Pack and unpack a message in Python. - foo = Foo(...) - any = Any() - any.Pack(foo) - ... - if any.Is(Foo.DESCRIPTOR): - any.Unpack(foo) + foo = Foo(...) + any = Any() + any.Pack(foo) ... + if any.Is(Foo.DESCRIPTOR): + any.Unpack(foo) + ... - Example 4: Pack and unpack a message in Go + Example 4: Pack and unpack a message in Go - foo := &pb.Foo{...} - any, err := ptypes.MarshalAny(foo) - ... - foo := &pb.Foo{} - if err := ptypes.UnmarshalAny(any, foo); err != nil { + foo := &pb.Foo{...} + any, err := anypb.New(foo) + if err != nil { + ... + } ... 
- } + foo := &pb.Foo{} + if err := any.UnmarshalTo(foo); err != nil { + ... + } - The pack methods provided by protobuf library will by default - use + The pack methods provided by protobuf library will by default + use - 'type.googleapis.com/full.type.name' as the type URL and the - unpack + 'type.googleapis.com/full.type.name' as the type URL and the + unpack - methods only use the fully qualified type name after the last - '/' + methods only use the fully qualified type name after the last + '/' - in the type URL, for example "foo.bar.com/x/y.z" will yield type + in the type URL, for example "foo.bar.com/x/y.z" will yield + type - name "y.z". + name "y.z". - JSON + JSON - ==== - The JSON representation of an `Any` value uses the regular + The JSON representation of an `Any` value uses the regular - representation of the deserialized, embedded message, with an + representation of the deserialized, embedded message, with an - additional field `@type` which contains the type URL. Example: + additional field `@type` which contains the type URL. Example: - package google.profile; - message Person { - string first_name = 1; - string last_name = 2; - } + package google.profile; + message Person { + string first_name = 1; + string last_name = 2; + } - { - "@type": "type.googleapis.com/google.profile.Person", - "firstName": , - "lastName": - } + { + "@type": "type.googleapis.com/google.profile.Person", + "firstName": , + "lastName": + } - If the embedded message type is well-known and has a custom JSON + If the embedded message type is well-known and has a custom + JSON - representation, that representation will be embedded adding a - field + representation, that representation will be embedded adding a + field - `value` which holds the custom JSON in addition to the `@type` + `value` which holds the custom JSON in addition to the `@type` - field. Example (for message [google.protobuf.Duration][]): + field. Example (for message [google.protobuf.Duration][]): - { - "@type": "type.googleapis.com/google.protobuf.Duration", - "value": "1.212s" - } - expiration: + { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } + description: >- + messages is a list of `sdk.Msg`s that will be executed if the + proposal passes. + title: type: string - format: date-time - description: |- - Grant gives permissions to execute - the provide method with expiration time. - description: authorizations is a list of grants granted for grantee by granter. + description: 'Since: cosmos-sdk 0.47' + title: title is the title of the proposal + summary: + type: string + description: 'Since: cosmos-sdk 0.47' + title: summary is a short summary of the proposal + description: >- + Proposal defines a group proposal. Any member of a group can submit + a proposal + + for a group policy to decide upon. + + A proposal consists of a set of `sdk.Msg`s that will be executed if + the proposal + + passes as well as some optional metadata associated with the + proposal. + description: proposals are the proposals with given group policy. pagination: - description: pagination defines an pagination for the response. + description: pagination defines the pagination in the response. type: object properties: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. 
total: type: string format: uint64 @@ -47752,267 +71213,161 @@ definitions: was set, its value is undefined otherwise description: >- - QueryGrantsResponse is the response type for the Query/Authorizations RPC - method. - cosmos.feegrant.v1beta1.Grant: + QueryProposalsByGroupPolicyResponse is the Query/ProposalByGroupPolicy + response type. + cosmos.group.v1.QueryTallyResultResponse: type: object properties: - granter: - type: string - description: >- - granter is the address of the user granting an allowance of their - funds. - grantee: - type: string - description: >- - grantee is the address of the user being granted an allowance of - another user's funds. - allowance: - description: allowance can be any of basic and filtered fee allowance. + tally: + description: tally defines the requested tally. type: object properties: - type_url: + yes_count: type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized - - protocol buffer message. This string must contain at least - - one "/" character. The last segment of the URL's path must - represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in a - canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary all types - that they - - expect it to use in the context of Any. However, for URLs which - use the - - scheme `http`, `https`, or no scheme, one can optionally set up a - type - - server that maps type URLs to message definitions as follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in the - official - - protobuf release, and it is not used for type URLs beginning with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty scheme) might be - - used with implementation specific semantics. - value: + description: yes_count is the weighted sum of yes votes. + abstain_count: type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above specified - type. - title: Grant is stored in the KVStore to record a grant with full context - cosmos.feegrant.v1beta1.QueryAllowanceResponse: + description: abstain_count is the weighted sum of abstainers. + no_count: + type: string + description: no_count is the weighted sum of no votes. + no_with_veto_count: + type: string + description: no_with_veto_count is the weighted sum of veto. + description: QueryTallyResultResponse is the Query/TallyResult response type. + cosmos.group.v1.QueryVoteByProposalVoterResponse: type: object properties: - allowance: - description: allowance is a allowance granted for grantee by granter. + vote: + description: vote is the vote with given proposal_id and voter. type: object properties: - granter: + proposal_id: type: string - description: >- - granter is the address of the user granting an allowance of their - funds. - grantee: + format: uint64 + description: proposal is the unique ID of the proposal. 
+ voter: type: string - description: >- - grantee is the address of the user being granted an allowance of - another user's funds. - allowance: - description: allowance can be any of basic and filtered fee allowance. - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized - - protocol buffer message. This string must contain at least - - one "/" character. The last segment of the URL's path must - represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in a - canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary all - types that they - - expect it to use in the context of Any. However, for URLs - which use the - - scheme `http`, `https`, or no scheme, one can optionally set - up a type - - server that maps type URLs to message definitions as follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on - the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in the - official - - protobuf release, and it is not used for type URLs beginning - with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty scheme) might - be - - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - title: Grant is stored in the KVStore to record a grant with full context + description: voter is the account address of the voter. + option: + description: option is the voter's choice on the proposal. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + metadata: + type: string + description: metadata is any arbitrary metadata attached to the vote. + submit_time: + type: string + format: date-time + description: submit_time is the timestamp when the vote was submitted. description: >- - QueryAllowanceResponse is the response type for the Query/Allowance RPC - method. - cosmos.feegrant.v1beta1.QueryAllowancesResponse: + QueryVoteByProposalVoterResponse is the Query/VoteByProposalVoter response + type. + cosmos.group.v1.QueryVotesByProposalResponse: type: object properties: - allowances: + votes: type: array items: type: object properties: - granter: + proposal_id: type: string - description: >- - granter is the address of the user granting an allowance of - their funds. - grantee: + format: uint64 + description: proposal is the unique ID of the proposal. + voter: type: string - description: >- - grantee is the address of the user being granted an allowance of - another user's funds. - allowance: - description: allowance can be any of basic and filtered fee allowance. - type: object - properties: - type_url: - type: string - description: >- - A URL/resource name that uniquely identifies the type of the - serialized - - protocol buffer message. This string must contain at least - - one "/" character. 
The last segment of the URL's path must - represent - - the fully qualified name of the type (as in - - `path/google.protobuf.Duration`). The name should be in a - canonical form - - (e.g., leading "." is not accepted). - - - In practice, teams usually precompile into the binary all - types that they - - expect it to use in the context of Any. However, for URLs - which use the - - scheme `http`, `https`, or no scheme, one can optionally set - up a type - - server that maps type URLs to message definitions as - follows: - - - * If no scheme is provided, `https` is assumed. - - * An HTTP GET on the URL must yield a - [google.protobuf.Type][] - value in binary format, or produce an error. - * Applications are allowed to cache lookup results based on - the - URL, or have them precompiled into a binary to avoid any - lookup. Therefore, binary compatibility needs to be preserved - on changes to types. (Use versioned type names to manage - breaking changes.) - - Note: this functionality is not currently available in the - official - - protobuf release, and it is not used for type URLs beginning - with - - type.googleapis.com. - - - Schemes other than `http`, `https` (or the empty scheme) - might be + description: voter is the account address of the voter. + option: + description: option is the voter's choice on the proposal. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + metadata: + type: string + description: metadata is any arbitrary metadata attached to the vote. + submit_time: + type: string + format: date-time + description: submit_time is the timestamp when the vote was submitted. + description: Vote represents a vote for a proposal. + description: votes are the list of votes for given proposal_id. + pagination: + description: pagination defines the pagination in the response. + type: object + properties: + next_key: + type: string + format: byte + description: |- + next_key is the key to be passed to PageRequest.key to + query the next page most efficiently. It will be empty if + there are no more results. + total: + type: string + format: uint64 + title: >- + total is total number of results available if + PageRequest.count_total - used with implementation specific semantics. - value: - type: string - format: byte - description: >- - Must be a valid serialized protocol buffer of the above - specified type. - title: Grant is stored in the KVStore to record a grant with full context - description: allowances are allowance's granted for grantee by granter. + was set, its value is undefined otherwise + description: QueryVotesByProposalResponse is the Query/VotesByProposal response type. + cosmos.group.v1.QueryVotesByVoterResponse: + type: object + properties: + votes: + type: array + items: + type: object + properties: + proposal_id: + type: string + format: uint64 + description: proposal is the unique ID of the proposal. + voter: + type: string + description: voter is the account address of the voter. + option: + description: option is the voter's choice on the proposal. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + metadata: + type: string + description: metadata is any arbitrary metadata attached to the vote. 
+ submit_time: + type: string + format: date-time + description: submit_time is the timestamp when the vote was submitted. + description: Vote represents a vote for a proposal. + description: votes are the list of votes by given voter. pagination: - description: pagination defines an pagination for the response. + description: pagination defines the pagination in the response. type: object properties: next_key: type: string format: byte - title: |- + description: |- next_key is the key to be passed to PageRequest.key to - query the next page most efficiently + query the next page most efficiently. It will be empty if + there are no more results. total: type: string format: uint64 @@ -48021,9 +71376,66 @@ definitions: PageRequest.count_total was set, its value is undefined otherwise - description: >- - QueryAllowancesResponse is the response type for the Query/Allowances RPC - method. -securityDefinitions: - kms: - type: basic + description: QueryVotesByVoterResponse is the Query/VotesByVoter response type. + cosmos.group.v1.TallyResult: + type: object + properties: + yes_count: + type: string + description: yes_count is the weighted sum of yes votes. + abstain_count: + type: string + description: abstain_count is the weighted sum of abstainers. + no_count: + type: string + description: no_count is the weighted sum of no votes. + no_with_veto_count: + type: string + description: no_with_veto_count is the weighted sum of veto. + description: TallyResult represents the sum of weighted votes for each vote option. + cosmos.group.v1.Vote: + type: object + properties: + proposal_id: + type: string + format: uint64 + description: proposal is the unique ID of the proposal. + voter: + type: string + description: voter is the account address of the voter. + option: + description: option is the voter's choice on the proposal. + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + metadata: + type: string + description: metadata is any arbitrary metadata attached to the vote. + submit_time: + type: string + format: date-time + description: submit_time is the timestamp when the vote was submitted. + description: Vote represents a vote for a proposal. + cosmos.group.v1.VoteOption: + type: string + enum: + - VOTE_OPTION_UNSPECIFIED + - VOTE_OPTION_YES + - VOTE_OPTION_ABSTAIN + - VOTE_OPTION_NO + - VOTE_OPTION_NO_WITH_VETO + default: VOTE_OPTION_UNSPECIFIED + description: |- + VoteOption enumerates the valid vote options for a given proposal. + + - VOTE_OPTION_UNSPECIFIED: VOTE_OPTION_UNSPECIFIED defines an unspecified vote option which will + return an error. + - VOTE_OPTION_YES: VOTE_OPTION_YES defines a yes vote option. + - VOTE_OPTION_ABSTAIN: VOTE_OPTION_ABSTAIN defines an abstain vote option. + - VOTE_OPTION_NO: VOTE_OPTION_NO defines a no vote option. + - VOTE_OPTION_NO_WITH_VETO: VOTE_OPTION_NO_WITH_VETO defines a no with veto vote option. 
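A side note on the regenerated comments above: they replace the deprecated `ptypes.MarshalAny`/`ptypes.UnmarshalAny` helpers with `anypb.New` and `Any.UnmarshalTo` from google.golang.org/protobuf. The following is only a minimal, self-contained sketch of that newer API (it is not part of the patch), using the well-known Duration type in place of the docs' placeholder `pb.Foo`:

    package main

    import (
        "fmt"
        "log"
        "time"

        "google.golang.org/protobuf/types/known/anypb"
        "google.golang.org/protobuf/types/known/durationpb"
    )

    func main() {
        // Pack: anypb.New replaces the deprecated ptypes.MarshalAny.
        d := durationpb.New(1212 * time.Millisecond)
        anyMsg, err := anypb.New(d)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(anyMsg.TypeUrl) // type.googleapis.com/google.protobuf.Duration

        // Unpack: Any.UnmarshalTo replaces the deprecated ptypes.UnmarshalAny.
        out := &durationpb.Duration{}
        if err := anyMsg.UnmarshalTo(out); err != nil {
            log.Fatal(err)
        }
        fmt.Println(out.AsDuration()) // 1.212s
    }
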
diff --git a/go.mod b/go.mod deleted file mode 100644 index 4b434c19..00000000 --- a/go.mod +++ /dev/null @@ -1,172 +0,0 @@ -module github.com/akash-network/akash-api - -go 1.21 - -require ( - github.com/99designs/keyring v1.2.1 - github.com/boz/go-lifecycle v0.1.1 - github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 - github.com/cosmos/cosmos-sdk v0.45.16 - github.com/edwingeng/deque/v2 v2.1.1 - github.com/gogo/protobuf v1.3.3 - github.com/golang/protobuf v1.5.4 - github.com/grpc-ecosystem/grpc-gateway v1.16.0 - github.com/pkg/errors v0.9.1 - github.com/pseudomuto/protoc-gen-doc v1.5.1 - github.com/regen-network/cosmos-proto v0.3.1 - github.com/spf13/pflag v1.0.5 - github.com/stretchr/testify v1.9.0 - github.com/tendermint/tendermint v0.34.27 - go.step.sm/crypto v0.44.6 - google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 - google.golang.org/grpc v1.63.2 - google.golang.org/protobuf v1.33.0 - gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.26.1 - k8s.io/apimachinery v0.26.1 -) - -replace ( - // use cosmos fork of keyring - github.com/99designs/keyring => github.com/cosmos/keyring v1.2.0 - - // dgrijalva/jwt-go is deprecated and doesn't receive security updates. - // TODO: remove it: https://github.com/cosmos/cosmos-sdk/issues/13134 - github.com/dgrijalva/jwt-go => github.com/golang-jwt/jwt/v4 v4.4.2 - - // Fix upstream GHSA-h395-qcrw-5vmq vulnerability. - // TODO Remove it: https://github.com/cosmos/cosmos-sdk/issues/10409 - github.com/gin-gonic/gin => github.com/gin-gonic/gin v1.8.1 - - // Use regen gogoproto fork - // To be replaced by cosmos/gogoproto in future versions - github.com/gogo/protobuf => github.com/regen-network/protobuf v1.3.3-alpha.regen.1 - github.com/tendermint/tendermint => github.com/akash-network/cometbft v0.34.27-akash - // latest grpc doesn't work with with cosmos-sdk modified proto compiler, so we need to enforce - // the following version across all dependencies. 
- google.golang.org/grpc => google.golang.org/grpc v1.33.2 -) - -require ( - cosmossdk.io/api v0.2.6 // indirect - cosmossdk.io/core v0.5.1 // indirect - cosmossdk.io/depinject v1.0.0-alpha.3 // indirect - filippo.io/edwards25519 v1.1.0 // indirect - github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect - github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d // indirect - github.com/DataDog/zstd v1.5.0 // indirect - github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect - github.com/Masterminds/semver v1.5.0 // indirect - github.com/Masterminds/sprig v2.15.0+incompatible // indirect - github.com/aokoli/goutils v1.0.1 // indirect - github.com/armon/go-metrics v0.4.1 // indirect - github.com/beorn7/perks v1.0.1 // indirect - github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect - github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect - github.com/cespare/xxhash v1.1.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/cockroachdb/errors v1.9.1 // indirect - github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect - github.com/cockroachdb/pebble v0.0.0-20220817183557-09c6e030a677 // indirect - github.com/cockroachdb/redact v1.1.3 // indirect - github.com/confio/ics23/go v0.9.1 // indirect - github.com/cosmos/btcutil v1.0.4 // indirect - github.com/cosmos/cosmos-db v0.0.0-20221226095112-f3c38ecb5e32 // indirect - github.com/cosmos/cosmos-proto v1.0.0-beta.1 // indirect - github.com/cosmos/go-bip39 v1.0.0 // indirect - github.com/cosmos/gorocksdb v1.2.0 // indirect - github.com/cosmos/iavl v0.19.5 // indirect - github.com/cosmos/ledger-cosmos-go v0.12.2 // indirect - github.com/danieljoos/wincred v1.1.2 // indirect - github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect - github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 // indirect - github.com/dgraph-io/badger/v2 v2.2007.4 // indirect - github.com/dgraph-io/ristretto v0.0.3 // indirect - github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/dustin/go-humanize v1.0.0 // indirect - github.com/dvsekhvalnov/jose2go v1.5.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/getsentry/sentry-go v0.17.0 // indirect - github.com/go-kit/kit v0.12.0 // indirect - github.com/go-kit/log v0.2.1 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect - github.com/golang/snappy v0.0.4 // indirect - github.com/google/btree v1.1.2 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/gorilla/mux v1.8.1 // indirect - github.com/gorilla/websocket v1.5.1 // indirect - github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect - github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect - github.com/gtank/merlin v0.1.1 // indirect - github.com/gtank/ristretto255 v0.1.2 // indirect - github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-uuid v1.0.1 // indirect - github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect - github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c // indirect - github.com/hdevalence/ed25519consensus v0.0.0-20220222234857-c00d1f31bab3 // indirect - github.com/huandu/xstrings v1.3.3 // indirect - github.com/imdario/mergo v0.3.13 // indirect - github.com/inconshreveable/mousetrap v1.1.0 
// indirect - github.com/jmhodges/levigo v1.0.1-0.20191019112844-b572e7f4cdac // indirect - github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.0 // indirect - github.com/kr/pretty v0.3.1 // indirect - github.com/kr/text v0.2.0 // indirect - github.com/libp2p/go-buffer-pool v0.1.0 // indirect - github.com/linxGnu/grocksdb v1.7.10 // indirect - github.com/magiconair/properties v1.8.7 // indirect - github.com/mattn/go-isatty v0.0.19 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/mtibben/percent v0.2.1 // indirect - github.com/pelletier/go-toml/v2 v2.1.0 // indirect - github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect - github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.42.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect - github.com/pseudomuto/protokit v0.2.0 // indirect - github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect - github.com/rogpeppe/go-internal v1.10.0 // indirect - github.com/sagikazarmark/locafero v0.4.0 // indirect - github.com/sagikazarmark/slog-shim v0.1.0 // indirect - github.com/sasha-s/go-deadlock v0.3.1 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/cobra v1.8.0 // indirect - github.com/spf13/viper v1.18.2 // indirect - github.com/stretchr/objx v0.5.2 // indirect - github.com/subosito/gotenv v1.6.0 // indirect - github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect - github.com/tendermint/go-amino v0.16.0 // indirect - github.com/tendermint/tm-db v0.6.7 // indirect - github.com/tidwall/btree v1.5.0 // indirect - github.com/zondax/hid v0.9.1 // indirect - github.com/zondax/ledger-go v0.14.1 // indirect - go.etcd.io/bbolt v1.3.6 // indirect - go.uber.org/atomic v1.10.0 // indirect - go.uber.org/multierr v1.9.0 // indirect - golang.org/x/crypto v0.22.0 // indirect - golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect - golang.org/x/net v0.24.0 // indirect - golang.org/x/sys v0.19.0 // indirect - golang.org/x/term v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect - gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/klog/v2 v2.80.1 // indirect - k8s.io/utils v0.0.0-20221107191617-1a15be271d1d // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect -) diff --git a/go.sum b/go.sum deleted file mode 100644 index a358a0c7..00000000 --- a/go.sum +++ /dev/null @@ -1,880 +0,0 @@ -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cosmossdk.io/api v0.2.6 h1:AoNwaLLapcLsphhMK6+o0kZl+D6MMUaHVqSdwinASGU= -cosmossdk.io/api v0.2.6/go.mod h1:u/d+GAxil0nWpl1XnQL8nkziQDIWuBDhv8VnDm/s6dI= -cosmossdk.io/core 
v0.5.1 h1:vQVtFrIYOQJDV3f7rw4pjjVqc1id4+mE0L9hHP66pyI= -cosmossdk.io/core v0.5.1/go.mod h1:KZtwHCLjcFuo0nmDc24Xy6CRNEL9Vl/MeimQ2aC7NLE= -cosmossdk.io/depinject v1.0.0-alpha.3 h1:6evFIgj//Y3w09bqOUOzEpFj5tsxBqdc5CfkO7z+zfw= -cosmossdk.io/depinject v1.0.0-alpha.3/go.mod h1:eRbcdQ7MRpIPEM5YUJh8k97nxHpYbc3sMUnEtt8HPWU= -dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -filippo.io/edwards25519 v1.0.0-rc.1/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= -filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= -filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= -github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= -github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= -github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= -github.com/CloudyKit/fastprinter v0.0.0-20170127035650-74b38d55f37a/go.mod h1:EFZQ978U7x8IRnstaskI3IysnWY5Ao3QgZUKOXlsAdw= -github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= -github.com/CloudyKit/jet v2.1.3-0.20180809161101-62edd43e4f88+incompatible/go.mod h1:HPYO+50pSWkPoj9Q/eq0aRGByCL6ScRlUmiEX5Zgm+w= -github.com/CloudyKit/jet/v3 v3.0.0/go.mod h1:HKQPgSJmdK8hdoAbKUUWajkHyHo4RaU5rMdUywE7VMo= -github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/zstd v1.4.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= -github.com/DataDog/zstd v1.5.0 h1:+K/VEwIAaPcHiMtQvpLD4lqW7f0Gk3xdYZmI1hD+CXo= -github.com/DataDog/zstd v1.5.0/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= -github.com/HdrHistogram/hdrhistogram-go v1.1.2 h1:5IcZpTvzydCQeHzK4Ef/D5rrSqwxob0t8PQPMybUNFM= -github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= -github.com/Joker/hpp v1.0.0/go.mod h1:8x5n+M1Hp5hC0g8okX3sR3vFQwynaX/UgSOM9MeBKzY= -github.com/Joker/jade v1.0.1-0.20190614124447-d475f43051e7/go.mod h1:6E6s8o2AE4KhCrqr6GRJjdC/gNfTdxkIXvuGZZda2VM= -github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v2.15.0+incompatible h1:0gSxPGWS9PAr7U2NsQ2YQg6juRDINkUyuvbb4b2Xm8w= -github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/Shopify/goreferrer v0.0.0-20181106222321-ec9c9a553398/go.mod h1:a1uqRtAwp2Xwc6WNPJEufxJ7fx3npB4UV/JOLmbu5I0= -github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= -github.com/VividCortex/gohistogram 
v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig= -github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= -github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= -github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/akash-network/cometbft v0.34.27-akash h1:V1dApDOr8Ee7BJzYyQ7Z9VBtrAul4+baMeA6C49dje0= -github.com/akash-network/cometbft v0.34.27-akash/go.mod h1:BcCbhKv7ieM0KEddnYXvQZR+pZykTKReJJYf7YC7qhw= -github.com/alecthomas/participle/v2 v2.0.0-alpha7 h1:cK4vjj0VSgb3lN1nuKA5F7dw+1s1pWBe5bx7nNCnN+c= -github.com/alecthomas/participle/v2 v2.0.0-alpha7/go.mod h1:NumScqsC42o9x+dGj8/YqsIfhrIQjFEOFovxotbBirA= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= -github.com/aokoli/goutils v1.0.1 h1:7fpzNGoJ3VA8qcrm++XEE1QUe0mIwNeLa02Nwq7RDkg= -github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= -github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= -github.com/aymerick/raymond v2.0.3-0.20180322193309-b565731e1464+incompatible/go.mod h1:osfaiScAUVup+UC9Nfq76eWqDhXlp+4UYaA8uhTBO6g= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= -github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/boz/go-lifecycle v0.1.1 h1:tG/wff7Zxbkf19g4D4I0G8Y4sq83iT5QjD4rzEf/zrI= -github.com/boz/go-lifecycle v0.1.1/go.mod h1:zdagAUMcC2C0OmQkBlJZFV77uF4GCVaGphAexGi7oho= -github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= -github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= -github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= -github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ= -github.com/btcsuite/btcd/btcutil v1.1.2/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 
h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= -github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= -github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= -github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= -github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= -github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= -github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= -github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cockroachdb/apd/v3 v3.1.0 h1:MK3Ow7LH0W8zkd5GMKA1PvS9qG3bWFI95WaVNfyZJ/w= -github.com/cockroachdb/apd/v3 v3.1.0/go.mod h1:6qgPBMXjATAdD/VefbRP9NoSLKjbB4LCoA7gN4LpHs4= -github.com/cockroachdb/datadriven v1.0.0/go.mod h1:5Ib8Meh+jk1RlHIXej6Pzevx/NLlNvQB9pmSBZErGA4= -github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= -github.com/cockroachdb/errors v1.6.1/go.mod h1:tm6FTP5G81vwJ5lC0SizQo374JNCOPrHyXGitRJoDqM= -github.com/cockroachdb/errors v1.8.1/go.mod h1:qGwQn6JmZ+oMjuLwjWzUNqblqk0xl4CVV3SQbGwK7Ac= -github.com/cockroachdb/errors v1.9.1 h1:yFVvsI0VxmRShfawbt/laCIDy/mtTqqnvoNgiy5bEV8= -github.com/cockroachdb/errors v1.9.1/go.mod h1:2sxOtL2WIc096WSZqZ5h8fa17rdDq9HZOZLBCor4mBk= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/cockroachdb/logtags v0.0.0-20211118104740-dabe8e521a4f/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= -github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= -github.com/cockroachdb/pebble v0.0.0-20220817183557-09c6e030a677 h1:qbb/AE938DFhOajUYh9+OXELpSF9KZw2ZivtmW6eX1Q= -github.com/cockroachdb/pebble v0.0.0-20220817183557-09c6e030a677/go.mod h1:890yq1fUb9b6dGNwssgeUO5vQV9qfXnCPxAJhBQfXw0= -github.com/cockroachdb/redact v1.0.8/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= -github.com/cockroachdb/redact v1.1.3 h1:AKZds10rFSIj7qADf0g46UixK8NNLwWTNdCIGS5wfSQ= -github.com/cockroachdb/redact v1.1.3/go.mod 
h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= -github.com/cockroachdb/sentry-go v0.6.1-cockroachdb.2/go.mod h1:8BT+cPK6xvFOcRlk0R8eg+OTkcqI6baNH4xAkpiYVvQ= -github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/coinbase/rosetta-sdk-go v0.7.9 h1:lqllBjMnazTjIqYrOGv8h8jxjg9+hJazIGZr9ZvoCcA= -github.com/coinbase/rosetta-sdk-go v0.7.9/go.mod h1:0/knutI7XGVqXmmH4OQD8OckFrbQ8yMsUZTG7FXCR2M= -github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= -github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= -github.com/confio/ics23/go v0.9.1 h1:3MV46eeWwO3xCauKyAtuAdJYMyPnnchW4iLr2bTw6/U= -github.com/confio/ics23/go v0.9.1/go.mod h1:4LPZ2NYqnYIVRklaozjNR1FScgDJ2s5Xrp+e/mYVRak= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/cosmos/btcutil v1.0.4 h1:n7C2ngKXo7UC9gNyMNLbzqz7Asuf+7Qv4gnX/rOdQ44= -github.com/cosmos/btcutil v1.0.4/go.mod h1:Ffqc8Hn6TJUdDgHBwIZLtrLQC1KdJ9jGJl/TvgUaxbU= -github.com/cosmos/cosmos-db v0.0.0-20221226095112-f3c38ecb5e32 h1:zlCp9n3uwQieELltZWHRmwPmPaZ8+XoL2Sj+A2YJlr8= -github.com/cosmos/cosmos-db v0.0.0-20221226095112-f3c38ecb5e32/go.mod h1:kwMlEC4wWvB48zAShGKVqboJL6w4zCLesaNQ3YLU2BQ= -github.com/cosmos/cosmos-proto v1.0.0-beta.1 h1:iDL5qh++NoXxG8hSy93FdYJut4XfgbShIocllGaXx/0= -github.com/cosmos/cosmos-proto v1.0.0-beta.1/go.mod h1:8k2GNZghi5sDRFw/scPL8gMSowT1vDA+5ouxL8GjaUE= -github.com/cosmos/cosmos-sdk v0.45.16 h1:5ba/Bh5/LE55IwHQuCU4fiG4eXeDKtSWzehXRpaKDcw= -github.com/cosmos/cosmos-sdk v0.45.16/go.mod h1:bScuNwWAP0TZJpUf+SHXRU3xGoUPp+X9nAzfeIXts40= -github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= -github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= -github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= -github.com/cosmos/gorocksdb v1.2.0 h1:d0l3jJG8M4hBouIZq0mDUHZ+zjOx044J3nGRskwTb4Y= -github.com/cosmos/gorocksdb v1.2.0/go.mod h1:aaKvKItm514hKfNJpUJXnnOWeBnk2GL4+Qw9NHizILw= -github.com/cosmos/iavl v0.19.5 h1:rGA3hOrgNxgRM5wYcSCxgQBap7fW82WZgY78V9po/iY= -github.com/cosmos/iavl v0.19.5/go.mod h1:X9PKD3J0iFxdmgNLa7b2LYWdsGd90ToV5cAONApkEPw= -github.com/cosmos/keyring v1.2.0 h1:8C1lBP9xhImmIabyXW4c3vFjjLiBdGCmfLUfeZlV1Yo= -github.com/cosmos/keyring v1.2.0/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= -github.com/cosmos/ledger-cosmos-go v0.12.2 h1:/XYaBlE2BJxtvpkHiBm97gFGSGmYGKunKyF3nNqAXZA= -github.com/cosmos/ledger-cosmos-go v0.12.2/go.mod h1:ZcqYgnfNJ6lAXe4HPtWgarNEY+B74i+2/8MhZw4ziiI= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= -github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cucumber/common/gherkin/go/v22 v22.0.0 h1:4K8NqptbvdOrjL9DEea6HFjSpbdT9+Q5kgLpmmsHYl0= -github.com/cucumber/common/gherkin/go/v22 v22.0.0/go.mod h1:3mJT10B2GGn3MvVPd3FwR7m2u4tLhSRhWUqJU4KN4Fg= 
-github.com/cucumber/common/messages/go/v17 v17.1.1 h1:RNqopvIFyLWnKv0LfATh34SWBhXeoFTJnSrgm9cT/Ts= -github.com/cucumber/common/messages/go/v17 v17.1.1/go.mod h1:bpGxb57tDE385Rb2EohgUadLkAbhoC4IyCFi89u/JQI= -github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= -github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= -github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= -github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= -github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1 h1:YLtO71vCjJRCBcrPMtQ9nqBsqpA1m5sE92cU+pd5Mcc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.1/go.mod h1:hyedUtir6IdtD/7lIxGeCxkaw7y45JueMRL4DIyJDKs= -github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= -github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= -github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= -github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= -github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.3 h1:jh22xisGBjrEVnRZ1DVTpBVQm0Xndu8sMl0CWDzSIBI= -github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= -github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= -github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dvsekhvalnov/jose2go v1.5.0 h1:3j8ya4Z4kMCwT5nXIKFSV84YS+HdqSSO0VsTQxaLAeM= -github.com/dvsekhvalnov/jose2go v1.5.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= -github.com/edwingeng/deque/v2 v2.1.1 h1:+xjC3TnaeMPLZMi7QQf9jN2K00MZmTwruApqplbL9IY= -github.com/edwingeng/deque/v2 v2.1.1/go.mod h1:HukI8CQe9KDmZCcURPZRYVYjH79Zy2tIjTF9sN3Bgb0= -github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= -github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= 
-github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= -github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= -github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= -github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flosch/pongo2 v0.0.0-20190707114632-bbf5a6c351f4/go.mod h1:T9YF2M40nIgbVgp3rreNmTged+9HrbNTIQf1PsaIiTA= -github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= -github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/gavv/httpexpect v2.0.0+incompatible/go.mod h1:x+9tiU1YnrOvnB725RkpoLv1M62hOWzwo5OXotisrKc= -github.com/getsentry/sentry-go v0.12.0/go.mod h1:NSap0JBYWzHND8oMbyi0+XZhUalc1TBdRL1M71JZW2c= -github.com/getsentry/sentry-go v0.17.0 h1:UustVWnOoDFHBS7IJUB2QK/nB5pap748ZEp0swnQJak= -github.com/getsentry/sentry-go v0.17.0/go.mod h1:B82dxtBvxG0KaPD8/hfSV+VcHD+Lg/xUS4JuQn1P4cM= -github.com/ghemawat/stream v0.0.0-20171120220530-696b145b53b9/go.mod h1:106OIgooyS7OzLDOpUGgm9fA3bQENb/cFSyyBmMoJDs= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= -github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= -github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= -github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.12.0 
h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= -github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= -github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= -github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= -github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= -github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= -github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= -github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= -github.com/gofrs/uuid v4.3.0+incompatible h1:CaSVZxm5B+7o45rtab4jC2G37WGYX1zQfuU2i6DSvnc= -github.com/gofrs/uuid v4.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gogo/gateway v1.1.0 h1:u0SuhL9+Il+UbjM9VIE3ntfRujKbvVpFvNB4HbjeVQ0= -github.com/gogo/gateway v1.1.0/go.mod h1:S7rR8FRQyG3QFESeSv4l2WnsyzlCLG0CzBbUUo/mbic= -github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= -github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= -github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= -github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= -github.com/golang-jwt/jwt/v4 v4.4.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= 
-github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= -github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= -github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= -github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= -github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= -github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= -github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= -github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gomodule/redigo v1.7.1-0.20190724094224-574c33c3df38/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= -github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= -github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= -github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= -github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= -github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= -github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= -github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= -github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= -github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= -github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= -github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= -github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= -github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c 
h1:PdZEHcpa3117kJ1Wa5EYupzCzn9QlBby8Fx2YpZPYvo= -github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hdevalence/ed25519consensus v0.0.0-20220222234857-c00d1f31bab3 h1:aSVUgRRRtOrZOC1fYmY9gV0e9z/Iu+xNVSASWjsuyGU= -github.com/hdevalence/ed25519consensus v0.0.0-20220222234857-c00d1f31bab3/go.mod h1:5PC6ZNPde8bBqU/ewGZig35+UIZtw9Ytxez8/q5ZyFE= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/hydrogen18/memlistener v0.0.0-20141126152155-54553eb933fb/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= -github.com/hydrogen18/memlistener v0.0.0-20200120041712-dcc25e7acd91/go.mod h1:qEIFzExnS6016fRpRfxrExeVn2gbClQA99gQhnIcdhE= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/imkira/go-interpol v1.1.0/go.mod h1:z0h2/2T3XF8kyEPpRgJ3kmNv+C43p+I/CoI+jC3w2iA= -github.com/improbable-eng/grpc-web v0.14.1 h1:NrN4PY71A6tAz2sKDvC5JCauENWp0ykG8Oq1H3cpFvw= -github.com/improbable-eng/grpc-web v0.14.1/go.mod h1:zEjGHa8DAlkoOXmswrNvhUGEYQA9UI7DhrGeHR1DMGU= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/iris-contrib/blackfriday v2.0.0+incompatible/go.mod h1:UzZ2bDEoaSGPbkg6SAB4att1aAwTmVIx/5gCVqeyUdI= -github.com/iris-contrib/go.uuid v2.0.0+incompatible/go.mod h1:iz2lgM/1UnEf1kP0L/+fafWORmlnuysV2EMP8MW+qe0= -github.com/iris-contrib/i18n v0.0.0-20171121225848-987a633949d0/go.mod h1:pMCz62A0xJL6I+umB2YTlFRwWXaDFA0jy+5HzGiJjqI= -github.com/iris-contrib/jade v1.1.3/go.mod h1:H/geBymxJhShH5kecoiOCSssPX7QWYH7UaeZTSWddIk= -github.com/iris-contrib/pongo2 v0.0.1/go.mod h1:Ssh+00+3GAZqSQb30AvBRNxBx7rf0GqwkjqxNd0u65g= -github.com/iris-contrib/schema v0.0.1/go.mod h1:urYA3uvUNG1TIIjOSCzHr9/LmbQo8LrOcOqfqxa4hXw= -github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jmhodges/levigo v1.0.1-0.20191019112844-b572e7f4cdac h1:GcJkaxD5Wy/Ucn+L0USlpbGJy9O6+7r0nBI7ftJ7Uu0= -github.com/jmhodges/levigo v1.0.1-0.20191019112844-b572e7f4cdac/go.mod h1:dM7ihgFM8Do6WGIfOXWPgpJ+4bKGR/4ZkYh8HKDdFy4= -github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/errors v0.0.0-20181118221551-089d3ea4e4d5/go.mod h1:W54LbzXuIE0boCoNJfwqpmkKJ1O4TCTZMetAt6jGk7Q= -github.com/juju/loggo v0.0.0-20180524022052-584905176618/go.mod h1:vgyd7OREkbtVEN/8IXZe5Ooef3LQePvuBm9UWj6ZL8U= -github.com/juju/testing v0.0.0-20180920084828-472a3e8b2073/go.mod h1:63prj8cnj0tU0S9OHjGJn+b1h0ZghCndfnbQolrYTwA= 
-github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k= -github.com/kataras/golog v0.0.9/go.mod h1:12HJgwBIZFNGL0EJnMRhmvGA0PQGx8VFwrZtM4CqbAk= -github.com/kataras/golog v0.0.10/go.mod h1:yJ8YKCmyL+nWjERB90Qwn+bdyBZsaQwU3bTVFgkFIp8= -github.com/kataras/iris/v12 v12.0.1/go.mod h1:udK4vLQKkdDqMGJJVd/msuMtN6hpYJhg/lSzuxjhO+U= -github.com/kataras/iris/v12 v12.1.8/go.mod h1:LMYy4VlP67TQ3Zgriz8RE2h2kMZV2SgMYbq3UhfoFmE= -github.com/kataras/neffos v0.0.10/go.mod h1:ZYmJC07hQPW67eKuzlfY7SO3bC0mw83A3j6im82hfqw= -github.com/kataras/neffos v0.0.14/go.mod h1:8lqADm8PnbeFfL7CLXh1WHw53dG27MC3pgi2R1rmoTE= -github.com/kataras/pio v0.0.0-20190103105442-ea782b38602d/go.mod h1:NV88laa9UiiDuX9AhMbDPkGYSPugBOV6yTZB1l2K9Z0= -github.com/kataras/pio v0.0.2/go.mod h1:hAoW0t9UmXi4R5Oyq5Z4irTbaTsOemSrDGUtaTl7Dro= -github.com/kataras/sitemap v0.0.5/go.mod h1:KY2eugMKiPwsJgx7+U103YZehfvNGOXURubcGyk0Bz8= -github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.9.7/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= -github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= -github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/klauspost/cpuid v1.2.1/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/labstack/echo/v4 v4.1.11/go.mod h1:i541M3Fj6f76NZtHSj7TXnyM8n2gaodfvfxNnFqi74g= -github.com/labstack/echo/v4 v4.5.0/go.mod h1:czIriw4a0C1dFun+ObrXp7ok03xON0N1awStJ6ArI7Y= -github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= -github.com/lib/pq v1.10.6 
h1:jbk+ZieJ0D7EVGJYpL9QTz7/YW6UHbmdnZWYyK5cdBs= -github.com/lib/pq v1.10.6/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= -github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= -github.com/linxGnu/grocksdb v1.7.10 h1:dz7RY7GnFUA+GJO6jodyxgkUeGMEkPp3ikt9hAcNGEw= -github.com/linxGnu/grocksdb v1.7.10/go.mod h1:0hTf+iA+GOr0jDX4CgIYyJZxqOH9XlBh6KVj8+zmF34= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-colorable v0.1.11/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mediocregopher/mediocre-go-lib v0.0.0-20181029021733-cb65787f37ed/go.mod h1:dSsfyI2zABAdhcbvkXqgxOxrCsbYeHCPgrZkku60dSg= -github.com/mediocregopher/radix/v3 v3.3.0/go.mod h1:EmfVyvspXz1uZEyPBMyGK+kjWiKQGvsUt6O3Pj+LDCQ= -github.com/mediocregopher/radix/v3 v3.4.2/go.mod h1:8FL3F6UQRXHXIBSPUs5h0RybMF8i4n7wVopoX3x7Bv8= -github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc= -github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= -github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 h1:QRUSJEgZn2Snx0EmT/QLXibWjSUDjKWvXIT19NBVp94= -github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= -github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= -github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/modern-go/concurrent 
v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/moul/http2curl v1.0.0/go.mod h1:8UbvGypXm98wA/IqH45anm5Y2Z6ep6O31QGOAZ3H0fQ= -github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= -github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/nats.go v1.8.1/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= -github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= -github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= -github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/otiai10/copy v1.6.0 h1:IinKAryFFuPONZ7cm6T6E2QX/vcJwSnlaA5lfoaXIiQ= -github.com/otiai10/copy v1.6.0/go.mod h1:XWfuS3CrI0R6IE0FbgHsEazaXO8G0LpMp9o8tos0x4E= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= -github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.0.1/go.mod 
h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo= -github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= -github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= -github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= -github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= -github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= -github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= -github.com/pseudomuto/protoc-gen-doc v1.5.1 h1:Ah259kcrio7Ix1Rhb6u8FCaOkzf9qRBqXnvAufg061w= -github.com/pseudomuto/protoc-gen-doc 
v1.5.1/go.mod h1:XpMKYg6zkcpgfpCfQ8GcWBDRtRxOmMR5w7pz4Xo+dYM= -github.com/pseudomuto/protokit v0.2.0 h1:hlnBDcy3YEDXH7kc9gV+NLaN0cDzhDvD1s7Y6FZ8RpM= -github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= -github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= -github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= -github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/regen-network/cosmos-proto v0.3.1 h1:rV7iM4SSFAagvy8RiyhiACbWEGotmqzywPxOvwMdxcg= -github.com/regen-network/cosmos-proto v0.3.1/go.mod h1:jO0sVX6a1B36nmE8C9xBFXpNwWejXC7QqCOnH3O0+YM= -github.com/regen-network/gocuke v0.6.2 h1:pHviZ0kKAq2U2hN2q3smKNxct6hS0mGByFMHGnWA97M= -github.com/regen-network/gocuke v0.6.2/go.mod h1:zYaqIHZobHyd0xOrHGPQjbhGJsuZ1oElx150u2o1xuk= -github.com/regen-network/protobuf v1.3.3-alpha.regen.1 h1:OHEc+q5iIAXpqiqFKeLpu5NwTIkVXUs48vFMwzqpqY4= -github.com/regen-network/protobuf v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o= -github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= -github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/rs/zerolog v1.27.0 h1:1T7qCieN22GVc8S4Q2yuexzBb1EqjbgjSH9RohbMjKs= -github.com/rs/zerolog v1.27.0/go.mod h1:7frBqO0oezxmnO7GF86FY++uy8I0Tk/If5ni1G9Qc0U= -github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= -github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= -github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= -github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= -github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod 
h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY= -github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= -github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= -github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= -github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= -github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= -github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= -github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= -github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= -github.com/tendermint/tm-db v0.6.7 h1:fE00Cbl0jayAoqlExN6oyQJ7fR/ZtoVOmvPJ//+shu8= -github.com/tendermint/tm-db v0.6.7/go.mod h1:byQDzFkZV1syXr/ReXS808NxA2xvyuuVgXOJ/088L6I= -github.com/tidwall/btree v1.5.0 h1:iV0yVY/frd7r6qGBXfEYs7DH0gTDgrKTrDjS7xt/IyQ= -github.com/tidwall/btree v1.5.0/go.mod h1:LGm8L/DZjPLmeWGjv5kFrY8dL4uVhMmzmmLYmsObdKE= -github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= -github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.6.0/go.mod h1:FstJa9V+Pj9vQ7OJie2qMHdwemEDaDiSdBnvPM1Su9w= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yalp/jsonpath v0.0.0-20180802001716-5cc68e5049a0/go.mod h1:/LWChgwKmvncFJFHJ7Gvn9wZArjbV5/FppcK2fKk/tI= -github.com/yudai/gojsondiff v1.0.0/go.mod h1:AY32+k2cwILAkW1fbgxQ5mUmMiZFgLIV+FBNExI05xg= -github.com/yudai/golcs v0.0.0-20170316035057-ecda9a501e82/go.mod h1:lgjkn3NuSvDfVJdfcVVdX+jpBxNmX4rDAzaS45IcYoM= -github.com/yudai/pp 
v2.0.1+incompatible/go.mod h1:PuxR/8QJ7cyCkFp/aUDS+JY727OFEZkTdatxwunjIkc= -github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/zondax/hid v0.9.1 h1:gQe66rtmyZ8VeGFcOpbuH3r7erYtNEAezCAYu8LdkJo= -github.com/zondax/hid v0.9.1/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= -github.com/zondax/ledger-go v0.14.1 h1:Pip65OOl4iJ84WTpA4BKChvOufMhhbxED3BaihoZN4c= -github.com/zondax/ledger-go v0.14.1/go.mod h1:fZ3Dqg6qcdXWSOJFKMG8GCTnD7slO/RL2feOQv8K320= -go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.step.sm/crypto v0.44.6 h1:vQg8ujce7fNXDO8EWdriSz+ZSJpYnNh22QrFtRjdyoY= -go.step.sm/crypto v0.44.6/go.mod h1:oKRO4jaf2MaCohJDN+/8ShImkvIgUKfJxxy87gqsnXs= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20191227163750-53104e6ec876/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= -golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= -golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= -golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= -golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= -golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190327091125-710a502c58a2/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= -golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
-golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210909193231-528a39cd75f3/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text 
v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181221001348-537d06c36207/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190327201419-c70d86f8b7cf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= -gonum.org/v1/gonum v0.8.2 h1:CCXrcPKiGGotvnN6jfUsKk4rRqm7q09/YbKb5xCEvtM= -gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= -gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= -gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= -google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto 
v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 h1:rIo7ocm2roD9DcFIX67Ym8icoGCKSARAiPljFhh5suQ= -google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= -google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= -google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= -google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= -google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= -google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= -google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= -google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 
v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.51.1/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= -gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= -k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg= -k8s.io/apimachinery v0.26.1 h1:8EZ/eGJL+hY/MYCNwhmDzVqq2lPl3N3Bo8rvweJwXUQ= -k8s.io/apimachinery v0.26.1/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d h1:0Smp/HP1OH4Rvhe+4B8nWGERtlqAGSftbSbbmm45oFs= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= -nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -pgregory.net/rapid v0.5.3 
h1:163N50IHFqr1phZens4FQOdPgfJscR7a562mjQqeo4M= -pgregory.net/rapid v0.5.3/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/go/.golangci.yaml b/go/.golangci.yaml new file mode 100644 index 00000000..43d53693 --- /dev/null +++ b/go/.golangci.yaml @@ -0,0 +1,38 @@ +--- +issues: + exclude: + - comment on exported (method|function|type|const|var) + exclude-use-default: true + +# Skip generated k8s code +run: + exclude-dirs: + - "^/node/types/v1beta1" + - "^/node/types/v1beta2" + - "^/node/market/v1beta3" + exclude-files: + - "\\.pb\\.go$" + - "\\.pb\\.gw\\.go$" + # Skip vendor/ etc + skip-dirs-use-default: true +linters: + disable-all: true + enable: + - unused + - misspell + - gofmt + - gocritic + - goconst + - govet + - ineffassign + - unparam + - staticcheck + - revive + - gosec + - copyloopvar + - prealloc +linters-settings: + gocritic: + disabled-checks: + - ifElseChain + - singleCaseSwitch diff --git a/go/cli/audit_query.go b/go/cli/audit_query.go new file mode 100644 index 00000000..ffe3ea2a --- /dev/null +++ b/go/cli/audit_query.go @@ -0,0 +1,111 @@ +package cli + +import ( + "context" + + "github.com/spf13/cobra" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + + cflags "pkg.akt.dev/go/cli/flags" + types "pkg.akt.dev/go/node/audit/v1" +) + +func GetQueryAuditCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Audit query commands", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + + cmd.AddCommand( + GetAuditProvidersCmd(), + GetAuditProviderCmd(), + ) + + return cmd +} + +func GetAuditProvidersCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "Query for all providers", + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + pageReq, err := sdkclient.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + params := &types.QueryAllProvidersAttributesRequest{ + Pagination: pageReq, + } + + res, err := cl.Query().Audit().AllProvidersAttributes(ctx, params) + if err != nil { + return err + } + + return cl.ClientContext().PrintProto(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "providers") + + return cmd +} + +func GetAuditProviderCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "get [owner address] [auditor address]", + Short: "Query provider", + Args: cobra.RangeArgs(1, 2), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + owner, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + var res *types.QueryProvidersResponse + if len(args) == 1 { + res, err = cl.Query().Audit().ProviderAttributes(context.Background(), + 
&types.QueryProviderAttributesRequest{ + Owner: owner.String(), + }, + ) + } else { + var auditor sdk.AccAddress + if auditor, err = sdk.AccAddressFromBech32(args[1]); err != nil { + return err + } + + res, err = cl.Query().Audit().ProviderAuditorAttributes(context.Background(), + &types.QueryProviderAuditorRequest{ + Auditor: auditor.String(), + Owner: owner.String(), + }, + ) + } + + if err != nil { + return err + } + + return cl.ClientContext().PrintProto(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/go/cli/audit_tx.go b/go/cli/audit_tx.go new file mode 100644 index 00000000..551c6293 --- /dev/null +++ b/go/cli/audit_tx.go @@ -0,0 +1,224 @@ +package cli + +import ( + "fmt" + "sort" + + "github.com/spf13/cobra" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + + cflags "pkg.akt.dev/go/cli/flags" + types "pkg.akt.dev/go/node/audit/v1" + ptypes "pkg.akt.dev/go/node/provider/v1beta4" + attrtypes "pkg.akt.dev/go/node/types/attributes/v1" +) + +// GetTxAuditCmd returns the transaction commands for audit module +func GetTxAuditCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Audit transaction subcommands", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + + cmd.AddCommand( + GetTxAuditAttributesCmd(), + ) + + return cmd +} + +func GetTxAuditAttributesCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "attr", + Short: "Manage provider attributes", + } + + cmd.AddCommand( + CmdCreateProviderAttributes(), + CmdDeleteProviderAttributes(), + ) + + return cmd +} + +func CmdCreateProviderAttributes() *cobra.Command { + cmd := &cobra.Command{ + Use: "create [provider]", + Short: "Create/update provider attributes", + Args: cobra.MinimumNArgs(1), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + if ((len(args) - 1) % 2) != 0 { + return fmt.Errorf("attributes must be provided as pairs") + } + + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + providerAddress, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + attr, err := readAttributes(cmd, cctx, providerAddress.String(), args[1:]) + if err != nil { + return err + } + + if len(attr) == 0 { + return fmt.Errorf("no attributes provided|found") + } + + msg := &types.MsgSignProviderAttributes{ + Auditor: cctx.GetFromAddress().String(), + Owner: providerAddress.String(), + Attributes: attr, + } + + if err = msg.ValidateBasic(); err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + setCmdProviderFlags(cmd) + + return cmd +} + +func CmdDeleteProviderAttributes() *cobra.Command { + cmd := &cobra.Command{ + Use: "delete [provider]", + Short: "Delete provider attributes", + Args: cobra.MinimumNArgs(1), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + providerAddress, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + keys, err := readKeys(args[1:]) + if err != nil { + return err + } + + msg := &types.MsgDeleteProviderAttributes{ + Auditor: cctx.GetFromAddress().String(), + Owner: providerAddress.String(), + Keys: keys, + } + + if err = msg.ValidateBasic(); err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, 
[]sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + setCmdProviderFlags(cmd) + + return cmd +} + +func setCmdProviderFlags(cmd *cobra.Command) { + cflags.AddTxFlagsToCmd(cmd) + + if err := cmd.MarkFlagRequired(cflags.FlagFrom); err != nil { + panic(err.Error()) + } +} + +// readAttributes try read attributes from both cobra arguments or query +// if no arguments were provided then query provider and sign all found +// read from stdin uses trick to check if it's file descriptor is a pipe +// which happens when some data is piped for example cat attr.yaml | akash ... +func readAttributes(cmd *cobra.Command, cctx sdkclient.Context, provider string, args []string) (attrtypes.Attributes, error) { + var attr attrtypes.Attributes + + if len(args) != 0 { + for i := 0; i < len(args); i += 2 { + attr = append(attr, attrtypes.Attribute{ + Key: args[i], + Value: args[i+1], + }) + } + } else { + resp, err := ptypes.NewQueryClient(cctx).Provider(cmd.Context(), &ptypes.QueryProviderRequest{Owner: provider}) + if err != nil { + return nil, err + } + + attr = append(attr, resp.Provider.Attributes...) + } + + sort.SliceStable(attr, func(i, j int) bool { + return attr[i].Key < attr[j].Value + }) + + if checkAttributeDuplicates(attr) { + return nil, fmt.Errorf("supplied attributes with duplicate keys") + } + + return attr, nil +} + +func readKeys(args []string) ([]string, error) { + sort.SliceStable(args, func(i, j int) bool { + return args[i] < args[j] + }) + + if checkKeysDuplicates(args) { + return nil, fmt.Errorf("supplied attributes with duplicate keys") + } + + return args, nil +} + +func checkAttributeDuplicates(attr attrtypes.Attributes) bool { + keys := make(map[string]bool) + + for _, entry := range attr { + if _, value := keys[entry.Key]; !value { + keys[entry.Key] = true + } else { + return true + } + } + return false +} + +func checkKeysDuplicates(k []string) bool { + keys := make(map[string]bool) + + for _, entry := range k { + if _, value := keys[entry]; !value { + keys[entry] = true + } else { + return true + } + } + return false +} diff --git a/go/cli/auth_encode_test.go b/go/cli/auth_encode_test.go new file mode 100644 index 00000000..47cfc39d --- /dev/null +++ b/go/cli/auth_encode_test.go @@ -0,0 +1,103 @@ +package cli_test + +// import ( +// "context" +// "encoding/base64" +// "testing" +// +// "github.com/stretchr/testify/require" +// +// "cosmossdk.io/depinject" +// "github.com/cosmos/cosmos-sdk/client" +// "github.com/cosmos/cosmos-sdk/codec" +// "github.com/cosmos/cosmos-sdk/testutil" +// sdk "github.com/cosmos/cosmos-sdk/types" +// authtestutil "github.com/cosmos/cosmos-sdk/x/auth/testutil" +// +// "pkg.akt.dev/go/cli" +// ) +// +// func TestGetCommandEncode(t *testing.T) { +// var ( +// txCfg client.TxConfig +// legacyAmino *codec.LegacyAmino +// codec codec.Codec +// ) +// +// err := depinject.Inject( +// authtestutil.AppConfig, +// &txCfg, +// &legacyAmino, +// &codec, +// ) +// require.NoError(t, err) +// +// cmd := cli.GetEncodeCommand() +// _ = testutil.ApplyMockIODiscardOutErr(cmd) +// +// // Build a test transaction +// builder := txCfg.NewTxBuilder() +// builder.SetGasLimit(50000) +// builder.SetFeeAmount(sdk.Coins{sdk.NewInt64Coin("atom", 150)}) +// builder.SetMemo("foomemo") +// jsonEncoded, err := txCfg.TxJSONEncoder()(builder.GetTx()) +// require.NoError(t, err) +// +// txFile := testutil.WriteToNewTempFile(t, string(jsonEncoded)) +// txFileName := txFile.Name() +// +// ctx := context.Background() +// clientCtx := 
client.Context{}. +// WithTxConfig(txCfg). +// WithCodec(codec) +// ctx = context.WithValue(ctx, cli.ClientContextKey, &clientCtx) +// +// cmd.SetArgs([]string{txFileName}) +// err = cmd.ExecuteContext(ctx) +// require.NoError(t, err) +// } +// +// func TestGetCommandDecode(t *testing.T) { +// var ( +// txCfg client.TxConfig +// legacyAmino *codec.LegacyAmino +// codec codec.Codec +// ) +// +// err := depinject.Inject( +// authtestutil.AppConfig, +// &txCfg, +// &legacyAmino, +// &codec, +// ) +// require.NoError(t, err) +// +// clientCtx := client.Context{}. +// WithTxConfig(txCfg). +// WithCodec(codec) +// +// cmd := cli.GetDecodeCommand() +// _ = testutil.ApplyMockIODiscardOutErr(cmd) +// +// clientCtx = clientCtx.WithTxConfig(txCfg) +// +// // Build a test transaction +// builder := txCfg.NewTxBuilder() +// builder.SetGasLimit(50000) +// builder.SetFeeAmount(sdk.Coins{sdk.NewInt64Coin("atom", 150)}) +// builder.SetMemo("foomemo") +// +// // Encode transaction +// txBytes, err := clientCtx.TxConfig.TxEncoder()(builder.GetTx()) +// require.NoError(t, err) +// +// // Convert the transaction into base64 encoded string +// base64Encoded := base64.StdEncoding.EncodeToString(txBytes) +// +// ctx := context.Background() +// ctx = context.WithValue(ctx, cli.ClientContextKey, &clientCtx) +// +// // Execute the command +// cmd.SetArgs([]string{base64Encoded}) +// require.NoError(t, cmd.ExecuteContext(ctx)) +// } diff --git a/go/cli/auth_flags.go b/go/cli/auth_flags.go new file mode 100644 index 00000000..65e36109 --- /dev/null +++ b/go/cli/auth_flags.go @@ -0,0 +1,213 @@ +package cli + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os" + "path/filepath" + "strings" + + "github.com/cosmos/gogoproto/jsonpb" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/tx/signing" + "github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx" +) + +// GasEstimateResponse defines a response definition for tx gas estimation. +type GasEstimateResponse struct { + GasEstimate uint64 `json:"gas_estimate" yaml:"gas_estimate"` +} + +func (gr GasEstimateResponse) String() string { + return fmt.Sprintf("gas estimate: %d", gr.GasEstimate) +} + +// SignTx signs a transaction managed by the TxBuilder using a `name` key stored in Keybase. +// The new signature is appended to the TxBuilder when overwrite=false or overwritten otherwise. +// Don't perform online validation or lookups if offline is true. +func SignTx(txFactory tx.Factory, clientCtx client.Context, name string, txBuilder client.TxBuilder, offline, overwriteSig bool) error { + k, err := txFactory.Keybase().Key(name) + if err != nil { + return err + } + + // Ledger and Multisigs only support LEGACY_AMINO_JSON signing. 
+ if txFactory.SignMode() == signing.SignMode_SIGN_MODE_UNSPECIFIED && + (k.GetType() == keyring.TypeLedger || k.GetType() == keyring.TypeMulti) { + txFactory = txFactory.WithSignMode(signing.SignMode_SIGN_MODE_LEGACY_AMINO_JSON) + } + + pubKey, err := k.GetPubKey() + if err != nil { + return err + } + addr := sdk.AccAddress(pubKey.Address()) + if !isTxSigner(addr, txBuilder.GetTx().GetSigners()) { + return fmt.Errorf("%s: %s", sdkerrors.ErrorInvalidSigner, name) + } + if !offline { + txFactory, err = populateAccountFromState(txFactory, clientCtx, addr) + if err != nil { + return err + } + } + + return tx.Sign(txFactory, name, txBuilder, overwriteSig) +} + +// SignTxWithSignerAddress attaches a signature to a transaction. +// Don't perform online validation or lookups if offline is true, else +// populate account and sequence numbers from a foreign account. +// This function should only be used when signing with a multisig. For +// normal keys, please use SignTx directly. +func SignTxWithSignerAddress(txFactory tx.Factory, clientCtx client.Context, addr sdk.AccAddress, + name string, txBuilder client.TxBuilder, offline, overwrite bool, +) (err error) { + // Multisigs only support LEGACY_AMINO_JSON signing. + if txFactory.SignMode() == signing.SignMode_SIGN_MODE_UNSPECIFIED { + txFactory = txFactory.WithSignMode(signing.SignMode_SIGN_MODE_LEGACY_AMINO_JSON) + } + + // check whether the address is a signer + if !isTxSigner(addr, txBuilder.GetTx().GetSigners()) { + return fmt.Errorf("%s: %s", sdkerrors.ErrorInvalidSigner, name) + } + + if !offline { + txFactory, err = populateAccountFromState(txFactory, clientCtx, addr) + if err != nil { + return err + } + } + + return tx.Sign(txFactory, name, txBuilder, overwrite) +} + +// ReadTxFromFile and decode a StdTx from the given filename. Can pass "-" to read from stdin. +func ReadTxFromFile(ctx client.Context, filename string) (tx sdk.Tx, err error) { + var data []byte + + if filename == "-" { + data, err = io.ReadAll(os.Stdin) + } else { + data, err = os.ReadFile(filename) + } + + if err != nil { + return + } + + return ctx.TxConfig.TxJSONDecoder()(data) +} + +// ReadTxsFromInput reads multiples txs from the given filename(s). Can pass "-" to read from stdin. +// Unlike ReadTxFromFile, this function does not decode the txs. +func ReadTxsFromInput(txCfg client.TxConfig, filenames ...string) (scanner *BatchScanner, err error) { + if len(filenames) == 0 { + return nil, fmt.Errorf("no file name provided") + } + + var infile io.Reader = os.Stdin + if filenames[0] != "-" { + buf := new(bytes.Buffer) + for _, f := range filenames { + data, err := os.ReadFile(filepath.Clean(f)) + if err != nil { + return nil, fmt.Errorf("couldn't read %s: %w", f, err) + } + + if _, err := buf.WriteString(string(data)); err != nil { + return nil, fmt.Errorf("couldn't write to merged file: %w", err) + } + } + + infile = buf + } + + return NewBatchScanner(txCfg, infile), nil +} + +// NewBatchScanner returns a new BatchScanner to read newline-delimited StdTx transactions from r. +func NewBatchScanner(cfg client.TxConfig, r io.Reader) *BatchScanner { + return &BatchScanner{Scanner: bufio.NewScanner(r), cfg: cfg} +} + +// BatchScanner provides a convenient interface for reading batch data such as a file +// of newline-delimited JSON encoded StdTx. +type BatchScanner struct { + *bufio.Scanner + theTx sdk.Tx + cfg client.TxConfig + unmarshalErr error +} + +// Tx returns the most recent Tx unmarshalled by a call to Scan. 
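+// A minimal usage sketch for the scanner (hypothetical caller; the file name
+// "txs.json" and the error handling are placeholders, the real call pattern is
+// the loop inside makeBatchMultisignCmd further below):
+//
+//	scanner, err := ReadTxsFromInput(txCfg, "txs.json")
+//	if err != nil {
+//		return err
+//	}
+//	for scanner.Scan() {
+//		tx := scanner.Tx() // most recently decoded sdk.Tx
+//		_ = tx             // sign or inspect the decoded tx here
+//	}
+//	if err := scanner.UnmarshalErr(); err != nil {
+//		return err // first JSON decode failure hit while scanning
+//	}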
+func (bs BatchScanner) Tx() sdk.Tx { return bs.theTx } + +// UnmarshalErr returns the first unmarshalling error that was encountered by the scanner. +func (bs BatchScanner) UnmarshalErr() error { return bs.unmarshalErr } + +// Scan advances the Scanner to the next line. +func (bs *BatchScanner) Scan() bool { + if !bs.Scanner.Scan() { + return false + } + + txb, err := bs.cfg.TxJSONDecoder()(bs.Bytes()) + bs.theTx = txb + if err != nil && bs.unmarshalErr == nil { + bs.unmarshalErr = err + return false + } + + return true +} + +func populateAccountFromState( + txBldr tx.Factory, clientCtx client.Context, addr sdk.AccAddress, +) (tx.Factory, error) { + num, seq, err := clientCtx.AccountRetriever.GetAccountNumberSequence(clientCtx, addr) + if err != nil { + return txBldr, err + } + + return txBldr.WithAccountNumber(num).WithSequence(seq), nil +} + +// GetTxEncoder return tx encoder from global sdk configuration if ones is defined. +// Otherwise, returns encoder with default logic. +func GetTxEncoder(cdc *codec.LegacyAmino) (encoder sdk.TxEncoder) { + encoder = sdk.GetConfig().GetTxEncoder() + if encoder == nil { + encoder = legacytx.DefaultTxEncoder(cdc) + } + + return encoder +} + +func ParseQueryResponse(bz []byte) (sdk.SimulationResponse, error) { + var simRes sdk.SimulationResponse + if err := jsonpb.Unmarshal(strings.NewReader(string(bz)), &simRes); err != nil { + return sdk.SimulationResponse{}, err + } + + return simRes, nil +} + +func isTxSigner(user sdk.AccAddress, signers []sdk.AccAddress) bool { + for _, s := range signers { + if bytes.Equal(user.Bytes(), s.Bytes()) { + return true + } + } + + return false +} diff --git a/go/cli/auth_multisign.go b/go/cli/auth_multisign.go new file mode 100644 index 00000000..67498178 --- /dev/null +++ b/go/cli/auth_multisign.go @@ -0,0 +1,422 @@ +package cli + +import ( + "fmt" + "os" + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + + errorsmod "cosmossdk.io/errors" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + kmultisig "github.com/cosmos/cosmos-sdk/crypto/keys/multisig" + "github.com/cosmos/cosmos-sdk/crypto/types/multisig" + sdk "github.com/cosmos/cosmos-sdk/types" + signingtypes "github.com/cosmos/cosmos-sdk/types/tx/signing" + "github.com/cosmos/cosmos-sdk/version" + authclient "github.com/cosmos/cosmos-sdk/x/auth/client" + "github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx" + "github.com/cosmos/cosmos-sdk/x/auth/signing" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// BroadcastReq defines a tx broadcasting request. +type BroadcastReq struct { + Tx legacytx.StdTx `json:"tx" yaml:"tx"` + Mode string `json:"mode" yaml:"mode"` +} + +// GetAuthMultiSignCmd returns the multi-sign command +func GetAuthMultiSignCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "multi-sign [file] [name] [[signature]...]", + Aliases: []string{"multisign"}, + Short: "Generate multisig signatures for transactions generated offline", + Long: strings.TrimSpace( + fmt.Sprintf(`Sign transactions created with the --generate-only flag that require multisig signatures. + +Read one or more signatures from one or more [signature] file, generate a multisig signature compliant to the +multisig key [name], and attach the key name to the transaction read from [file]. + +Example: +$ %s tx multisign transaction.json k1k2k3 k1sig.json k2sig.json k3sig.json + +If --signature-only flag is on, output a JSON representation +of only the generated signature. 
+ +If the --offline flag is on, the client will not reach out to an external node. +Account number or sequence number lookups are not performed so you must +set these parameters manually. + +The current multisig implementation defaults to amino-json sign mode. +The SIGN_MODE_DIRECT sign mode is not supported.' +`, + version.AppName, + ), + ), + RunE: makeMultiSignCmd(), + Args: cobra.MinimumNArgs(3), + } + + cmd.Flags().Bool(cflags.FlagSigOnly, false, "Print only the generated signature, then exit") + cmd.Flags().String(cflags.FlagOutputDocument, "", "The document is written to the given file instead of STDOUT") + cmd.Flags().Bool(cflags.FlagAmino, false, "Generate Amino-encoded JSON suitable for submitting to the txs REST endpoint") + cflags.AddTxFlagsToCmd(cmd) + _ = cmd.Flags().MarkHidden(cflags.FlagOutput) + + return cmd +} + +func makeMultiSignCmd() func(cmd *cobra.Command, args []string) (err error) { + return func(cmd *cobra.Command, args []string) (err error) { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + parsedTx, err := authclient.ReadTxFromFile(clientCtx, args[0]) + if err != nil { + return + } + + txFactory, err := tx.NewFactoryCLI(clientCtx, cmd.Flags()) + if err != nil { + return err + } + if txFactory.SignMode() == signingtypes.SignMode_SIGN_MODE_UNSPECIFIED { + txFactory = txFactory.WithSignMode(signingtypes.SignMode_SIGN_MODE_LEGACY_AMINO_JSON) + } + + txCfg := clientCtx.TxConfig + txBuilder, err := txCfg.WrapTxBuilder(parsedTx) + if err != nil { + return err + } + + k, err := getMultisigRecord(clientCtx, args[1]) + if err != nil { + return err + } + pubKey, err := k.GetPubKey() + if err != nil { + return err + } + + addr, err := k.GetAddress() + if err != nil { + return err + } + + multisigPub := pubKey.(*kmultisig.LegacyAminoPubKey) + multisigSig := multisig.NewMultisig(len(multisigPub.PubKeys)) + if !clientCtx.Offline { + accnum, seq, err := clientCtx.AccountRetriever.GetAccountNumberSequence(clientCtx, addr) + if err != nil { + return err + } + + txFactory = txFactory.WithAccountNumber(accnum).WithSequence(seq) + } + + // read each signature and add it to the multisig if valid + for i := 2; i < len(args); i++ { + sigs, err := unmarshalSignatureJSON(clientCtx, args[i]) + if err != nil { + return err + } + + if txFactory.ChainID() == "" { + return fmt.Errorf("set the chain id with either the --chain-id flag or config file") + } + + for _, sig := range sigs { + signingData := signing.SignerData{ + Address: sdk.AccAddress(sig.PubKey.Address()).String(), + ChainID: txFactory.ChainID(), + AccountNumber: txFactory.AccountNumber(), + Sequence: txFactory.Sequence(), + PubKey: sig.PubKey, + } + + err = signing.VerifySignature(sig.PubKey, signingData, sig.Data, txCfg.SignModeHandler(), txBuilder.GetTx()) + if err != nil { + addr, _ := sdk.AccAddressFromHexUnsafe(sig.PubKey.Address().String()) + return fmt.Errorf("couldn't verify signature for address %s", addr) + } + + if err := multisig.AddSignatureV2(multisigSig, sig, multisigPub.GetPubKeys()); err != nil { + return err + } + } + } + + sigV2 := signingtypes.SignatureV2{ + PubKey: multisigPub, + Data: multisigSig, + Sequence: txFactory.Sequence(), + } + + err = txBuilder.SetSignatures(sigV2) + if err != nil { + return err + } + + sigOnly, _ := cmd.Flags().GetBool(cflags.FlagSigOnly) + + aminoJSON, _ := cmd.Flags().GetBool(cflags.FlagAmino) + + var json []byte + + if aminoJSON { + stdTx, err := tx.ConvertTxToStdTx(clientCtx.LegacyAmino, txBuilder.GetTx()) + if err != nil { + return err + 
} + + req := BroadcastReq{ + Tx: stdTx, + Mode: "sync|async", + } + + json, _ = clientCtx.LegacyAmino.MarshalJSON(req) + + } else { + json, err = marshalSignatureJSON(txCfg, txBuilder, sigOnly) + if err != nil { + return err + } + } + + closeFunc, err := setOutputFile(cmd) + if err != nil { + return err + } + + defer closeFunc() + + cmd.Printf("%s\n", json) + return nil + } +} + +func GetMultiSignBatchCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "multisign-batch [file] [name] [[signature-file]...]", + Aliases: []string{"multi-sign-batch"}, + Short: "Assemble multisig transactions in batch from batch signatures", + Long: strings.TrimSpace( + fmt.Sprintf(`Assemble a batch of multisig transactions generated by batch sign command. + +Read one or more signatures from one or more [signature] file, generate a multisig signature compliant to the +multisig key [name], and attach the key name to the transaction read from [file]. + +Example: +$ %s tx multisign-batch transactions.json multisigk1k2k3 k1sigs.json k2sigs.json k3sig.json + +The current multisig implementation defaults to amino-json sign mode. +The SIGN_MODE_DIRECT sign mode is not supported.' +`, version.AppName, + ), + ), + PreRun: preSignCmd, + RunE: makeBatchMultisignCmd(), + Args: cobra.MinimumNArgs(3), + } + + cmd.Flags().Bool(cflags.FlagNoAutoIncrement, false, "disable sequence auto increment") + cmd.Flags().String( + cflags.FlagMultisig, "", + "Address of the multisig account that the transaction signs on behalf of", + ) + cmd.Flags().String(cflags.FlagOutputDocument, "", "The document is written to the given file instead of STDOUT") + cflags.AddTxFlagsToCmd(cmd) + _ = cmd.Flags().MarkHidden(cflags.FlagOutput) // signing makes sense to output only json + + return cmd +} + +func makeBatchMultisignCmd() func(cmd *cobra.Command, args []string) error { + return func(cmd *cobra.Command, args []string) (err error) { + var clientCtx client.Context + + clientCtx, err = client.GetClientTxContext(cmd) + if err != nil { + return err + } + + txCfg := clientCtx.TxConfig + txFactory, err := tx.NewFactoryCLI(clientCtx, cmd.Flags()) + if err != nil { + return err + } + if txFactory.SignMode() == signingtypes.SignMode_SIGN_MODE_UNSPECIFIED { + txFactory = txFactory.WithSignMode(signingtypes.SignMode_SIGN_MODE_LEGACY_AMINO_JSON) + } + + // reads tx from args[0] + scanner, err := authclient.ReadTxsFromInput(txCfg, args[0]) + if err != nil { + return err + } + + k, err := getMultisigRecord(clientCtx, args[1]) + if err != nil { + return err + } + + var signatureBatch [][]signingtypes.SignatureV2 + for i := 2; i < len(args); i++ { + sigs, err := readSignaturesFromFile(clientCtx, args[i]) + if err != nil { + return err + } + + signatureBatch = append(signatureBatch, sigs) + } + + addr, err := k.GetAddress() + if err != nil { + return err + } + + if !clientCtx.Offline { + accnum, seq, err := clientCtx.AccountRetriever.GetAccountNumberSequence(clientCtx, addr) + if err != nil { + return err + } + + txFactory = txFactory.WithAccountNumber(accnum).WithSequence(seq) + } + + // prepare output document + closeFunc, err := setOutputFile(cmd) + if err != nil { + return err + } + + defer closeFunc() + clientCtx.WithOutput(cmd.OutOrStdout()) + + for i := 0; scanner.Scan(); i++ { + txBldr, err := txCfg.WrapTxBuilder(scanner.Tx()) + if err != nil { + return err + } + pubKey, err := k.GetPubKey() + if err != nil { + return err + } + multisigPub := pubKey.(*kmultisig.LegacyAminoPubKey) + multisigSig := multisig.NewMultisig(len(multisigPub.PubKeys)) + 
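+// signingData gathers what the sign-mode handler needs to reconstruct the exact
+// bytes each participant signed (signer address, chain ID, account number,
+// sequence and the multisig public key); every supplied signature is then checked
+// against those bytes via signing.VerifySignature before being folded into the
+// aggregate multisig signature with multisig.AddSignatureV2.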
signingData := signing.SignerData{ + Address: sdk.AccAddress(pubKey.Address()).String(), + ChainID: txFactory.ChainID(), + AccountNumber: txFactory.AccountNumber(), + Sequence: txFactory.Sequence(), + PubKey: pubKey, + } + + for _, sig := range signatureBatch { + err = signing.VerifySignature(sig[i].PubKey, signingData, sig[i].Data, txCfg.SignModeHandler(), txBldr.GetTx()) + if err != nil { + return fmt.Errorf("couldn't verify signature: %w %v", err, sig) + } + + if err := multisig.AddSignatureV2(multisigSig, sig[i], multisigPub.GetPubKeys()); err != nil { + return err + } + } + + sigV2 := signingtypes.SignatureV2{ + PubKey: multisigPub, + Data: multisigSig, + Sequence: txFactory.Sequence(), + } + + err = txBldr.SetSignatures(sigV2) + if err != nil { + return err + } + + sigOnly, _ := cmd.Flags().GetBool(cflags.FlagSigOnly) + aminoJSON, _ := cmd.Flags().GetBool(cflags.FlagAmino) + + var json []byte + + if aminoJSON { + stdTx, err := tx.ConvertTxToStdTx(clientCtx.LegacyAmino, txBldr.GetTx()) + if err != nil { + return err + } + + req := BroadcastReq{ + Tx: stdTx, + Mode: "sync|async", + } + + json, _ = clientCtx.LegacyAmino.MarshalJSON(req) + + } else { + json, err = marshalSignatureJSON(txCfg, txBldr, sigOnly) + if err != nil { + return err + } + } + + err = clientCtx.PrintString(fmt.Sprintf("%s\n", json)) + if err != nil { + return err + } + + if viper.GetBool(cflags.FlagNoAutoIncrement) { + continue + } + sequence := txFactory.Sequence() + 1 + txFactory = txFactory.WithSequence(sequence) + } + + return scanner.UnmarshalErr() + } +} + +func unmarshalSignatureJSON(clientCtx client.Context, filename string) (sigs []signingtypes.SignatureV2, err error) { + var bytes []byte + if bytes, err = os.ReadFile(filename); err != nil { + return + } + return clientCtx.TxConfig.UnmarshalSignatureJSON(bytes) +} + +func readSignaturesFromFile(ctx client.Context, filename string) (sigs []signingtypes.SignatureV2, err error) { + bz, err := os.ReadFile(filename) + if err != nil { + return nil, err + } + + newString := strings.TrimSuffix(string(bz), "\n") + lines := strings.Split(newString, "\n") + + for _, bz := range lines { + sig, err := ctx.TxConfig.UnmarshalSignatureJSON([]byte(bz)) + if err != nil { + return nil, err + } + + sigs = append(sigs, sig...) 
+ } + return sigs, nil +} + +func getMultisigRecord(clientCtx client.Context, name string) (*keyring.Record, error) { + kb := clientCtx.Keyring + multisigRecord, err := kb.Key(name) + if err != nil { + return nil, errorsmod.Wrap(err, "error getting keybase multisig account") + } + + return multisigRecord, nil +} diff --git a/go/cli/auth_query.go b/go/cli/auth_query.go new file mode 100644 index 00000000..650cfa97 --- /dev/null +++ b/go/cli/auth_query.go @@ -0,0 +1,429 @@ +package cli + +import ( + "encoding/hex" + "fmt" + "strconv" + "strings" + + "github.com/spf13/cobra" + + tmtypes "github.com/cometbft/cometbft/types" + + errorsmod "cosmossdk.io/errors" + "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/query" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/auth/types" + + cflags "pkg.akt.dev/go/cli/flags" + nutils "pkg.akt.dev/go/node/utils" +) + +const ( + typeHash = "hash" + typeAccSeq = "acc_seq" + typeSig = "signature" + + eventFormat = "{eventType}.{eventAttribute}={value}" +) + +// GetQueryAuthCmd returns the transaction commands for this module +func GetQueryAuthCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Querying commands for the auth module", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetQueryAuthAccountCmd(), + GetQueryAuthAccountAddressByIDCmd(), + GetQueryAuthAccountsCmd(), + GetQueryAuthParamsCmd(), + GetQueryAuthModuleAccountsCmd(), + GetQueryAuthModuleAccountByNameCmd(), + ) + + return cmd +} + +// GetQueryAuthParamsCmd returns the command handler for evidence parameter querying. +func GetQueryAuthParamsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "params", + Short: "Query the current auth parameters", + Args: cobra.NoArgs, + Long: strings.TrimSpace(`Query the current auth parameters: + +$ query auth params +`), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + res, err := cl.Query().Auth().Params(ctx, &types.QueryParamsRequest{}) + if err != nil { + return err + } + + return cl.PrintMessage(&res.Params) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryAuthAccountCmd returns a query account that will display the state of the +// account at a given address. +func GetQueryAuthAccountCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "account [address]", + Short: "Query for account by address", + Args: cobra.ExactArgs(1), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + key, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + res, err := cl.Query().Auth().Account(ctx, &types.QueryAccountRequest{Address: key.String()}) + if err != nil { + info, err2 := cl.Node().SyncInfo(ctx) + if err2 != nil { + return err2 + } + + catchingUp := info.CatchingUp + if !catchingUp { + return errorsmod.Wrapf(err, "your node may be syncing, please check node status using `/status`") + } + return err + } + + return cl.PrintMessage(&res.Account) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryAuthAccountAddressByIDCmd returns a query account that will display the account address of a given account id. 
+func GetQueryAuthAccountAddressByIDCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "address-by-acc-num [acc-num]", + Aliases: []string{"address-by-id"}, + Short: "Query for an address by account number", + Args: cobra.ExactArgs(1), + Example: fmt.Sprintf("%s q auth address-by-acc-num 1", version.AppName), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + accNum, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return err + } + + res, err := cl.Query().Auth().AccountAddressByID(ctx, &types.QueryAccountAddressByIDRequest{ + AccountId: accNum, + }) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryAuthAccountsCmd returns a query command that will display a list of accounts +func GetQueryAuthAccountsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "accounts", + Short: "Query all the accounts", + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + res, err := cl.Query().Auth().Accounts(ctx, &types.QueryAccountsRequest{Pagination: pageReq}) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "all-accounts") + + return cmd +} + +// GetQueryAuthModuleAccountsCmd returns a list of all the existing module accounts with their account information and permissions +func GetQueryAuthModuleAccountsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "module-accounts", + Short: "Query all module accounts", + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + res, err := cl.Query().Auth().ModuleAccounts(ctx, &types.QueryModuleAccountsRequest{}) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryAuthModuleAccountByNameCmd returns a command to +func GetQueryAuthModuleAccountByNameCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "module-account [module-name]", + Short: "Query module account info by module name", + Args: cobra.ExactArgs(1), + Example: fmt.Sprintf("%s q auth module-account auth", version.AppName), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + moduleName := args[0] + if len(moduleName) == 0 { + return fmt.Errorf("module name should not be empty") + } + + res, err := cl.Query().Auth().ModuleAccountByName(ctx, &types.QueryModuleAccountByNameRequest{Name: moduleName}) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryAuthTxsByEventsCmd returns a command to search through transactions by events. +func GetQueryAuthTxsByEventsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "txs", + Short: "Query for paginated transactions that match a set of events", + Long: strings.TrimSpace( + fmt.Sprintf(` +Search for transactions that match the exact given events where results are paginated. +Each event takes the form of '%s'. 
Please refer
+to each module's documentation for the full set of events to query for. Each module
+documents its respective events under 'xx_events.md'.
+
+Example:
+$ %s query txs --%s 'message.sender=cosmos1...&message.action=withdraw_delegator_reward' --page 1 --limit 30
+`, eventFormat, version.AppName, cflags.FlagEvents),
+		),
+		PersistentPreRunE: QueryPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, _ []string) error {
+			ctx := cmd.Context()
+			cl := MustQueryClientFromContext(ctx)
+			cctx := cl.ClientContext()
+
+			eventsRaw, _ := cmd.Flags().GetString(cflags.FlagEvents)
+			eventsStr := strings.Trim(eventsRaw, "'")
+
+			var events []string
+			if strings.Contains(eventsStr, "&") {
+				events = strings.Split(eventsStr, "&")
+			} else {
+				events = append(events, eventsStr)
+			}
+
+			var tmEvents []string
+
+			for _, event := range events {
+				if !strings.Contains(event, "=") {
+					return fmt.Errorf("invalid event; event %s should be of the format: %s", event, eventFormat)
+				} else if strings.Count(event, "=") > 1 {
+					return fmt.Errorf("invalid event; event %s should be of the format: %s", event, eventFormat)
+				}
+
+				tokens := strings.Split(event, "=")
+				if tokens[0] == tmtypes.TxHeightKey {
+					event = fmt.Sprintf("%s=%s", tokens[0], tokens[1])
+				} else {
+					event = fmt.Sprintf("%s='%s'", tokens[0], tokens[1])
+				}
+
+				tmEvents = append(tmEvents, event)
+			}
+
+			page, _ := cmd.Flags().GetInt(cflags.FlagPage)
+			limit, _ := cmd.Flags().GetInt(cflags.FlagLimit)
+
+			txs, err := nutils.QueryTxsByEvents(ctx, cctx, tmEvents, page, limit, "")
+			if err != nil {
+				return err
+			}
+
+			return cctx.PrintProto(txs)
+		},
+	}
+
+	cflags.AddQueryFlagsToCmd(cmd)
+	cmd.Flags().Int(cflags.FlagPage, query.DefaultPage, "Query a specific page of paginated results")
+	cmd.Flags().Int(cflags.FlagLimit, query.DefaultLimit, "Query number of transactions results per page returned")
+	cmd.Flags().String(cflags.FlagEvents, "", fmt.Sprintf("list of transaction events in the form of %s", eventFormat))
+	_ = cmd.MarkFlagRequired(cflags.FlagEvents)
+
+	return cmd
+}
+
+// GetQueryAuthTxCmd implements the default command for a tx query.
+func GetQueryAuthTxCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "tx --type=[hash|acc_seq|signature] [hash|acc_seq|signature]",
+		Short: "Query for a transaction by hash, \"<addr>/<seq>\" combination or comma-separated signatures in a committed block",
+		Long: strings.TrimSpace(fmt.Sprintf(`
+Example:
+$ %s query tx <hash>
+$ %s query tx --%s=%s <addr>/<seq>
+$ %s query tx --%s=%s <sig1_base64>,<sig2_base64...>
+`,
+			version.AppName,
+			version.AppName, cflags.FlagType, typeAccSeq,
+			version.AppName, cflags.FlagType, typeSig)),
+		Args:              cobra.ExactArgs(1),
+		PersistentPreRunE: QueryPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+			cl := MustQueryClientFromContext(ctx)
+			cctx := cl.ClientContext()
+
+			typ, _ := cmd.Flags().GetString(cflags.FlagType)
+
+			switch typ {
+			case typeHash:
+				{
+					if args[0] == "" {
+						return fmt.Errorf("argument should be a tx hash")
+					}
+
+					hash, err := hex.DecodeString(args[0])
+					if err != nil {
+						return err
+					}
+
+					// If hash is given, then query the tx by hash.
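+					// Descriptive note: the hex-decoded hash is handed to nutils.QueryTx,
+					// which is expected to look the transaction up in the node's tx index
+					// and return it decoded; an empty result below is reported as "not
+					// found" rather than surfaced as an RPC error.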
+					output, err := nutils.QueryTx(ctx, cctx, hash)
+					if err != nil {
+						return err
+					}
+
+					if output.Empty() {
+						return fmt.Errorf("no transaction found with hash %s", args[0])
+					}
+
+					return cl.PrintMessage(output)
+				}
+			case typeSig:
+				{
+					sigParts, err := ParseSigArgs(args)
+					if err != nil {
+						return err
+					}
+					tmEvents := make([]string, len(sigParts))
+					for i, sig := range sigParts {
+						tmEvents[i] = fmt.Sprintf("%s.%s='%s'", sdk.EventTypeTx, sdk.AttributeKeySignature, sig)
+					}
+
+					txs, err := nutils.QueryTxsByEvents(ctx, cctx, tmEvents, query.DefaultPage, query.DefaultLimit, "")
+					if err != nil {
+						return err
+					}
+					if len(txs.Txs) == 0 {
+						return fmt.Errorf("found no txs matching given signatures")
+					}
+					if len(txs.Txs) > 1 {
+						// This case means there's a bug somewhere else in the code. Should not happen.
+						return sdkerrors.ErrLogic.Wrapf("found %d txs matching given signatures", len(txs.Txs))
+					}
+
+					return cl.PrintMessage(txs.Txs[0])
+				}
+			case typeAccSeq:
+				{
+					if args[0] == "" {
+						return fmt.Errorf("`acc_seq` type takes an argument '<addr>/<seq>'")
+					}
+
+					tmEvents := []string{
+						fmt.Sprintf("%s.%s='%s'", sdk.EventTypeTx, sdk.AttributeKeyAccountSequence, args[0]),
+					}
+					txs, err := nutils.QueryTxsByEvents(ctx, cctx, tmEvents, query.DefaultPage, query.DefaultLimit, "")
+					if err != nil {
+						return err
+					}
+					if len(txs.Txs) == 0 {
+						return fmt.Errorf("found no txs matching given address and sequence combination")
+					}
+					if len(txs.Txs) > 1 {
+						// This case means there's a bug somewhere else in the code. Should not happen.
+						return fmt.Errorf("found %d txs matching given address and sequence combination", len(txs.Txs))
+					}
+
+					return cl.PrintMessage(txs.Txs[0])
+				}
+			default:
+				return fmt.Errorf("unknown --%s value %s", cflags.FlagType, typ)
+			}
+		},
+	}
+
+	cflags.AddQueryFlagsToCmd(cmd)
+	cmd.Flags().String(cflags.FlagType, typeHash, fmt.Sprintf("The type to be used when querying tx, can be one of \"%s\", \"%s\", \"%s\"", typeHash, typeAccSeq, typeSig))
+
+	return cmd
+}
+
+// ParseSigArgs parses comma-separated signatures from the CLI arguments.
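+//
+// As a rough illustration (hypothetical base64 values, not taken from this change's tests):
+//
+//	sigs, err := ParseSigArgs([]string{"c2lnMQ==,c2lnMg=="})
+//	// err == nil, sigs == []string{"c2lnMQ==", "c2lnMg=="}
+//
+//	_, err = ParseSigArgs(nil)
+//	// err != nil: the command expects exactly one comma-separated argument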
+func ParseSigArgs(args []string) ([]string, error) { + if len(args) != 1 || args[0] == "" { + return nil, fmt.Errorf("argument should be comma-separated signatures") + } + + return strings.Split(args[0], ","), nil +} diff --git a/go/cli/auth_query_test.go b/go/cli/auth_query_test.go new file mode 100644 index 00000000..b166abd9 --- /dev/null +++ b/go/cli/auth_query_test.go @@ -0,0 +1,34 @@ +package cli_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "pkg.akt.dev/go/cli" +) + +func TestParseSigs(t *testing.T) { + cases := []struct { + name string + args []string + expErr bool + expNumSigs int + }{ + {"no args", []string{}, true, 0}, + {"empty args", []string{""}, true, 0}, + {"too many args", []string{"foo", "bar"}, true, 0}, + {"1 sig", []string{"foo"}, false, 1}, + {"3 sigs", []string{"foo,bar,baz"}, false, 3}, + } + + for _, tc := range cases { + sigs, err := cli.ParseSigArgs(tc.args) + if tc.expErr { + require.Error(t, err) + } else { + require.NoError(t, err) + require.Equal(t, tc.expNumSigs, len(sigs)) + } + } +} diff --git a/go/cli/auth_suite_test.go b/go/cli/auth_suite_test.go new file mode 100644 index 00000000..43bbeba8 --- /dev/null +++ b/go/cli/auth_suite_test.go @@ -0,0 +1,1633 @@ +package cli_test + +import ( + "bytes" + "context" + "fmt" + "io" + "strings" + + sdkmath "cosmossdk.io/math" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/testutil/testdata" + "github.com/cosmos/cosmos-sdk/types/tx" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + + // "cosmossdk.io/math" + abci "github.com/cometbft/cometbft/abci/types" + rpcclientmock "github.com/cometbft/cometbft/rpc/client/mock" + "github.com/cosmos/cosmos-sdk/client" + // "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + kmultisig "github.com/cosmos/cosmos-sdk/crypto/keys/multisig" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + "github.com/cosmos/cosmos-sdk/testutil" + // "github.com/cosmos/cosmos-sdk/testutil/testdata" + sdk "github.com/cosmos/cosmos-sdk/types" + testutilmod "github.com/cosmos/cosmos-sdk/types/module/testutil" + // "github.com/cosmos/cosmos-sdk/types/tx" + "github.com/cosmos/cosmos-sdk/x/auth" + // authcli "github.com/cosmos/cosmos-sdk/x/auth/client/cli" + // authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/cosmos/cosmos-sdk/x/bank" + // banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + "github.com/cosmos/cosmos-sdk/x/gov" + // govtestutil "github.com/cosmos/cosmos-sdk/x/gov/client/testutil" + // govtypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + + "pkg.akt.dev/go/cli" + cflags "pkg.akt.dev/go/cli/flags" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +type AuthCLITestSuite struct { + CLITestSuite + val sdk.AccAddress + val1 sdk.AccAddress +} + +func (s *AuthCLITestSuite) SetupSuite() { + s.encCfg = testutilmod.MakeTestEncodingConfig(auth.AppModuleBasic{}, bank.AppModuleBasic{}, gov.AppModuleBasic{}) + s.kr = keyring.NewInMemory(s.encCfg.Codec) + s.baseCtx = client.Context{}. + WithKeyring(s.kr). + WithTxConfig(s.encCfg.TxConfig). + WithCodec(s.encCfg.Codec). + WithLegacyAmino(s.encCfg.Amino). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). 
+ WithChainID("test-chain") + + var outBuf bytes.Buffer + ctxGen := func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&sdk.TxResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return s.baseCtx.WithClient(c) + } + s.cctx = ctxGen().WithOutput(&outBuf).WithSignModeStr("direct") + + kb := s.cctx.Keyring + valAcc, _, err := kb.NewMnemonic("newAccount", keyring.English, sdk.FullFundraiserPath, keyring.DefaultBIP39Passphrase, hd.Secp256k1) + s.Require().NoError(err) + s.val, err = valAcc.GetAddress() + s.Require().NoError(err) + + account1, _, err := kb.NewMnemonic("newAccount1", keyring.English, sdk.FullFundraiserPath, keyring.DefaultBIP39Passphrase, hd.Secp256k1) + s.Require().NoError(err) + s.val1, err = account1.GetAddress() + s.Require().NoError(err) + + account2, _, err := kb.NewMnemonic("newAccount2", keyring.English, sdk.FullFundraiserPath, keyring.DefaultBIP39Passphrase, hd.Secp256k1) + s.Require().NoError(err) + pub1, err := account1.GetPubKey() + s.Require().NoError(err) + pub2, err := account2.GetPubKey() + s.Require().NoError(err) + + // Create a dummy account for testing purpose + _, _, err = kb.NewMnemonic("dummyAccount", keyring.English, sdk.FullFundraiserPath, keyring.DefaultBIP39Passphrase, hd.Secp256k1) + s.Require().NoError(err) + + multi := kmultisig.NewLegacyAminoPubKey(2, []cryptotypes.PubKey{pub1, pub2}) + _, err = kb.SaveMultisig("multi", multi) + s.Require().NoError(err) +} + +func (s *AuthCLITestSuite) TestCLIValidateSignatures() { + sendTokens := sdk.NewCoins( + sdk.NewCoin("testtoken", sdkmath.NewInt(10)), + sdk.NewCoin("uakt", sdkmath.NewInt(10))) + + res, err := s.createBankMsg( + s.cctx, + s.val, + sendTokens, + cli.TestFlags().WithGenerateOnly()...) + s.Require().NoError(err) + + // write unsigned tx to file + unsignedTx := testutil.WriteToNewTempFile(s.T(), res.String()) + defer func() { + _ = unsignedTx.Close() + }() + + res, err = clitestutil.TxSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + unsignedTx.Name(), + ). + WithFrom(s.val.String()). + WithSignMode(cflags.SignModeLegacyAminoJSON)...) + s.Require().NoError(err) + signedTx, err := s.cctx.TxConfig.TxJSONDecoder()(res.Bytes()) + s.Require().NoError(err) + + signedTxFile := testutil.WriteToNewTempFile(s.T(), res.String()) + defer func() { + _ = signedTxFile.Close() + }() + + txBuilder, err := s.cctx.TxConfig.WrapTxBuilder(signedTx) + s.Require().NoError(err) + _, err = clitestutil.TxValidateSignaturesExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + signedTxFile.Name(), + )...) 
+ s.Require().NoError(err) + + txBuilder.SetMemo("MODIFIED TX") + bz, err := s.cctx.TxConfig.TxJSONEncoder()(txBuilder.GetTx()) + s.Require().NoError(err) + + modifiedTxFile := testutil.WriteToNewTempFile(s.T(), string(bz)) + defer func() { + _ = modifiedTxFile.Close() + }() + + _, err = clitestutil.TxValidateSignaturesExec(context.Background(), s.cctx, modifiedTxFile.Name()) + s.Require().EqualError(err, "signatures validation failed") +} + +func (s *AuthCLITestSuite) TestCLISignBatch() { + sendTokens := sdk.NewCoins( + sdk.NewCoin("testtoken", sdkmath.NewInt(10)), + sdk.NewCoin("uakt", sdkmath.NewInt(10)), + ) + + generatedStd, err := s.createBankMsg( + s.cctx, + s.val, + sendTokens, + fmt.Sprintf("--%s=true", cflags.FlagGenerateOnly)) + s.Require().NoError(err) + + outputFile := testutil.WriteToNewTempFile(s.T(), strings.Repeat(generatedStd.String(), 3)) + defer func() { + _ = outputFile.Close() + }() + s.cctx.HomeDir = strings.Replace(s.cctx.HomeDir, "simd", "simcli", 1) + + // sign-batch file - offline is set but account-number and sequence are not + _, err = clitestutil.TxSignBatchExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + outputFile.Name(), + ). + WithFrom(s.val.String()). + WithChainID(s.cctx.ChainID). + WithOffline()...) + s.Require().EqualError(err, "required flag(s) \"account-number\", \"sequence\" not set") + + // sign-batch file - offline and sequence is set but account-number is not set + _, err = clitestutil.TxSignBatchExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + outputFile.Name(), + ). + WithFrom(s.val.String()). + WithChainID(s.cctx.ChainID). + WithOffline(). + WithSequence(1)...) + s.Require().EqualError(err, "required flag(s) \"account-number\" not set") + + // sign-batch file - offline and account-number is set but sequence is not set + _, err = clitestutil.TxSignBatchExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + outputFile.Name(), + ). + WithFrom(s.val.String()). + WithChainID(s.cctx.ChainID). + WithOffline(). + WithAccountNumber(1)...) + s.Require().EqualError(err, "required flag(s) \"sequence\" not set") +} + +func (s *AuthCLITestSuite) TestCLIQueryTxCmdByHash() { + sendTokens := sdk.NewInt64Coin("uakt", 10) + + // Send coins. + out, err := s.createBankMsg( + s.cctx, s.val, + sdk.NewCoins(sendTokens), + ) + s.Require().NoError(err) + + var txRes sdk.TxResponse + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), &txRes)) + + testCases := []struct { + name string + args []string + expCmdOutput string + }{ + { + "not enough args", + []string{}, + "", + }, + { + "with invalid hash", + cli.TestFlags(). + With("somethinginvalid"). + WithOutputJSON(), + `[somethinginvalid --output=json]`, + }, + { + "with valid and not existing hash", + cli.TestFlags(). + With("C7E7D3A86A17AB3A321172239F3B61357937AF0F25D9FA4D2F4DCCAD9B0D7747"). + WithOutputJSON(), + `[C7E7D3A86A17AB3A321172239F3B61357937AF0F25D9FA4D2F4DCCAD9B0D7747 --output=json`, + }, + { + "happy case", + cli.TestFlags(). + With(txRes.TxHash). + WithOutputJSON(), + fmt.Sprintf("%s --%s=json", txRes.TxHash, cflags.FlagOutput), + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryAuthTxCmd() + cmd.SetArgs(tc.args) + + if len(tc.args) != 0 { + s.Require().Contains(fmt.Sprint(cmd), tc.expCmdOutput) + } + }) + } +} + +func (s *AuthCLITestSuite) TestCLIQueryTxCmdByEvents() { + testCases := []struct { + name string + args []string + expCmdOutput string + }{ + { + "invalid --type", + cli.TestFlags(). 
+ WithType("foo"). + With("bar"). + WithOutputJSON(), + "--type=foo bar --output=json", + }, + { + "--type=acc_seq with no addr+seq", + cli.TestFlags(). + WithType("acc_seq"). + With(""). + WithOutputJSON(), + "--type=acc_seq --output=json", + }, + { + "non-existing addr+seq combo", + cli.TestFlags(). + WithType("acc_seq"). + With("foobar"). + WithOutputJSON(), + "--type=acc_seq foobar --output=json", + }, + { + "--type=signature with no signature", + cli.TestFlags(). + WithType("signature"). + With(""). + WithOutputJSON(), + "--type=signature --output=json", + }, + { + "non-existing signatures", + cli.TestFlags(). + WithType("signature"). + With("foo"). + WithOutputJSON(), + "--type=signature foo --output=json", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryAuthTxCmd() + cmd.SetArgs(tc.args) + + if len(tc.args) != 0 { + s.Require().Contains(fmt.Sprint(cmd), tc.expCmdOutput) + } + }) + } +} + +func (s *AuthCLITestSuite) TestCLIQueryTxsCmdByEvents() { + testCases := []struct { + name string + args []string + expCmdOutput string + }{ + { + "fee event happy case", + cli.TestFlags(). + WithEvents(fmt.Sprintf("tx.fee=%s", sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10))).String())). + WithOutputJSON(), + "", + }, + { + "no matching fee event", + cli.TestFlags(). + WithEvents(fmt.Sprintf("tx.fee=%s", sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(0))).String())). + WithOutputJSON(), + "", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryAuthTxsByEventsCmd() + + if len(tc.args) != 0 { + s.Require().Contains(fmt.Sprint(cmd), tc.expCmdOutput) + } + }) + } +} + +func (s *AuthCLITestSuite) TestCLISendGenerateSignAndBroadcast() { + sendTokens := sdk.NewCoin("uakt", sdk.TokensFromConsensusPower(10, cli.DefaultPowerReduction)) + + normalGeneratedTx, err := s.createBankMsg( + s.cctx, + s.val, + sdk.NewCoins(sendTokens), + cli.TestFlags(). + WithGas(cflags.DefaultGasLimit). + WithGenerateOnly()...) + s.Require().NoError(err) + + txCfg := s.cctx.TxConfig + + normalGeneratedStdTx, err := txCfg.TxJSONDecoder()(normalGeneratedTx.Bytes()) + s.Require().NoError(err) + + txBuilder, err := txCfg.WrapTxBuilder(normalGeneratedStdTx) + s.Require().NoError(err) + s.Require().Equal(txBuilder.GetTx().GetGas(), uint64(cflags.DefaultGasLimit)) + s.Require().Equal(len(txBuilder.GetTx().GetMsgs()), 1) + + sigs, err := txBuilder.GetTx().GetSignaturesV2() + s.Require().NoError(err) + s.Require().Equal(0, len(sigs)) + + // Test generate sendTx with --gas=$amount + limitedGasGeneratedTx, err := s.createBankMsg( + s.cctx, + s.val, + sdk.NewCoins(sendTokens), + cli.TestFlags(). + WithGas(100). + WithGenerateOnly()...) + s.Require().NoError(err) + + limitedGasStdTx, err := txCfg.TxJSONDecoder()(limitedGasGeneratedTx.Bytes()) + s.Require().NoError(err) + + txBuilder, err = txCfg.WrapTxBuilder(limitedGasStdTx) + s.Require().NoError(err) + s.Require().Equal(txBuilder.GetTx().GetGas(), uint64(100)) + s.Require().Equal(len(txBuilder.GetTx().GetMsgs()), 1) + + sigs, err = txBuilder.GetTx().GetSignaturesV2() + s.Require().NoError(err) + s.Require().Equal(0, len(sigs)) + + // Test generate sendTx, estimate gas + finalGeneratedTx, err := s.createBankMsg( + s.cctx, + s.val, + sdk.NewCoins(sendTokens), + cli.TestFlags(). + WithGas(cflags.DefaultGasLimit). + WithGenerateOnly()...) 
+ s.Require().NoError(err) + + finalStdTx, err := txCfg.TxJSONDecoder()(finalGeneratedTx.Bytes()) + s.Require().NoError(err) + + txBuilder, err = txCfg.WrapTxBuilder(finalStdTx) + s.Require().NoError(err) + s.Require().Equal(uint64(flags.DefaultGasLimit), txBuilder.GetTx().GetGas()) + s.Require().Equal(len(finalStdTx.GetMsgs()), 1) + + // Write the output to disk + unsignedTxFile := testutil.WriteToNewTempFile(s.T(), finalGeneratedTx.String()) + defer func() { + _ = unsignedTxFile.Close() + }() + + // Test validate-signatures + res, err := clitestutil.TxValidateSignaturesExec( + context.Background(), + s.cctx, + unsignedTxFile.Name()) + s.Require().EqualError(err, "signatures validation failed") + s.Require().True(strings.Contains(res.String(), fmt.Sprintf("Signers:\n 0: %v\n\nSignatures:\n\n", s.val.String()))) + + // Test sign + + // Does not work in offline mode + _, err = clitestutil.TxSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With(unsignedTxFile.Name()). + WithFrom(s.val.String()). + WithOffline()...) + s.Require().EqualError(err, "required flag(s) \"account-number\", \"sequence\" not set") + + // But works offline if we set account number and sequence + s.cctx.HomeDir = strings.Replace(s.cctx.HomeDir, "simd", "simcli", 1) + _, err = clitestutil.TxSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With(unsignedTxFile.Name()). + WithFrom(s.val.String()). + WithOffline(). + WithAccountNumber(1). + WithSequence(1)...) + s.Require().NoError(err) + + // Sign transaction + signedTx, err := clitestutil.TxSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With(unsignedTxFile.Name()). + WithFrom(s.val.String())...) + s.Require().NoError(err) + + signedFinalTx, err := txCfg.TxJSONDecoder()(signedTx.Bytes()) + s.Require().NoError(err) + + txBuilder, err = s.cctx.TxConfig.WrapTxBuilder(signedFinalTx) + s.Require().NoError(err) + s.Require().Equal(len(txBuilder.GetTx().GetMsgs()), 1) + + sigs, err = txBuilder.GetTx().GetSignaturesV2() + s.Require().NoError(err) + s.Require().Equal(1, len(sigs)) + s.Require().Equal(s.val.String(), txBuilder.GetTx().GetSigners()[0].String()) + + // Write the output to disk + signedTxFile := testutil.WriteToNewTempFile(s.T(), signedTx.String()) + defer func() { + _ = signedTxFile.Close() + }() + + // validate Signature + res, err = clitestutil.TxValidateSignaturesExec( + context.Background(), + s.cctx, + signedTxFile.Name()) + s.Require().NoError(err) + s.Require().True(strings.Contains(res.String(), "[OK]")) + + // Test broadcast + + // Does not work in offline mode + _, err = clitestutil.TxBroadcastExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With(signedTxFile.Name()). + WithFrom(s.val.String()). + WithOffline()...) + s.Require().EqualError(err, "cannot broadcast tx during offline mode") + + // Broadcast correct transaction. + _, err = clitestutil.TxBroadcastExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With(signedTxFile.Name()). + WithFrom(s.val.String()). + WithBroadcastModeSync()...) + s.Require().NoError(err) +} + +func (s *AuthCLITestSuite) TestCLIMultisignInsufficientCosigners() { + // Fetch account and a multisig info + account1, err := s.cctx.Keyring.Key("newAccount1") + s.Require().NoError(err) + + multisigRecord, err := s.cctx.Keyring.Key("multi") + s.Require().NoError(err) + + addr, err := multisigRecord.GetAddress() + s.Require().NoError(err) + // Send coins from validator to multisig. 
+ _, err = s.createBankMsg( + s.cctx, + addr, + sdk.NewCoins( + sdk.NewInt64Coin("uakt", 10), + ), + ) + s.Require().NoError(err) + + // Generate multisig transaction. + multiGeneratedTx, err := clitestutil.ExecSend( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + addr.String(), + s.val.String(), + sdk.NewCoins( + sdk.NewInt64Coin("uakt", 5), + ).String(), + ). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithGenerateOnly()...) + s.Require().NoError(err) + + // Save tx to file + multiGeneratedTxFile := testutil.WriteToNewTempFile(s.T(), multiGeneratedTx.String()) + defer func() { + _ = multiGeneratedTxFile.Close() + }() + + // Multisign, sign with one signature + s.cctx.HomeDir = strings.Replace(s.cctx.HomeDir, "simd", "simcli", 1) + addr1, err := account1.GetAddress() + s.Require().NoError(err) + + account1Signature, err := clitestutil.TxSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + multiGeneratedTxFile.Name(), + ). + WithFrom(addr1.String()). + WithSignMode(cflags.SignModeLegacyAminoJSON). + WithMultisig(addr.String())...) + s.Require().NoError(err) + + sign1File := testutil.WriteToNewTempFile(s.T(), account1Signature.String()) + defer func() { + _ = sign1File.Close() + }() + + multiSigWith1Signature, err := clitestutil.TxMultiSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + multiGeneratedTxFile.Name(), + multisigRecord.Name, + sign1File.Name(), + )...) + s.Require().NoError(err) + + // Save tx to file + multiSigWith1SignatureFile := testutil.WriteToNewTempFile(s.T(), multiSigWith1Signature.String()) + defer func() { + _ = multiSigWith1SignatureFile.Close() + }() + + _, err = clitestutil.TxValidateSignaturesExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + multiSigWith1SignatureFile.Name())...) + s.Require().Error(err) +} + +func (s *AuthCLITestSuite) TestCLIEncode() { + sendTokens := sdk.NewCoin("uakt", sdk.TokensFromConsensusPower(10, cli.DefaultPowerReduction)) + + normalGeneratedTx, err := s.createBankMsg( + s.cctx, + s.val, + sdk.NewCoins(sendTokens), + cli.TestFlags(). + WithGenerateOnly(). + WithNote("deadbeef"). + WithFrom(s.val.String())...) + s.Require().NoError(err) + + savedTxFile := testutil.WriteToNewTempFile(s.T(), normalGeneratedTx.String()) + defer func() { + _ = savedTxFile.Close() + }() + + // Encode + encodeExec, err := clitestutil.TxEncodeExec( + context.Background(), + s.cctx, + savedTxFile.Name()) + s.Require().NoError(err) + + trimmedBase64 := strings.Trim(encodeExec.String(), "\"\n") + // Check that the transaction decodes as expected + decodedTx, err := clitestutil.TxDecodeExec( + context.Background(), + s.cctx, + trimmedBase64) + s.Require().NoError(err) + + txCfg := s.cctx.TxConfig + theTx, err := txCfg.TxJSONDecoder()(decodedTx.Bytes()) + s.Require().NoError(err) + txBuilder, err := s.cctx.TxConfig.WrapTxBuilder(theTx) + s.Require().NoError(err) + s.Require().Equal("deadbeef", txBuilder.GetTx().GetMemo()) +} + +func (s *AuthCLITestSuite) TestCLIMultisignSortSignatures() { + // Generate 2 accounts and a multisig. + account1, err := s.cctx.Keyring.Key("newAccount1") + s.Require().NoError(err) + + account2, err := s.cctx.Keyring.Key("newAccount2") + s.Require().NoError(err) + + multisigRecord, err := s.cctx.Keyring.Key("multi") + s.Require().NoError(err) + + // Generate dummy account which is not a part of multisig. 
+ dummyAcc, err := s.cctx.Keyring.Key("dummyAccount") + s.Require().NoError(err) + + addr, err := multisigRecord.GetAddress() + s.Require().NoError(err) + + // Generate multisig transaction. + multiGeneratedTx, err := clitestutil.ExecSend( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + addr.String(), + s.val.String(), + sdk.NewCoins( + sdk.NewInt64Coin("uakt", 5), + ).String(), + ). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithGenerateOnly()...) + s.Require().NoError(err) + + // Save tx to file + multiGeneratedTxFile := testutil.WriteToNewTempFile(s.T(), multiGeneratedTx.String()) + defer func() { + _ = multiGeneratedTxFile.Close() + }() + + // Sign with account1 + addr1, err := account1.GetAddress() + s.Require().NoError(err) + s.cctx.HomeDir = strings.Replace(s.cctx.HomeDir, "simd", "simcli", 1) + account1Signature, err := clitestutil.TxSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + multiGeneratedTxFile.Name(), + ). + WithFrom(addr1.String()). + WithMultisig(addr.String()). + WithSignMode(cflags.SignModeLegacyAminoJSON)...) + s.Require().NoError(err) + + sign1File := testutil.WriteToNewTempFile(s.T(), account1Signature.String()) + defer func() { + _ = sign1File.Close() + }() + + // Sign with account2 + addr2, err := account2.GetAddress() + s.Require().NoError(err) + account2Signature, err := clitestutil.TxSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + multiGeneratedTxFile.Name(), + ). + WithFrom(addr2.String()). + WithMultisig(addr.String()). + WithSignMode(cflags.SignModeLegacyAminoJSON)...) + s.Require().NoError(err) + + sign2File := testutil.WriteToNewTempFile(s.T(), account2Signature.String()) + defer func() { + _ = sign2File.Close() + }() + + // Sign with dummy account + dummyAddr, err := dummyAcc.GetAddress() + s.Require().NoError(err) + _, err = clitestutil.TxSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + multiGeneratedTxFile.Name(), + ). + WithFrom(dummyAddr.String()). + WithMultisig(addr.String()). + WithSignMode(cflags.SignModeLegacyAminoJSON)...) + s.Require().Error(err) + s.Require().Contains(err.Error(), "signing key is not a part of multisig key") + + multiSigWith2Signatures, err := clitestutil.TxMultiSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + multiGeneratedTxFile.Name(), + multisigRecord.Name, + sign1File.Name(), + sign2File.Name()). + WithSignMode(cflags.SignModeLegacyAminoJSON)...) + s.Require().NoError(err) + + // Write the output to disk + signedTxFile := testutil.WriteToNewTempFile(s.T(), multiSigWith2Signatures.String()) + defer func() { + _ = signedTxFile.Close() + }() + + _, err = clitestutil.TxValidateSignaturesExec( + context.Background(), + s.cctx, + signedTxFile.Name()) + s.Require().NoError(err) + + _, err = clitestutil.TxBroadcastExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With(signedTxFile.Name()). + WithBroadcastModeSync()...) + s.Require().NoError(err) +} + +func (s *AuthCLITestSuite) TestSignWithMultisig() { + // Generate a account for signing. 
+ account1, err := s.cctx.Keyring.Key("newAccount1") + s.Require().NoError(err) + + addr1, err := account1.GetAddress() + s.Require().NoError(err) + + // Create an address that is not in the keyring, will be used to simulate `--multisig` + multisig := "akash1hd6fsrvnz6qkp87s3u86ludegq97agxsqdr9ad" + multisigAddr, err := sdk.AccAddressFromBech32(multisig) + s.Require().NoError(err) + + // Generate a transaction for testing --multisig with an address not in the keyring. + multisigTx, err := clitestutil.ExecSend( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + s.val.String(), + s.val.String(), + sdk.NewCoins( + sdk.NewInt64Coin("uakt", 5), + ).String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithGenerateOnly(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10))))...) + s.Require().NoError(err) + + // Save multi tx to file + multiGeneratedTx2File := testutil.WriteToNewTempFile(s.T(), multisigTx.String()) + defer func() { + _ = multiGeneratedTx2File.Close() + }() + + // Sign using multisig. We're signing a tx on behalf of the multisig address, + // even though the tx signer is NOT the multisig address. This is fine though, + // as the main point of this test is to test the `--multisig` flag with an address + // that is not in the keyring. + _, err = clitestutil.TxSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With(multiGeneratedTx2File.Name()). + WithFrom(addr1.String()). + WithMultisig(multisigAddr.String()). + WithSignMode(cflags.SignModeLegacyAminoJSON)...) + + s.Require().Contains(err.Error(), "error getting account from keybase") +} + +func (s *AuthCLITestSuite) TestCLIMultisign() { + // Generate 2 accounts and a multisig. + account1, err := s.cctx.Keyring.Key("newAccount1") + s.Require().NoError(err) + + account2, err := s.cctx.Keyring.Key("newAccount2") + s.Require().NoError(err) + + multisigRecord, err := s.cctx.Keyring.Key("multi") + s.Require().NoError(err) + + addr, err := multisigRecord.GetAddress() + s.Require().NoError(err) + + // Generate multisig transaction. + multiGeneratedTx, err := clitestutil.ExecSend( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + addr.String(), + s.val.String(), + sdk.NewCoins( + sdk.NewInt64Coin("uakt", 5), + ).String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithGenerateOnly(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10))))..., + ) + s.Require().NoError(err) + + // Save tx to file + multiGeneratedTxFile := testutil.WriteToNewTempFile(s.T(), multiGeneratedTx.String()) + defer func() { + _ = multiGeneratedTxFile.Close() + }() + + addr1, err := account1.GetAddress() + s.Require().NoError(err) + // Sign with account1 + s.cctx.HomeDir = strings.Replace(s.cctx.HomeDir, "simd", "simcli", 1) + account1Signature, err := clitestutil.TxSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With(multiGeneratedTxFile.Name()). + WithFrom(addr1.String()). + WithMultisig(addr.String()). + WithSignMode(cflags.SignModeLegacyAminoJSON)...) + s.Require().NoError(err) + + sign1File := testutil.WriteToNewTempFile(s.T(), account1Signature.String()) + defer func() { + _ = sign1File.Close() + }() + + addr2, err := account2.GetAddress() + s.Require().NoError(err) + // Sign with account2 + account2Signature, err := clitestutil.TxSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + multiGeneratedTxFile.Name()). + WithFrom(addr2.String()). + WithMultisig(addr.String()). + WithSignMode(cflags.SignModeLegacyAminoJSON)...) 
+ s.Require().NoError(err) + + sign2File := testutil.WriteToNewTempFile(s.T(), account2Signature.String()) + defer func() { + _ = sign2File.Close() + }() + + s.cctx.Offline = false + multiSigWith2Signatures, err := clitestutil.TxMultiSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + multiGeneratedTxFile.Name(), + multisigRecord.Name, + sign1File.Name(), + sign2File.Name()). + WithSignMode(cflags.SignModeLegacyAminoJSON)...) + s.Require().NoError(err) + + // Write the output to disk + signedTxFile := testutil.WriteToNewTempFile(s.T(), multiSigWith2Signatures.String()) + defer func() { + _ = signedTxFile.Close() + }() + + _, err = clitestutil.TxValidateSignaturesExec(context.Background(), s.cctx, signedTxFile.Name()) + s.Require().NoError(err) + + _, err = clitestutil.TxBroadcastExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With(signedTxFile.Name()). + WithBroadcastModeSync()...) + s.Require().NoError(err) +} + +func (s *AuthCLITestSuite) TestSignBatchMultisig() { + // Fetch 2 accounts and a multisig. + account1, err := s.cctx.Keyring.Key("newAccount1") + s.Require().NoError(err) + account2, err := s.cctx.Keyring.Key("newAccount2") + s.Require().NoError(err) + multisigRecord, err := s.cctx.Keyring.Key("multi") + s.Require().NoError(err) + + addr, err := multisigRecord.GetAddress() + s.Require().NoError(err) + // Send coins from validator to multisig. + sendTokens := sdk.NewInt64Coin("uakt", 10) + _, err = s.createBankMsg( + s.cctx, + addr, + sdk.NewCoins(sendTokens), + ) + s.Require().NoError(err) + + generatedStd, err := clitestutil.ExecSend( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + addr.String(), + s.val.String(), + sdk.NewCoins( + sdk.NewCoin("uakt", sdkmath.NewInt(1)), + ).String(), + ). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithGenerateOnly()..., + ) + s.Require().NoError(err) + + // Write the output to disk + filename := testutil.WriteToNewTempFile(s.T(), strings.Repeat(generatedStd.String(), 1)) + defer func() { + _ = filename.Close() + }() + + s.cctx.HomeDir = strings.Replace(s.cctx.HomeDir, "simd", "simcli", 1) + + addr1, err := account1.GetAddress() + s.Require().NoError(err) + // sign-batch file + res, err := clitestutil.TxSignBatchExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + filename.Name(), + ). + WithFrom(addr1.String()). + WithChainID(s.cctx.ChainID). + WithSignatureOnly(). + WithMultisig(addr.String()). + WithSignMode(cflags.SignModeLegacyAminoJSON)...) + s.Require().NoError(err) + s.Require().Equal(1, len(strings.Split(strings.Trim(res.String(), "\n"), "\n"))) + // write sigs to file + file1 := testutil.WriteToNewTempFile(s.T(), res.String()) + defer func() { + _ = file1.Close() + }() + + addr2, err := account2.GetAddress() + s.Require().NoError(err) + // sign-batch file with account2 + res, err = clitestutil.TxSignBatchExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + filename.Name(), + ). + WithFrom(addr2.String()). + WithChainID(s.cctx.ChainID). + WithSignatureOnly(). + WithMultisig(addr.String()). + WithSignMode(cflags.SignModeLegacyAminoJSON)...) + s.Require().NoError(err) + s.Require().Equal(1, len(strings.Split(strings.Trim(res.String(), "\n"), "\n"))) + // write sigs to file2 + file2 := testutil.WriteToNewTempFile(s.T(), res.String()) + defer func() { + _ = file2.Close() + }() + _, err = clitestutil.TxMultiSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). 
+ With( + filename.Name(), + multisigRecord.Name, + file1.Name(), + file2.Name()). + WithSignMode(cflags.SignModeLegacyAminoJSON)...) + s.Require().NoError(err) +} + +func (s *AuthCLITestSuite) TestGetBroadcastCommandOfflineFlag() { + cmd := cli.GetBroadcastCommand() + _ = testutil.ApplyMockIODiscardOutErr(cmd) + cmd.SetArgs(cli.TestFlags().With("").WithOffline()) + + s.Require().EqualError(cmd.Execute(), "cannot broadcast tx during offline mode") +} + +func (s *AuthCLITestSuite) TestGetBroadcastCommandWithoutOfflineFlag() { + txCfg := s.cctx.TxConfig + cctx := client.Context{} + cctx = cctx.WithTxConfig(txCfg).WithCodec(s.cctx.Codec).WithLegacyAmino(s.cctx.LegacyAmino) + + // Create new file with tx + builder := txCfg.NewTxBuilder() + builder.SetGasLimit(200000) + from, err := sdk.AccAddressFromBech32("akash1cxlt8kznps92fwu3j6npahx4mjfutydy5g5de5") + s.Require().NoError(err) + + to, err := sdk.AccAddressFromBech32("akash1cxlt8kznps92fwu3j6npahx4mjfutydy5g5de5") + s.Require().NoError(err) + + err = builder.SetMsgs(banktypes.NewMsgSend(from, to, sdk.Coins{sdk.NewInt64Coin("uakt", 10000)})) + s.Require().NoError(err) + + txContents, err := txCfg.TxJSONEncoder()(builder.GetTx()) + s.Require().NoError(err) + + txFile := testutil.WriteToNewTempFile(s.T(), string(txContents)) + defer func() { + _ = txFile.Close() + }() + + ctx := context.Background() + ctx = context.WithValue(ctx, client.ClientContextKey, &cctx) + + cmd := cli.GetBroadcastCommand() + _, out := testutil.ApplyMockIO(cmd) + + cmd.SetArgs( + cli.TestFlags(). + With(txFile.Name()). + WithBroadcastModeSync(), + ) + + err = cmd.ExecuteContext(ctx) + + s.Require().Error(err) + s.Require().Contains(err.Error(), "connect: connection refused") + s.Require().Contains(out.String(), "connect: connection refused") +} + +func (s *AuthCLITestSuite) TestQueryParamsCmd() { + testCases := []struct { + name string + args []string + expectErr bool + }{ + { + "happy case", + cli.TestFlags(). + WithOutputJSON(), + false, + }, + { + "with specific height", + cli.TestFlags(). + WithHeight(1). + WithOutputJSON(), + false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryAuthParamsCmd() + cctx := s.cctx + + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + s.Require().NotEqual("internal", err.Error()) + } else { + var authParams authtypes.Params + s.Require().NoError(err) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), &authParams)) + s.Require().NotNil(authParams.MaxMemoCharacters) + } + }) + } +} + +// TestTxWithoutPublicKey makes sure sending a proto tx message without the +// public key doesn't cause any error in the RPC layer (broadcast). +// See https://github.com/cosmos/cosmos-sdk/issues/7585 for more details. +func (s *AuthCLITestSuite) TestTxWithoutPublicKey() { + txCfg := s.cctx.TxConfig + + // Create a txBuilder with an unsigned tx. + txBuilder := txCfg.NewTxBuilder() + msg := banktypes.NewMsgSend( + s.val, + s.val, + sdk.NewCoins( + sdk.NewCoin("Stake", sdkmath.NewInt(10)), + )) + err := txBuilder.SetMsgs(msg) + s.Require().NoError(err) + + txBuilder.SetFeeAmount(sdk.NewCoins(sdk.NewCoin("Stake", sdkmath.NewInt(150)))) + txBuilder.SetGasLimit(testdata.NewTestGasLimit()) + + // Create a file with the unsigned tx. 
+ txJSON, err := txCfg.TxJSONEncoder()(txBuilder.GetTx()) + s.Require().NoError(err) + + unsignedTxFile := testutil.WriteToNewTempFile(s.T(), string(txJSON)) + defer func() { + _ = unsignedTxFile.Close() + }() + + // Sign the file with the unsignedTx. + signedTx, err := clitestutil.TxSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With(unsignedTxFile.Name()). + WithFrom(s.val.String()). + WithOverwrite()...) + s.Require().NoError(err) + + // Remove the signerInfo's `public_key` field manually from the signedTx. + // Note: this method is only used for test purposes! In general, one should + // use txBuilder and TxEncoder/TxDecoder to manipulate txs. + var tx tx.Tx + err = s.cctx.Codec.UnmarshalJSON(signedTx.Bytes(), &tx) + s.Require().NoError(err) + + tx.AuthInfo.SignerInfos[0].PublicKey = nil + // Re-encode the tx again, to another file. + txJSON, err = s.cctx.Codec.MarshalJSON(&tx) + s.Require().NoError(err) + + signedTxFile := testutil.WriteToNewTempFile(s.T(), string(txJSON)) + defer func() { + _ = signedTxFile.Close() + }() + s.Require().True(strings.Contains(string(txJSON), "\"public_key\":null")) + + // Broadcast tx, test that it shouldn't panic. + out, err := clitestutil.TxBroadcastExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With(signedTxFile.Name()). + WithBroadcastModeSync()...) + s.Require().NoError(err) + + var res sdk.TxResponse + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), &res)) + s.Require().NotEqual(0, res.Code) +} + +// TestSignWithMultiSignersAminoJSON tests the case where a transaction with 2 +// messages which has to be signed with 2 different keys. Sign and append the +// signatures using the CLI with Amino signing mode. Finally, send the +// transaction to the blockchain. +func (s *AuthCLITestSuite) TestSignWithMultiSignersAminoJSON() { + val0, val1 := s.val, s.val1 + val0Coin := sdk.NewCoin("test1token", sdkmath.NewInt(10)) + val1Coin := sdk.NewCoin("test2token", sdkmath.NewInt(10)) + _, _, addr1 := testdata.KeyTestPubAddr() + + // Creating a tx with 2 msgs from 2 signers: val0 and val1. + // The validators need to sign with SIGN_MODE_LEGACY_AMINO_JSON, + // because DIRECT doesn't support multi signers via the CLI. + // Since we use amino, we don't need to pre-populate signer_infos. + txBuilder := s.cctx.TxConfig.NewTxBuilder() + err := txBuilder.SetMsgs( + banktypes.NewMsgSend(val0, addr1, sdk.NewCoins(val0Coin)), + banktypes.NewMsgSend(val1, addr1, sdk.NewCoins(val1Coin)), + ) + s.Require().NoError(err) + txBuilder.SetFeeAmount(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))) + txBuilder.SetGasLimit(testdata.NewTestGasLimit() * 2) + s.Require().Equal([]sdk.AccAddress{val0, val1}, txBuilder.GetTx().GetSigners()) + + // Write the unsigned tx into a file. + txJSON, err := s.cctx.TxConfig.TxJSONEncoder()(txBuilder.GetTx()) + s.Require().NoError(err) + unsignedTxFile := testutil.WriteToNewTempFile(s.T(), string(txJSON)) + defer func() { + _ = unsignedTxFile.Close() + }() + + // Let val0 sign first the file with the unsignedTx. + signedByVal0, err := clitestutil.TxSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With(unsignedTxFile.Name()). + WithFrom(val0.String()). + WithOverwrite(). + WithSignMode(cflags.SignModeLegacyAminoJSON)...) + s.Require().NoError(err) + signedByVal0File := testutil.WriteToNewTempFile(s.T(), signedByVal0.String()) + defer func() { + _ = signedByVal0File.Close() + }() + + // Then let val1 sign the file with signedByVal0. 
+ val1AccNum, val1Seq, err := s.cctx.AccountRetriever.GetAccountNumberSequence(s.cctx, val1) + s.Require().NoError(err) + + signedTx, err := clitestutil.TxSignExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With(signedByVal0File.Name()). + WithFrom(val1.String()). + WithOffline(). + WithAccountNumber(val1AccNum). + WithSequence(val1Seq). + WithSignMode(cflags.SignModeLegacyAminoJSON)...) + s.Require().NoError(err) + signedTxFile := testutil.WriteToNewTempFile(s.T(), signedTx.String()) + defer func() { + _ = signedTxFile.Close() + }() + + res, err := clitestutil.TxBroadcastExec( + context.Background(), + s.cctx, + cli.TestFlags(). + With(signedTxFile.Name()). + WithBroadcastModeSync()...) + s.Require().NoError(err) + + var txRes sdk.TxResponse + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(res.Bytes(), &txRes)) + s.Require().Equal(uint32(0), txRes.Code, txRes.RawLog) +} + +func (s *AuthCLITestSuite) TestAuxSigner() { + val0Coin := sdk.NewCoin("testtoken", sdkmath.NewInt(10)) + + testCases := []struct { + name string + args []string + expectErr bool + }{ + { + "error with SIGN_MODE_DIRECT_AUX and --aux unset", + cli.TestFlags(). + WithSignMode(cflags.SignModeDirectAux), + true, + }, + { + "no error with SIGN_MODE_DIRECT_AUX mode and generate-only set (ignores generate-only)", + cli.TestFlags(). + WithSignMode(cflags.SignModeDirectAux). + WithGenerateOnly(), + false, + }, + { + "no error with SIGN_MODE_DIRECT_AUX mode and generate-only, tip flag set", + cli.TestFlags(). + WithSignMode(cflags.SignModeDirectAux). + WithGenerateOnly(). + WithTip(val0Coin), + false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxGovSubmitLegacyProposalCmd() + _, err := clitestutil.ExecTestCLICmd( + context.Background(), + s.cctx, + cmd, + cli.TestFlags(). + WithTitle("Text Proposal"). + WithDescription("test desc"). + WithProposalType(govtypes.ProposalTypeText). + WithFrom(s.val.String()). + Append(tc.args)...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + } + }) + } +} + +func (s *AuthCLITestSuite) TestAuxToFeeWithTips() { + // Skipping this test as it needs a simapp with the TipDecorator in post handler. + s.T().Skip() + + require := s.Require() + + kb := s.cctx.Keyring + acc, _, err := kb.NewMnemonic("tipperAccount", keyring.English, sdk.FullFundraiserPath, keyring.DefaultBIP39Passphrase, hd.Secp256k1) + require.NoError(err) + + tipper, err := acc.GetAddress() + require.NoError(err) + tipperInitialBal := sdk.NewCoin("testtoken", sdkmath.NewInt(10000)) + + feePayer := s.val + fee := sdk.NewCoin("uakt", sdkmath.NewInt(1000)) + tip := sdk.NewCoin("testtoken", sdkmath.NewInt(1000)) + + _, err = s.createBankMsg(s.cctx, tipper, sdk.NewCoins(tipperInitialBal)) + require.NoError(err) + + bal := s.getBalances(s.cctx, tipper, tip.Denom) + require.True(bal.Equal(tipperInitialBal.Amount)) + + testCases := []struct { + name string + tipper sdk.AccAddress + feePayer sdk.AccAddress + tip sdk.Coin + expectErrAux bool + expectErrBroadCast bool + errMsg string + tipperArgs []string + feePayerArgs []string + }{ + { + name: "when --aux and --sign-mode = direct set: error", + tipper: tipper, + feePayer: feePayer, + tip: tip, + tipperArgs: cli.TestFlags(). + WithSignMode(cflags.SignModeDirect). + WithTip(tip). + WithAux(), + expectErrAux: true, + feePayerArgs: cli.TestFlags(). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFrom(feePayer.String()). 
+ WithFees(sdk.Coins{fee}), + }, + { + name: "both tipper, fee payer uses AMINO: no error", + tipper: tipper, + feePayer: feePayer, + tip: tip, + tipperArgs: cli.TestFlags(). + WithSignMode(cflags.SignModeLegacyAminoJSON). + WithTip(tip). + WithAux(), + feePayerArgs: cli.TestFlags(). + WithSignMode(cflags.SignModeLegacyAminoJSON). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFrom(feePayer.String()). + WithFees(sdk.Coins{fee}), + }, + { + name: "tipper uses DIRECT_AUX, fee payer uses AMINO: no error", + tipper: tipper, + feePayer: feePayer, + tip: tip, + tipperArgs: cli.TestFlags(). + WithSignMode(cflags.SignModeDirectAux). + WithTip(tip). + WithAux(), + feePayerArgs: cli.TestFlags(). + WithSignMode(cflags.SignModeLegacyAminoJSON). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFrom(feePayer.String()). + WithFees(sdk.Coins{fee}), + }, + { + name: "--tip flag unset: no error", + tipper: tipper, + feePayer: feePayer, + tip: sdk.Coin{Denom: "testtoken", Amount: sdkmath.NewInt(0)}, + tipperArgs: cli.TestFlags(). + WithSignMode(cflags.SignModeDirectAux). + WithAux(), + feePayerArgs: cli.TestFlags(). + WithSignMode(cflags.SignModeLegacyAminoJSON). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFrom(feePayer.String()). + WithFees(sdk.Coins{fee}), + }, + { + name: "legacy amino json: no error", + tipper: tipper, + feePayer: feePayer, + tip: tip, + tipperArgs: cli.TestFlags(). + WithSignMode(cflags.SignModeLegacyAminoJSON). + WithTip(tip). + WithAux(), + feePayerArgs: cli.TestFlags(). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFrom(feePayer.String()). + WithFees(sdk.Coins{fee}), + }, + { + name: "tipper uses direct aux, fee payer uses direct: happy case", + tipper: tipper, + feePayer: feePayer, + tip: tip, + tipperArgs: cli.TestFlags(). + WithSignMode(cflags.SignModeDirectAux). + WithTip(tip). + WithAux(), + feePayerArgs: cli.TestFlags(). + WithSignMode(cflags.SignModeDirect). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFrom(feePayer.String()). + WithFees(sdk.Coins{fee}), + }, + { + name: "chain-id mismatch: error", + tipper: tipper, + feePayer: feePayer, + tip: tip, + tipperArgs: cli.TestFlags(). + WithSignMode(cflags.SignModeDirectAux). + WithTip(tip). + WithAux(), + expectErrAux: false, + feePayerArgs: cli.TestFlags(). + WithSignMode(cflags.SignModeDirect). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFrom(feePayer.String()). + WithFees(sdk.Coins{fee}). + WithChainID("foobar"), + expectErrBroadCast: true, + }, + { + name: "wrong denom in tip: error", + tipper: tipper, + feePayer: feePayer, + tip: sdk.Coin{Denom: "testtoken", Amount: sdkmath.NewInt(0)}, + tipperArgs: cli.TestFlags(). + WithSignMode(cflags.SignModeDirectAux). + WithTip(sdk.Coin{Denom: "wrongDenom", Amount: sdkmath.NewInt(100)}). + WithAux(), + feePayerArgs: cli.TestFlags(). + WithSignMode(cflags.SignModeDirect). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFrom(feePayer.String()). + WithFees(sdk.Coins{fee}), + errMsg: "insufficient funds", + }, + { + name: "insufficient fees: error", + tipper: tipper, + feePayer: feePayer, + tip: sdk.Coin{Denom: "testtoken", Amount: sdkmath.NewInt(0)}, + tipperArgs: cli.TestFlags(). + WithSignMode(cflags.SignModeDirectAux). + WithTip(tip). + WithAux(), + feePayerArgs: cli.TestFlags(). + WithSignMode(cflags.SignModeDirect). + WithSkipConfirm(). + WithBroadcastModeSync(). 
+ WithFrom(feePayer.String()), + errMsg: "insufficient fees", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxGovSubmitLegacyProposalCmd() + res, err := clitestutil.ExecTestCLICmd( + context.Background(), + s.cctx, + cmd, + cli.TestFlags(). + WithTitle("test"). + WithDescription("test desc"). + WithProposalType(govtypes.ProposalTypeText). + WithFrom(s.val.String()). + Append(tc.tipperArgs)...) + + if tc.expectErrAux { + require.Error(err) + } else { + require.NoError(err) + genTxFile := testutil.WriteToNewTempFile(s.T(), string(res.Bytes())) + defer func() { + _ = genTxFile.Close() + }() + + // broadcast the tx + res, err = clitestutil.TxAuxToFeeExec( + context.Background(), + s.cctx, + genTxFile.Name(), + tc.feePayerArgs..., + ) + + switch { + case tc.expectErrBroadCast: + require.Error(err) + + case tc.errMsg != "": + require.NoError(err) + + var txRes sdk.TxResponse + require.NoError(s.cctx.Codec.UnmarshalJSON(res.Bytes(), &txRes)) + + require.Contains(txRes.RawLog, tc.errMsg) + + default: + require.NoError(err) + + var txRes sdk.TxResponse + require.NoError(s.cctx.Codec.UnmarshalJSON(res.Bytes(), &txRes)) + + require.Equal(uint32(0), txRes.Code) + require.NotNil(int64(0), txRes.Height) + + bal = s.getBalances(s.cctx, tipper, tc.tip.Denom) + tipperInitialBal = tipperInitialBal.Sub(tc.tip) + require.True(bal.Equal(tipperInitialBal.Amount)) + } + } + }) + } +} + +func (s *AuthCLITestSuite) getBalances(cctx client.Context, addr sdk.AccAddress, denom string) sdkmath.Int { + resp, err := clitestutil.QueryBalancesExec( + context.Background(), + cctx, + cli.TestFlags(). + With(addr.String())...) + s.Require().NoError(err) + + var balRes banktypes.QueryAllBalancesResponse + err = cctx.Codec.UnmarshalJSON(resp.Bytes(), &balRes) + s.Require().NoError(err) + startTokens := balRes.Balances.AmountOf(denom) + return startTokens +} + +func (s *AuthCLITestSuite) createBankMsg(cctx client.Context, toAddr sdk.AccAddress, amount sdk.Coins, extraFlags ...string) (testutil.BufferWriter, error) { + return clitestutil.ExecSend( + context.Background(), + cctx, + cli.TestFlags(). + With( + s.val.String(), + toAddr.String(), + amount.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + Append(extraFlags)...) 
+}
diff --git a/go/cli/auth_tips.go b/go/cli/auth_tips.go
new file mode 100644
index 00000000..42b5e55b
--- /dev/null
+++ b/go/cli/auth_tips.go
@@ -0,0 +1,80 @@
+package cli
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/cosmos/cosmos-sdk/types/tx"
+ "github.com/spf13/cobra"
+
+ cflags "pkg.akt.dev/go/cli/flags"
+)
+
+func GetAuxToFeeCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "aux-to-fee <aux_signed_tx.json>",
+ Short: "Includes the aux signer data in the tx, broadcasts the tx, and sends the tip amount to the broadcaster",
+ Args: cobra.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ ctx := cmd.Context()
+ cl := MustClientFromContext(ctx)
+ cctx := cl.ClientContext()
+
+ auxSignerData := tx.AuxSignerData{}
+
+ bytes, err := os.ReadFile(args[0])
+ if err != nil {
+ return err
+ }
+
+ err = cctx.Codec.UnmarshalJSON(bytes, &auxSignerData)
+ if err != nil {
+ return err
+ }
+
+ if auxSignerData.SignDoc.ChainId != cctx.ChainID {
+ return fmt.Errorf("expected chain-id %s, got %s in aux signer data", cctx.ChainID, auxSignerData.SignDoc.ChainId)
+ }
+
+ txBuilder := cctx.TxConfig.NewTxBuilder()
+ err = txBuilder.AddAuxSignerData(auxSignerData)
+ if err != nil {
+ return err
+ }
+
+ txBuilder.SetFeePayer(cctx.FromAddress)
+ // txBuilder.SetFeeAmount(f.Fees())
+ // txBuilder.SetGasLimit(f.Gas())
+
+ // if cctx.GenerateOnly {
+ // json, err := cctx.TxConfig.TxJSONEncoder()(txBuilder.GetTx())
+ // if err != nil {
+ // return err
+ // }
+ // return cctx.PrintString(fmt.Sprintf("%s\n", json))
+ // }
+
+ // err = authclient.SignTx(f, cctx, cctx.FromName, txBuilder, cctx.Offline, false)
+ // if err != nil {
+ // return err
+ // }
+ //
+ // txBytes, err := cctx.TxConfig.TxEncoder()(txBuilder.GetTx())
+ // if err != nil {
+ // return err
+ // }
+
+ // broadcast to a Tendermint node
+ res, err := cl.Tx().BroadcastTx(ctx, txBuilder.GetTx())
+ if err != nil {
+ return err
+ }
+
+ return cl.PrintMessage(res)
+ },
+ }
+
+ cflags.AddTxFlagsToCmd(cmd)
+
+ return cmd
+}
diff --git a/go/cli/auth_tx.go b/go/cli/auth_tx.go
new file mode 100644
index 00000000..1f804851
--- /dev/null
+++ b/go/cli/auth_tx.go
@@ -0,0 +1,661 @@
+package cli
+
+import (
+ "encoding/base64"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/spf13/cobra"
+
+ "github.com/cosmos/cosmos-sdk/client"
+ "github.com/cosmos/cosmos-sdk/client/tx"
+ kmultisig "github.com/cosmos/cosmos-sdk/crypto/keys/multisig"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+ authclient "github.com/cosmos/cosmos-sdk/x/auth/client"
+ authsigning "github.com/cosmos/cosmos-sdk/x/auth/signing"
+
+ cflags "pkg.akt.dev/go/cli/flags"
+)
+
+// GetSignBatchCommand returns the transaction sign-batch command.
+func GetSignBatchCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "sign-batch [file] ([file2]...)",
+ Short: "Sign transaction batch files",
+ Long: `Sign batch files of transactions generated with --generate-only.
+The command processes a list of transactions from a file (one StdTx per line), or multiple files.
+It then generates signed transactions or signatures and prints their JSON encoding, delimited by '\n'.
+As the signatures are generated, the command updates the account and sequence number accordingly.
+
+If the --signature-only flag is set, it will output the signature parts only.
+
+The --offline flag makes sure that the client will not reach out to full node.
+As a result, the account and the sequence number queries will not be performed and
+it is required to set such parameters manually. Note, invalid values will cause
+the transaction to fail. The sequence will be incremented automatically for each
+transaction that is signed.
+
+If --account-number or --sequence flag is used when offline=false, they are ignored and
+overwritten by the default flag values.
+
+The --multisig=<multisig_key> flag generates a signature on behalf of a multisig
+account key. It implies --signature-only.
+`,
+ PreRun: preSignCmd,
+ RunE: makeSignBatchCmd(),
+ Args: cobra.MinimumNArgs(1),
+ }
+
+ cmd.Flags().String(cflags.FlagMultisig, "", "Address or key name of the multisig account on behalf of which the transaction shall be signed")
+ cmd.Flags().String(cflags.FlagOutputDocument, "", "The document will be written to the given file instead of STDOUT")
+ cmd.Flags().Bool(cflags.FlagSigOnly, false, "Print only the generated signature, then exit")
+ cmd.Flags().Bool(cflags.FlagAppend, false, "Combine all messages and generate a single signed transaction for broadcast.")
+
+ cflags.AddTxFlagsToCmd(cmd)
+
+ _ = cmd.MarkFlagRequired(cflags.FlagFrom)
+
+ return cmd
+}
+
+func makeSignBatchCmd() func(cmd *cobra.Command, args []string) error {
+ return func(cmd *cobra.Command, args []string) error {
+ cctx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+ txFactory, err := tx.NewFactoryCLI(cctx, cmd.Flags())
+ if err != nil {
+ return err
+ }
+ txCfg := cctx.TxConfig
+ printSignatureOnly, _ := cmd.Flags().GetBool(cflags.FlagSigOnly)
+
+ ms, err := cmd.Flags().GetString(cflags.FlagMultisig)
+ if err != nil {
+ return err
+ }
+
+ // prepare output document
+ closeFunc, err := setOutputFile(cmd)
+ if err != nil {
+ return err
+ }
+ defer closeFunc()
+ cctx.WithOutput(cmd.OutOrStdout())
+
+ // reads tx from args
+ scanner, err := ReadTxsFromInput(txCfg, args...)
+ if err != nil {
+ return err
+ }
+
+ if !cctx.Offline {
+ if ms == "" {
+ from, err := cmd.Flags().GetString(cflags.FlagFrom)
+ if err != nil {
+ return err
+ }
+
+ addr, _, _, err := client.GetFromFields(cctx, txFactory.Keybase(), from)
+ if err != nil {
+ return err
+ }
+
+ acc, err := txFactory.AccountRetriever().GetAccount(cctx, addr)
+ if err != nil {
+ return err
+ }
+
+ txFactory = txFactory.WithAccountNumber(acc.GetAccountNumber()).WithSequence(acc.GetSequence())
+ } else {
+ txFactory = txFactory.WithAccountNumber(0).WithSequence(0)
+ }
+ }
+
+ appendMessagesToSingleTx, _ := cmd.Flags().GetBool(cflags.FlagAppend)
+ // Combines all tx msgs and creates a single signed transaction
+ if appendMessagesToSingleTx {
+ txBuilder := cctx.TxConfig.NewTxBuilder()
+ msgs := make([]sdk.Msg, 0)
+ newGasLimit := uint64(0)
+
+ for scanner.Scan() {
+ unsignedStdTx := scanner.Tx()
+ fe, err := cctx.TxConfig.WrapTxBuilder(unsignedStdTx)
+ if err != nil {
+ return err
+ }
+ // increment the gas
+ newGasLimit += fe.GetTx().GetGas()
+ // append messages
+ msgs = append(msgs, unsignedStdTx.GetMsgs()...)
+ }
+ // set the newly appended msgs into builder
+ _ = txBuilder.SetMsgs(msgs...)
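+ // NOTE: the combined transaction carries a single gas limit equal to the
+ // sum of the gas limits of the individual transactions read above.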
+ + // set the memo,fees,feeGranter,feePayer from cmd flags + txBuilder.SetMemo(txFactory.Memo()) + txBuilder.SetFeeAmount(txFactory.Fees()) + txBuilder.SetFeeGranter(cctx.FeeGranter) + txBuilder.SetFeePayer(cctx.FeePayer) + + // set the gasLimit + txBuilder.SetGasLimit(newGasLimit) + + // sign the txs + if ms == "" { + from, _ := cmd.Flags().GetString(cflags.FlagFrom) + if err := sign(cctx, txBuilder, txFactory, from); err != nil { + return err + } + } else { + if err := multisigSign(cctx, txBuilder, txFactory, ms); err != nil { + return err + } + } + + json, err := marshalSignatureJSON(txCfg, txBuilder, printSignatureOnly) + if err != nil { + return err + } + + cmd.Printf("%s\n", json) + } else { + // It will generate signed tx for each tx + for sequence := txFactory.Sequence(); scanner.Scan(); sequence++ { + unsignedStdTx := scanner.Tx() + txFactory = txFactory.WithSequence(sequence) + txBuilder, err := txCfg.WrapTxBuilder(unsignedStdTx) + if err != nil { + return err + } + + // sign the txs + if ms == "" { + from, _ := cmd.Flags().GetString(cflags.FlagFrom) + if err := sign(cctx, txBuilder, txFactory, from); err != nil { + return err + } + } else { + if err := multisigSign(cctx, txBuilder, txFactory, ms); err != nil { + return err + } + } + + json, err := marshalSignatureJSON(txCfg, txBuilder, printSignatureOnly) + if err != nil { + return err + } + cmd.Printf("%s\n", json) + } + } + + if err := scanner.UnmarshalErr(); err != nil { + return err + } + + return scanner.UnmarshalErr() + } +} + +func sign(clientCtx client.Context, txBuilder client.TxBuilder, txFactory tx.Factory, from string) error { + _, fromName, _, err := client.GetFromFields(clientCtx, txFactory.Keybase(), from) + if err != nil { + return fmt.Errorf("error getting account from keybase: %w", err) + } + + if err = SignTx(txFactory, clientCtx, fromName, txBuilder, true, true); err != nil { + return err + } + + return nil +} + +func multisigSign(clientCtx client.Context, txBuilder client.TxBuilder, txFactory tx.Factory, multisig string) error { + multisigAddr, _, _, err := client.GetFromFields(clientCtx, txFactory.Keybase(), multisig) + if err != nil { + return fmt.Errorf("error getting account from keybase: %w", err) + } + + if err = SignTxWithSignerAddress( + txFactory, + clientCtx, + multisigAddr, + clientCtx.GetFromName(), + txBuilder, + clientCtx.Offline, + true, + ); err != nil { + return err + } + + return nil +} + +func setOutputFile(cmd *cobra.Command) (func(), error) { + outputDoc, _ := cmd.Flags().GetString(cflags.FlagOutputDocument) + if outputDoc == "" { + return func() {}, nil + } + + fp, err := os.OpenFile(outputDoc, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o644) + if err != nil { + return func() {}, err + } + + cmd.SetOut(fp) + + return func() { _ = fp.Close() }, nil +} + +// GetSignCommand returns the transaction sign command. +func GetSignCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "sign [file]", + Short: "Sign a transaction generated offline", + Long: `Sign a transaction created with the --generate-only flag. +It will read a transaction from [file], sign it, and print its JSON encoding. + +If the --signature-only flag is set, it will output the signature parts only. + +The --offline flag makes sure that the client will not reach out to full node. +As a result, the account and sequence number queries will not be performed and +it is required to set such parameters manually. Note, invalid values will cause +the transaction to fail. 
+
+The --multisig=<multisig_key> flag generates a signature on behalf of a multisig account
+key. It implies --signature-only. Full multisig signed transactions may eventually
+be generated via the 'multisign' command.
+`,
+ PreRun: preSignCmd,
+ RunE: makeSignCmd(),
+ Args: cobra.ExactArgs(1),
+ }
+
+ cmd.Flags().String(cflags.FlagMultisig, "", "Address or key name of the multisig account on behalf of which the transaction shall be signed")
+ cmd.Flags().Bool(cflags.FlagOverwrite, false, "Overwrite existing signatures with a new one. If disabled, new signature will be appended")
+ cmd.Flags().Bool(cflags.FlagSigOnly, false, "Print only the signatures")
+ cmd.Flags().String(cflags.FlagOutputDocument, "", "The document will be written to the given file instead of STDOUT")
+ cmd.Flags().Bool(cflags.FlagAmino, false, "Generate Amino encoded JSON suitable for submitting to the txs REST endpoint")
+ cflags.AddTxFlagsToCmd(cmd)
+
+ _ = cmd.MarkFlagRequired(cflags.FlagFrom)
+
+ return cmd
+}
+
+func preSignCmd(cmd *cobra.Command, _ []string) {
+ // Conditionally mark the account and sequence numbers required as no RPC
+ // query will be done.
+ if offline, _ := cmd.Flags().GetBool(cflags.FlagOffline); offline {
+ _ = cmd.MarkFlagRequired(cflags.FlagAccountNumber)
+ _ = cmd.MarkFlagRequired(cflags.FlagSequence)
+ }
+}
+
+func makeSignCmd() func(cmd *cobra.Command, args []string) error {
+ return func(cmd *cobra.Command, args []string) (err error) {
+ cctx, err := client.GetClientTxContext(cmd)
+ if err != nil {
+ return err
+ }
+
+ cctx, txF, newTx, err := readTxAndInitContexts(cctx, cmd, args[0])
+ if err != nil {
+ return err
+ }
+
+ return signTx(cmd, cctx, txF, newTx)
+ }
+}
+
+func signTx(cmd *cobra.Command, cctx client.Context, txF tx.Factory, newTx sdk.Tx) error {
+ f := cmd.Flags()
+ txCfg := cctx.TxConfig
+ txBuilder, err := txCfg.WrapTxBuilder(newTx)
+ if err != nil {
+ return err
+ }
+
+ printSignatureOnly, err := cmd.Flags().GetBool(cflags.FlagSigOnly)
+ if err != nil {
+ return err
+ }
+
+ multisig, err := cmd.Flags().GetString(cflags.FlagMultisig)
+ if err != nil {
+ return err
+ }
+
+ if (multisig != "") && (cctx.SignModeStr != cflags.SignModeLegacyAminoJSON) {
+ return errors.New("multisig supports only \"amino-json\" sign mode")
+ }
+
+ from, err := cmd.Flags().GetString(cflags.FlagFrom)
+ if err != nil {
+ return err
+ }
+
+ _, fromName, _, err := client.GetFromFields(cctx, txF.Keybase(), from)
+ if err != nil {
+ return fmt.Errorf("error getting account from keybase: %w", err)
+ }
+
+ overwrite, err := f.GetBool(cflags.FlagOverwrite)
+ if err != nil {
+ return err
+ }
+
+ if multisig != "" {
+ // Bech32 decode error, maybe it's a name, we try to fetch from keyring
+ multisigAddr, multisigName, _, err := client.GetFromFields(cctx, txF.Keybase(), multisig)
+ if err != nil {
+ return fmt.Errorf("error getting account from keybase: %w", err)
+ }
+ multisigKey, err := getMultisigRecord(cctx, multisigName)
+ if err != nil {
+ return err
+ }
+ multisigPubKey, err := multisigKey.GetPubKey()
+ if err != nil {
+ return err
+ }
+ multisigLegacyPub := multisigPubKey.(*kmultisig.LegacyAminoPubKey)
+
+ fromRecord, err := cctx.Keyring.Key(fromName)
+ if err != nil {
+ return fmt.Errorf("error getting account from keybase: %w", err)
+ }
+ fromPubKey, err := fromRecord.GetPubKey()
+ if err != nil {
+ return err
+ }
+
+ var found bool
+ for _, pubkey := range multisigLegacyPub.GetPubKeys() {
+ if pubkey.Equals(fromPubKey) {
+ found = true
+ }
+ }
+ if !found {
+ return fmt.Errorf("signing key is not a part of
multisig key") + } + err = SignTxWithSignerAddress( + txF, cctx, multisigAddr, fromName, txBuilder, cctx.Offline, overwrite) + if err != nil { + return err + } + printSignatureOnly = true + } else { + err = SignTx(txF, cctx, cctx.GetFromName(), txBuilder, cctx.Offline, overwrite) + } + if err != nil { + return err + } + + aminoJSON, err := f.GetBool(cflags.FlagAmino) + if err != nil { + return err + } + + bMode, err := f.GetString(cflags.FlagBroadcastMode) + if err != nil { + return err + } + + // set output + closeFunc, err := setOutputFile(cmd) + if err != nil { + return err + } + + defer closeFunc() + cctx.WithOutput(cmd.OutOrStdout()) + + var json []byte + if aminoJSON { + stdTx, err := tx.ConvertTxToStdTx(cctx.LegacyAmino, txBuilder.GetTx()) + if err != nil { + return err + } + req := BroadcastReq{ + Tx: stdTx, + Mode: bMode, + } + json, err = cctx.LegacyAmino.MarshalJSON(req) + if err != nil { + return err + } + } else { + json, err = marshalSignatureJSON(txCfg, txBuilder, printSignatureOnly) + if err != nil { + return err + } + } + + cmd.Printf("%s\n", json) + + return err +} + +func marshalSignatureJSON(txConfig client.TxConfig, txBldr client.TxBuilder, signatureOnly bool) ([]byte, error) { + parsedTx := txBldr.GetTx() + if signatureOnly { + sigs, err := parsedTx.GetSignaturesV2() + if err != nil { + return nil, err + } + return txConfig.MarshalSignatureJSON(sigs) + } + + return txConfig.TxJSONEncoder()(parsedTx) +} + +func GetValidateSignaturesCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "validate-signatures [file]", + Short: "validate transactions signatures", + Long: `Print the addresses that must sign the transaction, those who have already +signed it, and make sure that signatures are in the correct order. + +The command would check whether all required signers have signed the transactions, whether +the signatures were collected in the right order, and if the signature is valid over the +given transaction. If the --offline flag is also set, signature validation over the +transaction will be not be performed as that will require RPC communication with a full node. +`, + PreRun: preSignCmd, + RunE: makeValidateSignaturesCmd(), + Args: cobra.ExactArgs(1), + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +func makeValidateSignaturesCmd() func(cmd *cobra.Command, args []string) error { + return func(cmd *cobra.Command, args []string) error { + cctx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + cctx, txBldr, stdTx, err := readTxAndInitContexts(cctx, cmd, args[0]) + if err != nil { + return err + } + + if !printAndValidateSigs(cmd, cctx, txBldr.ChainID(), stdTx, cctx.Offline) { + return fmt.Errorf("signatures validation failed") + } + + return nil + } +} + +// printAndValidateSigs will validate the signatures of a given transaction over its +// expected signers. In addition, if offline has not been supplied, the signature is +// verified over the transaction sign bytes. Returns false if the validation fails. 
+func printAndValidateSigs(
+ cmd *cobra.Command, cctx client.Context, chainID string, tx sdk.Tx, offline bool,
+) bool {
+ sigTx := tx.(authsigning.SigVerifiableTx)
+ signModeHandler := cctx.TxConfig.SignModeHandler()
+
+ cmd.Println("Signers:")
+ signers := sigTx.GetSigners()
+ for i, signer := range signers {
+ cmd.Printf(" %v: %v\n", i, signer.String())
+ }
+
+ success := true
+ sigs, err := sigTx.GetSignaturesV2()
+ if err != nil {
+ panic(err)
+ }
+ cmd.Println("")
+ cmd.Println("Signatures:")
+
+ if len(sigs) != len(signers) {
+ success = false
+ }
+
+ for i, sig := range sigs {
+ var (
+ pubKey = sig.PubKey
+ multiSigHeader string
+ multiSigMsg string
+ sigAddr = sdk.AccAddress(pubKey.Address())
+ sigSanity = "OK"
+ )
+
+ if i >= len(signers) || !sigAddr.Equals(signers[i]) {
+ sigSanity = "ERROR: signature does not match its respective signer"
+ success = false
+ }
+
+ // validate the actual signature over the transaction bytes since we can
+ // reach out to a full node to query accounts.
+ if !offline && success {
+ accNum, accSeq, err := cctx.AccountRetriever.GetAccountNumberSequence(cctx, sigAddr)
+ if err != nil {
+ cmd.PrintErrf("failed to get account: %s\n", sigAddr)
+ return false
+ }
+
+ signingData := authsigning.SignerData{
+ Address: sigAddr.String(),
+ ChainID: chainID,
+ AccountNumber: accNum,
+ Sequence: accSeq,
+ PubKey: pubKey,
+ }
+ err = authsigning.VerifySignature(pubKey, signingData, sig.Data, signModeHandler, sigTx)
+ if err != nil {
+ return false
+ }
+ }
+
+ cmd.Printf(" %d: %s\t\t\t[%s]%s%s\n", i, sigAddr.String(), sigSanity, multiSigHeader, multiSigMsg)
+ }
+
+ cmd.Println("")
+
+ return success
+}
+
+func readTxAndInitContexts(clientCtx client.Context, cmd *cobra.Command, filename string) (client.Context, tx.Factory, sdk.Tx, error) {
+ stdTx, err := authclient.ReadTxFromFile(clientCtx, filename)
+ if err != nil {
+ return clientCtx, tx.Factory{}, nil, err
+ }
+
+ txFactory, err := tx.NewFactoryCLI(clientCtx, cmd.Flags())
+ if err != nil {
+ return clientCtx, tx.Factory{}, nil, err
+ }
+
+ return clientCtx, txFactory, stdTx, nil
+}
+
+// GetEncodeCommand returns the encode command to take a JSONified transaction and turn it into
+// Protobuf-serialized bytes
+func GetEncodeCommand() *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "encode [file]",
+ Short: "Encode transactions generated offline",
+ Long: `Encode transactions created with the --generate-only flag or signed with the sign command.
+Read a transaction from <file>, serialize it to the Protobuf wire protocol, and output it as base64.
+If you supply a dash (-) argument in place of an input filename, the command reads from standard input.`,
+ Args: cobra.ExactArgs(1),
+ RunE: func(cmd *cobra.Command, args []string) error {
+ cctx := client.GetClientContextFromCmd(cmd)
+
+ txt, err := authclient.ReadTxFromFile(cctx, args[0])
+ if err != nil {
+ return err
+ }
+
+ // re-encode it
+ txb, err := cctx.TxConfig.TxEncoder()(txt)
+ if err != nil {
+ return err
+ }
+
+ // base64 encode the encoded tx bytes
+ txBytesBase64 := base64.StdEncoding.EncodeToString(txb)
+
+ return cctx.PrintString(txBytesBase64 + "\n")
+ },
+ }
+
+ cflags.AddTxFlagsToCmd(cmd)
+ _ = cmd.Flags().MarkHidden(cflags.FlagOutput) // encoding makes sense to output only json
+
+ return cmd
+}
+
+const flagHex = "hex"
+
+// GetDecodeCommand returns the decode command to take serialized bytes and turn
+// it into a JSON-encoded transaction.
+func GetDecodeCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "decode [protobuf-byte-string]", + Short: "Decode a binary encoded transaction string", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + cctx := client.GetClientContextFromCmd(cmd) + var txBytes []byte + + if useHex, _ := cmd.Flags().GetBool(flagHex); useHex { + txBytes, err = hex.DecodeString(args[0]) + } else { + txBytes, err = base64.StdEncoding.DecodeString(args[0]) + } + if err != nil { + return err + } + + txb, err := cctx.TxConfig.TxDecoder()(txBytes) + if err != nil { + return err + } + + json, err := cctx.TxConfig.TxJSONEncoder()(txb) + if err != nil { + return err + } + + return cctx.PrintBytes(json) + }, + } + + cmd.Flags().BoolP(flagHex, "x", false, "Treat input as hexadecimal instead of base64") + cflags.AddTxFlagsToCmd(cmd) + _ = cmd.Flags().MarkHidden(cflags.FlagOutput) // decoding makes sense to output only json + + return cmd +} diff --git a/go/cli/authz_query.go b/go/cli/authz_query.go new file mode 100644 index 00000000..402c0e68 --- /dev/null +++ b/go/cli/authz_query.go @@ -0,0 +1,198 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/authz" + bank "github.com/cosmos/cosmos-sdk/x/bank/types" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetQueryAuthzCmd returns the cli query commands for this module +func GetQueryAuthzCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: authz.ModuleName, + Short: "Querying commands for the authz module", + Long: "", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetQueryAuthzGrantsCmd(), + GetQueryAuthzGranterGrantsCmd(), + GetQueryAuthzGranteeGrantsCmd(), + ) + + return cmd +} + +// GetQueryAuthzGrantsCmd implements the query authorization command. +func GetQueryAuthzGrantsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "grants [granter-addr] [grantee-addr] [msg-type-url]?", + Args: cobra.RangeArgs(2, 3), + Short: "query grants for a granter-grantee pair and optionally a msg-type-url", + Long: strings.TrimSpace( + fmt.Sprintf(`Query authorization grants for a granter-grantee pair. If msg-type-url +is set, it will select grants only for that msg type. +Examples: +$ %s query %s grants cosmos1skj.. cosmos1skjwj.. +$ %s query %s grants cosmos1skjw.. cosmos1skjwj.. 
%s +`, + version.AppName, authz.ModuleName, + version.AppName, authz.ModuleName, bank.SendAuthorization{}.MsgTypeURL()), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx, err := client.GetClientQueryContext(cmd) + if err != nil { + return err + } + queryClient := authz.NewQueryClient(clientCtx) + + granter, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + grantee, err := sdk.AccAddressFromBech32(args[1]) + if err != nil { + return err + } + msgAuthorized := "" + if len(args) >= 3 { + msgAuthorized = args[2] + } + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + res, err := queryClient.Grants( + cmd.Context(), + &authz.QueryGrantsRequest{ + Granter: granter.String(), + Grantee: grantee.String(), + MsgTypeUrl: msgAuthorized, + Pagination: pageReq, + }, + ) + if err != nil { + return err + } + + return clientCtx.PrintProto(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "grants") + + return cmd +} + +// GetQueryAuthzGranterGrantsCmd returns cmd to query for all grants for a granter. +func GetQueryAuthzGranterGrantsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "grants-by-granter [granter-addr]", + Args: cobra.ExactArgs(1), + Short: "query authorization grants granted by granter", + Long: strings.TrimSpace( + fmt.Sprintf(`Query authorization grants granted by granter. +Examples: +$ %s q %s grants-by-granter cosmos1skj.. +`, + version.AppName, authz.ModuleName), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + granter, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + res, err := cl.Query().Authz().GranterGrants( + cmd.Context(), + &authz.QueryGranterGrantsRequest{ + Granter: granter.String(), + Pagination: pageReq, + }, + ) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "granter-grants") + + return cmd +} + +// GetQueryAuthzGranteeGrantsCmd returns cmd to query for all grants for a grantee. +func GetQueryAuthzGranteeGrantsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "grants-by-grantee [grantee-addr]", + Args: cobra.ExactArgs(1), + Short: "query authorization grants granted to a grantee", + Long: strings.TrimSpace( + fmt.Sprintf(`Query authorization grants granted to a grantee. +Examples: +$ %s q %s grants-by-grantee cosmos1skj.. 
+`, + version.AppName, authz.ModuleName), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + grantee, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + res, err := cl.Query().Authz().GranteeGrants( + cmd.Context(), + &authz.QueryGranteeGrantsRequest{ + Grantee: grantee.String(), + Pagination: pageReq, + }, + ) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "grantee-grants") + + return cmd +} diff --git a/go/cli/authz_query_test.go b/go/cli/authz_query_test.go new file mode 100644 index 00000000..e26ceb52 --- /dev/null +++ b/go/cli/authz_query_test.go @@ -0,0 +1,236 @@ +package cli_test + +import ( + "context" + "fmt" + "time" + + sdkmath "cosmossdk.io/math" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/authz" + + "pkg.akt.dev/go/cli" + cflags "pkg.akt.dev/go/cli/flags" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +func (s *AuthzCLITestSuite) TestQueryAuthorizations() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + grantee := s.grantee[0] + twoHours := time.Now().Add(time.Minute * time.Duration(120)).Unix() + + _, err := clitestutil.ExecCreateGrant( + context.Background(), + s.cctx, + cli.TestFlags(). + With(grantee.String(), "send"). + WithSpendLimit("100uakt"). + WithSkipConfirm(). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", twoHours)). + WithSignMode("direct"). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10))))..., + ) + s.Require().NoError(err) + + testCases := []struct { + name string + args []string + expectErr bool + expErrMsg string + }{ + { + "Error: Invalid grantee", + []string{ + val[0].Address.String(), + "invalid grantee", + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + true, + "decoding bech32 failed: invalid character in string: ' '", + }, + { + "Error: Invalid granter", + []string{ + "invalid granter", + grantee.String(), + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + true, + "decoding bech32 failed: invalid character in string: ' '", + }, + { + "Valid txn (json)", + []string{ + val[0].Address.String(), + grantee.String(), + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + false, + ``, + }, + } + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryAuthzGrantsCmd() + resp, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) 
+ if tc.expectErr { + s.Require().Error(err) + s.Require().Contains(string(resp.Bytes()), tc.expErrMsg) + } else { + s.Require().NoError(err) + var grants authz.QueryGrantsResponse + err = s.cctx.Codec.UnmarshalJSON(resp.Bytes(), &grants) + s.Require().NoError(err) + } + }) + } +} + +func (s *AuthzCLITestSuite) TestQueryAuthorization() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + grantee := s.grantee[0] + twoHours := time.Now().Add(time.Minute * time.Duration(120)).Unix() + + _, err := clitestutil.ExecCreateGrant( + context.Background(), + s.cctx, + grantee.String(), + "send", + fmt.Sprintf("--%s=100stake", cflags.FlagSpendLimit), + fmt.Sprintf("--%s=true", cflags.FlagSkipConfirmation), + fmt.Sprintf("--%s=%s", cflags.FlagFrom, val[0].Address), + fmt.Sprintf("--%s=%s", cflags.FlagBroadcastMode, cflags.BroadcastSync), + fmt.Sprintf("--%s=%d", cflags.FlagExpiration, twoHours), + fmt.Sprintf("--%s=%s", cflags.FlagFees, sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10))).String()), + ) + s.Require().NoError(err) + + testCases := []struct { + name string + args []string + expectErr bool + }{ + { + "Error: Invalid grantee", + []string{ + val[0].Address.String(), + "invalid grantee", + typeMsgSend, + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + true, + }, + { + "Error: Invalid granter", + []string{ + "invalid granter", + grantee.String(), + typeMsgSend, + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + true, + }, + { + "Valid txn (json)", + []string{ + val[0].Address.String(), + grantee.String(), + typeMsgSend, + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + false, + }, + { + "Valid txn with allowed list (json)", + []string{ + val[0].Address.String(), + s.grantee[3].String(), + typeMsgSend, + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + false, + }, + } + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryAuthzGrantsCmd() + _, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + } + }) + } +} + +func (s *AuthzCLITestSuite) TestQueryGranterGrants() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + grantee := s.grantee[0] + require := s.Require() + + testCases := []struct { + name string + args []string + expectErr bool + expectedErr string + }{ + { + "invalid address", + []string{ + "invalid-address", + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + true, + "decoding bech32 failed", + }, + { + "no authorization found", + []string{ + grantee.String(), + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + false, + "", + }, + { + "valid case", + []string{ + val[0].Address.String(), + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + false, + "", + }, + { + "valid case with pagination", + []string{ + val[0].Address.String(), + "--limit=2", + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + false, + "", + }, + } + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryAuthzGranterGrantsCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) 
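+ // on success the output must decode into an authz.QueryGranterGrantsResponse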
+ if tc.expectErr { + require.Error(err) + require.Contains(out.String(), tc.expectedErr) + } else { + require.NoError(err) + var grants authz.QueryGranterGrantsResponse + require.NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), &grants)) + } + }) + } +} diff --git a/go/cli/authz_suite_test.go b/go/cli/authz_suite_test.go new file mode 100644 index 00000000..ad9d2ddc --- /dev/null +++ b/go/cli/authz_suite_test.go @@ -0,0 +1,152 @@ +package cli_test + +import ( + "bytes" + "context" + "fmt" + "io" + "time" + + sdkmath "cosmossdk.io/math" + abci "github.com/cometbft/cometbft/abci/types" + rpcclientmock "github.com/cometbft/cometbft/rpc/client/mock" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + testutilmod "github.com/cosmos/cosmos-sdk/types/module/testutil" + "github.com/cosmos/cosmos-sdk/x/bank" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + "github.com/cosmos/cosmos-sdk/x/gov" + govv1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + govv1beta1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + + "pkg.akt.dev/go/cli" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +var ( + typeMsgSend = banktypes.SendAuthorization{}.MsgTypeURL() + typeMsgVote = sdk.MsgTypeURL(&govv1.MsgVote{}) + typeMsgSubmitProposal = sdk.MsgTypeURL(&govv1.MsgSubmitProposal{}) +) + +type AuthzCLITestSuite struct { + CLITestSuite + grantee []sdk.AccAddress + addrs []sdk.AccAddress +} + +func (s *AuthzCLITestSuite) SetupSuite() { + s.encCfg = testutilmod.MakeTestEncodingConfig(gov.AppModuleBasic{}, bank.AppModuleBasic{}) + s.kr = keyring.NewInMemory(s.encCfg.Codec) + s.baseCtx = client.Context{}. + WithKeyring(s.kr). + WithTxConfig(s.encCfg.TxConfig). + WithCodec(s.encCfg.Codec). + WithLegacyAmino(s.encCfg.Amino). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). + WithChainID("test-chain") + + ctxGen := func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&sdk.TxResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return s.baseCtx.WithClient(c) + } + var outBuf bytes.Buffer + + s.cctx = ctxGen(). + WithOutput(&outBuf). + WithSignModeStr("direct") + + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + s.grantee = make([]sdk.AccAddress, 6) + + s.addrs = make([]sdk.AccAddress, 1) + s.addrs[0] = s.createAccount("validator address") + + // Send some funds to the new account. + // Create new account in the keyring. + s.grantee[0] = s.createAccount("grantee1") + s.msgSendExec(s.grantee[0]) + + // create a proposal with deposit + _, err := clitestutil.ExecGovSubmitLegacyProposal( + context.Background(), + s.cctx, + cli.TestFlags(). + WithFrom(val[0].Address.String()). + WithTitle("Text Proposal 1"). + WithSkipConfirm(). + WithDescription("Where is the title!?"). + WithProposalType(govv1beta1.ProposalTypeText). + WithDeposit(sdk.NewCoin("uakt", sdkmath.NewInt(10000000)))...) + s.Require().NoError(err) + + // Create new account in the keyring. + s.grantee[1] = s.createAccount("grantee2") + // Send some funds to the new account. + s.msgSendExec(s.grantee[1]) + + // grant send authorization to grantee2 + out, err := clitestutil.ExecCreateGrant( + context.Background(), + s.cctx, + cli.TestFlags(). + With(s.grantee[1].String(), "send"). + WithSpendLimit("100uakt"). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). 
+ WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10)))). + WithExpiration(fmt.Sprintf("%d", time.Now().Add(time.Minute*time.Duration(120)).Unix()))...) + s.Require().NoError(err) + + var response sdk.TxResponse + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), &response), out.String()) + + // Create new account in the keyring. + s.grantee[2] = s.createAccount("grantee3") + + // grant send authorization to grantee3 + _, err = clitestutil.ExecCreateGrant( + context.Background(), + s.cctx, + cli.TestFlags(). + With(s.grantee[2].String(), "send"). + WithSpendLimit("100uakt"). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10)))). + WithExpiration(fmt.Sprintf("%d", time.Now().Add(time.Minute*time.Duration(120)).Unix()))...) + s.Require().NoError(err) + + // Create new accounts in the keyring. + s.grantee[3] = s.createAccount("grantee4") + s.msgSendExec(s.grantee[3]) + + s.grantee[4] = s.createAccount("grantee5") + s.grantee[5] = s.createAccount("grantee6") + + // grant send authorization with allow list to grantee4 + out, err = clitestutil.ExecCreateGrant( + context.Background(), + s.cctx, + cli.TestFlags(). + With(s.grantee[3].String(), "send"). + WithSpendLimit("100uakt"). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10)))). + WithExpiration(fmt.Sprintf("%d", time.Now().Add(time.Minute*time.Duration(120)).Unix())). + WithAllowList(s.grantee[4].String())...) + s.Require().NoError(err) + + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), &response), out.String()) +} diff --git a/go/cli/authz_tx.go b/go/cli/authz_tx.go new file mode 100644 index 00000000..7281f1b2 --- /dev/null +++ b/go/cli/authz_tx.go @@ -0,0 +1,324 @@ +package cli + +import ( + "errors" + "fmt" + "strings" + "time" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/authz" + bank "github.com/cosmos/cosmos-sdk/x/bank/types" + staking "github.com/cosmos/cosmos-sdk/x/staking/types" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// Flag names and values +const ( + delegate = "delegate" + redelegate = "redelegate" + unbond = "unbond" +) + +// GetTxAuthzCmd returns the transaction commands for this module +func GetTxAuthzCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: authz.ModuleName, + Short: "Authorization transactions subcommands", + Long: "Authorize and revoke access to execute transactions on behalf of your address", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetTxAuthzGrantAuthorizationCmd(), + GetTxAuthzRevokeAuthorizationCmd(), + GetTxAuthzExecAuthorizationCmd(), + ) + + return cmd +} + +// GetTxAuthzGrantAuthorizationCmd returns a CLI command handler for creating a MsgGrant transaction. +func GetTxAuthzGrantAuthorizationCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "grant --from ", + Short: "Grant authorization to an address", + Long: strings.TrimSpace( + fmt.Sprintf(`create a new grant authorization to an address to execute a transaction on your behalf: + +Examples: + $ %s tx %s grant akash1skjw.. send --spend-limit=1000uakt --from= + $ %s tx %s grant akash1skjw.. 
generic --msg-type=/cosmos.gov.v1.MsgVote --from= + `, version.AppName, authz.ModuleName, version.AppName, authz.ModuleName), + ), + Args: cobra.ExactArgs(2), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + grantee, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + var authorization authz.Authorization + switch args[1] { + case "send": + limit, err := cmd.Flags().GetString(cflags.FlagSpendLimit) + if err != nil { + return err + } + + spendLimit, err := sdk.ParseCoinsNormalized(limit) + if err != nil { + return err + } + + if !spendLimit.IsAllPositive() { + return fmt.Errorf("spend-limit should be greater than zero") + } + + allowList, err := cmd.Flags().GetStringSlice(cflags.FlagAllowList) + if err != nil { + return err + } + + allowed, err := bech32toAccAddresses(allowList) + if err != nil { + return err + } + + authorization = bank.NewSendAuthorization(spendLimit, allowed) + + case "generic": + msgType, err := cmd.Flags().GetString(cflags.FlagMsgType) + if err != nil { + return err + } + + authorization = authz.NewGenericAuthorization(msgType) + case delegate, unbond, redelegate: + limit, err := cmd.Flags().GetString(cflags.FlagSpendLimit) + if err != nil { + return err + } + + allowValidators, err := cmd.Flags().GetStringSlice(cflags.FlagAllowedValidators) + if err != nil { + return err + } + + denyValidators, err := cmd.Flags().GetStringSlice(cflags.FlagDenyValidators) + if err != nil { + return err + } + + var delegateLimit *sdk.Coin + if limit != "" { + spendLimit, err := sdk.ParseCoinNormalized(limit) + if err != nil { + return err + } + + res, err := cl.Query().Staking().Params(cmd.Context(), &staking.QueryParamsRequest{}) + if err != nil { + return err + } + + if spendLimit.Denom != res.Params.BondDenom { + return fmt.Errorf("invalid denom %s; coin denom should match the current bond denom %s", spendLimit.Denom, res.Params.BondDenom) + } + + if !spendLimit.IsPositive() { + return fmt.Errorf("spend-limit should be greater than zero") + } + delegateLimit = &spendLimit + } + + allowed, err := bech32toValAddresses(allowValidators) + if err != nil { + return err + } + + denied, err := bech32toValAddresses(denyValidators) + if err != nil { + return err + } + + switch args[1] { + case delegate: + authorization, err = staking.NewStakeAuthorization(allowed, denied, staking.AuthorizationType_AUTHORIZATION_TYPE_DELEGATE, delegateLimit) + case unbond: + authorization, err = staking.NewStakeAuthorization(allowed, denied, staking.AuthorizationType_AUTHORIZATION_TYPE_UNDELEGATE, delegateLimit) + default: + authorization, err = staking.NewStakeAuthorization(allowed, denied, staking.AuthorizationType_AUTHORIZATION_TYPE_REDELEGATE, delegateLimit) + } + if err != nil { + return err + } + + default: + return fmt.Errorf("invalid authorization type, %s", args[1]) + } + + expire, err := getExpireTime(cmd) + if err != nil { + return err + } + + msg, err := authz.NewMsgGrant(cctx.GetFromAddress(), grantee, authorization, expire) + if err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + cmd.Flags().String(cflags.FlagMsgType, "", "The Msg method name for which we are creating a GenericAuthorization") + cmd.Flags().String(cflags.FlagSpendLimit, "", "SpendLimit for Send Authorization, an array 
of Coins allowed spend") + cmd.Flags().StringSlice(cflags.FlagAllowedValidators, []string{}, "Allowed validators addresses separated by ,") + cmd.Flags().StringSlice(cflags.FlagDenyValidators, []string{}, "Deny validators addresses separated by ,") + cmd.Flags().StringSlice(cflags.FlagAllowList, []string{}, "Allowed addresses grantee is allowed to send funds separated by ,") + cmd.Flags().Int64(cflags.FlagExpiration, 0, "Expire time as Unix timestamp. Set zero (0) for no expiry. Default is 0.") + + return cmd +} + +func getExpireTime(cmd *cobra.Command) (*time.Time, error) { + exp, err := cmd.Flags().GetInt64(cflags.FlagExpiration) + if err != nil { + return nil, err + } + if exp == 0 { + return nil, nil + } + e := time.Unix(exp, 0) + return &e, nil +} + +// GetTxAuthzRevokeAuthorizationCmd returns a CLI command handler for creating a MsgRevoke transaction. +func GetTxAuthzRevokeAuthorizationCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "revoke [grantee] [msg-type-url] --from=[granter]", + Short: "revoke authorization", + Long: strings.TrimSpace( + fmt.Sprintf(`revoke authorization from a granter to a grantee: +Example: + $ %s tx %s revoke akash1skj.. %s --from= + `, version.AppName, authz.ModuleName, bank.SendAuthorization{}.MsgTypeURL()), + ), + Args: cobra.ExactArgs(2), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + grantee, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + granter := cctx.GetFromAddress() + msgAuthorized := args[1] + msg := authz.NewMsgRevoke(granter, grantee, msgAuthorized) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{&msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + cflags.AddTxFlagsToCmd(cmd) + return cmd +} + +// GetTxAuthzExecAuthorizationCmd returns a CLI command handler for creating a MsgExec transaction. +func GetTxAuthzExecAuthorizationCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "exec [tx-json-file] --from [grantee]", + Short: "execute tx on behalf of granter account", + Long: strings.TrimSpace( + fmt.Sprintf(`execute tx on behalf of granter account: +Example: + $ %s tx %s exec tx.json --from grantee + $ %s tx bank send --from --chain-id --generate-only > tx.json && %s tx %s exec tx.json --from grantee + `, version.AppName, authz.ModuleName, version.AppName, version.AppName, authz.ModuleName), + ), + Args: cobra.ExactArgs(1), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + grantee := cctx.GetFromAddress() + + if offline, _ := cmd.Flags().GetBool(cflags.FlagOffline); offline { + return errors.New("cannot broadcast tx during offline mode") + } + + theTx, err := ReadTxFromFile(cctx, args[0]) + if err != nil { + return err + } + msg := authz.NewMsgExec(grantee, theTx.GetMsgs()) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{&msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// bech32toValAddresses returns []ValAddress from a list of Bech32 string addresses. 
+func bech32toValAddresses(validators []string) ([]sdk.ValAddress, error) { + vals := make([]sdk.ValAddress, len(validators)) + for i, validator := range validators { + addr, err := sdk.ValAddressFromBech32(validator) + if err != nil { + return nil, err + } + vals[i] = addr + } + return vals, nil +} + +// bech32toAccAddresses returns []AccAddress from a list of Bech32 string addresses. +func bech32toAccAddresses(accAddrs []string) ([]sdk.AccAddress, error) { + addrs := make([]sdk.AccAddress, len(accAddrs)) + for i, addr := range accAddrs { + accAddr, err := sdk.AccAddressFromBech32(addr) + if err != nil { + return nil, err + } + addrs[i] = accAddr + } + return addrs, nil +} diff --git a/go/cli/authz_tx_test.go b/go/cli/authz_tx_test.go new file mode 100644 index 00000000..5d6b9804 --- /dev/null +++ b/go/cli/authz_tx_test.go @@ -0,0 +1,825 @@ +package cli_test + +import ( + "context" + "fmt" + "time" + + sdkmath "cosmossdk.io/math" + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + authzclitestutil "github.com/cosmos/cosmos-sdk/x/authz/client/testutil" + "github.com/cosmos/gogoproto/proto" + + "pkg.akt.dev/go/cli" + cflags "pkg.akt.dev/go/cli/flags" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +func (s *AuthzCLITestSuite) createAccount(uid string) sdk.AccAddress { + // Create new account in the keyring. + k, _, err := s.cctx.Keyring.NewMnemonic(uid, keyring.English, sdk.FullFundraiserPath, keyring.DefaultBIP39Passphrase, hd.Secp256k1) + s.Require().NoError(err) + + addr, err := k.GetAddress() + s.Require().NoError(err) + + return addr +} + +func (s *AuthzCLITestSuite) msgSendExec(grantee sdk.AccAddress) { + val := s.addrs[0] + + // Send some funds to the new account. + out, err := clitestutil.ExecSend( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + val.String(), + grantee.String(), + sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(200))).String(), + ). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10))))...) + s.Require().NoError(err) + s.Require().Contains(out.String(), `"code":0`) +} + +func (s *AuthzCLITestSuite) TestCLITxGrantAuthorization() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + grantee := s.grantee[0] + + twoHours := time.Now().Add(time.Minute * 120).Unix() + pastHour := time.Now().Add(-time.Minute * 60).Unix() + + testCases := []struct { + name string + args []string + expectErr bool + expErrMsg string + }{ + { + "Invalid granter Address", + cli.TestFlags(). + With( + "grantee_addr", + "send", + ). + WithSpendLimit("100uakt"). + WithFrom("granter"). + WithGenerateOnly(). + WithExpiration(fmt.Sprintf("%d", twoHours)), + true, + "key not found", + }, + { + "Invalid grantee Address", + cli.TestFlags(). + With( + "grantee_addr", + "send", + ). + WithSpendLimit("100uakt"). + WithFrom(val[0].Address.String()). + WithGenerateOnly(). + WithExpiration(fmt.Sprintf("%d", twoHours)), + true, + "invalid separator index", + }, + { + "Invalid expiration time", + cli.TestFlags(). + With( + grantee.String(), + "send", + ). + WithSpendLimit("100uakt"). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", pastHour)), + true, + "", + }, + { + "fail with error invalid msg-type", + cli.TestFlags(). + With( + grantee.String(), + "generic", + ). + WithMsgType("invalid-msg-type"). + WithFrom(val[0].Address.String()). 
+ WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithExpiration(fmt.Sprintf("%d", twoHours)), + false, + "", + }, + { + "invalid bond denom for tx delegate authorization allowed validators", + cli.TestFlags(). + With( + grantee.String(), + "delegate", + ). + WithSpendLimit("100xyz"). + WithSkipConfirm(). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", twoHours)). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithAllowedValidators(sdk.ValAddress(s.addrs[0]).String()), + true, + "invalid denom", + }, + { + "invalid bond denom for tx delegate authorization deny validators", + cli.TestFlags(). + With( + grantee.String(), + "delegate", + ). + WithSpendLimit("100xyz"). + WithSkipConfirm(). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", twoHours)). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithDenyValidators(sdk.ValAddress(s.addrs[0]).String()), + true, + "invalid denom", + }, + { + "invalid bond denom for tx undelegate authorization", + cli.TestFlags(). + With( + grantee.String(), + "unbond", + ). + WithSpendLimit("100xyz"). + WithSkipConfirm(). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", twoHours)). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithAllowedValidators(sdk.ValAddress(s.addrs[0]).String()), + true, + "invalid denom", + }, + { + "invalid bond denom for tx redelegate authorization", + cli.TestFlags(). + With( + grantee.String(), + "redelegate", + ). + WithSpendLimit("100xyz"). + WithSkipConfirm(). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", twoHours)). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithAllowedValidators(sdk.ValAddress(s.addrs[0]).String()), + true, + "invalid denom", + }, + { + "invalid decimal coin expression with more than single coin", + cli.TestFlags(). + With( + grantee.String(), + "delegate", + ). + WithSpendLimit("100uakt,20xyz"). + WithSkipConfirm(). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", twoHours)). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithAllowedValidators(sdk.ValAddress(s.addrs[0]).String()), + true, + "invalid decimal coin expression", + }, + { + "Valid tx send authorization", + cli.TestFlags(). + With( + grantee.String(), + "send", + ). + WithSpendLimit("100uakt"). + WithSkipConfirm(). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", twoHours)). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + false, + "", + }, + { + "Valid tx send authorization with allow list", + cli.TestFlags(). + With( + grantee.String(), + "send", + ). + WithSpendLimit("100uakt"). + WithSkipConfirm(). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", twoHours)). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithAllowList(s.grantee[1].String()), + false, + "", + }, + { + "Invalid tx send authorization with duplicate allow list", + cli.TestFlags(). + With( + grantee.String(), + "send", + ). + WithSpendLimit("100uakt"). + WithSkipConfirm(). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", twoHours)). 
+ WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithAllowList(fmt.Sprintf("%s,%s", s.grantee[1], s.grantee[1])), + true, + "duplicate entry", + }, + { + "Valid tx generic authorization", + cli.TestFlags(). + With( + grantee.String(), + "generic", + ). + WithMsgType(typeMsgVote). + WithSkipConfirm(). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", twoHours)). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + false, + "", + }, + { + "fail when granter = grantee", + cli.TestFlags(). + With( + grantee.String(), + "generic", + ). + WithMsgType(typeMsgVote). + WithSkipConfirm(). + WithFrom(grantee.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", twoHours)). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + true, + "grantee and granter should be different", + }, + { + "Valid tx with amino", + cli.TestFlags(). + With( + grantee.String(), + "generic", + ). + WithMsgType(typeMsgVote). + WithSkipConfirm(). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", twoHours)). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithSignMode(cflags.SignModeLegacyAminoJSON), + false, + "", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + out, err := authzclitestutil.CreateGrant(s.cctx, + tc.args, + ) + if tc.expectErr { + s.Require().Error(err, out) + s.Require().Contains(err.Error(), tc.expErrMsg) + } else { + var txResp sdk.TxResponse + s.Require().NoError(err) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), &txResp), out.String()) + } + }) + } +} + +func (s *AuthzCLITestSuite) TestCmdRevokeAuthorizations() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + grantee := s.grantee[0] + twoHours := time.Now().Add(time.Minute * time.Duration(120)).Unix() + + // send-authorization + _, err := clitestutil.ExecCreateGrant( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + grantee.String(), + "send", + ). + WithSpendLimit("100uakt"). + WithSkipConfirm(). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", twoHours)). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10))))...) + s.Require().NoError(err) + + // generic-authorization + _, err = clitestutil.ExecCreateGrant( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + grantee.String(), + "generic", + ). + WithMsgType(typeMsgVote). + WithSkipConfirm(). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", twoHours)). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10))))...) + s.Require().NoError(err) + + // generic-authorization used for amino testing + _, err = clitestutil.ExecCreateGrant( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + grantee.String(), + "generic", + ). + WithMsgType(typeMsgSubmitProposal). + WithSkipConfirm(). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", twoHours)). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithSignMode(cflags.SignModeLegacyAminoJSON)...) + s.Require().NoError(err) + testCases := []struct { + name string + args []string + respType proto.Message + expectErr bool + }{ + { + "invalid grantee address", + cli.TestFlags(). + With( + "invalid grantee", + typeMsgSend, + ). + WithFrom(val[0].Address.String()). 
+ WithGenerateOnly(), + nil, + true, + }, + { + "invalid granter address", + cli.TestFlags(). + With( + grantee.String(), + typeMsgSend, + ). + WithFrom("granter"). + WithGenerateOnly(), + nil, + true, + }, + { + "Valid tx send authorization", + cli.TestFlags(). + With( + grantee.String(), + typeMsgSend, + ). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + &sdk.TxResponse{}, + false, + }, + { + "Valid tx generic authorization", + cli.TestFlags(). + With( + grantee.String(), + typeMsgVote, + ). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + &sdk.TxResponse{}, + false, + }, + { + "Valid tx with amino", + cli.TestFlags(). + With( + grantee.String(), + typeMsgSubmitProposal, + ). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithSignMode(cflags.SignModeLegacyAminoJSON), + &sdk.TxResponse{}, + false, + }, + } + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxAuthzRevokeAuthorizationCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + } + }) + } +} + +func (s *AuthzCLITestSuite) TestExecAuthorizationWithExpiration() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + grantee := s.grantee[0] + tenSeconds := time.Now().Add(time.Second * time.Duration(10)).Unix() + + _, err := clitestutil.ExecCreateGrant( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + grantee.String(), + "generic", + ). + WithMsgType(typeMsgVote). + WithSkipConfirm(). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", tenSeconds)). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10))))...) + s.Require().NoError(err) + // msg vote + voteTx := fmt.Sprintf(`{"body":{"messages":[{"@type":"/cosmos.gov.v1.MsgVote","proposal_id":"1","voter":"%s","option":"VOTE_OPTION_YES"}],"memo":"","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":[]}`, val[0].Address.String()) + execMsg := testutil.WriteToNewTempFile(s.T(), voteTx) + defer func() { + _ = execMsg.Close() + }() + + // waiting for authorization to expire + time.Sleep(12 * time.Second) + + cmd := cli.GetTxAuthzExecAuthorizationCmd() + + out, err := clitestutil.ExecTestCLICmd( + context.Background(), + s.cctx, + cmd, + cli.TestFlags(). + With( + execMsg.Name(), + ). + WithFrom(grantee.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10))))...) + s.Require().NoError(err) + var response sdk.TxResponse + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), &response), out.String()) +} + +func (s *AuthzCLITestSuite) TestNewExecGenericAuthorized() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + grantee := s.grantee[0] + twoHours := time.Now().Add(time.Minute * time.Duration(120)).Unix() + + _, err := clitestutil.ExecCreateGrant( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + grantee.String(), + "generic", + ). 
+ WithMsgType(typeMsgVote). + WithSkipConfirm(). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithExpiration(fmt.Sprintf("%d", twoHours)). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10))))...) + s.Require().NoError(err) + + // msg vote + voteTx := fmt.Sprintf(`{"body":{"messages":[{"@type":"/cosmos.gov.v1.MsgVote","proposal_id":"1","voter":"%s","option":"VOTE_OPTION_YES"}],"memo":"","timeout_height":"0","extension_options":[],"non_critical_extension_options":[]},"auth_info":{"signer_infos":[],"fee":{"amount":[],"gas_limit":"200000","payer":"","granter":""}},"signatures":[]}`, val[0].Address.String()) + execMsg := testutil.WriteToNewTempFile(s.T(), voteTx) + defer func() { + _ = execMsg.Close() + }() + + testCases := []struct { + name string + args []string + respType proto.Message + expectErr bool + }{ + { + "fail invalid grantee", + cli.TestFlags(). + With( + execMsg.Name(), + ). + WithFrom("grantee"). + WithBroadcastModeSync(). + WithGenerateOnly(), + nil, + true, + }, + { + "fail invalid json path", + cli.TestFlags(). + With( + "/invalid/file.txt", + ). + WithFrom(grantee.String()). + WithBroadcastModeSync(), + nil, + true, + }, + { + "valid txn", + cli.TestFlags(). + With( + execMsg.Name(), + ). + WithFrom(grantee.String()). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + &sdk.TxResponse{}, + false, + }, + { + "valid tx with amino", + cli.TestFlags(). + With( + execMsg.Name(), + ). + WithFrom(grantee.String()). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithSignMode(cflags.SignModeLegacyAminoJSON), + &sdk.TxResponse{}, + false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxAuthzExecAuthorizationCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + } + }) + } +} + +func (s *AuthzCLITestSuite) TestNewExecGrantAuthorized() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + grantee := s.grantee[0] + twoHours := time.Now().Add(time.Minute * time.Duration(120)).Unix() + + _, err := clitestutil.ExecCreateGrant( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + grantee.String(), + "send", + ). + WithSpendLimit("12uakt"). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithExpiration(fmt.Sprintf("%d", twoHours))...) + s.Require().NoError(err) + + tokens := sdk.NewCoins( + sdk.NewCoin("testtoken", sdkmath.NewInt(12)), + ) + + normalGeneratedTx, err := clitestutil.ExecSend( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + val[0].Address.String(), + grantee.String(), + tokens.String(), + ). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithGenerateOnly()...) + s.Require().NoError(err) + execMsg := testutil.WriteToNewTempFile(s.T(), normalGeneratedTx.String()) + defer func() { + _ = execMsg.Close() + }() + + testCases := []struct { + name string + args []string + expectErr bool + expectErrMsg string + }{ + { + "valid txn", + cli.TestFlags(). + With( + execMsg.Name(), + ). + WithFrom(grantee.String()). + WithBroadcastModeSync(). + WithSkipConfirm(). 
+ WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + false, + "", + }, + { + "error over spent", + cli.TestFlags(). + With( + execMsg.Name(), + ). + WithFrom(grantee.String()). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + false, + "", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxAuthzExecAuthorizationCmd() + cctx := s.cctx + + var response sdk.TxResponse + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + + switch { + case tc.expectErrMsg != "": + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), &response), out.String()) + s.Require().Contains(response.RawLog, tc.expectErrMsg) + + case tc.expectErr: + s.Require().Error(err) + + default: + s.Require().NoError(err) + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), &response), out.String()) + } + }) + } +} + +func (s *AuthzCLITestSuite) TestExecSendAuthzWithAllowList() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + grantee := s.grantee[3] + + allowedAddr := s.grantee[4] + notAllowedAddr := s.grantee[5] + twoHours := time.Now().Add(time.Minute * time.Duration(120)).Unix() + + _, err := clitestutil.ExecCreateGrant( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + grantee.String(), + "send", + ). + WithSpendLimit("100uakt"). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithExpiration(fmt.Sprintf("%d", twoHours)). + WithAllowList(allowedAddr.String())...) + s.Require().NoError(err) + + tokens := sdk.NewCoins( + sdk.NewCoin("stake", sdkmath.NewInt(12)), + ) + + validGeneratedTx, err := clitestutil.ExecSend( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + val[0].Address.String(), + allowedAddr.String(), + tokens.String(), + ). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithGenerateOnly()...) + + s.Require().NoError(err) + execMsg := testutil.WriteToNewTempFile(s.T(), validGeneratedTx.String()) + defer func() { + _ = execMsg.Close() + }() + + invalidGeneratedTx, err := clitestutil.ExecSend( + context.Background(), + s.cctx, + cli.TestFlags(). + With( + val[0].Address.String(), + notAllowedAddr.String(), + tokens.String(), + ). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithGenerateOnly()...) + s.Require().NoError(err) + execMsg1 := testutil.WriteToNewTempFile(s.T(), invalidGeneratedTx.String()) + defer func() { + _ = execMsg1.Close() + }() + + // test sending to allowed address + args := cli.TestFlags(). + With( + execMsg.Name(), + ). + WithFrom(grantee.String()). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))) + + var response sdk.TxResponse + + cmd := cli.GetTxAuthzExecAuthorizationCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, args...) + s.Require().NoError(err) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), &response), out.String()) + + // test sending to not allowed address + args = cli.TestFlags(). + With( + execMsg1.Name(), + ). + WithFrom(grantee.String()). + WithBroadcastModeSync(). + WithSkipConfirm(). 
+ WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))) + + out, err = clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, args...) + s.Require().NoError(err) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), &response), out.String()) +} diff --git a/go/cli/bank_query.go b/go/cli/bank_query.go new file mode 100644 index 00000000..16c980c5 --- /dev/null +++ b/go/cli/bank_query.go @@ -0,0 +1,319 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/bank/types" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetQueryBankCmd returns the parent command for all x/bank CLi query commands. The +// provided cctx should have, at a minimum, a verifier, Tendermint RPC client, +// and marshaler set. +func GetQueryBankCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Querying commands for the bank module", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetQueryBankBalancesCmd(), + GetQueryBankSpendableBalancesCmd(), + GetQueryBankTotalSupplyCmd(), + GetQueryBankDenomsMetadataCmd(), + GetQueryBankSendEnabledCmd(), + ) + + return cmd +} + +func GetQueryBankBalancesCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "balances [address]", + Short: "Query for account balances by address", + Long: strings.TrimSpace( + fmt.Sprintf(`Query the total balance of an account or of a specific denomination. + +Example: + $ %s query %s balances [address] + $ %s query %s balances [address] --denom=[denom] +`, + version.AppName, types.ModuleName, version.AppName, types.ModuleName, + ), + ), + Args: cobra.ExactArgs(1), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + denom, err := cmd.Flags().GetString(cflags.FlagDenom) + if err != nil { + return err + } + + addr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + if denom == "" { + params := types.NewQueryAllBalancesRequest(addr, pageReq) + + res, err := cl.Query().Bank().AllBalances(ctx, params) + if err != nil { + return err + } + + return cl.PrintMessage(&res) + } + + params := types.NewQueryBalanceRequest(addr, denom) + + res, err := cl.Query().Bank().Balance(ctx, params) + if err != nil { + return err + } + + return cl.PrintMessage(&res.Balance) + }, + } + + cmd.Flags().String(cflags.FlagDenom, "", "The specific balance denomination to query for") + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "all balances") + + return cmd +} + +func GetQueryBankSpendableBalancesCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "spendable-balances [address]", + Short: "Query for account spendable balances by address", + Example: fmt.Sprintf("$ %s query %s spendable-balances [address]", version.AppName, types.ModuleName), + Args: cobra.ExactArgs(1), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + denom, err := cmd.Flags().GetString(cflags.FlagDenom) + if err != nil { + return err + } + + addr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + pageReq, err := 
client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + if denom == "" { + params := types.NewQuerySpendableBalancesRequest(addr, pageReq) + + res, err := cl.Query().Bank().SpendableBalances(ctx, params) + if err != nil { + return err + } + + return cl.PrintMessage(res) + } + + params := types.NewQuerySpendableBalanceByDenomRequest(addr, denom) + + res, err := cl.Query().Bank().SpendableBalanceByDenom(ctx, params) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cmd.Flags().String(cflags.FlagDenom, "", "The specific balance denomination to query for") + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "spendable balances") + + return cmd +} + +// GetQueryBankDenomsMetadataCmd defines the cobra command to query client denomination metadata. +func GetQueryBankDenomsMetadataCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "denom-metadata", + Short: "Query the client metadata for coin denominations", + Long: strings.TrimSpace( + fmt.Sprintf(`Query the client metadata for all the registered coin denominations + +Example: + To query for the client metadata of all coin denominations use: + $ %s query %s denom-metadata + +To query for the client metadata of a specific coin denomination use: + $ %s query %s denom-metadata --denom=[denom] +`, + version.AppName, types.ModuleName, version.AppName, types.ModuleName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + denom, err := cmd.Flags().GetString(cflags.FlagDenom) + if err != nil { + return err + } + + if denom == "" { + res, err := cl.Query().Bank().DenomsMetadata(cmd.Context(), &types.QueryDenomsMetadataRequest{}) + if err != nil { + return err + } + + return cl.PrintMessage(res) + } + + res, err := cl.Query().Bank().DenomMetadata(cmd.Context(), &types.QueryDenomMetadataRequest{Denom: denom}) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cmd.Flags().String(cflags.FlagDenom, "", "The specific denomination to query client metadata for") + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +func GetQueryBankTotalSupplyCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "total", + Short: "Query the total supply of coins of the chain", + Args: cobra.NoArgs, + Long: strings.TrimSpace( + fmt.Sprintf(`Query total supply of coins that are held by accounts in the chain. 
+ +Example: + $ %s query %s total + +To query for the total supply of a specific coin denomination use: + $ %s query %s total --denom=[denom] +`, + version.AppName, types.ModuleName, version.AppName, types.ModuleName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + denom, err := cmd.Flags().GetString(cflags.FlagDenom) + if err != nil { + return err + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + if denom == "" { + res, err := cl.Query().Bank().TotalSupply(ctx, &types.QueryTotalSupplyRequest{Pagination: pageReq}) + if err != nil { + return err + } + + return cl.PrintMessage(res) + } + + res, err := cl.Query().Bank().SupplyOf(ctx, &types.QuerySupplyOfRequest{Denom: denom}) + if err != nil { + return err + } + + return cl.PrintMessage(&res.Amount) + }, + } + + cmd.Flags().String(cflags.FlagDenom, "", "The specific balance denomination to query for") + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "all supply totals") + + return cmd +} + +func GetQueryBankSendEnabledCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "send-enabled [denom1 ...]", + Short: "Query for send enabled entries", + Long: strings.TrimSpace(`Query for send enabled entries that have been specifically set. + +To look up one or more specific denoms, supply them as arguments to this command. +To look up all denoms, do not provide any arguments. +`, + ), + Example: strings.TrimSpace( + fmt.Sprintf(`Getting one specific entry: + $ %[1]s query %[2]s send-enabled foocoin + +Getting two specific entries: + $ %[1]s query %[2]s send-enabled foocoin barcoin + +Getting all entries: + $ %[1]s query %[2]s send-enabled +`, + version.AppName, types.ModuleName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + reqPag, err := client.ReadPageRequest(client.MustFlagSetWithPageKeyDecoded(cmd.Flags())) + if err != nil { + return err + } + + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + req := &types.QuerySendEnabledRequest{ + Denoms: args, + Pagination: reqPag, + } + + res, err := cl.Query().Bank().SendEnabled(cmd.Context(), req) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "send enabled entries") + + return cmd +} diff --git a/go/cli/bank_query_test.go b/go/cli/bank_query_test.go new file mode 100644 index 00000000..0e236120 --- /dev/null +++ b/go/cli/bank_query_test.go @@ -0,0 +1,374 @@ +package cli_test + +import ( + "context" + "fmt" + + abci "github.com/cometbft/cometbft/abci/types" + "github.com/cosmos/gogoproto/proto" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/bank/types" + + "pkg.akt.dev/go/cli" + cflags "pkg.akt.dev/go/cli/flags" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +func (s *BankCLITestSuite) TestGetBalancesCmd() { + accounts := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + testCases := []struct { + name string + ctxGen func() client.Context + args []string + expectResult proto.Message + expectErr bool + }{ + { + "valid query", + func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&types.QueryAllBalancesResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return 
s.baseCtx.WithClient(c) + }, + cli.TestFlags(). + With(accounts[0].Address.String()). + WithOutputJSON(), + &types.QueryAllBalancesResponse{}, + false, + }, + { + "valid query with denom", + func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&types.QueryBalanceResponse{ + Balance: &sdk.Coin{}, + }) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return s.baseCtx.WithClient(c) + }, + cli.TestFlags(). + With(accounts[0].Address.String()). + WithDenom("photon"). + WithOutputJSON(), + &sdk.Coin{}, + false, + }, + { + "invalid Address", + func() client.Context { + return s.baseCtx + }, + cli.TestFlags(). + With("foo"), + nil, + true, + }, + { + "invalid denom", + func() client.Context { + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Code: 1, + }) + return s.baseCtx.WithClient(c) + }, + cli.TestFlags(). + With(accounts[0].Address.String()). + WithDenom("foo"). + WithOutputJSON(), + nil, + true, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryBankBalancesCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), tc.ctxGen(), cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(s.encCfg.Codec.UnmarshalJSON(out.Bytes(), tc.expectResult)) + } + }) + } +} + +func (s *BankCLITestSuite) TestGetSpendableBalancesCmd() { + accounts := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + testCases := []struct { + name string + ctxGen func() client.Context + args []string + expectResult proto.Message + expectErr bool + }{ + { + "valid query", + func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&types.QuerySpendableBalancesResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return s.baseCtx.WithClient(c) + }, + []string{ + accounts[0].Address.String(), + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + &types.QuerySpendableBalancesResponse{}, + false, + }, + { + "valid query with denom flag", + func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&types.QuerySpendableBalanceByDenomRequest{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return s.baseCtx.WithClient(c) + }, + []string{ + accounts[0].Address.String(), + fmt.Sprintf("--%s=json", cflags.FlagOutput), + fmt.Sprintf("--%s=photon", cflags.FlagDenom), + }, + &types.QuerySpendableBalanceByDenomResponse{}, + false, + }, + { + "invalid Address", + func() client.Context { + return s.baseCtx + }, + []string{ + "foo", + }, + nil, + true, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryBankSpendableBalancesCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), tc.ctxGen(), cmd, tc.args...) 
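// (Editor note: illustrative sketch, not part of this change.) The ctxGen pattern in the
// cases above pre-marshals the expected response and hands it to a mock Tendermint RPC,
// so the command decodes that payload instead of querying a live node. The same
// mock-backed context can drive the balances command outside the table as well; the
// helper names below mirror the ones already used in this file:
//
//	bz, _ := s.encCfg.Codec.Marshal(&types.QueryAllBalancesResponse{})
//	cctx := s.baseCtx.WithClient(clitestutil.NewMockTendermintRPC(abci.ResponseQuery{Value: bz}))
//	out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx,
//		cli.GetQueryBankBalancesCmd(),
//		cli.TestFlags().With(accounts[0].Address.String()).WithOutputJSON()...)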
+ + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(s.encCfg.Codec.UnmarshalJSON(out.Bytes(), tc.expectResult)) + } + }) + } +} + +func (s *BankCLITestSuite) TestGetCmdDenomsMetadata() { + testCases := []struct { + name string + ctxGen func() client.Context + args []string + expectResult proto.Message + expectErr bool + }{ + { + "valid query", + func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&types.QueryDenomsMetadataResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return s.baseCtx.WithClient(c) + }, + []string{ + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + &types.QueryDenomsMetadataResponse{}, + false, + }, + { + "valid query with denom", + func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&types.QueryDenomMetadataResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return s.baseCtx.WithClient(c) + }, + []string{ + fmt.Sprintf("--%s=photon", cflags.FlagDenom), + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + &types.QueryDenomMetadataResponse{}, + false, + }, + { + "invalid query with denom", + func() client.Context { + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Code: 1, + }) + return s.baseCtx.WithClient(c) + }, + []string{ + fmt.Sprintf("--%s=foo", cflags.FlagDenom), + }, + nil, + true, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryBankDenomsMetadataCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), tc.ctxGen(), cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(s.encCfg.Codec.UnmarshalJSON(out.Bytes(), tc.expectResult)) + s.Require().NoError(err) + } + }) + } +} + +func (s *BankCLITestSuite) TestGetCmdQueryTotalSupply() { + testCases := []struct { + name string + ctxGen func() client.Context + args []string + expectResult proto.Message + expectErr bool + }{ + { + "valid query", + func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&types.QueryTotalSupplyResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return s.baseCtx.WithClient(c) + }, + []string{ + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + &types.QueryTotalSupplyResponse{}, + false, + }, + { + "valid query with denom", + func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&types.QuerySupplyOfResponse{ + Amount: sdk.Coin{}, + }) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return s.baseCtx.WithClient(c) + }, + []string{ + fmt.Sprintf("--%s=photon", cflags.FlagDenom), + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + &sdk.Coin{}, + false, + }, + { + "invalid query with denom", + func() client.Context { + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Code: 1, + }) + return s.baseCtx.WithClient(c) + }, + []string{ + fmt.Sprintf("--%s=foo", cflags.FlagDenom), + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + nil, + true, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryBankTotalSupplyCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), tc.ctxGen(), cmd, tc.args...) 
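// (Editor note: illustrative sketch, not part of this change.) This suite builds its
// argument slices by hand; assuming the cli.TestFlags() helpers behave as they do in
// TestGetBalancesCmd above, the "valid query with denom flag" case could be written
// equivalently as:
//
//	args := cli.TestFlags().
//		With(accounts[0].Address.String()).
//		WithDenom("photon").
//		WithOutputJSON()
//
// i.e. the builder is shorthand for the []string{address, "--denom=...", "--output=json"}
// form used here.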
+ if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(s.encCfg.Codec.UnmarshalJSON(out.Bytes(), tc.expectResult)) + s.Require().NoError(err) + } + }) + } +} + +func (s *BankCLITestSuite) TestGetCmdQuerySendEnabled() { + testCases := []struct { + name string + ctxGen func() client.Context + args []string + expectResult proto.Message + expectErr bool + }{ + { + "valid query", + func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&types.QuerySendEnabledResponse{ + SendEnabled: []*types.SendEnabled{}, + }) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return s.baseCtx.WithClient(c) + }, + []string{ + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + &types.QuerySendEnabledResponse{}, + false, + }, + { + "valid query with denoms", + func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&types.QuerySendEnabledResponse{ + SendEnabled: []*types.SendEnabled{}, + }) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return s.baseCtx.WithClient(c) + }, + []string{ + "photon", + "stake", + fmt.Sprintf("--%s=json", cflags.FlagOutput), + }, + &types.QuerySendEnabledResponse{}, + false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryBankSendEnabledCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), tc.ctxGen(), cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(s.encCfg.Codec.UnmarshalJSON(out.Bytes(), tc.expectResult)) + s.Require().NoError(err) + } + }) + } +} diff --git a/go/cli/bank_suite_test.go b/go/cli/bank_suite_test.go new file mode 100644 index 00000000..c6b7ca0d --- /dev/null +++ b/go/cli/bank_suite_test.go @@ -0,0 +1,46 @@ +package cli_test + +import ( + "bytes" + "io" + + abci "github.com/cometbft/cometbft/abci/types" + rpcclientmock "github.com/cometbft/cometbft/rpc/client/mock" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli" + sdk "github.com/cosmos/cosmos-sdk/types" + testutilmod "github.com/cosmos/cosmos-sdk/types/module/testutil" + "github.com/cosmos/cosmos-sdk/x/bank" +) + +type BankCLITestSuite struct { + CLITestSuite +} + +func (s *BankCLITestSuite) SetupSuite() { + s.encCfg = testutilmod.MakeTestEncodingConfig(bank.AppModuleBasic{}) + s.kr = keyring.NewInMemory(s.encCfg.Codec) + s.baseCtx = client.Context{}. + WithKeyring(s.kr). + WithTxConfig(s.encCfg.TxConfig). + WithCodec(s.encCfg.Codec). + WithLegacyAmino(s.encCfg.Amino). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). + WithSignModeStr("direct") + + var outBuf bytes.Buffer + ctxGen := func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&sdk.TxResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return s.baseCtx.WithClient(c) + } + + s.cctx = ctxGen(). + WithOutput(&outBuf). 
+ WithSignModeStr("direct") +} diff --git a/go/cli/bank_tx.go b/go/cli/bank_tx.go new file mode 100644 index 00000000..35fd23ef --- /dev/null +++ b/go/cli/bank_tx.go @@ -0,0 +1,163 @@ +package cli + +import ( + "fmt" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/bank/types" + "github.com/spf13/cobra" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetTxBankCmd returns a root CLI command handler for all x/bank transaction commands. +func GetTxBankCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Bank transaction subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetTxBankSendTxCmd(), + GetTxBankMultiSendTxCmd(), + ) + + return cmd +} + +// GetTxBankSendTxCmd returns a CLI command handler for creating a MsgSend transaction. +func GetTxBankSendTxCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "send [to_address] [amount]", + Short: "Send funds from one account to another.", + Long: `Send funds from one account to another. +Note, the '--from' flag is ignored as it is implied from [from_key_or_address] +When using '--dry-run' a key name cannot be used, only a bech32 address. +`, + Args: cobra.ExactArgs(3), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := cmd.Flags().Set(flags.FlagFrom, args[0]); err != nil { + return err + } + + return TxPersistentPreRunE(cmd, args) + }, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + + cl := MustClientFromContext(ctx) + + toAddr, err := sdk.AccAddressFromBech32(args[1]) + if err != nil { + return err + } + + coins, err := sdk.ParseCoinsNormalized(args[2]) + if err != nil { + return err + } + + msg := types.NewMsgSend(cl.ClientContext().GetFromAddress(), toAddr, coins) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// GetTxBankMultiSendTxCmd returns a CLI command handler for creating a MsgMultiSend transaction. +// For a better UX this command is limited to send funds from one account to two or more accounts. +func GetTxBankMultiSendTxCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "multi-send [from_key_or_address] [to_address_1 to_address_2 ...] [amount]", + Short: "Send funds from one account to two or more accounts.", + Long: `Send funds from one account to two or more accounts. +By default, sends the [amount] to each address of the list. +Using the '--split' flag, the [amount] is split equally between the addresses. +Note, the '--from' flag is ignored as it is implied from [from_key_or_address] and +separate addresses with space. +When using '--dry-run' a key name cannot be used, only a bech32 address.`, + Example: fmt.Sprintf("%s tx bank multi-send cosmos1... cosmos1... cosmos1... cosmos1... 
10stake", version.AppName), + Args: cobra.MinimumNArgs(4), + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := cmd.Flags().Set(flags.FlagFrom, args[0]); err != nil { + return err + } + + return TxPersistentPreRunE(cmd, args) + }, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + + coins, err := sdk.ParseCoinsNormalized(args[len(args)-1]) + if err != nil { + return err + } + + if coins.IsZero() { + return fmt.Errorf("must send positive amount") + } + + split, err := cmd.Flags().GetBool(cflags.FlagSplit) + if err != nil { + return err + } + + totalAddrs := sdk.NewInt(int64(len(args) - 2)) + // coins to be received by the addresses + sendCoins := coins + if split { + sendCoins = coins.QuoInt(totalAddrs) + } + + var output []types.Output + for _, arg := range args[1 : len(args)-1] { + toAddr, err := sdk.AccAddressFromBech32(arg) + if err != nil { + return err + } + + output = append(output, types.NewOutput(toAddr, sendCoins)) + } + + // amount to be sent from the from address + var amount sdk.Coins + if split { + // user input: 1000stake to send to 3 addresses + // actual: 333stake to each address (=> 999stake actually sent) + amount = sendCoins.MulInt(totalAddrs) + } else { + amount = coins.MulInt(totalAddrs) + } + + msg := types.NewMsgMultiSend([]types.Input{types.NewInput(cl.ClientContext().FromAddress, amount)}, output) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cmd.Flags().Bool(cflags.FlagSplit, false, "Send the equally split token amount to each address") + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/go/cli/bank_tx_test.go b/go/cli/bank_tx_test.go new file mode 100644 index 00000000..97cc060c --- /dev/null +++ b/go/cli/bank_tx_test.go @@ -0,0 +1,187 @@ +package cli_test + +import ( + "context" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + + "pkg.akt.dev/go/cli" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +func (s *BankCLITestSuite) TestSendTxCmd() { + accounts := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + commonArgs := cli.TestFlags(). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("photon", sdk.NewInt(10)))). + WithChainID("test-chain") + + testCases := []struct { + name string + ctxGen func() client.Context + args []string + expectErr bool + }{ + { + "valid transaction", + func() client.Context { + return s.baseCtx + }, + cli.TestFlags(). + With( + accounts[0].Address.String(), + accounts[0].Address.String(), + sdk.NewCoins( + sdk.NewCoin("uakt", sdk.NewInt(10)), + sdk.NewCoin("photon", sdk.NewInt(40)), + ).String()). + Append(commonArgs), + false, + }, + { + "invalid to Address", + func() client.Context { + return s.baseCtx + }, + cli.TestFlags(). + With( + accounts[0].Address.String(), + sdk.AccAddress{}.String(), + sdk.NewCoins( + sdk.NewCoin("uakt", sdk.NewInt(10)), + sdk.NewCoin("photon", sdk.NewInt(40)), + ).String()). + Append(commonArgs), + true, + }, + { + "invalid coins", + func() client.Context { + return s.baseCtx + }, + cli.TestFlags(). + With( + accounts[0].Address.String(), + accounts[0].Address.String(), + ). 
+ Append(commonArgs), + true, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cctx := tc.ctxGen() + + cmd := cli.GetTxBankSendTxCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + var response sdk.TxResponse + s.Require().NoError(err) + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), &response), out.String()) + } + }) + } +} + +func (s *BankCLITestSuite) TestMultiSendTxCmd() { + accounts := testutil.CreateKeyringAccounts(s.T(), s.kr, 3) + + commonArgs := cli.TestFlags(). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("photon", sdk.NewInt(10)))). + WithChainID("test-chain") + + testCases := []struct { + name string + ctxGen func() client.Context + args []string + expectErr bool + }{ + { + "valid transaction", + func() client.Context { + return s.baseCtx + }, + cli.TestFlags(). + With( + accounts[0].Address.String(), + accounts[1].Address.String(), + accounts[2].Address.String(), + sdk.NewCoins( + sdk.NewCoin("uakt", sdk.NewInt(10)), + sdk.NewCoin("photon", sdk.NewInt(40))).String()). + Append(commonArgs), + false, + }, + { + "invalid from Address", + func() client.Context { + return s.baseCtx + }, + cli.TestFlags(). + With( + "foo", + accounts[1].Address.String(), + accounts[2].Address.String(), + sdk.NewCoins( + sdk.NewCoin("uakt", sdk.NewInt(10)), + sdk.NewCoin("photon", sdk.NewInt(40))).String()). + Append(commonArgs), + + true, + }, + { + "invalid recipients", + func() client.Context { + return s.baseCtx + }, + cli.TestFlags(). + With( + accounts[0].Address.String(), + accounts[1].Address.String(), + "bar", + sdk.NewCoins( + sdk.NewCoin("uakt", sdk.NewInt(10)), + sdk.NewCoin("photon", sdk.NewInt(40))).String()). + Append(commonArgs), + true, + }, + { + "invalid amount", + func() client.Context { + return s.baseCtx + }, + cli.TestFlags(). + With( + accounts[0].Address.String(), + accounts[1].Address.String(), + accounts[2].Address.String()). + Append(commonArgs), + true, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cctx := tc.ctxGen() + + cmd := cli.GetTxBankMultiSendTxCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + var response sdk.TxResponse + s.Require().NoError(err) + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), &response), out.String()) + } + }) + } +} diff --git a/go/cli/bech32.go b/go/cli/bech32.go new file mode 100644 index 00000000..d8d0c8c3 --- /dev/null +++ b/go/cli/bech32.go @@ -0,0 +1,48 @@ +package cli + +import ( + "github.com/cosmos/cosmos-sdk/types/bech32" + "github.com/spf13/cobra" + + "pkg.akt.dev/go/sdkutil" +) + +var flagBech32Prefix = "prefix" + +// ConvertBech32Cmd get cmd to convert any bech32 address to an akash prefix. 
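// (Editor note: illustrative sketch, not part of this change.) The conversion performed
// by the command below is a plain re-encode of the address payload: decode the input
// bech32 string with whatever prefix it carries, then re-encode the raw bytes under the
// target prefix, using the same SDK helpers the RunE calls:
//
//	_, bz, err := bech32.DecodeAndConvert(addr) // addr: any bech32 string (placeholder)
//	if err != nil {
//		return err
//	}
//	converted, err := bech32.ConvertAndEncode(sdkutil.Bech32PrefixAccAddr, bz)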
+func ConvertBech32Cmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "bech32-convert [bech32 string]", + Short: "Convert any bech32 string to the akash prefix", + Long: `Convert any bech32 string to the akash prefix +Especially useful for converting cosmos addresses to akash addresses +Example: + akash bech32-convert akash1ey69r37gfxvxg62sh4r0ktpuc46pzjrmz29g45 + `, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + bech32prefix, err := cmd.Flags().GetString(flagBech32Prefix) + if err != nil { + return err + } + + _, bz, err := bech32.DecodeAndConvert(args[0]) + if err != nil { + return err + } + + bech32Addr, err := bech32.ConvertAndEncode(bech32prefix, bz) + if err != nil { + panic(err) + } + + cmd.Println(bech32Addr) + + return nil + }, + } + + cmd.Flags().StringP(flagBech32Prefix, "p", sdkutil.Bech32PrefixAccAddr, "Bech32 Prefix to encode to") + + return cmd +} diff --git a/go/cli/broadcast.go b/go/cli/broadcast.go new file mode 100644 index 00000000..0fc074ac --- /dev/null +++ b/go/cli/broadcast.go @@ -0,0 +1,59 @@ +package cli + +import ( + "errors" + "strings" + + "github.com/spf13/cobra" + + authclient "github.com/cosmos/cosmos-sdk/x/auth/client" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetBroadcastCommand returns the tx broadcast command. +func GetBroadcastCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "broadcast [file_path]", + Short: "Broadcast transactions generated offline", + Long: strings.TrimSpace(`Broadcast transactions created with the --generate-only +flag and signed with the sign command. Read a transaction from [file_path] and +broadcast it to a node. If you supply a dash (-) argument in place of an input +filename, the command reads from standard input. + +$ tx broadcast ./mytxn.json +`), + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + cctx, err := GetClientTxContext(cmd) + if err != nil { + return err + } + + if cctx.Offline { + return errors.New("cannot broadcast tx during offline mode") + } + + stdTx, err := authclient.ReadTxFromFile(cctx, args[0]) + if err != nil { + return err + } + + txb, err := cctx.TxConfig.TxEncoder()(stdTx) + if err != nil { + return err + } + + res, err := cctx.BroadcastTx(txb) + if err != nil { + return err + } + + return cctx.PrintProto(res) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/go/cli/cctx.go b/go/cli/cctx.go new file mode 100644 index 00000000..fc238e5e --- /dev/null +++ b/go/cli/cctx.go @@ -0,0 +1,311 @@ +package cli + +import ( + "crypto/tls" + "errors" + "fmt" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "google.golang.org/grpc" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" + + cflags "pkg.akt.dev/go/cli/flags" +) + +const ClientContextKey = sdk.ContextKey("client.context") +const ServerContextKey = sdk.ContextKey("server.context") + +// SetCmdClientContextHandler is to be used in a command pre-hook execution to +// read flags that populate a Context and sets that to the command's Context. 
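// (Editor note: illustrative sketch, not part of this change.) The handler below assumes
// the root command seeded its context with a mutable *sdkclient.Context stored under
// ClientContextKey; a typical wiring (an assumption about caller code, not taken from
// this diff) looks like:
//
//	cctx := sdkclient.Context{}
//	ctx := context.WithValue(context.Background(), ClientContextKey, &cctx)
//	err := rootCmd.ExecuteContext(ctx)
//
// SetCmdClientContext then writes the flag-resolved context back through that pointer,
// which is how later RunE handlers observe it via GetClientContextFromCmd.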
+func SetCmdClientContextHandler(cctx sdkclient.Context, cmd *cobra.Command) (err error) { + cctx, err = ReadPersistentCommandFlags(cctx, cmd.Flags()) + if err != nil { + return err + } + + return SetCmdClientContext(cmd, cctx) +} + +// GetClientContextFromCmd returns a Context from a command or an empty Context +// if it has not been set. +func GetClientContextFromCmd(cmd *cobra.Command) sdkclient.Context { + if v := cmd.Context().Value(ClientContextKey); v != nil { + clientCtxPtr := v.(*sdkclient.Context) + return *clientCtxPtr + } + + return sdkclient.Context{} +} + +// SetCmdClientContext sets a command's Context value to the provided argument. +func SetCmdClientContext(cmd *cobra.Command, cctx sdkclient.Context) error { + v := cmd.Context().Value(ClientContextKey) + if v == nil { + return errors.New("client context not set") + } + + clientCtxPtr := v.(*sdkclient.Context) + *clientCtxPtr = cctx + + return nil +} + +// GetClientQueryContext returns a Context from a command with fields set based on flags +// defined in AddQueryFlagsToCmd. An error is returned if any flag query fails. +// +// - client.Context field not pre-populated & flag not set: uses default flag value +// - client.Context field not pre-populated & flag set: uses set flag value +// - client.Context field pre-populated & flag not set: uses pre-populated value +// - client.Context field pre-populated & flag set: uses set flag value +func GetClientQueryContext(cmd *cobra.Command) (sdkclient.Context, error) { + ctx := GetClientContextFromCmd(cmd) + return ReadQueryCommandFlags(ctx, cmd.Flags()) +} + +// GetClientTxContext returns a Context from a command with fields set based on flags +// defined in AddTxFlagsToCmd. An error is returned if any flag query fails. +// +// - client.Context field not pre-populated & flag not set: uses default flag value +// - client.Context field not pre-populated & flag set: uses set flag value +// - client.Context field pre-populated & flag not set: uses pre-populated value +// - client.Context field pre-populated & flag set: uses set flag value +func GetClientTxContext(cmd *cobra.Command) (sdkclient.Context, error) { + ctx := GetClientContextFromCmd(cmd) + return ReadTxCommandFlags(ctx, cmd.Flags()) +} + +// ReadQueryCommandFlags returns an updated Context with fields set based on flags +// defined in AddQueryFlagsToCmd. An error is returned if any flag query fails. +// +// Note, the provided clientCtx may have field pre-populated. The following order +// of precedence occurs: +// +// - client.Context field not pre-populated & flag not set: uses default flag value +// - client.Context field not pre-populated & flag set: uses set flag value +// - client.Context field pre-populated & flag not set: uses pre-populated value +// - client.Context field pre-populated & flag set: uses set flag value +func ReadQueryCommandFlags(cctx sdkclient.Context, flagSet *pflag.FlagSet) (sdkclient.Context, error) { + if cctx.Height == 0 || flagSet.Changed(cflags.FlagHeight) { + height, _ := flagSet.GetInt64(cflags.FlagHeight) + cctx = cctx.WithHeight(height) + } + + if !cctx.UseLedger || flagSet.Changed(cflags.FlagUseLedger) { + useLedger, _ := flagSet.GetBool(cflags.FlagUseLedger) + cctx = cctx.WithUseLedger(useLedger) + } + + return ReadPersistentCommandFlags(cctx, flagSet) +} + +// ReadPersistentCommandFlags returns a Context with fields set for "persistent" +// or common flags that do not necessarily change with context. +// +// Note, the provided clientCtx may have field pre-populated. 
The following order +// of precedence occurs: +// +// - client.Context field not pre-populated & flag not set: uses default flag value +// - client.Context field not pre-populated & flag set: uses set flag value +// - client.Context field pre-populated & flag not set: uses pre-populated value +// - client.Context field pre-populated & flag set: uses set flag value +func ReadPersistentCommandFlags(cctx sdkclient.Context, flagSet *pflag.FlagSet) (sdkclient.Context, error) { + if cctx.OutputFormat == "" || flagSet.Changed(cflags.FlagOutput) { + output, _ := flagSet.GetString(cflags.FlagOutput) + cctx = cctx.WithOutputFormat(output) + } + + if cctx.HomeDir == "" || flagSet.Changed(cflags.FlagHome) { + homeDir, _ := flagSet.GetString(cflags.FlagHome) + cctx = cctx.WithHomeDir(homeDir) + } + + if !cctx.Simulate || flagSet.Changed(cflags.FlagDryRun) { + dryRun, _ := flagSet.GetBool(cflags.FlagDryRun) + cctx = cctx.WithSimulation(dryRun) + } + + if cctx.KeyringDir == "" || flagSet.Changed(cflags.FlagKeyringDir) { + keyringDir, _ := flagSet.GetString(cflags.FlagKeyringDir) + + // The keyring directory is optional and falls back to the home directory + // if omitted. + if keyringDir == "" { + keyringDir = cctx.HomeDir + } + + cctx = cctx.WithKeyringDir(keyringDir) + } + + if cctx.ChainID == "" || flagSet.Changed(cflags.FlagChainID) { + chainID, _ := flagSet.GetString(cflags.FlagChainID) + cctx = cctx.WithChainID(chainID) + } + + if cctx.Keyring == nil || flagSet.Changed(cflags.FlagKeyringBackend) { + keyringBackend, _ := flagSet.GetString(cflags.FlagKeyringBackend) + + if keyringBackend != "" { + kr, err := sdkclient.NewKeyringFromBackend(cctx, keyringBackend) + if err != nil { + return cctx, err + } + + cctx = cctx.WithKeyring(kr) + } + } + + if cctx.Client == nil || flagSet.Changed(cflags.FlagNode) { + rpcURI, _ := flagSet.GetString(cflags.FlagNode) + if rpcURI != "" { + cctx = cctx.WithNodeURI(rpcURI) + + client, err := sdkclient.NewClientFromNode(rpcURI) + if err != nil { + return cctx, err + } + + cctx = cctx.WithClient(client) + } + } + + if cctx.GRPCClient == nil || flagSet.Changed(cflags.FlagGRPC) { + grpcURI, _ := flagSet.GetString(cflags.FlagGRPC) + if grpcURI != "" { + var dialOpts []grpc.DialOption + + useInsecure, _ := flagSet.GetBool(cflags.FlagGRPCInsecure) + if useInsecure { + dialOpts = append(dialOpts, grpc.WithTransportCredentials(insecure.NewCredentials())) + } else { + dialOpts = append(dialOpts, grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{ + MinVersion: tls.VersionTLS12, + }))) + } + + grpcClient, err := grpc.NewClient(grpcURI, dialOpts...) + if err != nil { + return sdkclient.Context{}, err + } + cctx = cctx.WithGRPCClient(grpcClient) + } + } + + return cctx, nil +} + +// ReadTxCommandFlags returns an updated Context with fields set based on flags +// defined in AddTxFlagsToCmd. An error is returned if any flag query fails. +// +// Note, the provided clientCtx may have field pre-populated. 
The following order +// of precedence occurs: +// +// - client.Context field not pre-populated & flag not set: uses default flag value +// - client.Context field not pre-populated & flag set: uses set flag value +// - client.Context field pre-populated & flag not set: uses pre-populated value +// - client.Context field pre-populated & flag set: uses set flag value +func ReadTxCommandFlags(cctx sdkclient.Context, flagSet *pflag.FlagSet) (sdkclient.Context, error) { + cctx, err := ReadPersistentCommandFlags(cctx, flagSet) + if err != nil { + return cctx, err + } + + if !cctx.GenerateOnly || flagSet.Changed(cflags.FlagGenerateOnly) { + genOnly, _ := flagSet.GetBool(cflags.FlagGenerateOnly) + cctx = cctx.WithGenerateOnly(genOnly) + } + + if !cctx.Offline || flagSet.Changed(cflags.FlagOffline) { + offline, _ := flagSet.GetBool(cflags.FlagOffline) + cctx = cctx.WithOffline(offline) + } + + if !cctx.UseLedger || flagSet.Changed(cflags.FlagUseLedger) { + useLedger, _ := flagSet.GetBool(cflags.FlagUseLedger) + cctx = cctx.WithUseLedger(useLedger) + } + + if cctx.BroadcastMode == "" || flagSet.Changed(cflags.FlagBroadcastMode) { + bMode, _ := flagSet.GetString(cflags.FlagBroadcastMode) + cctx = cctx.WithBroadcastMode(bMode) + } + + if !cctx.SkipConfirm || flagSet.Changed(cflags.FlagSkipConfirmation) { + skipConfirm, _ := flagSet.GetBool(cflags.FlagSkipConfirmation) + cctx = cctx.WithSkipConfirmation(skipConfirm) + } + + if cctx.SignModeStr == "" || flagSet.Changed(cflags.FlagSignMode) { + signModeStr, _ := flagSet.GetString(cflags.FlagSignMode) + cctx = cctx.WithSignModeStr(signModeStr) + } + + if cctx.FeePayer == nil || flagSet.Changed(cflags.FlagFeePayer) { + payer, _ := flagSet.GetString(cflags.FlagFeePayer) + + if payer != "" { + payerAcc, err := sdk.AccAddressFromBech32(payer) + if err != nil { + return cctx, err + } + + cctx = cctx.WithFeePayerAddress(payerAcc) + } + } + + if cctx.FeeGranter == nil || flagSet.Changed(cflags.FlagFeeGranter) { + granter, _ := flagSet.GetString(cflags.FlagFeeGranter) + + if granter != "" { + granterAcc, err := sdk.AccAddressFromBech32(granter) + if err != nil { + return cctx, err + } + + cctx = cctx.WithFeeGranterAddress(granterAcc) + } + } + + if cctx.From == "" || flagSet.Changed(cflags.FlagFrom) { + from, _ := flagSet.GetString(cflags.FlagFrom) + fromAddr, fromName, keyType, err := sdkclient.GetFromFields(cctx, cctx.Keyring, from) + if err != nil { + return cctx, err + } + + cctx = cctx.WithFrom(from).WithFromAddress(fromAddr).WithFromName(fromName) + + // If the `from` signer account is a ledger key, we need to use + // SIGN_MODE_AMINO_JSON, because ledger doesn't support proto yet. + // ref: https://github.com/cosmos/cosmos-sdk/issues/8109 + if keyType == keyring.TypeLedger && cctx.SignModeStr != cflags.SignModeLegacyAminoJSON && !cctx.LedgerHasProtobuf { + fmt.Println("Default sign-mode 'direct' not supported by Ledger, using sign-mode 'amino-json'.") + cctx = cctx.WithSignModeStr(cflags.SignModeLegacyAminoJSON) + } + } + + if !cctx.IsAux || flagSet.Changed(cflags.FlagAux) { + isAux, _ := flagSet.GetBool(cflags.FlagAux) + cctx = cctx.WithAux(isAux) + if isAux { + // If the user didn't explicitly set an --output flag, use JSON by + // default. + if cctx.OutputFormat == "" || !flagSet.Changed(cflags.FlagOutput) { + cctx = cctx.WithOutputFormat("json") + } + + // If the user didn't explicitly set a --sign-mode flag, use + // DIRECT_AUX by default. 
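// (Editor note: illustrative summary, not part of this change.) Taken together, the
// --aux branch means a bare "--aux" invocation defaults to JSON output and the
// direct-aux sign mode unless either flag is set explicitly. Every check in these
// Read*CommandFlags helpers follows the same precedence described above:
//
//	context field empty + flag unset -> default flag value
//	context field empty + flag set   -> flag value
//	context field set   + flag unset -> pre-populated context value
//	context field set   + flag set   -> flag value
//
// which is why each branch is guarded with "field is zero-valued OR flagSet.Changed(...)".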
+ if cctx.SignModeStr == "" || !flagSet.Changed(cflags.FlagSignMode) { + cctx = cctx.WithSignModeStr(cflags.SignModeDirectAux) + } + } + } + + return cctx, nil +} diff --git a/go/cli/cert_query.go b/go/cli/cert_query.go new file mode 100644 index 00000000..85dd7270 --- /dev/null +++ b/go/cli/cert_query.go @@ -0,0 +1,97 @@ +package cli + +import ( + "fmt" + "math/big" + + "github.com/spf13/cobra" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + + cflags "pkg.akt.dev/go/cli/flags" + types "pkg.akt.dev/go/node/cert/v1" + utiltls "pkg.akt.dev/go/util/tls" +) + +func GetQueryCertCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Certificate query commands", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + + cmd.AddCommand( + GetQueryCertCertificatesCmd(), + ) + + return cmd +} + +func GetQueryCertCertificatesCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "Query for all certificates", + SilenceUsage: true, + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + pageReq, err := sdkclient.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + params := &types.QueryCertificatesRequest{ + Pagination: pageReq, + } + + if value := cmd.Flag("owner").Value.String(); value != "" { + var owner sdk.Address + if owner, err = sdk.AccAddressFromBech32(value); err != nil { + return err + } + + params.Filter.Owner = owner.String() + } + + if value := cmd.Flag("serial").Value.String(); value != "" { + if params.Filter.Owner == "" { + return fmt.Errorf("--serial flag requires --owner to be set") + } + val, valid := new(big.Int).SetString(value, 10) + if !valid { + return utiltls.ErrInvalidSerialFlag + } + + params.Filter.Serial = val.String() + } + + if value := cmd.Flag("state").Value.String(); value != "" { + if val, exists := types.State_value[value]; !exists || types.State(val) == types.CertificateStateInvalid { + return fmt.Errorf("invalid value of --state flag. 
expected valid|revoked") + } + + params.Filter.State = value + } + + res, err := cl.Query().Certs().Certificates(cmd.Context(), params) + if err != nil { + return err + } + + return cl.ClientContext().PrintProto(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "certificates") + + cmd.Flags().String("serial", "", "filter certificates by serial number") + cmd.Flags().String("owner", "", "filter certificates by owner") + cmd.Flags().String("state", "", "filter certificates by valid|revoked") + + return cmd +} diff --git a/go/cli/cert_tx.go b/go/cli/cert_tx.go new file mode 100644 index 00000000..f8b5424a --- /dev/null +++ b/go/cli/cert_tx.go @@ -0,0 +1,464 @@ +package cli + +import ( + "crypto/x509" + "encoding/json" + "encoding/pem" + "fmt" + "math/big" + "time" + + "github.com/spf13/cobra" + "github.com/spf13/viper" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/server" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/genutil" + genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" + + cflags "pkg.akt.dev/go/cli/flags" + types "pkg.akt.dev/go/node/cert/v1" + utiltls "pkg.akt.dev/go/util/tls" +) + +const ( + // flagOverwrite = "overwrite" + flagSerial = "serial" + flagValidTime = "valid-duration" + flagStart = "start-time" + flagToGenesis = "to-genesis" +) + +var ( + errCertificateDoesNotExist = fmt.Errorf("%w: does not exist", utiltls.ErrCertificate) + errCannotOverwriteCertificate = fmt.Errorf("%w: cannot overwrite certificate", utiltls.ErrCertificate) +) + +func GetTxCertCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Certificates transaction subcommands", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + + // Commands + // 1. Generate - create public / private key pair + // 2. Publish - publish a key pair to the blockchain + // 3. 
Revoke - revoke a key pair on the blockchain + + cmd.AddCommand( + GetTxCertGenerateCmd(), + GetTxCertPublishCmd(), + GetTxCertRevokeCmd(), + ) + + return cmd +} + +func doCertGenerateCmd(cmd *cobra.Command, domains []string) error { + allowOverwrite := viper.GetBool(cflags.FlagOverwrite) + + cctx, err := sdkclient.GetClientTxContext(cmd) + if err != nil { + return err + } + fromAddress := cctx.GetFromAddress() + + kpm, err := utiltls.NewKeyPairManager(cctx, fromAddress) + if err != nil { + return err + } + + exists, err := kpm.KeyExists() + if err != nil { + return err + } + if !allowOverwrite && exists { + return errCannotOverwriteCertificate + } + + var startTime time.Time + startTimeStr := viper.GetString(flagStart) + if len(startTimeStr) == 0 { + startTime = time.Now().Truncate(time.Second) + } else { + startTime, err = time.Parse(time.RFC3339, startTimeStr) + if err != nil { + return err + } + } + validDuration := viper.GetDuration(flagValidTime) + + return kpm.Generate(startTime, startTime.Add(validDuration), domains) +} + +func doPublishCmd(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + toGenesis := viper.GetBool(flagToGenesis) + + fromAddress := cctx.GetFromAddress() + + kpm, err := utiltls.NewKeyPairManager(cctx, fromAddress) + if err != nil { + return err + } + + exists, err := kpm.KeyExists() + if err != nil { + return err + } + if !exists { + return errCertificateDoesNotExist + } + + cert, _, pubKey, err := kpm.Read() + if err != nil { + return err + } + + msg := &types.MsgCreateCertificate{ + Owner: fromAddress.String(), + Cert: pem.EncodeToMemory(&pem.Block{ + Type: types.PemBlkTypeCertificate, + Bytes: cert, + }), + Pubkey: pem.EncodeToMemory(&pem.Block{ + Type: types.PemBlkTypeECPublicKey, + Bytes: pubKey, + }), + } + + if err = msg.ValidateBasic(); err != nil { + return err + } + + if toGenesis { + return addCertToGenesis(cmd, types.GenesisCertificate{ + Owner: msg.Owner, + Certificate: types.Certificate{ + State: types.CertificateValid, + Cert: msg.Cert, + Pubkey: msg.Pubkey, + }, + }) + + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) +} + +func doRevokeCmd(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + serial := viper.GetString(flagSerial) + + fromAddress := cctx.GetFromAddress() + + if len(serial) != 0 { + if _, valid := new(big.Int).SetString(serial, 10); !valid { + return utiltls.ErrInvalidSerialFlag + } + } else { + kpm, err := utiltls.NewKeyPairManager(cctx, fromAddress) + if err != nil { + return err + } + + cert, _, _, err := kpm.Read() + if err != nil { + return err + } + + parsedCert, err := x509.ParseCertificate(cert) + if err != nil { + return err + } + + serial = parsedCert.SerialNumber.String() + } + + req := &types.QueryCertificatesRequest{ + Filter: types.CertificateFilter{ + Owner: fromAddress.String(), + Serial: serial, + State: types.CertificateValid.String(), + }, + } + + res, err := cl.Query().Certs().Certificates(cmd.Context(), req) + if err != nil { + return err + } + + exists := len(res.Certificates) != 0 + if !exists { + return fmt.Errorf("%w: certificate with serial %v does not exist on chain and cannot be revoked", utiltls.ErrCertificate, serial) + } + + msg := &types.MsgRevokeCertificate{ + ID: types.ID{ + Owner: cctx.FromAddress.String(), + Serial: serial, + }, + } + + resp, err := 
cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) +} + +func GetTxCertGenerateCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "generate", + Short: "", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + + cmd.AddCommand( + GetTxCertGenerateClientCmd(), + GetTxCertGenerateServerCmd(), + ) + + return cmd +} + +func addTxCertGenerateFlags(cmd *cobra.Command) error { + cmd.Flags().String(flagStart, "", "certificate is not valid before this date. default current timestamp. RFC3339") + if err := viper.BindPFlag(flagStart, cmd.Flags().Lookup(flagStart)); err != nil { + return err + } + + cmd.Flags().Duration(flagValidTime, time.Hour*24*365, "certificate is not valid after this date. RFC3339") + if err := viper.BindPFlag(flagValidTime, cmd.Flags().Lookup(flagValidTime)); err != nil { + return err + } + cmd.Flags().Bool(cflags.FlagOverwrite, false, "overwrite existing certificate if present") + if err := viper.BindPFlag(cflags.FlagOverwrite, cmd.Flags().Lookup(cflags.FlagOverwrite)); err != nil { + return err + } + + cflags.AddTxFlagsToCmd(cmd) // TODO - add just the keyring flags? not all the TX ones + return nil +} + +func GetTxCertGenerateClientCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "client", + Short: "", + SuggestionsMinimumDistance: 2, + PersistentPreRunE: TxPersistentPreRunE, + RunE: doCertGenerateCmd, + SilenceUsage: true, + Args: cobra.ExactArgs(0), + } + err := addTxCertGenerateFlags(cmd) + if err != nil { + panic(err) + } + + return cmd +} + +func GetTxCertGenerateServerCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "server", + Short: "", + SuggestionsMinimumDistance: 2, + PersistentPreRunE: TxPersistentPreRunE, + RunE: doCertGenerateCmd, + SilenceUsage: true, + Args: cobra.MinimumNArgs(1), + } + err := addTxCertGenerateFlags(cmd) + if err != nil { + panic(err) + } + + return cmd +} + +func GetTxCertPublishCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "publish", + Short: "", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + + cmd.AddCommand( + GetTxCertPublishClientCmd(), + GetTxCertPublishServerCmd()) + + return cmd +} + +func GetTxCertPublishClientCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "client", + Short: "", + SuggestionsMinimumDistance: 2, + PersistentPreRunE: TxPersistentPreRunE, + RunE: doPublishCmd, + SilenceUsage: true, + Args: cobra.ExactArgs(0), + } + err := addTxCertPublishFlags(cmd) + if err != nil { + panic(err) + } + + return cmd +} + +func GetTxCertPublishServerCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "server", + Short: "", + SuggestionsMinimumDistance: 2, + PersistentPreRunE: TxPersistentPreRunE, + RunE: doPublishCmd, + SilenceUsage: true, + Args: cobra.ExactArgs(0), + } + err := addTxCertPublishFlags(cmd) + if err != nil { + panic(err) + } + + return cmd +} + +func addTxCertPublishFlags(cmd *cobra.Command) error { + cmd.Flags().Bool(flagToGenesis, false, "add to genesis") + if err := viper.BindPFlag(flagToGenesis, cmd.Flags().Lookup(flagToGenesis)); err != nil { + return err + } + + cflags.AddTxFlagsToCmd(cmd) + + return nil +} + +func addCertToGenesis(cmd *cobra.Command, cert types.GenesisCertificate) error { + cctx, err := sdkclient.GetClientTxContext(cmd) + if err != nil { + return err + } + + cdc := cctx.Codec + + serverCtx := server.GetServerContextFromCmd(cmd) + config := serverCtx.Config + + config.SetRoot(cctx.HomeDir) + + if err := cert.Validate(); err != nil { + return fmt.Errorf("%w: failed to 
validate new genesis certificate", err) + } + + genFile := config.GenesisFile() + appState, genDoc, err := genutiltypes.GenesisStateFromGenFile(genFile) + if err != nil { + return fmt.Errorf("%w: failed to unmarshal genesis state", err) + } + + certsGenState := types.GetGenesisStateFromAppState(cdc, appState) + + if certsGenState.Certificates.Contains(cert) { + return fmt.Errorf("%w: cannot add already existing certificate", err) + } + certsGenState.Certificates = append(certsGenState.Certificates, cert) + + certsGenStateBz, err := cdc.MarshalJSON(certsGenState) + if err != nil { + return fmt.Errorf("%w: failed to marshal auth genesis state", err) + } + + appState[types.ModuleName] = certsGenStateBz + + appStateJSON, err := json.Marshal(appState) + if err != nil { + return fmt.Errorf("%w: failed to marshal application genesis state", err) + } + + genDoc.AppState = appStateJSON + return genutil.ExportGenesisFile(genDoc, genFile) +} + +func GetTxCertRevokeCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "revoke", + Short: "", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + cmd.AddCommand( + GetTxCertsRevokeClientCmd(), + GetTxCertRevokeServerCmd()) + + return cmd +} + +func GetTxCertsRevokeClientCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "client", + Short: "", + SuggestionsMinimumDistance: 2, + PersistentPreRunE: TxPersistentPreRunE, + RunE: doRevokeCmd, + SilenceUsage: true, + Args: cobra.ExactArgs(0), + } + + err := addRevokeCmdFlags(cmd) + + if err != nil { + panic(err) + } + + return cmd +} + +func GetTxCertRevokeServerCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "server", + Short: "", + SuggestionsMinimumDistance: 2, + PersistentPreRunE: TxPersistentPreRunE, + RunE: doRevokeCmd, + SilenceUsage: true, + Args: cobra.ExactArgs(0), + } + err := addRevokeCmdFlags(cmd) + if err != nil { + panic(err) + } + + return cmd +} + +func addRevokeCmdFlags(cmd *cobra.Command) error { + cmd.Flags().String(flagSerial, "", "revoke certificate by serial number") + if err := viper.BindPFlag(flagSerial, cmd.Flags().Lookup(flagSerial)); err != nil { + return err + } + + cflags.AddTxFlagsToCmd(cmd) + return nil +} diff --git a/go/cli/client.go b/go/cli/client.go new file mode 100644 index 00000000..3e51c291 --- /dev/null +++ b/go/cli/client.go @@ -0,0 +1,71 @@ +package cli + +import ( + "context" + "errors" + "fmt" + "reflect" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + + cmtrpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types" + + aclient "pkg.akt.dev/go/node/client" + cltypes "pkg.akt.dev/go/node/client/types" + "pkg.akt.dev/go/node/client/v1beta3" +) + +var ( + ErrInvalidClient = errors.New("invalid client") +) + +func DiscoverQueryClient(ctx context.Context, cctx sdkclient.Context) (v1beta3.LightClient, error) { + var cl v1beta3.LightClient + err := aclient.DiscoverLightClient(ctx, cctx, func(i interface{}) error { + var valid bool + + if cl, valid = i.(v1beta3.LightClient); !valid { + return fmt.Errorf("%w: expected %s, actual %s", ErrInvalidClient, reflect.TypeOf(cl), reflect.TypeOf(i)) + } + + return nil + }) + + if err != nil { + return nil, err + } + + return cl, nil +} + +func DiscoverClient(ctx context.Context, cctx sdkclient.Context, opts ...cltypes.ClientOption) (v1beta3.Client, error) { + var cl v1beta3.Client + + setupFn := func(i interface{}) error { + var valid bool + + if cl, valid = i.(v1beta3.Client); !valid { + return fmt.Errorf("%w: expected %s, actual %s", ErrInvalidClient, reflect.TypeOf(cl), reflect.TypeOf(i)) + } + + 
return nil + } + + err := aclient.DiscoverClient(ctx, cctx, setupFn, opts...) + + if err != nil { + return nil, err + } + + return cl, nil +} + +func RPCAkash(_ *cmtrpctypes.Context) (*aclient.Akash, error) { + result := &aclient.Akash{ + ClientInfo: &aclient.ClientInfo{ + ApiVersion: "v1beta3", + }, + } + + return result, nil +} diff --git a/go/cli/crisis_tx.go b/go/cli/crisis_tx.go new file mode 100644 index 00000000..ffbf618d --- /dev/null +++ b/go/cli/crisis_tx.go @@ -0,0 +1,69 @@ +package cli + +import ( + "errors" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/x/crisis/types" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetTxCrisisCmd returns a root CLI command handler for all x/crisis transaction commands. +func GetTxCrisisCmd() *cobra.Command { + txCmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Crisis transactions subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + txCmd.AddCommand( + GetTxCrisisVerifyInvariantTxCmd(), + ) + + return txCmd +} + +// GetTxCrisisVerifyInvariantTxCmd returns a CLI command handler for creating a +// MsgVerifyInvariant transaction. +func GetTxCrisisVerifyInvariantTxCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "invariant-broken [module-name] [invariant-route]", + Short: "Submit proof that an invariant broken to halt the chain", + Args: cobra.ExactArgs(2), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + moduleName, route := args[0], args[1] + if moduleName == "" { + return errors.New("invalid module name") + } + if route == "" { + return errors.New("invalid invariant route") + } + + senderAddr := cctx.GetFromAddress() + + msg := types.NewMsgVerifyInvariant(senderAddr, moduleName, route) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/go/cli/crisis_tx_test.go b/go/cli/crisis_tx_test.go new file mode 100644 index 00000000..fb55a2be --- /dev/null +++ b/go/cli/crisis_tx_test.go @@ -0,0 +1,100 @@ +package cli_test + +import ( + "context" + "io" + "testing" + + sdkmath "cosmossdk.io/math" + rpcclientmock "github.com/cometbft/cometbft/rpc/client/mock" + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + testutilmod "github.com/cosmos/cosmos-sdk/types/module/testutil" + "github.com/cosmos/cosmos-sdk/x/crisis" + + "pkg.akt.dev/go/cli" + cflags "pkg.akt.dev/go/cli/flags" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +func TestNewMsgVerifyInvariantTxCmd(t *testing.T) { + encCfg := testutilmod.MakeTestEncodingConfig(crisis.AppModuleBasic{}) + kr := keyring.NewInMemory(encCfg.Codec) + baseCtx := client.Context{}. + WithKeyring(kr). + WithTxConfig(encCfg.TxConfig). + WithCodec(encCfg.Codec). + WithLegacyAmino(encCfg.Amino). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). + WithChainID("test-chain"). 
+ WithSignModeStr(cflags.SignModeDirect) + + accounts := testutil.CreateKeyringAccounts(t, kr, 1) + testCases := []struct { + name string + args []string + expectErr bool + errString string + expectedCode uint32 + }{ + { + "missing module", + cli.TestFlags(). + With( + "", + "total-supply", + ). + WithFrom(accounts[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10)))), + true, "invalid module name", 0, + }, + { + "missing invariant route", + cli.TestFlags(). + With( + "bank", + "", + ). + WithFrom(accounts[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10)))), + true, "invalid invariant route", 0, + }, + { + "valid transaction", + cli.TestFlags(). + With( + "bank", + "total-supply", + ). + WithFrom(accounts[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10)))), + false, "", 0, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cmd := cli.GetTxCrisisVerifyInvariantTxCmd() + + _, err := clitestutil.ExecTestCLICmd(context.Background(), baseCtx, cmd, tc.args...) + if tc.expectErr { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errString) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/go/cli/deployment_query.go b/go/cli/deployment_query.go new file mode 100644 index 00000000..f655b37b --- /dev/null +++ b/go/cli/deployment_query.go @@ -0,0 +1,148 @@ +package cli + +import ( + "github.com/spf13/cobra" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + + "pkg.akt.dev/go/node/deployment/v1" + "pkg.akt.dev/go/node/deployment/v1beta4" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetQueryDeploymentCmds returns the query commands for the deployment module +func GetQueryDeploymentCmds() *cobra.Command { + cmd := &cobra.Command{ + Use: v1.ModuleName, + Short: "Deployment query commands", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + + cmd.AddCommand( + GetQueryDeploymentsCmd(), + GetQueryDeploymentCmd(), + GetQueryDeploymentGroupCmds(), + ) + + return cmd +} + +func GetQueryDeploymentsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "Query for all deployments", + Args: cobra.ExactArgs(0), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + dfilters, err := cflags.DepFiltersFromFlags(cmd.Flags()) + if err != nil { + return err + } + + pageReq, err := sdkclient.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + params := &v1beta4.QueryDeploymentsRequest{ + Filters: dfilters, + Pagination: pageReq, + } + + res, err := cl.Query().Deployment().Deployments(ctx, params) + if err != nil { + return err + } + + return cl.ClientContext().PrintProto(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "deployments") + cflags.AddDeploymentFilterFlags(cmd.Flags()) + + return cmd +} + +func GetQueryDeploymentCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "get", + Short: "Query deployment", + Args: cobra.ExactArgs(0), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + id, err := cflags.DeploymentIDFromFlags(cmd.Flags()) + if err != nil { + 
return err + } + + res, err := cl.Query().Deployment().Deployment(ctx, &v1beta4.QueryDeploymentRequest{ID: id}) + if err != nil { + return err + } + + return cl.ClientContext().PrintProto(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddDeploymentIDFlags(cmd.Flags()) + cflags.MarkReqDeploymentIDFlags(cmd) + + return cmd +} + +func GetQueryDeploymentGroupCmds() *cobra.Command { + cmd := &cobra.Command{ + Use: "group", + Short: "Deployment group query commands", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + + cmd.AddCommand( + GetQueryDeploymentGroupCmd(), + ) + + return cmd +} + +func GetQueryDeploymentGroupCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "get", + Short: "Query group of deployment", + Args: cobra.ExactArgs(0), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + id, err := cflags.GroupIDFromFlags(cmd.Flags()) + if err != nil { + return err + } + + res, err := cl.Query().Deployment().Group(ctx, &v1beta4.QueryGroupRequest{ID: id}) + if err != nil { + return err + } + + return cl.ClientContext().PrintProto(&res.Group) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddGroupIDFlags(cmd.Flags()) + cflags.MarkReqGroupIDFlags(cmd) + + return cmd +} diff --git a/go/cli/deployment_tx.go b/go/cli/deployment_tx.go new file mode 100644 index 00000000..76672693 --- /dev/null +++ b/go/cli/deployment_tx.go @@ -0,0 +1,566 @@ +package cli + +import ( + "errors" + "fmt" + "os" + "reflect" + "strings" + "time" + + "github.com/spf13/cobra" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/authz" + + "pkg.akt.dev/go/sdl" + + cutils "pkg.akt.dev/go/node/cert/v1/utils" + dv1 "pkg.akt.dev/go/node/deployment/v1" + dv1beta4 "pkg.akt.dev/go/node/deployment/v1beta4" + "pkg.akt.dev/go/node/types/constants" + + cflags "pkg.akt.dev/go/cli/flags" +) + +var ( + errDeploymentUpdate = errors.New("deployment update failed") + errDeploymentUpdateGroupsChanged = fmt.Errorf("%w: groups are different than existing deployment, you cannot update groups", errDeploymentUpdate) +) + +// GetTxDeploymentCmds returns the transaction commands for this module +func GetTxDeploymentCmds() *cobra.Command { + cmd := &cobra.Command{ + Use: dv1.ModuleName, + Short: "Deployment transaction subcommands", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + cmd.AddCommand( + GetTxDeploymentCreateCmd(), + GetTxDeploymentUpdateCmd(), + GetTxDeploymentDepositCmd(), + GetTxDeploymentCloseCmd(), + GetTxDeploymentGroupCmds(), + GetTxDeploymentAuthzCmd(), + ) + return cmd +} + +func GetTxDeploymentCreateCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "create [sdl-file]", + Short: "Create deployment", + Args: cobra.ExactArgs(1), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + // first lets validate certificate exists for given account + if _, err := cutils.LoadAndQueryCertificateForAccount(ctx, cctx, nil); err != nil { + if os.IsNotExist(err) { + err = fmt.Errorf("no certificate file found for account %q.\n"+ + "consider creating it as certificate required to submit manifest", cctx.FromAddress.String()) + } + + return err + } + + sdlManifest, err := sdl.ReadFile(args[0]) + if err != nil { + return err + } + + groups, err := 
sdlManifest.DeploymentGroups() + if err != nil { + return err + } + + warnIfGroupVolumesExceeds(cctx, groups) + + id, err := cflags.DeploymentIDFromFlags(cmd.Flags(), cflags.WithOwner(cctx.FromAddress)) + if err != nil { + return err + } + + // Default DSeq to the current block height + if id.DSeq == 0 { + syncInfo, err := cl.Node().SyncInfo(ctx) + if err != nil { + return err + } + + if syncInfo.CatchingUp { + return fmt.Errorf("cannot generate DSEQ from last block height. node is catching up") + } + + id.DSeq = uint64(syncInfo.LatestBlockHeight) // nolint: gosec + } + + version, err := sdlManifest.Version() + if err != nil { + return err + } + + deposit, err := DetectDeploymentDeposit(ctx, cmd.Flags(), cl.Query()) + if err != nil { + return err + } + + depositorAcc, err := cflags.DepositorFromFlags(cmd.Flags(), id.Owner) + if err != nil { + return err + } + + msg := &dv1beta4.MsgCreateDeployment{ + ID: id, + Hash: version, + Groups: make(dv1beta4.GroupSpecs, 0, len(groups)), + Deposit: deposit, + Depositor: depositorAcc, + } + + for _, group := range groups { + msg.Groups = append(msg.Groups, group) + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + cflags.AddDeploymentIDFlags(cmd.Flags()) + cflags.AddDepositorFlag(cmd.Flags()) + cflags.AddDepositFlags(cmd.Flags()) + + return cmd +} + +func GetTxDeploymentDepositCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "deposit ", + Short: "Deposit funds to deployment", + Args: cobra.ExactArgs(1), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + id, err := cflags.DeploymentIDFromFlags(cmd.Flags(), cflags.WithOwner(cctx.FromAddress)) + if err != nil { + return err + } + + deposit, err := sdk.ParseCoinNormalized(args[0]) + if err != nil { + return err + } + + depositorAcc, err := cflags.DepositorFromFlags(cmd.Flags(), id.Owner) + if err != nil { + return err + } + + msg := &dv1.MsgDepositDeployment{ + ID: id, + Amount: deposit, + Depositor: depositorAcc, + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + cflags.AddDeploymentIDFlags(cmd.Flags()) + cflags.AddDepositorFlag(cmd.Flags()) + + return cmd +} + +func GetTxDeploymentCloseCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "close", + Short: "Close deployment", + Args: cobra.ExactArgs(0), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + id, err := cflags.DeploymentIDFromFlags(cmd.Flags(), cflags.WithOwner(cctx.FromAddress)) + if err != nil { + return err + } + + msg := &dv1beta4.MsgCloseDeployment{ID: id} + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + cflags.AddDeploymentIDFlags(cmd.Flags()) + return cmd +} + +func GetTxDeploymentUpdateCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "update [sdl-file]", + Short: "update deployment", + Args: cobra.ExactArgs(1), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := 
cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + id, err := cflags.DeploymentIDFromFlags(cmd.Flags(), cflags.WithOwner(cctx.FromAddress)) + if err != nil { + return err + } + + sdlManifest, err := sdl.ReadFile(args[0]) + if err != nil { + return err + } + + hash, err := sdlManifest.Version() + if err != nil { + return err + } + + groups, err := sdlManifest.DeploymentGroups() + if err != nil { + return err + } + + // Query the RPC node to make sure the existing groups are identical + existingDeployment, err := cl.Query().Deployment().Deployment(cmd.Context(), &dv1beta4.QueryDeploymentRequest{ + ID: id, + }) + if err != nil { + return err + } + + // do not send the transaction if the groups have changed + existingGroups := existingDeployment.GetGroups() + if len(existingGroups) != len(groups) { + return errDeploymentUpdateGroupsChanged + } + + for i, existingGroup := range existingGroups { + if reflect.DeepEqual(groups[i], existingGroup.GroupSpec) { + return errDeploymentUpdateGroupsChanged + } + } + + warnIfGroupVolumesExceeds(cctx, groups) + + msg := &dv1beta4.MsgUpdateDeployment{ + ID: id, + Hash: hash, + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + cflags.AddDeploymentIDFlags(cmd.Flags()) + + return cmd +} + +func GetTxDeploymentGroupCmds() *cobra.Command { + cmd := &cobra.Command{ + Use: "group", + Short: "Modify a Deployment's specific Group", + } + + cmd.AddCommand( + GetTxDeploymentGroupCloseCmd(), + GetDeploymentGroupPauseCmd(), + GetDeploymentGroupStartCmd(), + ) + + return cmd +} + +func GetTxDeploymentGroupCloseCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "close", + Short: "close a Deployment's specific Group", + Example: "akash tx deployment group-close --owner=[Account Address] --dseq=[uint64] --gseq=[uint32]", + Args: cobra.ExactArgs(0), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + + id, err := cflags.GroupIDFromFlags(cmd.Flags()) + if err != nil { + return err + } + + msg := &dv1beta4.MsgCloseGroup{ + ID: id, + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + cflags.AddGroupIDFlags(cmd.Flags()) + cflags.MarkReqGroupIDFlags(cmd) + + return cmd +} + +func GetDeploymentGroupPauseCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "pause", + Short: "pause a Deployment's specific Group", + Example: "akash tx deployment group pause --owner=[Account Address] --dseq=[uint64] --gseq=[uint32]", + Args: cobra.ExactArgs(0), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + + id, err := cflags.GroupIDFromFlags(cmd.Flags()) + if err != nil { + return err + } + + msg := &dv1beta4.MsgPauseGroup{ + ID: id, + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + cflags.AddGroupIDFlags(cmd.Flags()) + cflags.MarkReqGroupIDFlags(cmd) + + return cmd +} + +func GetDeploymentGroupStartCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "start", + 
Short: "start a Deployment's specific Group", + Example: "akash tx deployment group pause --owner=[Account Address] --dseq=[uint64] --gseq=[uint32]", + Args: cobra.ExactArgs(0), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + + id, err := cflags.GroupIDFromFlags(cmd.Flags()) + if err != nil { + return err + } + + msg := &dv1beta4.MsgStartGroup{ + ID: id, + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + cflags.AddGroupIDFlags(cmd.Flags()) + cflags.MarkReqGroupIDFlags(cmd) + + return cmd +} + +func GetTxDeploymentAuthzCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "authz", + Short: "Deployment authorization transaction subcommands", + Long: "Authorize and revoke access to pay for deployments on behalf of your address", + } + + cmd.AddCommand( + GetTxDeploymentGrantAuthorizationCmd(), + GetTxDeploymentRevokeAuthorizationCmd(), + ) + + return cmd +} + +func GetTxDeploymentGrantAuthorizationCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "grant --from ", + Short: "Grant deposit deployment authorization to an address", + Long: strings.TrimSpace( + fmt.Sprintf(`grant authorization to an address to pay for a deployment on your behalf: + +Examples: + $ akash tx %s authz grant akash1skjw.. 50akt --from=akash1skl.. + $ akash tx %s authz grant akash1skjw.. 50akt --from=akash1skl.. --expiration=1661020200 + `, dv1.ModuleName, dv1.ModuleName), + ), + Args: cobra.ExactArgs(2), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + grantee, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + spendLimit, err := sdk.ParseCoinNormalized(args[1]) + if err != nil { + return err + } + if spendLimit.IsZero() || spendLimit.IsNegative() { + return fmt.Errorf("spend-limit should be greater than zero, got: %s", spendLimit) + } + + exp, err := cmd.Flags().GetInt64(cflags.FlagExpiration) + if err != nil { + return err + } + + granter := cctx.GetFromAddress() + authorization := dv1.NewDepositAuthorization(spendLimit) + + expiry := time.Unix(exp, 0) + msg, err := authz.NewMsgGrant(granter, grantee, authorization, &expiry) + if err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + cmd.Flags().Int64(cflags.FlagExpiration, time.Now().AddDate(1, 0, 0).Unix(), "The Unix timestamp. Default is one year.") + _ = cmd.MarkFlagRequired(cflags.FlagFrom) + + return cmd +} + +func GetTxDeploymentRevokeAuthorizationCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "revoke [grantee] --from=[granter]", + Short: "Revoke deposit deployment authorization given to an address", + Long: strings.TrimSpace( + fmt.Sprintf(`revoke deposit deployment authorization from a granter to a grantee: +Example: + $ akash tx %s authz revoke akash1skj.. --from=akash1skj.. 
+ `, dv1.ModuleName), + ), + Args: cobra.ExactArgs(1), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + grantee, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + granter := cctx.GetFromAddress() + msgTypeURL := (&dv1.DepositAuthorization{}).MsgTypeURL() + msg := authz.NewMsgRevoke(granter, grantee, msgTypeURL) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{&msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + _ = cmd.MarkFlagRequired(cflags.FlagFrom) + + return cmd +} + +func warnIfGroupVolumesExceeds(cctx sdkclient.Context, dgroups []dv1beta4.GroupSpec) { + for _, group := range dgroups { + for _, resources := range group.GetResourceUnits() { + if len(resources.Resources.Storage) > constants.DefaultMaxGroupVolumes { + _ = cctx.PrintString(fmt.Sprintf("amount of volumes for service exceeds recommended value (%v > %v)\n"+ + "there may no providers on network to bid", len(resources.Resources.Storage), constants.DefaultMaxGroupVolumes)) + } + } + } +} diff --git a/go/cli/distribution_query.go b/go/cli/distribution_query.go new file mode 100644 index 00000000..bea4bfa9 --- /dev/null +++ b/go/cli/distribution_query.go @@ -0,0 +1,403 @@ +package cli + +import ( + "fmt" + "strconv" + "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/distribution/types" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetQueryDistributionCmd returns the cli query commands for this module +func GetQueryDistributionCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Querying commands for the distribution module", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetQueryDistributionParamsCmd(), + GetQueryDistributionValidatorDistributionInfoCmd(), + GetQueryDistributionValidatorOutstandingRewardsCmd(), + GetQueryDistributionValidatorCommissionCmd(), + GetQueryDistributionValidatorSlashesCmd(), + GetQueryDistributionDelegatorRewardsCmd(), + GetQueryDistributionCommunityPoolCmd(), + GetQueryDistributionTokenizeShareRecordRewardCmd(), + ) + + return cmd +} + +// GetQueryDistributionParamsCmd implements the query params command. +func GetQueryDistributionParamsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "params", + Args: cobra.NoArgs, + Short: "Query distribution params", + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + + cl := MustQueryClientFromContext(ctx) + + res, err := cl.Query().Distribution().Params(ctx, &types.QueryParamsRequest{}) + if err != nil { + return err + } + + return cl.PrintMessage(&res.Params) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + return cmd +} + +// GetQueryDistributionValidatorDistributionInfoCmd implements the query validator distribution info command. +func GetQueryDistributionValidatorDistributionInfoCmd() *cobra.Command { + bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix() + + cmd := &cobra.Command{ + Use: "validator-distribution-info [validator]", + Args: cobra.ExactArgs(1), + Short: "Query validator distribution info", + Long: strings.TrimSpace( + fmt.Sprintf(`Query validator distribution info. 
+Example: +$ %s query distribution validator-distribution-info %s1lwjmdnks33xwnmfayc64ycprww49n33mtm92ne +`, + version.AppName, bech32PrefixValAddr, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + + cl := MustQueryClientFromContext(ctx) + + validatorAddr, err := sdk.ValAddressFromBech32(args[0]) + if err != nil { + return err + } + + res, err := cl.Query().Distribution().ValidatorDistributionInfo(ctx, &types.QueryValidatorDistributionInfoRequest{ + ValidatorAddress: validatorAddr.String(), + }) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + return cmd +} + +// GetQueryDistributionValidatorOutstandingRewardsCmd implements the query validator +// outstanding rewards command. +func GetQueryDistributionValidatorOutstandingRewardsCmd() *cobra.Command { + bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix() + + cmd := &cobra.Command{ + Use: "validator-outstanding-rewards [validator]", + Args: cobra.ExactArgs(1), + Short: "Query distribution outstanding (un-withdrawn) rewards for a validator and all their delegations", + Long: strings.TrimSpace( + fmt.Sprintf(`Query distribution outstanding (un-withdrawn) rewards for a validator and all their delegations. + +Example: +$ %s query distribution validator-outstanding-rewards %s1lwjmdnks33xwnmfayc64ycprww49n33mtm92ne +`, + version.AppName, bech32PrefixValAddr, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + + cl := MustQueryClientFromContext(ctx) + + validatorAddr, err := sdk.ValAddressFromBech32(args[0]) + if err != nil { + return err + } + + res, err := cl.Query().Distribution().ValidatorOutstandingRewards( + ctx, + &types.QueryValidatorOutstandingRewardsRequest{ValidatorAddress: validatorAddr.String()}, + ) + if err != nil { + return err + } + + return cl.PrintMessage(&res.Rewards) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + return cmd +} + +// GetQueryDistributionValidatorCommissionCmd implements the query validator commission command. +func GetQueryDistributionValidatorCommissionCmd() *cobra.Command { + bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix() + + cmd := &cobra.Command{ + Use: "commission [validator]", + Args: cobra.ExactArgs(1), + Short: "Query distribution validator commission", + Long: strings.TrimSpace( + fmt.Sprintf(`Query validator commission rewards from delegators to that validator. + +Example: +$ %s query distribution commission %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj +`, + version.AppName, bech32PrefixValAddr, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + + cl := MustQueryClientFromContext(ctx) + + validatorAddr, err := sdk.ValAddressFromBech32(args[0]) + if err != nil { + return err + } + + res, err := cl.Query().Distribution().ValidatorCommission( + ctx, + &types.QueryValidatorCommissionRequest{ValidatorAddress: validatorAddr.String()}, + ) + if err != nil { + return err + } + + return cl.PrintMessage(&res.Commission) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + return cmd +} + +// GetQueryDistributionValidatorSlashesCmd implements the query validator slashes command. 
+func GetQueryDistributionValidatorSlashesCmd() *cobra.Command { + bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix() + + cmd := &cobra.Command{ + Use: "slashes [validator] [start-height] [end-height]", + Args: cobra.ExactArgs(3), + Short: "Query distribution validator slashes", + Long: strings.TrimSpace( + fmt.Sprintf(`Query all slashes of a validator for a given block range. + +Example: +$ %s query distribution slashes %svaloper1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj 0 100 +`, + version.AppName, bech32PrefixValAddr, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + + cl := MustQueryClientFromContext(ctx) + + validatorAddr, err := sdk.ValAddressFromBech32(args[0]) + if err != nil { + return err + } + + startHeight, err := strconv.ParseUint(args[1], 10, 64) + if err != nil { + return fmt.Errorf("start-height %s not a valid uint, please input a valid start-height", args[1]) + } + + endHeight, err := strconv.ParseUint(args[2], 10, 64) + if err != nil { + return fmt.Errorf("end-height %s not a valid uint, please input a valid end-height", args[2]) + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + res, err := cl.Query().Distribution().ValidatorSlashes( + ctx, + &types.QueryValidatorSlashesRequest{ + ValidatorAddress: validatorAddr.String(), + StartingHeight: startHeight, + EndingHeight: endHeight, + Pagination: pageReq, + }, + ) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "validator slashes") + return cmd +} + +// GetQueryDistributionDelegatorRewardsCmd implements the query delegator rewards command. +func GetQueryDistributionDelegatorRewardsCmd() *cobra.Command { + bech32PrefixAccAddr := sdk.GetConfig().GetBech32AccountAddrPrefix() + bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix() + + cmd := &cobra.Command{ + Use: "rewards [delegator-addr] [validator-addr]", + Args: cobra.RangeArgs(1, 2), + Short: "Query all distribution delegator rewards or rewards from a particular validator", + Long: strings.TrimSpace( + fmt.Sprintf(`Query all rewards earned by a delegator, optionally restrict to rewards from a single validator. 
+ +Example: +$ %s query distribution rewards %s1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p +$ %s query distribution rewards %s1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj +`, + version.AppName, bech32PrefixAccAddr, version.AppName, bech32PrefixAccAddr, bech32PrefixValAddr, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + + cl := MustQueryClientFromContext(ctx) + + delegatorAddr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + // query for rewards from a particular delegation + if len(args) == 2 { + validatorAddr, err := sdk.ValAddressFromBech32(args[1]) + if err != nil { + return err + } + + res, err := cl.Query().Distribution().DelegationRewards( + ctx, + &types.QueryDelegationRewardsRequest{DelegatorAddress: delegatorAddr.String(), ValidatorAddress: validatorAddr.String()}, + ) + if err != nil { + return err + } + + return cl.PrintMessage(res) + } + + res, err := cl.Query().Distribution().DelegationTotalRewards( + ctx, + &types.QueryDelegationTotalRewardsRequest{DelegatorAddress: delegatorAddr.String()}, + ) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + return cmd +} + +// GetQueryDistributionCommunityPoolCmd returns the command for fetching community pool info. +func GetQueryDistributionCommunityPoolCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "community-pool", + Args: cobra.NoArgs, + Short: "Query the amount of coins in the community pool", + Long: strings.TrimSpace( + fmt.Sprintf(`Query all coins in the community pool which is under Governance control. + +Example: +$ %s query distribution community-pool +`, + version.AppName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + + cl := MustQueryClientFromContext(ctx) + + res, err := cl.Query().Distribution().CommunityPool(cmd.Context(), &types.QueryCommunityPoolRequest{}) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + return cmd +} + +// GetQueryDistributionTokenizeShareRecordRewardCmd implements the query tokenize share record rewards +func GetQueryDistributionTokenizeShareRecordRewardCmd() *cobra.Command { + bech32PrefixAccAddr := sdk.GetConfig().GetBech32AccountAddrPrefix() + + cmd := &cobra.Command{ + Use: "tokenize-share-record-rewards [owner]", + Args: cobra.ExactArgs(1), + Short: "Query distribution tokenize share record rewards", + Long: strings.TrimSpace( + fmt.Sprintf(`Query the query tokenize share record rewards. 
+ +Example: +$ %s query distribution tokenize-share-record-rewards %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj +`, + version.AppName, bech32PrefixAccAddr, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + + cl := MustQueryClientFromContext(ctx) + + ownerAddr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + res, err := cl.Query().Distribution().TokenizeShareRecordReward( + cmd.Context(), + &types.QueryTokenizeShareRecordRewardRequest{OwnerAddress: ownerAddr.String()}, + ) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + return cmd +} diff --git a/go/cli/distribution_suite_test.go b/go/cli/distribution_suite_test.go new file mode 100644 index 00000000..dd1f4cc5 --- /dev/null +++ b/go/cli/distribution_suite_test.go @@ -0,0 +1,717 @@ +package cli_test + +import ( + "bytes" + "context" + "io" + "strings" + + sdkmath "cosmossdk.io/math" + + abci "github.com/cometbft/cometbft/abci/types" + rpcclientmock "github.com/cometbft/cometbft/rpc/client/mock" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/testutil" + "github.com/cosmos/cosmos-sdk/testutil/network" + sdk "github.com/cosmos/cosmos-sdk/types" + testutilmod "github.com/cosmos/cosmos-sdk/types/module/testutil" + "github.com/cosmos/cosmos-sdk/x/bank" + distrtestutil "github.com/cosmos/cosmos-sdk/x/distribution/testutil" + "github.com/cosmos/cosmos-sdk/x/gov" + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + "github.com/cosmos/gogoproto/proto" + + "pkg.akt.dev/go/cli" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +type DistributionCLITestSuite struct { + CLITestSuite +} + +func (s *DistributionCLITestSuite) SetupSuite() { + s.encCfg = testutilmod.MakeTestEncodingConfig(gov.AppModuleBasic{}, bank.AppModuleBasic{}) + s.kr = keyring.NewInMemory(s.encCfg.Codec) + s.baseCtx = client.Context{}. + WithKeyring(s.kr). + WithTxConfig(s.encCfg.TxConfig). + WithCodec(s.encCfg.Codec). + WithLegacyAmino(s.encCfg.Amino). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). + WithChainID("test-chain"). + WithSignModeStr("direct") + + var outBuf bytes.Buffer + ctxGen := func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&sdk.TxResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return s.baseCtx.WithClient(c) + } + s.cctx = ctxGen().WithOutput(&outBuf) + + cfg, err := network.DefaultConfigWithAppConfig(distrtestutil.AppConfig) + s.Require().NoError(err) + + genesisState := cfg.GenesisState + + var mintData minttypes.GenesisState + s.Require().NoError(cfg.Codec.UnmarshalJSON(genesisState[minttypes.ModuleName], &mintData)) + + inflation := sdkmath.LegacyMustNewDecFromStr("1.0") + mintData.Minter.Inflation = inflation + mintData.Params.InflationMin = inflation + mintData.Params.InflationMax = inflation + + mintDataBz, err := cfg.Codec.MarshalJSON(&mintData) + s.Require().NoError(err) + genesisState[minttypes.ModuleName] = mintDataBz + + cfg.GenesisState = genesisState +} + +func (s *DistributionCLITestSuite) TestGetCmdQueryParams() { + testCases := []struct { + name string + args []string + expectedOutput string + }{ + { + "json output", + cli.TestFlags(). 
+ WithOutputJSON(), + `{"community_tax":"0","base_proposer_reward":"0","bonus_proposer_reward":"0","withdraw_addr_enabled":false}`, + }, + { + "text output", + cli.TestFlags(). + WithOutputText(), + `base_proposer_reward: "0" +bonus_proposer_reward: "0" +community_tax: "0" +withdraw_addr_enabled: false`, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryDistributionParamsCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + s.Require().NoError(err) + s.Require().Equal(tc.expectedOutput, strings.TrimSpace(out.String())) + }) + } +} + +func (s *DistributionCLITestSuite) TestGetCmdQueryValidatorDistributionInfo() { + addr := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + val := sdk.ValAddress(addr[0].Address.String()) + + testCases := []struct { + name string + args []string + expErr bool + }{ + { + "invalid val address", + cli.TestFlags(). + With("invalid address"). + WithOutputJSON(), + true, + }, + { + "json output", + cli.TestFlags(). + With(val.String()). + WithOutputJSON(), + false, + }, + { + "text output", + cli.TestFlags(). + With(val.String()). + WithOutputText(), + false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryDistributionValidatorDistributionInfoCmd() + + _, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + if tc.expErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + } + }) + } +} + +func (s *DistributionCLITestSuite) TestGetCmdQueryValidatorOutstandingRewards() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + testCases := []struct { + name string + args []string + expectErr bool + expectedOutput string + }{ + { + "invalid validator address", + cli.TestFlags(). + With("foo"). + WithHeight(3), + true, + "", + }, + { + "json output", + cli.TestFlags(). + With(sdk.ValAddress(val[0].Address).String()). + WithHeight(3). + WithOutputJSON(), + false, + `{"rewards":[]}`, + }, + { + "text output", + cli.TestFlags(). + With(sdk.ValAddress(val[0].Address).String()). + WithHeight(3). + WithOutputText(), + false, + `rewards: []`, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryDistributionValidatorOutstandingRewardsCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().Equal(tc.expectedOutput, strings.TrimSpace(out.String())) + } + }) + } +} + +func (s *DistributionCLITestSuite) TestGetCmdQueryValidatorCommission() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + testCases := []struct { + name string + args []string + expectErr bool + expectedOutput string + }{ + { + "invalid validator address", + cli.TestFlags(). + With("foo"). + WithHeight(3), + true, + "", + }, + { + "json output", + cli.TestFlags(). + With(sdk.ValAddress(val[0].Address).String()). + WithHeight(3). + WithOutputJSON(), + false, + `{"commission":[]}`, + }, + { + "text output", + cli.TestFlags(). + With(sdk.ValAddress(val[0].Address).String()). + WithHeight(3). + WithOutputText(), + false, + `commission: []`, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryDistributionValidatorCommissionCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) 
+ if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().Equal(tc.expectedOutput, strings.TrimSpace(out.String())) + } + }) + } +} + +func (s *DistributionCLITestSuite) TestGetCmdQueryValidatorSlashes() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + testCases := []struct { + name string + args []string + expectErr bool + expectedOutput string + }{ + { + "invalid validator address", + cli.TestFlags(). + With( + "foo", + "1", + "3", + ). + WithHeight(3), + true, + "", + }, + { + "invalid start height", + cli.TestFlags(). + With( + sdk.ValAddress(val[0].Address).String(), + "-1", + "3", + ). + WithHeight(3), + true, + "", + }, + { + "invalid end height", + cli.TestFlags(). + With( + sdk.ValAddress(val[0].Address).String(), + "1", + "-3", + ). + WithHeight(3), + true, + "", + }, + { + "json output", + cli.TestFlags(). + With( + sdk.ValAddress(val[0].Address).String(), + "1", + "3", + ). + WithHeight(3). + WithOutputJSON(), + false, + "{\"slashes\":[],\"pagination\":null}", + }, + { + "text output", + cli.TestFlags(). + With( + sdk.ValAddress(val[0].Address).String(), + "1", + "3", + ). + WithHeight(3). + WithOutputText(), + false, + "pagination: null\nslashes: []", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryDistributionValidatorSlashesCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().Equal(tc.expectedOutput, strings.TrimSpace(out.String())) + } + }) + } +} + +func (s *DistributionCLITestSuite) TestGetCmdQueryDelegatorRewards() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + addr := val[0].Address + valAddr := sdk.ValAddress(addr) + + testCases := []struct { + name string + args []string + expectErr bool + expectedOutput string + }{ + { + "invalid delegator address", + cli.TestFlags(). + With( + "foo", + valAddr.String(), + ). + WithHeight(5), + true, + "", + }, + { + "invalid validator address", + cli.TestFlags(). + With( + addr.String(), + "foo", + ). + WithHeight(5), + true, + "", + }, + { + "json output", + cli.TestFlags(). + With( + addr.String(), + ). + WithHeight(5). + WithOutputJSON(), + false, + `{"rewards":[],"total":[]}`, + }, + { + "json output (specific validator)", + cli.TestFlags(). + With( + addr.String(), + valAddr.String(), + ). + WithHeight(5). + WithOutputJSON(), + false, + `{"rewards":[]}`, + }, + { + "text output", + cli.TestFlags(). + With( + addr.String(), + ). + WithHeight(5). + WithOutputText(), + false, + `rewards: [] +total: []`, + }, + { + "text output (specific validator)", + cli.TestFlags(). + With( + addr.String(), + valAddr.String(), + ). + WithHeight(5). + WithOutputText(), + false, + `rewards: []`, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryDistributionDelegatorRewardsCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().Equal(tc.expectedOutput, strings.TrimSpace(out.String())) + } + }) + } +} + +func (s *DistributionCLITestSuite) TestGetCmdQueryCommunityPool() { + testCases := []struct { + name string + args []string + expectedOutput string + }{ + { + "json output", + cli.TestFlags(). + WithHeight(3). + WithOutputJSON(), + `{"pool":[]}`, + }, + { + "text output", + cli.TestFlags(). + WithHeight(3). 
+ WithOutputText(), + `pool: []`, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryDistributionCommunityPoolCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + s.Require().NoError(err) + s.Require().Equal(tc.expectedOutput, strings.TrimSpace(out.String())) + }) + } +} + +func (s *DistributionCLITestSuite) TestNewWithdrawRewardsCmd() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + testCases := []struct { + name string + args []string + expectErr bool + respType proto.Message + }{ + { + "invalid validator address", + cli.TestFlags(). + With( + val[0].Address.String(), + ). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + true, nil, + }, + { + "valid transaction", + cli.TestFlags(). + With( + sdk.ValAddress(val[0].Address).String(), + ). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + false, &sdk.TxResponse{}, + }, + { + "valid transaction (with commission)", + cli.TestFlags(). + With( + sdk.ValAddress(val[0].Address).String(), + ). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithCommission(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + false, &sdk.TxResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxDistributionWithdrawRewardsCmd() + + bz, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(bz.Bytes(), tc.respType), string(bz.Bytes())) + } + }) + } +} + +func (s *DistributionCLITestSuite) TestNewWithdrawAllRewardsCmd() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + testCases := []struct { + name string + args []string + expectErr bool + expErrMsg string + respType proto.Message + }{ + { + "invalid transaction (offline)", + cli.TestFlags(). + WithFrom(val[0].Address.String()). + WithOffline(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + true, + "cannot generate tx in offline mode", + nil, + }, + // { + // "valid transaction", + // cli.TestFlags(). + // WithFrom(val[0].Address.String()). + // WithSkipConfirm(). + // WithBroadcastModeSync(). + // WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + // false, "", &sdk.TxResponse{}, + // }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxDistributionWithdrawAllRewardsCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + s.Require().Contains(err.Error(), tc.expErrMsg) + } else { + s.Require().NoError(err) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + } + }) + } +} + +func (s *DistributionCLITestSuite) TestNewSetWithdrawAddrCmd() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + testCases := []struct { + name string + args []string + expectErr bool + respType proto.Message + }{ + { + "invalid withdraw address", + cli.TestFlags(). + With("foo"). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). 
+ WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + true, nil, + }, + { + "valid transaction", + cli.TestFlags(). + With(val[0].Address.String()). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + false, &sdk.TxResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxDistributionSetWithdrawAddrCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + } + }) + } +} + +func (s *DistributionCLITestSuite) TestNewFundCommunityPoolCmd() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + testCases := []struct { + name string + args []string + expectErr bool + respType proto.Message + }{ + { + "invalid funding amount", + cli.TestFlags(). + With("-43foocoin"). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + true, nil, + }, + { + "valid transaction", + cli.TestFlags(). + With(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(5431))).String()). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + false, &sdk.TxResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxDistributionFundCommunityPoolCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + } + }) + } +} + +func (s *DistributionCLITestSuite) TestNewWithdrawAllTokenizeShareRecordRewardCmd() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + testCases := []struct { + name string + args []string + expectErr bool + expectedCode uint32 + respType proto.Message + }{ + { + "valid transaction of withdraw tokenize share record reward", + cli.TestFlags(). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))), + false, 0, &sdk.TxResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxDistributionWithdrawAllTokenizeShareRecordRewardCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) 
+ if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err, out.String()) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + + txResp := tc.respType.(*sdk.TxResponse) + s.Require().Equal(tc.expectedCode, txResp.Code, out.String()) + } + }) + } +} diff --git a/go/cli/distribution_tx.go b/go/cli/distribution_tx.go new file mode 100644 index 00000000..8ab91637 --- /dev/null +++ b/go/cli/distribution_tx.go @@ -0,0 +1,361 @@ +package cli + +import ( + "context" + "fmt" + "strconv" + "strings" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/distribution/types" + "github.com/spf13/cobra" + + cclient "pkg.akt.dev/go/node/client/v1beta3" +) + +// Transaction flags for the x/distribution module +var ( + FlagCommission = "commission" + FlagMaxMessagesPerTx = "max-msgs" +) + +const ( + MaxMessagesPerTxDefault = 0 +) + +// getTxDistributionCmd returns a root CLI command handler for all x/distribution transaction commands. +func getTxDistributionCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Distribution transactions subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetTxDistributionWithdrawRewardsCmd(), + GetTxDistributionWithdrawAllRewardsCmd(), + GetTxDistributionSetWithdrawAddrCmd(), + GetTxDistributionFundCommunityPoolCmd(), + GetTxDistributionWithdrawTokenizeShareRecordRewardCmd(), + GetTxDistributionWithdrawAllTokenizeShareRecordRewardCmd(), + ) + + return cmd +} + +type distrGenerateOrBroadcastFunc func(context.Context, []sdk.Msg, ...cclient.BroadcastOption) (interface{}, error) + +func newSplitAndApply( + ctx context.Context, + genOrBroadcastFn distrGenerateOrBroadcastFunc, + msgs []sdk.Msg, + chunkSize int, + opts ...cclient.BroadcastOption, +) error { + if chunkSize == 0 { + if _, err := genOrBroadcastFn(ctx, msgs, opts...); err != nil { + return err + } + } + + // split messages into slices of length chunkSize + totalMessages := len(msgs) + for i := 0; i < len(msgs); i += chunkSize { + sliceEnd := i + chunkSize + if sliceEnd > totalMessages { + sliceEnd = totalMessages + } + + msgChunk := msgs[i:sliceEnd] + _, err := genOrBroadcastFn(ctx, msgChunk, opts...) + if err != nil { + return err + } + } + + return nil +} + +// GetTxDistributionWithdrawRewardsCmd returns a CLI command handler for creating a MsgWithdrawDelegatorReward transaction. +func GetTxDistributionWithdrawRewardsCmd() *cobra.Command { + bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix() + + cmd := &cobra.Command{ + Use: "withdraw-rewards [validator-addr]", + Short: "Withdraw rewards from a given delegation address, and optionally withdraw validator commission if the delegation address given is a validator operator", + Long: strings.TrimSpace( + fmt.Sprintf(`Withdraw rewards from a given delegation address, +and optionally withdraw validator commission if the delegation address given is a validator operator. 
+ +Example: +$ %s tx distribution withdraw-rewards %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj --from mykey +$ %s tx distribution withdraw-rewards %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj --from mykey --commission +`, + version.AppName, bech32PrefixValAddr, version.AppName, bech32PrefixValAddr, + ), + ), + Args: cobra.ExactArgs(1), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + delAddr := cctx.GetFromAddress() + valAddr, err := sdk.ValAddressFromBech32(args[0]) + if err != nil { + return err + } + + msgs := []sdk.Msg{types.NewMsgWithdrawDelegatorReward(delAddr, valAddr)} + + if commission, _ := cmd.Flags().GetBool(FlagCommission); commission { + msgs = append(msgs, types.NewMsgWithdrawValidatorCommission(valAddr)) + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, msgs) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cmd.Flags().Bool(FlagCommission, false, "Withdraw the validator's commission in addition to the rewards") + flags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// GetTxDistributionWithdrawAllRewardsCmd returns a CLI command handler for creating a MsgWithdrawDelegatorReward transaction. +func GetTxDistributionWithdrawAllRewardsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "withdraw-all-rewards", + Short: "withdraw all delegations rewards for a delegator", + Long: strings.TrimSpace( + fmt.Sprintf(`Withdraw all rewards for a single delegator. +Note that if you use this command with --%[2]s=%[3]s or --%[2]s=%[4]s, the %[5]s flag will automatically be set to 0. + +Example: +$ %[1]s tx distribution withdraw-all-rewards --from mykey +`, + version.AppName, flags.FlagBroadcastMode, flags.BroadcastSync, flags.BroadcastAsync, FlagMaxMessagesPerTx, + ), + ), + Args: cobra.NoArgs, + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + delAddr := cctx.GetFromAddress() + + // The transaction cannot be generated offline since it requires a query + // to get all the validators. + if cctx.Offline { + return fmt.Errorf("cannot generate tx in offline mode") + } + + delValsRes, err := cl.Query().Distribution().DelegatorValidators(ctx, &types.QueryDelegatorValidatorsRequest{DelegatorAddress: delAddr.String()}) + if err != nil { + return err + } + + validators := delValsRes.Validators + // build multi-message transaction + msgs := make([]sdk.Msg, 0, len(validators)) + for _, valAddr := range validators { + val, err := sdk.ValAddressFromBech32(valAddr) + if err != nil { + return err + } + + msg := types.NewMsgWithdrawDelegatorReward(delAddr, val) + msgs = append(msgs, msg) + } + + chunkSize, _ := cmd.Flags().GetInt(FlagMaxMessagesPerTx) + + return newSplitAndApply(ctx, cl.Tx().BroadcastMsgs, msgs, chunkSize) + }, + } + + cmd.Flags().Int(FlagMaxMessagesPerTx, MaxMessagesPerTxDefault, "Limit the number of messages per tx (0 for unlimited)") + flags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// GetTxDistributionSetWithdrawAddrCmd returns a CLI command handler for creating a MsgSetWithdrawAddress transaction. 
+func GetTxDistributionSetWithdrawAddrCmd() *cobra.Command { + bech32PrefixAccAddr := sdk.GetConfig().GetBech32AccountAddrPrefix() + + cmd := &cobra.Command{ + Use: "set-withdraw-addr [withdraw-addr]", + Short: "change the default withdraw address for rewards associated with an address", + Long: strings.TrimSpace( + fmt.Sprintf(`Set the withdraw address for rewards associated with a delegator address. + +Example: +$ %s tx distribution set-withdraw-addr %s1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p --from mykey +`, + version.AppName, bech32PrefixAccAddr, + ), + ), + Args: cobra.ExactArgs(1), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + delAddr := cctx.GetFromAddress() + withdrawAddr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + msg := types.NewMsgSetWithdrawAddress(delAddr, withdrawAddr) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// GetTxDistributionFundCommunityPoolCmd returns a CLI command handler for creating a MsgFundCommunityPool transaction. +func GetTxDistributionFundCommunityPoolCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "fund-community-pool [amount]", + Args: cobra.ExactArgs(1), + Short: "Funds the community pool with the specified amount", + Long: strings.TrimSpace( + fmt.Sprintf(`Funds the community pool with the specified amount + +Example: +$ %s tx distribution fund-community-pool 100uatom --from mykey +`, + version.AppName, + ), + ), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + depositorAddr := cctx.GetFromAddress() + amount, err := sdk.ParseCoinsNormalized(args[0]) + if err != nil { + return err + } + + msg := types.NewMsgFundCommunityPool(amount, depositorAddr) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// GetTxDistributionWithdrawAllTokenizeShareRecordRewardCmd defines a method to withdraw reward for all owning TokenizeShareRecord +func GetTxDistributionWithdrawAllTokenizeShareRecordRewardCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "withdraw-all-tokenize-share-rewards", + Args: cobra.ExactArgs(0), + Short: "Withdraw reward for all owning TokenizeShareRecord", + Long: strings.TrimSpace( + fmt.Sprintf(`Withdraw reward for all owned TokenizeShareRecord + +Example: +$ %s tx distribution withdraw-tokenize-share-rewards --from mykey +`, + version.AppName, + ), + ), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + msg := types.NewMsgWithdrawAllTokenizeShareRecordReward(cctx.GetFromAddress()) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// GetTxDistributionWithdrawTokenizeShareRecordRewardCmd defines a method to withdraw reward for an owning TokenizeShareRecord +func GetTxDistributionWithdrawTokenizeShareRecordRewardCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: 
"withdraw-tokenize-share-rewards", + Args: cobra.ExactArgs(1), + Short: "Withdraw reward for an owning TokenizeShareRecord", + Long: strings.TrimSpace( + fmt.Sprintf(`Withdraw reward for an owned TokenizeShareRecord + +Example: +$ %s tx distribution withdraw-tokenize-share-rewards 1 --from mykey +`, + version.AppName, + ), + ), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + recordID, err := strconv.ParseUint(args[0], 10, 0) + if err != nil { + return err + } + + msg := types.NewMsgWithdrawTokenizeShareRecordReward(cctx.GetFromAddress(), recordID) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + flags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/go/cli/distribution_tx_test.go b/go/cli/distribution_tx_test.go new file mode 100644 index 00000000..46d2fb9e --- /dev/null +++ b/go/cli/distribution_tx_test.go @@ -0,0 +1,55 @@ +package cli + +import ( + "context" + "testing" + + "github.com/stretchr/testify/require" + cclient "pkg.akt.dev/go/node/client/v1beta3" + + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + "github.com/cosmos/cosmos-sdk/testutil/testdata" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func Test_splitAndCall_NoMessages(t *testing.T) { + err := newSplitAndApply(context.TODO(), nil, nil, 10) + require.NoError(t, err, "") +} + +func Test_splitAndCall_Splitting(t *testing.T) { + addr := sdk.AccAddress(secp256k1.GenPrivKey().PubKey().Address()) + + // Add five messages + msgs := []sdk.Msg{ + testdata.NewTestMsg(addr), + testdata.NewTestMsg(addr), + testdata.NewTestMsg(addr), + testdata.NewTestMsg(addr), + testdata.NewTestMsg(addr), + } + + // Keep track of number of calls + const chunkSize = 2 + + callCount := 0 + err := newSplitAndApply( + context.TODO(), + func(_ context.Context, msgs []sdk.Msg, _ ...cclient.BroadcastOption) (interface{}, error) { + callCount++ + + require.NotNil(t, msgs) + + if callCount < 3 { + require.Equal(t, len(msgs), 2) + } else { + require.Equal(t, len(msgs), 1) + } + + return nil, nil + }, + msgs, chunkSize) + + require.NoError(t, err, "") + require.Equal(t, 3, callCount) +} diff --git a/go/cli/escrow_query.go b/go/cli/escrow_query.go new file mode 100644 index 00000000..191f0286 --- /dev/null +++ b/go/cli/escrow_query.go @@ -0,0 +1,142 @@ +package cli + +import ( + "encoding/json" + "errors" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/spf13/cobra" + + "gopkg.in/yaml.v3" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + + cflags "pkg.akt.dev/go/cli/flags" + dv1 "pkg.akt.dev/go/node/deployment/v1" + dv1beta4 "pkg.akt.dev/go/node/deployment/v1beta4" + etypes "pkg.akt.dev/go/node/escrow/v1" + mv1 "pkg.akt.dev/go/node/market/v1" + mv1beta5 "pkg.akt.dev/go/node/market/v1beta5" +) + +var errNoLeaseMatches = errors.New("leases for deployment do not exist") + +func GetQueryEscrowCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: etypes.ModuleName, + Short: "Escrow query commands", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + + cmd.AddCommand( + GetQueryEscrowBlocksRemainingCmd(), + ) + + return cmd +} + +func GetQueryEscrowBlocksRemainingCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "blocks-remaining", + Short: "Compute the number of blocks remaining for an ecrow account", + Args: cobra.ExactArgs(0), + PersistentPreRunE: QueryPersistentPreRunE, + 
RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + id, err := cflags.DeploymentIDFromFlags(cmd.Flags()) + if err != nil { + return err + } + + // Fetch leases matching owner & dseq + leaseRequest := mv1beta5.QueryLeasesRequest{ + Filters: mv1.LeaseFilters{ + Owner: id.Owner, + DSeq: id.DSeq, + GSeq: 0, + OSeq: 0, + Provider: "", + State: mv1.LeaseActive.String(), + }, + Pagination: nil, + } + + leasesResponse, err := cl.Query().Market().Leases(ctx, &leaseRequest) + if err != nil { + return err + } + + if len(leasesResponse.Leases) == 0 { + return errNoLeaseMatches + } + + // Fetch the balance of the escrow account + totalLeaseAmount := leasesResponse.TotalPriceAmount() + blockchainHeight, err := cl.Node().CurrentBlockHeight(ctx) + if err != nil { + return err + } + + res, err := cl.Query().Deployment().Deployment(cmd.Context(), &dv1beta4.QueryDeploymentRequest{ + ID: dv1.DeploymentID{Owner: id.Owner, DSeq: id.DSeq}, + }) + if err != nil { + return err + } + + balanceRemain := LeaseCalcBalanceRemain(res.EscrowAccount.TotalBalance().Amount, + int64(blockchainHeight), + res.EscrowAccount.SettledAt, + totalLeaseAmount) + + blocksRemain := LeaseCalcBlocksRemain(balanceRemain, totalLeaseAmount) + + output := struct { + BalanceRemain float64 `json:"balance_remaining" yaml:"balance_remaining"` + BlocksRemain int64 `json:"blocks_remaining" yaml:"blocks_remaining"` + EstimatedTimeRemain time.Duration `json:"estimated_time_remaining" yaml:"estimated_time_remaining"` + }{ + BalanceRemain: balanceRemain, + BlocksRemain: blocksRemain, + // EstimatedTimeRemain: netutil.AverageBlockTime * time.Duration(blocksRemain), + } + + outputType, err := cmd.Flags().GetString("output") + if err != nil { + return err + } + + var data []byte + if outputType == "json" { + data, err = json.MarshalIndent(output, " ", "\t") + } else { + data, err = yaml.Marshal(output) + } + + if err != nil { + return err + } + + return cl.ClientContext().PrintBytes(data) + + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddDeploymentIDFlags(cmd.Flags()) + cflags.MarkReqDeploymentIDFlags(cmd) + + return cmd +} + +func LeaseCalcBalanceRemain(balance sdk.Dec, currBlock, settledAt int64, leasePrice sdk.Dec) float64 { + return balance.MustFloat64() - (float64(currBlock-settledAt))*leasePrice.MustFloat64() +} + +func LeaseCalcBlocksRemain(balance float64, leasePrice sdk.Dec) int64 { + return int64(balance / leasePrice.MustFloat64()) +} diff --git a/go/cli/escrow_tx.go b/go/cli/escrow_tx.go new file mode 100644 index 00000000..351f5152 --- /dev/null +++ b/go/cli/escrow_tx.go @@ -0,0 +1,7 @@ +package cli + +import "github.com/spf13/cobra" + +func GetTxEscrowCmd() *cobra.Command { + return nil +} diff --git a/go/cli/evidence_query.go b/go/cli/evidence_query.go new file mode 100644 index 00000000..1d431e01 --- /dev/null +++ b/go/cli/evidence_query.go @@ -0,0 +1,71 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/evidence/types" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetQueryEvidenceCmd returns the CLI command with all evidence module query commands +// mounted. 
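// A worked example of the escrow math in LeaseCalcBalanceRemain and
// LeaseCalcBlocksRemain from escrow_query.go above (numbers are assumed, not
// taken from chain data):
//
//	balance := 1_000_000.0 // uakt left in the escrow account
//	leasePrice := 50.0     // uakt charged per block across active leases
//	settledAt, height := int64(100), int64(1_100)
//
//	remaining := balance - float64(height-settledAt)*leasePrice // 1_000_000 - 1_000*50 = 950_000
//	blocks := int64(remaining / leasePrice)                     // 950_000 / 50 = 19_000 blocks left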
+func GetQueryEvidenceCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Query for evidence by hash or for all (paginated) submitted evidence", + Long: strings.TrimSpace( + fmt.Sprintf(`Query for specific submitted evidence by hash or query for all (paginated) evidence: + +Example: +$ %s query %s DF0C23E8634E480F84B9D5674A7CDC9816466DEC28A3358F73260F68D28D7660 +$ %s query %s --page=2 --limit=50 +`, + version.AppName, types.ModuleName, version.AppName, types.ModuleName, + ), + ), + Args: cobra.MaximumNArgs(1), + SuggestionsMinimumDistance: 2, + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + if len(args) > 0 { + params := &types.QueryEvidenceRequest{Hash: args[0]} + res, err := cl.Query().Evidence().Evidence(ctx, params) + if err != nil { + return err + } + + return cl.PrintMessage(res.Evidence) + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + params := &types.QueryAllEvidenceRequest{ + Pagination: pageReq, + } + + res, err := cl.Query().Evidence().AllEvidence(ctx, params) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "evidence") + + return cmd +} diff --git a/go/cli/evidence_query_test.go b/go/cli/evidence_query_test.go new file mode 100644 index 00000000..165f11f2 --- /dev/null +++ b/go/cli/evidence_query_test.go @@ -0,0 +1,117 @@ +package cli_test + +import ( + "fmt" + "io" + "strings" + "testing" + + abci "github.com/cometbft/cometbft/abci/types" + rpcclientmock "github.com/cometbft/cometbft/rpc/client/mock" + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli" + sdk "github.com/cosmos/cosmos-sdk/types" + testutilmod "github.com/cosmos/cosmos-sdk/types/module/testutil" + "github.com/cosmos/cosmos-sdk/x/evidence" + + "pkg.akt.dev/go/cli" + cflags "pkg.akt.dev/go/cli/flags" +) + +func TestGetQueryCmd(t *testing.T) { + encCfg := testutilmod.MakeTestEncodingConfig(evidence.AppModuleBasic{}) + kr := keyring.NewInMemory(encCfg.Codec) + baseCtx := client.Context{}. + WithKeyring(kr). + WithTxConfig(encCfg.TxConfig). + WithCodec(encCfg.Codec). + WithLegacyAmino(encCfg.Amino). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). + WithChainID("test-chain"). + WithSignModeStr(cflags.SignModeDirect) + + testCases := map[string]struct { + args []string + ctxGen func() client.Context + expCmdOutput string + expectedOutput string + expectErr bool + }{ + "non-existent evidence": { + cli.TestFlags(). + With("DF0C23E8634E480F84B9D5674A7CDC9816466DEC28A3358F73260F68D28D7660"), + func() client.Context { + bz, _ := encCfg.Codec.Marshal(&sdk.TxResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return baseCtx.WithClient(c) + }, + "DF0C23E8634E480F84B9D5674A7CDC9816466DEC28A3358F73260F68D28D7660", + "", + true, + }, + "all evidence (default pagination)": { + cli.TestFlags(). 
+ WithOutputText(), + func() client.Context { + bz, _ := encCfg.Codec.Marshal(&sdk.TxResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return baseCtx.WithClient(c) + }, + "", + "evidence: []\npagination: null", + false, + }, + "all evidence (json output)": { + cli.TestFlags(). + WithOutputJSON(), + func() client.Context { + bz, _ := encCfg.Codec.Marshal(&sdk.TxResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return baseCtx.WithClient(c) + }, + "", + `{"evidence":[],"pagination":null}`, + false, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + cmd := cli.GetQueryEvidenceCmd() + + // var outBuf bytes.Buffer + // + // clientCtx := tc.ctxGen().WithOutput(&outBuf) + // ctx := svrcmd.CreateExecuteContext(context.Background()) + // + // cmd.SetContext(ctx) + // cmd.SetArgs(tc.args) + + // require.NoError(t, client.SetCmdClientContextHandler(clientCtx, cmd)) + + if len(tc.args) != 0 { + require.Contains(t, fmt.Sprint(cmd), tc.expCmdOutput) + } + + out, err := clitestutil.ExecTestCLICmd(tc.ctxGen(), cmd, tc.args) + if tc.expectErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + require.Contains(t, fmt.Sprint(cmd), "evidence [] [] Query for evidence by hash or for all (paginated) submitted evidence") + require.Contains(t, strings.TrimSpace(out.String()), tc.expectedOutput) + }) + } +} diff --git a/go/cli/evidence_tx.go b/go/cli/evidence_tx.go new file mode 100644 index 00000000..9eccf2ad --- /dev/null +++ b/go/cli/evidence_tx.go @@ -0,0 +1,45 @@ +package cli + +import ( + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/x/evidence/types" + + "github.com/spf13/cobra" +) + +// GetTxEvidenceCmd returns a CLI command that has all the native evidence module tx +// commands mounted. In addition, it mounts all childCmds, implemented by outside +// modules, under a sub-command. This allows external modules to implement custom +// Evidence types and Handlers while having the ability to create and sign txs +// containing them all from a single root command. +func GetTxEvidenceCmd(childCmds []*cobra.Command) *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Evidence transaction subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + submitEvidenceCmd := SubmitEvidenceCmd() + + for _, childCmd := range childCmds { + submitEvidenceCmd.AddCommand(childCmd) + } + + // TODO: Add tx commands. + + return cmd +} + +// SubmitEvidenceCmd returns the top-level evidence submission command handler. +// All concrete evidence submission child command handlers should be registered +// under this command. 
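// A sketch of how an external module would mount a custom evidence child
// command (myEvidenceCmd is hypothetical, not part of this package):
//
//	myEvidenceCmd := &cobra.Command{Use: "equivocation [height] [power] [cons-addr]"}
//	txCmd := GetTxEvidenceCmd([]*cobra.Command{myEvidenceCmd})
//	_ = txCmd // wired into the application's root tx command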
+func SubmitEvidenceCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "submit", + Short: "Submit arbitrary evidence of misbehavior", + } + + return cmd +} diff --git a/go/cli/feegrant_query.go b/go/cli/feegrant_query.go new file mode 100644 index 00000000..2642a5b2 --- /dev/null +++ b/go/cli/feegrant_query.go @@ -0,0 +1,180 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/feegrant" +) + +// GetQueryFeegrantCmd returns the cli query commands for this module +func GetQueryFeegrantCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: feegrant.ModuleName, + Short: "Querying commands for the feegrant module", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetQueryFeeGrantCmd(), + GetQueryFeeGrantsByGranteeCmd(), + GetQueryFeeGrantsByGranterCmd(), + ) + + return cmd +} + +// GetQueryFeeGrantCmd returns cmd to query for a grant between granter and grantee. +func GetQueryFeeGrantCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "grant [granter] [grantee]", + Args: cobra.ExactArgs(2), + Short: "Query details of a single grant", + Long: strings.TrimSpace( + fmt.Sprintf(`Query details for a grant. +You can find the fee-grant of a granter and grantee. + +Example: +$ %s query feegrant grant [granter] [grantee] +`, version.AppName), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + granterAddr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + granteeAddr, err := sdk.AccAddressFromBech32(args[1]) + if err != nil { + return err + } + + res, err := cl.Query().Feegrant().Allowance( + cmd.Context(), + &feegrant.QueryAllowanceRequest{ + Granter: granterAddr.String(), + Grantee: granteeAddr.String(), + }, + ) + if err != nil { + return err + } + + return cl.PrintMessage(res.Allowance) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryFeeGrantsByGranteeCmd returns cmd to query for all grants for a grantee. +func GetQueryFeeGrantsByGranteeCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "grants-by-grantee [grantee]", + Args: cobra.ExactArgs(1), + Short: "Query all grants of a grantee", + Long: strings.TrimSpace( + fmt.Sprintf(`Queries all the grants for a grantee address. + +Example: +$ %s query feegrant grants-by-grantee [grantee] +`, version.AppName), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + granteeAddr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + res, err := cl.Query().Feegrant().Allowances( + cmd.Context(), + &feegrant.QueryAllowancesRequest{ + Grantee: granteeAddr.String(), + Pagination: pageReq, + }, + ) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + flags.AddPaginationFlagsToCmd(cmd, "grants") + + return cmd +} + +// GetQueryFeeGrantsByGranterCmd returns cmd to query for all grants by a granter. 
+func GetQueryFeeGrantsByGranterCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "grants-by-granter [granter]", + Args: cobra.ExactArgs(1), + Short: "Query all grants by a granter", + Long: strings.TrimSpace( + fmt.Sprintf(`Queries all the grants issued for a granter address. + +Example: +$ %s query feegrant grants-by-granter [granter] +`, version.AppName), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + granterAddr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + res, err := cl.Query().Feegrant().AllowancesByGranter( + cmd.Context(), + &feegrant.QueryAllowancesByGranterRequest{ + Granter: granterAddr.String(), + Pagination: pageReq, + }, + ) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + flags.AddPaginationFlagsToCmd(cmd, "grants") + + return cmd +} diff --git a/go/cli/feegrant_query_test.go b/go/cli/feegrant_query_test.go new file mode 100644 index 00000000..b9153760 --- /dev/null +++ b/go/cli/feegrant_query_test.go @@ -0,0 +1,147 @@ +package cli_test + +import ( + "context" + "fmt" + + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/x/feegrant" + + "pkg.akt.dev/go/cli" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +func (s *FeegrantCLITestSuite) TestCmdGetFeeGrant() { + granter := s.addedGranter + grantee := s.addedGrantee + + testCases := []struct { + name string + args []string + expectErrMsg string + expectErr bool + respType *feegrant.QueryAllowanceResponse + resp *feegrant.Grant + }{ + { + "wrong granter", + []string{ + "wrong_granter", + grantee.String(), + fmt.Sprintf("--%s=json", flags.FlagOutput), + }, + "decoding bech32 failed", + true, nil, nil, + }, + { + "wrong grantee", + []string{ + granter.String(), + "wrong_grantee", + fmt.Sprintf("--%s=json", flags.FlagOutput), + }, + "decoding bech32 failed", + true, nil, nil, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryFeeGrantCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + + if tc.expectErr { + s.Require().Error(err) + s.Require().Contains(err.Error(), tc.expectErrMsg) + } else { + s.Require().NoError(err) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + } + }) + } +} + +func (s *FeegrantCLITestSuite) TestCmdGetFeeGrantsByGrantee() { + grantee := s.addedGrantee + cctx := s.cctx + + testCases := []struct { + name string + args []string + expectErr bool + resp *feegrant.QueryAllowancesResponse + expectLength int + }{ + { + "wrong grantee", + cli.TestFlags(). + With("wrong_grantee"). + WithOutputJSON(), + true, nil, 0, + }, + { + "valid req", + cli.TestFlags(). + With(grantee.String()). + WithOutputJSON(), + false, &feegrant.QueryAllowancesResponse{}, 1, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryFeeGrantsByGranteeCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) 
+ + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), tc.resp), out.String()) + } + }) + } +} + +func (s *FeegrantCLITestSuite) TestCmdGetFeeGrantsByGranter() { + granter := s.addedGranter + cctx := s.cctx + + testCases := []struct { + name string + args []string + expectErr bool + resp *feegrant.QueryAllowancesByGranterResponse + expectLength int + }{ + { + "wrong granter", + cli.TestFlags(). + With("wrong_granter"). + WithOutputJSON(), + true, nil, 0, + }, + { + "valid req", + cli.TestFlags(). + With(granter.String()). + WithOutputJSON(), + false, &feegrant.QueryAllowancesByGranterResponse{}, 1, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryFeeGrantsByGranterCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), tc.resp), out.String()) + } + }) + } +} diff --git a/go/cli/feegrant_suite_test.go b/go/cli/feegrant_suite_test.go new file mode 100644 index 00000000..cb0a7801 --- /dev/null +++ b/go/cli/feegrant_suite_test.go @@ -0,0 +1,80 @@ +package cli_test + +import ( + "bytes" + "io" + "testing" + + sdkmath "cosmossdk.io/math" + abci "github.com/cometbft/cometbft/abci/types" + rpcclientmock "github.com/cometbft/cometbft/rpc/client/mock" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + testutilmod "github.com/cosmos/cosmos-sdk/types/module/testutil" + "github.com/cosmos/cosmos-sdk/x/feegrant" + "github.com/cosmos/cosmos-sdk/x/feegrant/module" + + cflags "pkg.akt.dev/go/cli/flags" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +type FeegrantCLITestSuite struct { + CLITestSuite + + addedGranter sdk.AccAddress + addedGrantee sdk.AccAddress + addedGrant feegrant.Grant + accounts []sdk.AccAddress +} + +func (s *FeegrantCLITestSuite) SetupSuite() { + s.encCfg = testutilmod.MakeTestEncodingConfig(module.AppModuleBasic{}) + s.kr = keyring.NewInMemory(s.encCfg.Codec) + s.baseCtx = client.Context{}. + WithKeyring(s.kr). + WithTxConfig(s.encCfg.TxConfig). + WithCodec(s.encCfg.Codec). + WithLegacyAmino(s.encCfg.Amino). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). + WithChainID("test-chain"). 
+ WithSignModeStr(cflags.SignModeDirect) + + var outBuf bytes.Buffer + ctxGen := func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&sdk.TxResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + + return s.baseCtx.WithClient(c) + } + s.cctx = ctxGen().WithOutput(&outBuf) + + if testing.Short() { + s.T().Skip("skipping test in unit-tests mode.") + } + + accounts := testutil.CreateKeyringAccounts(s.T(), s.kr, 2) + + granter := accounts[0].Address + grantee := accounts[1].Address + + s.createGrant(granter, grantee) + + grant, err := feegrant.NewGrant(granter, grantee, &feegrant.BasicAllowance{ + SpendLimit: sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(100))), + }) + s.Require().NoError(err) + + s.addedGrant = grant + s.addedGranter = granter + s.addedGrantee = grantee + for _, v := range accounts { + s.accounts = append(s.accounts, v.Address) + } + s.accounts[1] = accounts[1].Address +} diff --git a/go/cli/feegrant_tx.go b/go/cli/feegrant_tx.go new file mode 100644 index 00000000..86086539 --- /dev/null +++ b/go/cli/feegrant_tx.go @@ -0,0 +1,222 @@ +package cli + +import ( + "fmt" + "strings" + "time" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/feegrant" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetTxFeegrantCmd returns the transaction commands for this module +func GetTxFeegrantCmd() *cobra.Command { + feegrantTxCmd := &cobra.Command{ + Use: feegrant.ModuleName, + Short: "Feegrant transactions subcommands", + Long: "Grant and revoke fee allowance for a grantee by a granter", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + feegrantTxCmd.AddCommand( + GetTxFeegrantGrantCmd(), + GetTxFeegrantRevokeCmd(), + ) + + return feegrantTxCmd +} + +// GetTxFeegrantGrantCmd returns a CLI command handler for creating a MsgGrantAllowance transaction. +func GetTxFeegrantGrantCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "grant [grantee]", + Short: "Grant Fee allowance to an address", + Long: strings.TrimSpace( + fmt.Sprintf( + `Grant authorization to pay fees from your address. + +Examples: +%s tx %s grant akash1skjw... --spend-limit 100uakt --from --expiration 2022-01-30T15:04:05Z or +%s tx %s grant akash1skjw... --spend-limit 100uakt --from --period 3600 --period-limit 10stake --expiration 2022-01-30T15:04:05Z or +%s tx %s grant akash1skjw... 
--spend-limit 100uakt --from --expiration 2022-01-30T15:04:05Z + --allowed-messages "/cosmos.gov.v1beta1.MsgSubmitProposal,/cosmos.gov.v1beta1.MsgVote" + `, version.AppName, feegrant.ModuleName, version.AppName, feegrant.ModuleName, version.AppName, feegrant.ModuleName, + ), + ), + Args: cobra.ExactArgs(1), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + grantee, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + granter := cctx.GetFromAddress() + sl, err := cmd.Flags().GetString(cflags.FlagSpendLimit) + if err != nil { + return err + } + + // if `FlagSpendLimit` isn't set, limit will be nil + limit, err := sdk.ParseCoinsNormalized(sl) + if err != nil { + return err + } + + exp, err := cmd.Flags().GetString(cflags.FlagExpiration) + if err != nil { + return err + } + + basic := feegrant.BasicAllowance{ + SpendLimit: limit, + } + + var expiresAtTime time.Time + if exp != "" { + expiresAtTime, err = time.Parse(time.RFC3339, exp) + if err != nil { + return err + } + basic.Expiration = &expiresAtTime + } + + var grant feegrant.FeeAllowanceI + grant = &basic + + periodClock, err := cmd.Flags().GetInt64(cflags.FlagPeriod) + if err != nil { + return err + } + + periodLimitVal, err := cmd.Flags().GetString(cflags.FlagPeriodLimit) + if err != nil { + return err + } + + // Check any of period or periodLimit flags set, If set consider it as periodic fee allowance. + if periodClock > 0 || periodLimitVal != "" { + periodLimit, err := sdk.ParseCoinsNormalized(periodLimitVal) + if err != nil { + return err + } + + if periodClock <= 0 { + return fmt.Errorf("period clock was not set") + } + + if periodLimit == nil { + return fmt.Errorf("period limit was not set") + } + + periodReset := getPeriodReset(periodClock) + if exp != "" && periodReset.Sub(expiresAtTime) > 0 { + return fmt.Errorf("period (%d) cannot reset after expiration (%v)", periodClock, exp) + } + + periodic := feegrant.PeriodicAllowance{ + Basic: basic, + Period: getPeriod(periodClock), + PeriodReset: getPeriodReset(periodClock), + PeriodSpendLimit: periodLimit, + PeriodCanSpend: periodLimit, + } + + grant = &periodic + } + + allowedMsgs, err := cmd.Flags().GetStringSlice(cflags.FlagAllowedMsgs) + if err != nil { + return err + } + + if len(allowedMsgs) > 0 { + grant, err = feegrant.NewAllowedMsgAllowance(grant, allowedMsgs) + if err != nil { + return err + } + } + + msg, err := feegrant.NewMsgGrantAllowance(grant, granter, grantee) + if err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + cmd.Flags().StringSlice(cflags.FlagAllowedMsgs, []string{}, "Set of allowed messages for fee allowance") + cmd.Flags().String(cflags.FlagExpiration, "", "The RFC 3339 timestamp after which the grant expires for the user") + cmd.Flags().String(cflags.FlagSpendLimit, "", "Spend limit specifies the max limit can be used, if not mentioned there is no limit") + cmd.Flags().Int64(cflags.FlagPeriod, 0, "period specifies the time duration(in seconds) in which period_limit coins can be spent before that allowance is reset (ex: 3600)") + cmd.Flags().String(cflags.FlagPeriodLimit, "", "period limit specifies the maximum number of coins that can be spent in the period") + + return cmd +} + +// GetTxFeegrantRevokeCmd returns a CLI command handler for creating 
a MsgRevokeAllowance transaction. +func GetTxFeegrantRevokeCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "revoke [grantee]", + Short: "revoke fee-grant", + Long: strings.TrimSpace( + fmt.Sprintf(`revoke fee grant from a granter to a grantee.. + +Example: + $ %s tx %s revoke akash1skj.. --from + `, version.AppName, feegrant.ModuleName), + ), + Args: cobra.ExactArgs(1), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + grantee, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + msg := feegrant.NewMsgRevokeAllowance(cctx.GetFromAddress(), grantee) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{&msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +func getPeriodReset(duration int64) time.Time { + return time.Now().Add(getPeriod(duration)) +} + +func getPeriod(duration int64) time.Duration { + return time.Duration(duration) * time.Second +} diff --git a/go/cli/feegrant_tx_test.go b/go/cli/feegrant_tx_test.go new file mode 100644 index 00000000..9e2e3ceb --- /dev/null +++ b/go/cli/feegrant_tx_test.go @@ -0,0 +1,603 @@ +package cli_test + +import ( + "context" + "strings" + "time" + + sdkmath "cosmossdk.io/math" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" + govv1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + govv1beta1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + "github.com/cosmos/gogoproto/proto" + + "pkg.akt.dev/go/cli" + cflags "pkg.akt.dev/go/cli/flags" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +const ( + oneYear = 365 * 24 * 60 * 60 + tenHours = 10 * 60 * 60 + oneHour = 60 * 60 +) + +// createGrant creates a new basic allowance fee grant from granter to grantee. +func (s *FeegrantCLITestSuite) createGrant(granter, grantee sdk.Address) { + args := cli.TestFlags(). + With( + grantee.String(), + ). + WithFrom(granter.String()). + WithSpendLimit(sdk.NewCoin("uakt", sdkmath.NewInt(100)).String()). + WithExpiration(getFormattedExpiration(oneYear)). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(100)))) + + cmd := cli.GetTxFeegrantGrantCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, args...) + s.Require().NoError(err) + + var resp sdk.TxResponse + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), &resp), out.String()) + s.Require().Equal(resp.Code, uint32(0)) +} + +func (s *FeegrantCLITestSuite) TestNewCmdFeeGrant() { + granter := s.accounts[0] + alreadyExistedGrantee := s.addedGrantee + cctx := s.cctx + + fromAddr, fromName, _, err := client.GetFromFields(s.baseCtx, s.kr, granter.String()) + s.Require().Equal(fromAddr, granter) + s.Require().NoError(err) + + commonArgs := cli.TestFlags(). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))) + + testCases := []struct { + name string + args []string + expectErr bool + expectedCode uint32 + respType proto.Message + }{ + { + "wrong grantee address", + cli.TestFlags(). + With( + granter.String(), + "wrong_grantee", + ). + WithFrom(granter.String()). + WithSpendLimit("100uakt"). + Append(commonArgs), + true, 0, nil, + }, + { + "wrong granter key name", + cli.TestFlags(). 
+ With( + "akash16dun6ehcc86e03wreqqww89ey569wuj4em572w", + ). + WithFrom("invalid_granter"). + WithSpendLimit("100uakt"). + Append(commonArgs), + true, 0, nil, + }, + { + "valid basic fee grant", + cli.TestFlags(). + With( + "akash1nph3cfzk6trsmfxkeu943nvach5qw4vwas7t09", + ). + WithFrom(granter.String()). + WithSpendLimit("100uakt"). + Append(commonArgs), + false, 0, &sdk.TxResponse{}, + }, + { + "valid basic fee grant with granter key name", + cli.TestFlags(). + With( + "akash16dun6ehcc86e03wreqqww89ey569wuj45qeen5", + ). + WithFrom(fromName). + WithSpendLimit("100uakt"). + Append(commonArgs), + false, 0, &sdk.TxResponse{}, + }, + { + "valid basic fee grant with amino", + cli.TestFlags(). + With( + "akash1v57fx2l2rt6ehujuu99u2fw05779m5e23ac9nd", + ). + WithFrom(granter.String()). + WithSpendLimit("100uakt"). + WithSignMode(cflags.SignModeLegacyAminoJSON). + Append(commonArgs), + false, 0, &sdk.TxResponse{}, + }, + { + "valid basic fee grant without spend limit", + cli.TestFlags(). + With( + "akash17h5lzptx3ghvsuhk7wx4c4hnl7rsswxj5cgeqp", + ). + WithFrom(granter.String()). + Append(commonArgs), + false, 0, &sdk.TxResponse{}, + }, + { + "valid basic fee grant without expiration", + cli.TestFlags(). + With( + "akash16dlc38dcqt0uralyd8hksxyrny6kaeqflhlfcw", + ). + WithFrom(granter.String()). + WithSpendLimit("100uakt"). + Append(commonArgs), + false, 0, &sdk.TxResponse{}, + }, + { + "valid basic fee grant without spend-limit and expiration", + cli.TestFlags(). + With( + "akash1ku40qup9vwag4wtf8cls9mkszxfthakltdvkpa", + ). + WithFrom(granter.String()). + Append(commonArgs), + false, 0, &sdk.TxResponse{}, + }, + { + "try to add existed grant", + cli.TestFlags(). + With( + alreadyExistedGrantee.String(), + ). + WithFrom(granter.String()). + WithSpendLimit("100uakt"). + Append(commonArgs), + false, 18, &sdk.TxResponse{}, + }, + { + "invalid number of args(periodic fee grant)", + cli.TestFlags(). + With( + "akash1nph3cfzk6trsmfxkeu943nvach5qw4vwstnvkl", + ). + WithFrom(granter.String()). + WithSpendLimit("100uakt"). + WithPeriodLimit("10uakt"). + WithExpiration(getFormattedExpiration(tenHours)). + Append(commonArgs), + true, 0, nil, + }, + { + "period mentioned and period limit omitted, invalid periodic grant", + cli.TestFlags(). + With( + "akash1nph3cfzk6trsmfxkeu943nvach5qw4vwstnvkl", + ). + WithFrom(granter.String()). + WithSpendLimit("100uakt"). + WithPeriod(tenHours). + WithExpiration(getFormattedExpiration(tenHours)). + Append(commonArgs), + true, 0, nil, + }, + { + "period cannot be greater than the actual expiration(periodic fee grant)", + cli.TestFlags(). + With( + "akash1nph3cfzk6trsmfxkeu943nvach5qw4vwstnvkl", + ). + WithFrom(granter.String()). + WithSpendLimit("100uakt"). + WithPeriodLimit("10uakt"). + WithPeriod(tenHours). + WithExpiration(getFormattedExpiration(oneHour)). + Append(commonArgs), + true, 0, nil, + }, + { + "valid periodic fee grant", + cli.TestFlags(). + With( + "akash1nph3cfzk6trsmfxkeu943nvach5qw4vwas7t09", + ). + WithFrom(granter.String()). + WithSpendLimit("100uakt"). + WithPeriodLimit("10uakt"). + WithPeriod(oneHour). + WithExpiration(getFormattedExpiration(tenHours)). + Append(commonArgs), + false, 0, &sdk.TxResponse{}, + }, + { + "valid periodic fee grant without spend-limit", + cli.TestFlags(). + With( + "akash1vevyks8pthkscvgazc97qyfjt40m6g9x5ueyaa", + ). + WithFrom(granter.String()). + WithPeriodLimit("10uakt"). + WithPeriod(oneHour). + WithExpiration(getFormattedExpiration(tenHours)). 
+ Append(commonArgs), + false, 0, &sdk.TxResponse{}, + }, + { + "valid periodic fee grant without expiration", + cli.TestFlags(). + With( + "akash1vevyks8pthkscvgazc97qyfjt40m6g9x5ueyaa", + ). + WithFrom(granter.String()). + WithSpendLimit("100uakt"). + WithPeriodLimit("10uakt"). + WithPeriod(oneHour). + Append(commonArgs), + false, 0, &sdk.TxResponse{}, + }, + { + "valid periodic fee grant without spend-limit and expiration", + cli.TestFlags(). + With( + "akash12nyk4pcf4arshznkpz882e4l4ts0lt0avu47d0", + ). + WithFrom(granter.String()). + WithPeriodLimit("10uakt"). + WithPeriod(oneHour). + Append(commonArgs), + false, 0, &sdk.TxResponse{}, + }, + { + "invalid expiration", + cli.TestFlags(). + With( + "akash1vevyks8pthkscvgazc97qyfjt40m6g9xe85ry8", + ). + WithFrom(granter.String()). + WithPeriodLimit("10uakt"). + WithPeriod(oneHour). + WithExpiration("invalid"). + Append(commonArgs), + true, 0, nil, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxFeegrantGrantCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + } + }) + } +} + +func (s *FeegrantCLITestSuite) TestNewCmdRevokeFeegrant() { + granter := s.addedGranter + grantee := s.addedGrantee + cctx := s.cctx + + commonArgs := cli.TestFlags(). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))) + + // Create new fee grant specifically to test amino. + aminoGrantee, err := sdk.AccAddressFromBech32("akash16ydaqh0fcnh4qt7a3jme4mmztm2qel5atrvfk4") + s.Require().NoError(err) + s.createGrant(granter, aminoGrantee) + + testCases := []struct { + name string + args []string + expectErr bool + expectedCode uint32 + respType proto.Message + }{ + { + "invalid grantee", + cli.TestFlags(). + With( + grantee.String(), + ). + WithFrom("wrong_granter"). + Append(commonArgs), + true, 0, nil, + }, + { + "invalid grantee", + cli.TestFlags(). + With( + "wrong_grantee", + ). + WithFrom(granter.String()). + Append(commonArgs), + true, 0, nil, + }, + { + "Valid revoke", + cli.TestFlags(). + With( + grantee.String(), + ). + WithFrom(granter.String()). + Append(commonArgs), + false, 0, &sdk.TxResponse{}, + }, + { + "Valid revoke with amino", + cli.TestFlags(). + With( + aminoGrantee.String(), + ). + WithFrom(granter.String()). + WithSignMode(cflags.SignModeLegacyAminoJSON). + Append(commonArgs), + false, 0, &sdk.TxResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxFeegrantRevokeCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) 
+ + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + } + }) + } +} + +func (s *FeegrantCLITestSuite) TestTxWithFeeGrant() { + // s.T().Skip() // TODO to re-enable in #12274 + + cctx := s.cctx + granter := s.addedGranter + + // creating an account manually (This account won't be exist in state) + k, _, err := s.baseCtx.Keyring.NewMnemonic("grantee", keyring.English, sdk.FullFundraiserPath, keyring.DefaultBIP39Passphrase, hd.Secp256k1) + s.Require().NoError(err) + pub, err := k.GetPubKey() + s.Require().NoError(err) + grantee := sdk.AccAddress(pub.Address()) + fee := sdk.NewCoin("uakt", sdkmath.NewInt(100)) + + args := cli.TestFlags(). + With( + grantee.String(), + ). + WithFrom(granter.String()). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithSpendLimit(fee.String()). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdkmath.NewInt(10)))). + WithExpiration(getFormattedExpiration(oneYear)) + + cmd := cli.GetTxFeegrantGrantCmd() + + var res sdk.TxResponse + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, args...) + s.Require().NoError(err) + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), &res), out.String()) + + testcases := []struct { + name string + flags []string + expErrCode uint32 + }{ + { + name: "granted fee allowance for an account which is not in state and creating any tx with it by using --fee-granter shouldn't fail", + flags: cli.TestFlags().WithFrom(grantee.String()).WithFeeGranter(granter), + }, + { + name: "--fee-payer should also sign the tx (direct)", + flags: cli.TestFlags().WithFrom(grantee.String()).WithFeePayer(granter), + expErrCode: 4, + }, + { + name: "--fee-payer should also sign the tx (amino-json)", + flags: cli.TestFlags().WithFrom(grantee.String()).WithFeePayer(granter).WithSignMode(cflags.SignModeLegacyAminoJSON), + expErrCode: 4, + }, + { + name: "use --fee-payer and --fee-granter together works", + flags: cli.TestFlags().WithFrom(grantee.String()).WithFeePayer(grantee).WithFeeGranter(granter), + }, + } + + for _, tc := range testcases { + s.Run(tc.name, func() { + cmd := cli.GetTxGovSubmitLegacyProposalCmd() + + pArgs := cli.TestFlags(). + WithTitle("Text Proposal"). + WithDescription("No desc"). + WithProposalType(govv1beta1.ProposalTypeText). + WithSkipConfirm(). + Append(tc.flags) + + _, err = clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, pArgs...) + s.Require().NoError(err) + + var resp sdk.TxResponse + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), &resp), out.String()) + }) + } +} + +func (s *FeegrantCLITestSuite) TestFilteredFeeAllowance() { + granter := s.addedGranter + k, _, err := s.baseCtx.Keyring.NewMnemonic("grantee1", keyring.English, sdk.FullFundraiserPath, keyring.DefaultBIP39Passphrase, hd.Secp256k1) + s.Require().NoError(err) + pub, err := k.GetPubKey() + s.Require().NoError(err) + grantee := sdk.AccAddress(pub.Address()) + + cctx := s.cctx + + args := cli.TestFlags(). + WithBroadcastModeSync(). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(100)))) + + spendLimit := sdk.NewCoin("uakt", sdkmath.NewInt(1000)) + + allowMsgs := strings.Join([]string{sdk.MsgTypeURL(&govv1beta1.MsgSubmitProposal{}), sdk.MsgTypeURL(&govv1.MsgVoteWeighted{})}, ",") + + testCases := []struct { + name string + args []string + expectErr bool + respType proto.Message + expectedCode uint32 + }{ + { + "invalid granter address", + cli.TestFlags(). 
+ With("akash1nph3cfzk6trsmfxkeu943nvach5qw4vwstnvkl"). + WithFrom("not an address"). + WithAllowedMsgs(allowMsgs). + WithSpendLimit(spendLimit.String()). + Append(args), + true, &sdk.TxResponse{}, 0, + }, + { + "invalid grantee address", + cli.TestFlags(). + With("not an address"). + WithFrom(granter.String()). + WithAllowedMsgs(allowMsgs). + WithSpendLimit(spendLimit.String()). + Append(args), + true, &sdk.TxResponse{}, 0, + }, + { + "valid filter fee grant", + cli.TestFlags(). + With(grantee.String()). + WithFrom(granter.String()). + WithAllowedMsgs(allowMsgs). + WithSpendLimit(spendLimit.String()). + Append(args), + false, &sdk.TxResponse{}, 0, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxFeegrantGrantCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + } + }) + } + + // exec filtered fee allowance + cases := []struct { + name string + malleate func() error + respType proto.Message + expectedCode uint32 + }{ + { + "valid proposal tx", + func() error { + cmd := cli.GetTxGovSubmitLegacyProposalCmd() + + pArgs := cli.TestFlags(). + WithTitle("Text Proposal"). + WithDescription("No desc"). + WithProposalType(govv1beta1.ProposalTypeText). + WithFeeGranter(granter). + WithFrom(grantee.String()). + Append(args) + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, pArgs...) + s.Require().NoError(err) + var resp sdk.TxResponse + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), &resp), out.String()) + + return err + }, + &sdk.TxResponse{}, + 0, + }, + { + "valid weighted_vote tx", + func() error { + sArgs := cli.TestFlags(). + With( + "0", + "yes", + ). + WithFrom(grantee.String()). + Append(args) + + cmd := cli.GetTxGovWeightedVoteCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, sArgs...) + + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), &sdk.TxResponse{}), out.String()) + + return err + }, + &sdk.TxResponse{}, + 2, + }, + { + "should fail with unauthorized msgs", + func() error { + sArgs := cli.TestFlags(). + With("akash14cm33pvnrv2497tyt8sp9yavhmw83nwel2kgqa"). + WithFrom(grantee.String()). + WithSpendLimit("100uakt"). + WithFeeGranter(granter). + Append(args) + + cmd := cli.GetTxFeegrantGrantCmd() + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, sArgs...) + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), &sdk.TxResponse{}), out.String()) + + return err + }, + &sdk.TxResponse{}, + 7, + }, + } + + for _, tc := range cases { + s.Run(tc.name, func() { + err := tc.malleate() + s.Require().NoError(err) + }) + } +} + +func getFormattedExpiration(duration int64) string { + return time.Now().Add(time.Duration(duration) * time.Second).Format(time.RFC3339) +} diff --git a/go/cli/flags/client.go b/go/cli/flags/client.go new file mode 100644 index 00000000..4d8a9a73 --- /dev/null +++ b/go/cli/flags/client.go @@ -0,0 +1,90 @@ +package flags + +import ( + "fmt" + "strconv" + + "github.com/spf13/pflag" + + cltypes "pkg.akt.dev/go/node/client/types" +) + +// ClientOptionsFromFlags reads client options from cli flag set. 
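// Typical use, sketched (cmd is any cobra command whose flag set was populated
// by AddTxFlagsToCmd; the client constructor itself is omitted here):
//
//	opts, err := ClientOptionsFromFlags(cmd.Flags())
//	if err != nil {
//		return err
//	}
//	// opts is then passed through to the node client when it is created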
+func ClientOptionsFromFlags(flagSet *pflag.FlagSet) ([]cltypes.ClientOption, error) { + opts := make([]cltypes.ClientOption, 0) + + if flagSet.Changed(FlagAccountNumber) { + accNum, _ := flagSet.GetUint64(FlagAccountNumber) + opts = append(opts, cltypes.WithAccountNumber(accNum)) + } + + if flagSet.Changed(FlagSequence) { + accSeq, _ := flagSet.GetUint64(FlagSequence) + opts = append(opts, cltypes.WithAccountSequence(accSeq)) + } + + gasAdj, _ := flagSet.GetFloat64(FlagGasAdjustment) + opts = append(opts, cltypes.WithGasAdjustment(gasAdj)) + + if flagSet.Changed(FlagNote) { + memo, _ := flagSet.GetString(FlagNote) + opts = append(opts, cltypes.WithNote(memo)) + } + + if flagSet.Changed(FlagTimeoutHeight) { + timeoutHeight, _ := flagSet.GetUint64(FlagTimeoutHeight) + opts = append(opts, cltypes.WithTimeoutHeight(timeoutHeight)) + } + + if flagSet.Changed(FlagSkipConfirmation) { + skip, _ := flagSet.GetBool(FlagSkipConfirmation) + opts = append(opts, cltypes.WithSkipConfirm(skip)) + } + + if flagSet.Changed(FlagGas) { + gasStr, _ := flagSet.GetString(FlagGas) + gasSetting, _ := ParseGasSetting(gasStr) + opts = append(opts, cltypes.WithGas(gasSetting)) + } + + if flagSet.Changed(FlagFees) { + feesStr, _ := flagSet.GetString(FlagFees) + opts = append(opts, cltypes.WithFees(feesStr)) + } + + if flagSet.Changed(FlagGasPrices) { + gasPrices, _ := flagSet.GetString(FlagGasPrices) + opts = append(opts, cltypes.WithGasPrices(gasPrices)) + } + + signMode := SignModeDirect + if flagSet.Changed(FlagSignMode) { + signMode, _ = flagSet.GetString(FlagSignMode) + } + + opts = append(opts, cltypes.WithSignMode(signMode)) + + return opts, nil +} + +// ParseGasSetting parses a string gas value. The value may either be 'auto', +// which indicates a transaction should be executed in simulate mode to +// automatically find a sufficient gas value, or a string integer. It returns an +// error if a string integer is provided which cannot be parsed. 
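// A quick sketch of the mapping (values illustrative):
//
//	ParseGasSetting(GasFlagAuto) // GasSetting{Simulate: true}
//	ParseGasSetting("250000")    // GasSetting{Simulate: false, Gas: 250000}
//	ParseGasSetting("")          // GasSetting{Simulate: false, Gas: DefaultGasLimit}
//	ParseGasSetting("12.5")      // returns an error: not an unsigned integer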
+func ParseGasSetting(gasStr string) (cltypes.GasSetting, error) { + switch gasStr { + case "": + return cltypes.GasSetting{Simulate: false, Gas: DefaultGasLimit}, nil + + case GasFlagAuto: + return cltypes.GasSetting{Simulate: true, Gas: 0}, nil + + default: + gas, err := strconv.ParseUint(gasStr, 10, 64) + if err != nil { + return cltypes.GasSetting{}, fmt.Errorf("gas must be either integer or %s", GasFlagAuto) + } + + return cltypes.GasSetting{Simulate: false, Gas: gas}, nil + } +} diff --git a/go/cli/flags/deployment.go b/go/cli/flags/deployment.go new file mode 100644 index 00000000..b78992dd --- /dev/null +++ b/go/cli/flags/deployment.go @@ -0,0 +1,203 @@ +package flags + +import ( + "strings" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + sdk "github.com/cosmos/cosmos-sdk/types" + + dv1 "pkg.akt.dev/go/node/deployment/v1" + dv1beta4 "pkg.akt.dev/go/node/deployment/v1beta4" +) + +type DeploymentIDOptions struct { + NoOwner bool +} + +type DeploymentIDOption func(*DeploymentIDOptions) + +// DeploymentIDOptionNoOwner do not add mark as required owner flag +func DeploymentIDOptionNoOwner(val bool) DeploymentIDOption { + return func(opt *DeploymentIDOptions) { + opt.NoOwner = val + } +} + +type MarketOptions struct { + Owner sdk.AccAddress + Provider sdk.AccAddress +} + +type MarketOption func(*MarketOptions) + +func WithOwner(val sdk.AccAddress) MarketOption { + return func(opt *MarketOptions) { + opt.Owner = val + } +} + +func WithProvider(val sdk.AccAddress) MarketOption { + return func(opt *MarketOptions) { + opt.Provider = val + } +} + +// AddDeploymentIDFlags add flags for deployment except for Owner when NoOwner is set +func AddDeploymentIDFlags(flags *pflag.FlagSet, opts ...DeploymentIDOption) { + opt := &DeploymentIDOptions{} + + for _, o := range opts { + o(opt) + } + + if !opt.NoOwner { + flags.String(FlagOwner, "", "Deployment Owner") + } + + flags.Uint64(FlagDSeq, 0, "Deployment Sequence") +} + +// MarkReqDeploymentIDFlags marks flags required except for Owner when NoOwner is set +func MarkReqDeploymentIDFlags(cmd *cobra.Command, opts ...DeploymentIDOption) { + opt := &DeploymentIDOptions{} + + for _, o := range opts { + o(opt) + } + + if !opt.NoOwner { + _ = cmd.MarkFlagRequired(FlagOwner) + } + + _ = cmd.MarkFlagRequired(FlagDSeq) +} + +// DeploymentIDFromFlags returns DeploymentID with given flags, owner and error if occurred +func DeploymentIDFromFlags(flags *pflag.FlagSet, opts ...MarketOption) (dv1.DeploymentID, error) { + var id dv1.DeploymentID + opt := &MarketOptions{} + + for _, o := range opts { + o(opt) + } + + var owner string + if flag := flags.Lookup(FlagOwner); flag != nil { + owner = flag.Value.String() + } + + // if --owner flag was explicitly provided, use that. 
+ var err error + if owner != "" { + opt.Owner, err = sdk.AccAddressFromBech32(owner) + if err != nil { + return id, err + } + } + + id.Owner = opt.Owner.String() + + if id.DSeq, err = flags.GetUint64(FlagDSeq); err != nil { + return id, err + } + return id, nil +} + +// DeploymentIDFromFlagsForOwner returns DeploymentID with given flags, owner and error if occurred +func DeploymentIDFromFlagsForOwner(flags *pflag.FlagSet, owner sdk.Address) (dv1.DeploymentID, error) { + id := dv1.DeploymentID{ + Owner: owner.String(), + } + + var err error + if id.DSeq, err = flags.GetUint64(FlagDSeq); err != nil { + return id, err + } + + return id, nil +} + +// AddGroupIDFlags add flags for Group +func AddGroupIDFlags(flags *pflag.FlagSet, opts ...DeploymentIDOption) { + AddDeploymentIDFlags(flags, opts...) + flags.Uint32(FlagGSeq, 1, "Group Sequence") +} + +// MarkReqGroupIDFlags marks flags required for group +func MarkReqGroupIDFlags(cmd *cobra.Command, opts ...DeploymentIDOption) { + MarkReqDeploymentIDFlags(cmd, opts...) +} + +// GroupIDFromFlags returns GroupID with given flags and error if occurred +func GroupIDFromFlags(flags *pflag.FlagSet, opts ...MarketOption) (dv1.GroupID, error) { + var id dv1.GroupID + prev, err := DeploymentIDFromFlags(flags, opts...) + if err != nil { + return id, err + } + + gseq, err := flags.GetUint32(FlagGSeq) + if err != nil { + return id, err + } + return dv1.MakeGroupID(prev, gseq), nil +} + +// AddDeploymentFilterFlags add flags to filter for deployment list +func AddDeploymentFilterFlags(flags *pflag.FlagSet) { + flags.String(FlagOwner, "", "deployment owner address to filter") + flags.String(FlagState, "", "deployment state to filter (active,closed)") + flags.Uint64(FlagDSeq, 0, "deployment sequence to filter") +} + +// DepFiltersFromFlags returns DeploymentFilters with given flags and error if occurred +func DepFiltersFromFlags(flags *pflag.FlagSet) (dv1beta4.DeploymentFilters, error) { + var dfilters dv1beta4.DeploymentFilters + owner, err := flags.GetString(FlagOwner) + if err != nil { + return dfilters, err + } + + if owner != "" { + _, err = sdk.AccAddressFromBech32(owner) + if err != nil { + return dfilters, err + } + } + + dfilters.Owner = owner + + if dfilters.State, err = flags.GetString(FlagState); err != nil { + return dfilters, err + } + + if dfilters.DSeq, err = flags.GetUint64(FlagDSeq); err != nil { + return dfilters, err + } + + return dfilters, nil +} + +// AddDepositorFlag adds the `--depositor-account` flag +func AddDepositorFlag(flags *pflag.FlagSet) { + flags.String(FlagDepositorAccount, "", "Depositor account pays for the deposit instead of deducting from the owner") +} + +// DepositorFromFlags returns the depositor account if one was specified in flags, +// otherwise it returns the owner's account. 
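// Sketch of the resulting behaviour (fs and the addresses are illustrative):
//
//	DepositorFromFlags(fs, owner) // --depositor-account unset          -> owner, nil
//	DepositorFromFlags(fs, owner) // --depositor-account=akash1dep...   -> "akash1dep...", nil (must parse as bech32)
//	DepositorFromFlags(fs, owner) // --depositor-account=not-an-address -> "not-an-address", bech32 decode error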
+func DepositorFromFlags(flags *pflag.FlagSet, owner string) (string, error) { + depositorAcc, err := flags.GetString(FlagDepositorAccount) + if err != nil { + return "", err + } + + // if no depositor is specified, owner is the default depositor + if strings.TrimSpace(depositorAcc) == "" { + return owner, nil + } + + _, err = sdk.AccAddressFromBech32(depositorAcc) + return depositorAcc, err +} diff --git a/go/cli/flags/flags.go b/go/cli/flags/flags.go new file mode 100644 index 00000000..42139c10 --- /dev/null +++ b/go/cli/flags/flags.go @@ -0,0 +1,358 @@ +package flags + +import ( + "fmt" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "pkg.akt.dev/go/node/types/constants" + + "github.com/cosmos/cosmos-sdk/crypto/keyring" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + cmcli "github.com/cometbft/cometbft/libs/cli" +) + +const ( + // DefaultGasAdjustment is applied to gas estimates to avoid tx execution + // failures due to state changes that might occur between the tx simulation + // and the actual run. + DefaultGasAdjustment = constants.DefaultGasAdjustment + GasFlagAuto = constants.DefaultGas + DefaultGasLimit = 200000 + + DefaultKeyringBackend = keyring.BackendOS + + // BroadcastSync defines a tx broadcasting mode where the client waits for + // a CheckTx execution response only. + BroadcastSync = "sync" + // BroadcastAsync defines a tx broadcasting mode where the client returns + // immediately. + BroadcastAsync = "async" + + BroadcastBlock = "block" + + // SignModeDirect is the value of the --sign-mode flag for SIGN_MODE_DIRECT + SignModeDirect = "direct" + // SignModeLegacyAminoJSON is the value of the --sign-mode flag for SIGN_MODE_LEGACY_AMINO_JSON + SignModeLegacyAminoJSON = "amino-json" + // SignModeDirectAux is the value of the --sign-mode flag for SIGN_MODE_DIRECT_AUX + SignModeDirectAux = "direct-aux" + // SignModeEIP191 is the value of the --sign-mode flag for SIGN_MODE_EIP_191 + SignModeEIP191 = "eip-191" +) + +const ( + FlagGenesisTime = "genesis-time" + FlagGenTxDir = "gentx-dir" + FlagRecover = "recover" + // FlagDefaultBondDenom defines the default denom to use in the genesis file. 
+ FlagDefaultBondDenom = "default-denom" + FlagDenom = "denom" + FlagVestingStart = "vesting-start-time" + FlagVestingEnd = "vesting-end-time" + FlagVestingAmt = "vesting-amount" + FlagAppendMode = "append" + FlagEvents = "events" + FlagType = "type" + FlagMultisig = "multisig" + FlagOverwrite = "overwrite" + FlagSigOnly = "signature-only" + FlagAmino = "amino" + FlagNoAutoIncrement = "no-auto-increment" + FlagAppend = "append" + FlagTitle = "title" + FlagMetadata = "metadata" + FlagSummary = "summary" + FlagNoValidate = "no-validate" + FlagDaemonName = "daemon-name" + FlagPeriod = "period" + FlagPeriodLimit = "period-limit" + FlagAllowedMsgs = "allowed-messages" + FlagMsgType = "msg-type" + FlagAllowedValidators = "allowed-validators" + FlagDenyValidators = "deny-validators" + FlagAllowList = "allow-list" + FlagDeposit = "deposit" + FlagStatus = "status" + FlagState = "state" + FlagOwner = "owner" + FlagDSeq = "dseq" + FlagGSeq = "gseq" + FlagOSeq = "oseq" + FlagProvider = "provider" + FlagSerial = "serial" + FlagPrice = "price" + FlagDepositorAccount = "depositor-account" + FlagExpiration = "expiration" + FlagSpendLimit = "spend-limit" + FlagHome = cmcli.HomeFlag + FlagKeyringDir = "keyring-dir" + FlagUseLedger = "ledger" + FlagChainID = "chain-id" + FlagNode = "node" + FlagGRPC = "grpc-addr" + FlagGRPCInsecure = "grpc-insecure" + FlagHeight = "height" + FlagGasAdjustment = "gas-adjustment" + FlagFrom = "from" + FlagName = "name" + FlagAccountNumber = "account-number" + FlagSequence = "sequence" + FlagNote = "note" + FlagFees = "fees" + FlagGas = "gas" + FlagGasPrices = "gas-prices" + FlagBroadcastMode = "broadcast-mode" + FlagDryRun = "dry-run" + FlagGenerateOnly = "generate-only" + FlagOffline = "offline" + FlagModulesToExport = "modules-to-export" + FlagOutputDocument = "output-document" // inspired by wget -O + FlagForZeroHeight = "for-zero-height" + FlagJailAllowedAddrs = "jail-allowed-addrs" + FlagSkipConfirmation = "yes" + FlagProve = "prove" + FlagKeyringBackend = "keyring-backend" + FlagPage = "page" + FlagLimit = "limit" + FlagSignMode = "sign-mode" + FlagPageKey = "page-key" + FlagOffset = "offset" + FlagCountTotal = "count-total" + FlagTimeoutHeight = "timeout-height" + FlagKeyType = "key-type" + FlagFeePayer = "fee-payer" + FlagFeeGranter = "fee-granter" + FlagReverse = "reverse" + FlagTip = "tip" + FlagAux = "aux" + FlagInitHeight = "initial-height" + FlagDelayed = "delayed" + // FlagOutput is the flag to set the output format. + // This differs from FlagOutputDocument that is used to set the output file. 
+ FlagOutput = cmcli.OutputFlag + FlagSplit = "split" + + // CometBFT logging flags + + FlagLogLevel = "log_level" + FlagLogFormat = "log_format" + FlagLogNoColor = "log_no_color" + FlagLogColor = "log_color" + FlagLogTimestamp = "log_timestamp" + FlagTrace = "trace" + + FlagAddressValidator = "validator" + FlagAddressValidatorSrc = "addr-validator-source" + FlagAddressValidatorDst = "addr-validator-dest" + FlagPubKey = "pubkey" + FlagAmount = "amount" + FlagSharesAmount = "shares-amount" + FlagSharesFraction = "shares-fraction" + + FlagMoniker = "moniker" + FlagEditMoniker = "new-moniker" + FlagIdentity = "identity" + FlagWebsite = "website" + FlagSecurityContact = "security-contact" + FlagDetails = "details" + + FlagCommission = "commission" + FlagCommissionRate = "commission-rate" + FlagCommissionMaxRate = "commission-max-rate" + FlagCommissionMaxChangeRate = "commission-max-change-rate" + + FlagGenesisFormat = "genesis-format" + FlagNodeID = "node-id" + FlagIP = "ip" + FlagP2PPort = "p2p-port" + + // Tendermint full-node start flags + + FlagWithTendermint = "with-tendermint" + FlagAddress = "address" + FlagTransport = "transport" + FlagTraceStore = "trace-store" + FlagCPUProfile = "cpu-profile" + FlagMinGasPrices = "minimum-gas-prices" + FlagHaltHeight = "halt-height" + FlagHaltTime = "halt-time" + FlagInterBlockCache = "inter-block-cache" + FlagUnsafeSkipUpgrades = "unsafe-skip-upgrades" + FlagInvCheckPeriod = "inv-check-period" + + FlagPruning = "pruning" + FlagPruningKeepRecent = "pruning-keep-recent" + FlagPruningInterval = "pruning-interval" + FlagIndexEvents = "index-events" + FlagMinRetainBlocks = "min-retain-blocks" + FlagIAVLCacheSize = "iavl-cache-size" + FlagDisableIAVLFastNode = "iavl-disable-fastnode" + FlagIAVLLazyLoading = "iavl-lazy-loading" + + // state sync-related flags + + FlagStateSyncSnapshotInterval = "state-sync.snapshot-interval" + FlagStateSyncSnapshotKeepRecent = "state-sync.snapshot-keep-recent" +) + +// Deprecated +const ( + // FlagProposal only used for v1beta1 legacy proposals. + FlagProposal = "proposal" + // FlagDescription only used for v1beta1 legacy proposals. + FlagDescription = "description" + // FlagProposalType only used for v1beta1 legacy proposals. + FlagProposalType = "type" + // FlagUpgradeHeight only used for v1beta1 legacy proposals. + FlagUpgradeHeight = "upgrade-height" + // FlagUpgradeInfo only used for v1beta1 legacy proposals. 
+	FlagUpgradeInfo = "upgrade-info"
+)
+
+// common flagsets to add to various functions
+var (
+	fsShares       = pflag.NewFlagSet("", pflag.ContinueOnError)
+	fsValidator    = pflag.NewFlagSet("", pflag.ContinueOnError)
+	fsRedelegation = pflag.NewFlagSet("", pflag.ContinueOnError)
+)
+
+func init() {
+	fsShares.String(FlagSharesAmount, "", "Amount of source-shares to either unbond or redelegate as a positive integer or decimal")
+	fsShares.String(FlagSharesFraction, "", "Fraction of source-shares to either unbond or redelegate as a positive integer or decimal >0 and <=1")
+	fsValidator.String(FlagAddressValidator, "", "The Bech32 address of the validator")
+	fsRedelegation.String(FlagAddressValidatorSrc, "", "The Bech32 address of the source validator")
+	fsRedelegation.String(FlagAddressValidatorDst, "", "The Bech32 address of the destination validator")
+}
+
+func AddDepositFlags(flags *pflag.FlagSet) {
+	flags.String(FlagDeposit, "", "Deposit amount")
+}
+
+func MarkReqDepositFlags(cmd *cobra.Command) {
+	_ = cmd.MarkFlagRequired(FlagDeposit)
+}
+
+// LineBreak can be included in a command list to provide a blank line
+// to help with readability
+var LineBreak = &cobra.Command{Run: func(*cobra.Command, []string) {}}
+
+// AddQueryFlagsToCmd adds common flags to a module query command.
+func AddQueryFlagsToCmd(cmd *cobra.Command) {
+	cmd.Flags().String(FlagNode, "tcp://localhost:26657", "<host>:<port> to Tendermint RPC interface for this chain")
+	cmd.Flags().String(FlagGRPC, "", "the gRPC endpoint to use for this chain")
+	cmd.Flags().Bool(FlagGRPCInsecure, false, "allow gRPC over insecure channels, if not TLS the server must use TLS")
+	cmd.Flags().Int64(FlagHeight, 0, "Use a specific height to query state at (this can error if the node is pruning state)")
+	cmd.Flags().StringP(FlagOutput, "o", "text", "Output format (text|json)")
+
+	// some base commands do not require chainID, e.g. `simd testnet`, while subcommands do,
+	// hence the flag should not be required for those commands
+	_ = cmd.MarkFlagRequired(FlagChainID)
+}
+
+// AddTxFlagsToCmd adds common flags to a module tx command.
+func AddTxFlagsToCmd(cmd *cobra.Command) {
+	f := cmd.Flags()
+	f.StringP(FlagOutput, "o", "json", "Output format (text|json)")
+	f.String(FlagFrom, "", "Name or address of private key with which to sign")
+	f.Uint64P(FlagAccountNumber, "a", 0, "The account number of the signing account (offline mode only)")
+	f.Uint64P(FlagSequence, "s", 0, "The sequence number of the signing account (offline mode only)")
+	f.String(FlagNote, "", "Note to add a description to the transaction (previously --memo)")
+	f.String(FlagFees, "", "Fees to pay along with transaction; eg: 10uatom")
+	f.String(FlagGasPrices, constants.DefaultGasPrices, "Gas prices in decimal format to determine the transaction fee (e.g.
0.1uatom)") + f.String(FlagNode, "tcp://localhost:26657", ": to tendermint rpc interface for this chain") + f.Bool(FlagUseLedger, false, "Use a connected Ledger device") + f.Float64(FlagGasAdjustment, DefaultGasAdjustment, "adjustment factor to be multiplied against the estimate returned by the tx simulation; if the gas limit is set manually this flag is ignored ") + f.StringP(FlagBroadcastMode, "b", BroadcastSync, "Transaction broadcasting mode (sync|async)") + f.Bool(FlagDryRun, false, "ignore the --gas flag and perform a simulation of a transaction, but don't broadcast it (when enabled, the local Keybase is not accessible)") + f.Bool(FlagGenerateOnly, false, "Build an unsigned transaction and write it to STDOUT (when enabled, the local Keybase only accessed when providing a key name)") + f.Bool(FlagOffline, false, "Offline mode (does not allow any online functionality)") + f.BoolP(FlagSkipConfirmation, "y", false, "Skip tx broadcasting prompt confirmation") + f.String(FlagSignMode, "", "Choose sign mode (direct|amino-json|direct-aux), this is an advanced feature") + f.Uint64(FlagTimeoutHeight, 0, "Set a block timeout height to prevent the tx from being committed past a certain height") + f.String(FlagFeePayer, "", "Fee payer pays fees for the transaction instead of deducting from the signer") + f.String(FlagFeeGranter, "", "Fee granter grants fees for the transaction") + f.String(FlagTip, "", "Tip is the amount that is going to be transferred to the fee payer on the target chain. This flag is only valid when used with --aux, and is ignored if the target chain didn't enable the TipDecorator") + f.Bool(FlagAux, false, "Generate aux signer data instead of sending a tx") + f.String(FlagChainID, "", "The network chain ID") + // --gas can accept integers and "auto" + f.String(FlagGas, GasFlagAuto, fmt.Sprintf("gas limit to set per-transaction; set to %q to calculate sufficient gas automatically. Note: %q option doesn't always report accurate results. Set a valid coin value to adjust the result. Can be used instead of %q. (default %d)", + GasFlagAuto, GasFlagAuto, FlagFees, DefaultGasLimit)) + + AddKeyringFlags(f) +} + +// AddKeyringFlags sets common keyring flags +func AddKeyringFlags(flags *pflag.FlagSet) { + flags.String(FlagKeyringDir, "", "The client Keyring directory; if omitted, the default 'home' directory will be used") + flags.String(FlagKeyringBackend, DefaultKeyringBackend, "Select keyring's backend (os|file|kwallet|pass|test|memory)") +} + +// AddPaginationFlagsToCmd adds common pagination flags to cmd +func AddPaginationFlagsToCmd(cmd *cobra.Command, query string) { + cmd.Flags().Uint64(FlagPage, 1, fmt.Sprintf("pagination page of %s to query for. This sets offset to a multiple of limit", query)) + cmd.Flags().String(FlagPageKey, "", fmt.Sprintf("pagination page-key of %s to query for", query)) + cmd.Flags().Uint64(FlagOffset, 0, fmt.Sprintf("pagination offset of %s to query for", query)) + cmd.Flags().Uint64(FlagLimit, 100, fmt.Sprintf("pagination limit of %s to query for", query)) + cmd.Flags().Bool(FlagCountTotal, false, fmt.Sprintf("count total number of records in %s to query for", query)) + cmd.Flags().Bool(FlagReverse, false, "results are sorted in descending order") +} + +// FlagSetCommissionCreate Returns the FlagSet used for commission create. 
+func FlagSetCommissionCreate() *pflag.FlagSet { + fs := pflag.NewFlagSet("", pflag.ContinueOnError) + + fs.String(FlagCommissionRate, "", "The initial commission rate percentage") + fs.String(FlagCommissionMaxRate, "", "The maximum commission rate percentage") + fs.String(FlagCommissionMaxChangeRate, "", "The maximum commission change rate percentage (per day)") + + return fs +} + +// FlagSetAmount Returns the FlagSet for amount related operations. +func FlagSetAmount() *pflag.FlagSet { + fs := pflag.NewFlagSet("", pflag.ContinueOnError) + fs.String(FlagAmount, "", "Amount of coins to bond") + return fs +} + +// FlagSetPublicKey Returns the flagset for Public Key related operations. +func FlagSetPublicKey() *pflag.FlagSet { + fs := pflag.NewFlagSet("", pflag.ContinueOnError) + fs.String(FlagPubKey, "", "The validator's Protobuf JSON encoded public key") + return fs +} + +func FlagSetDescriptionEdit() *pflag.FlagSet { + fs := pflag.NewFlagSet("", pflag.ContinueOnError) + + fs.String(FlagEditMoniker, stakingtypes.DoNotModifyDesc, "The validator's name") + fs.String(FlagIdentity, stakingtypes.DoNotModifyDesc, "The (optional) identity signature (ex. UPort or Keybase)") + fs.String(FlagWebsite, stakingtypes.DoNotModifyDesc, "The validator's (optional) website") + fs.String(FlagSecurityContact, stakingtypes.DoNotModifyDesc, "The validator's (optional) security contact email") + fs.String(FlagDetails, stakingtypes.DoNotModifyDesc, "The validator's (optional) details") + + return fs +} + +func FlagSetCommissionUpdate() *pflag.FlagSet { + fs := pflag.NewFlagSet("", pflag.ContinueOnError) + + fs.String(FlagCommissionRate, "", "The new commission rate percentage") + + return fs +} + +func FlagSetDescriptionCreate() *pflag.FlagSet { + fs := pflag.NewFlagSet("", pflag.ContinueOnError) + + fs.String(FlagMoniker, "", "The validator's name") + fs.String(FlagIdentity, "", "The optional identity signature (ex. UPort or Keybase)") + fs.String(FlagWebsite, "", "The validator's (optional) website") + fs.String(FlagSecurityContact, "", "The validator's (optional) security contact email") + fs.String(FlagDetails, "", "The validator's (optional) details") + + return fs +} diff --git a/go/cli/flags/market.go b/go/cli/flags/market.go new file mode 100644 index 00000000..09190d31 --- /dev/null +++ b/go/cli/flags/market.go @@ -0,0 +1,203 @@ +package flags + +import ( + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "pkg.akt.dev/go/node/market/v1" + "pkg.akt.dev/go/node/market/v1beta5" +) + +// AddOrderIDFlags add flags for order +func AddOrderIDFlags(flags *pflag.FlagSet, opts ...DeploymentIDOption) { + AddGroupIDFlags(flags, opts...) + flags.Uint32(FlagOSeq, 1, "Order Sequence") +} + +// MarkReqOrderIDFlags marks flags required for order +func MarkReqOrderIDFlags(cmd *cobra.Command, opts ...DeploymentIDOption) { + MarkReqGroupIDFlags(cmd, opts...) +} + +// AddProviderFlag add provider flag to command flags set +func AddProviderFlag(flags *pflag.FlagSet) { + flags.String(FlagProvider, "", "Provider") +} + +// MarkReqProviderFlag marks provider flag as required +func MarkReqProviderFlag(cmd *cobra.Command) { + _ = cmd.MarkFlagRequired(FlagProvider) +} + +// OrderIDFromFlags returns OrderID with given flags and error if occurred +func OrderIDFromFlags(flags *pflag.FlagSet, opts ...MarketOption) (v1.OrderID, error) { + prev, err := GroupIDFromFlags(flags, opts...) 
+ if err != nil { + return v1.OrderID{}, err + } + val, err := flags.GetUint32(FlagOSeq) + if err != nil { + return v1.OrderID{}, err + } + return v1.MakeOrderID(prev, val), nil +} + +// AddBidIDFlags add flags for bid +func AddBidIDFlags(flags *pflag.FlagSet, opts ...DeploymentIDOption) { + AddOrderIDFlags(flags, opts...) + AddProviderFlag(flags) +} + +// AddQueryBidIDFlags add flags for bid in query commands +func AddQueryBidIDFlags(flags *pflag.FlagSet) { + AddBidIDFlags(flags) +} + +// MarkReqBidIDFlags marks flags required for bid +// Used in get bid query command +func MarkReqBidIDFlags(cmd *cobra.Command, opts ...DeploymentIDOption) { + MarkReqOrderIDFlags(cmd, opts...) + MarkReqProviderFlag(cmd) +} + +// BidIDFromFlags returns BidID with given flags and error if occurred +// Here provider value is taken from flags +func BidIDFromFlags(flags *pflag.FlagSet, opts ...MarketOption) (v1.BidID, error) { + prev, err := OrderIDFromFlags(flags, opts...) + if err != nil { + return v1.BidID{}, err + } + + opt := &MarketOptions{} + + for _, o := range opts { + o(opt) + } + + if opt.Provider.Empty() { + provider, err := flags.GetString(FlagProvider) + if err != nil { + return v1.BidID{}, err + } + + if opt.Provider, err = sdk.AccAddressFromBech32(provider); err != nil { + return v1.BidID{}, err + } + } + + return v1.MakeBidID(prev, opt.Provider), nil +} + +func AddLeaseIDFlags(flags *pflag.FlagSet, opts ...DeploymentIDOption) { + AddBidIDFlags(flags, opts...) +} + +// MarkReqLeaseIDFlags marks flags required for bid +// Used in get bid query command +func MarkReqLeaseIDFlags(cmd *cobra.Command, opts ...DeploymentIDOption) { + MarkReqBidIDFlags(cmd, opts...) +} + +// LeaseIDFromFlags returns LeaseID with given flags and error if occurred +// Here provider value is taken from flags +func LeaseIDFromFlags(flags *pflag.FlagSet, opts ...MarketOption) (v1.LeaseID, error) { + bid, err := BidIDFromFlags(flags, opts...) 
+	if err != nil {
+		return v1.LeaseID{}, err
+	}
+
+	return bid.LeaseID(), nil
+}
+
+// AddOrderFilterFlags add flags to filter for order list
+func AddOrderFilterFlags(flags *pflag.FlagSet) {
+	flags.String(FlagOwner, "", "order owner address to filter")
+	flags.String(FlagState, "", "order state to filter (open,matched,closed)")
+	flags.Uint64(FlagDSeq, 0, "deployment sequence to filter")
+	flags.Uint32(FlagGSeq, 1, "group sequence to filter")
+	flags.Uint32(FlagOSeq, 1, "order sequence to filter")
+}
+
+// OrderFiltersFromFlags returns OrderFilters with given flags and error if occurred
+func OrderFiltersFromFlags(flags *pflag.FlagSet) (v1beta5.OrderFilters, error) {
+	dfilters, err := DepFiltersFromFlags(flags)
+	if err != nil {
+		return v1beta5.OrderFilters{}, err
+	}
+	ofilters := v1beta5.OrderFilters{
+		Owner: dfilters.Owner,
+		DSeq:  dfilters.DSeq,
+		State: dfilters.State,
+	}
+
+	if ofilters.GSeq, err = flags.GetUint32(FlagGSeq); err != nil {
+		return ofilters, err
+	}
+
+	if ofilters.OSeq, err = flags.GetUint32(FlagOSeq); err != nil {
+		return ofilters, err
+	}
+
+	return ofilters, nil
+}
+
+// AddBidFilterFlags add flags to filter for bid list
+func AddBidFilterFlags(flags *pflag.FlagSet) {
+	flags.String(FlagOwner, "", "bid owner address to filter")
+	flags.String(FlagState, "", "bid state to filter (open,matched,lost,closed)")
+	flags.Uint64(FlagDSeq, 0, "deployment sequence to filter")
+	flags.Uint32(FlagGSeq, 1, "group sequence to filter")
+	flags.Uint32(FlagOSeq, 1, "order sequence to filter")
+	flags.String(FlagProvider, "", "bid provider address to filter")
+}
+
+// BidFiltersFromFlags returns BidFilters with given flags and error if occurred
+func BidFiltersFromFlags(flags *pflag.FlagSet) (v1beta5.BidFilters, error) {
+	ofilters, err := OrderFiltersFromFlags(flags)
+	if err != nil {
+		return v1beta5.BidFilters{}, err
+	}
+	bfilters := v1beta5.BidFilters{
+		Owner: ofilters.Owner,
+		DSeq:  ofilters.DSeq,
+		GSeq:  ofilters.GSeq,
+		OSeq:  ofilters.OSeq,
+		State: ofilters.State,
+	}
+
+	provider, err := flags.GetString(FlagProvider)
+	if err != nil {
+		return bfilters, err
+	}
+
+	if provider != "" {
+		_, err = sdk.AccAddressFromBech32(provider)
+		if err != nil {
+			return bfilters, err
+		}
+	}
+	bfilters.Provider = provider
+
+	return bfilters, nil
+}
+
+// AddLeaseFilterFlags add flags to filter for lease list
+func AddLeaseFilterFlags(flags *pflag.FlagSet) {
+	flags.String(FlagOwner, "", "lease owner address to filter")
+	flags.String(FlagState, "", "lease state to filter (active,insufficient_funds,closed)")
+	flags.Uint64(FlagDSeq, 0, "deployment sequence to filter")
+	flags.Uint32(FlagGSeq, 1, "group sequence to filter")
+	flags.Uint32(FlagOSeq, 1, "order sequence to filter")
+	flags.String(FlagProvider, "", "lease provider address to filter")
+}
+
+// LeaseFiltersFromFlags returns LeaseFilters with given flags and error if occurred
+func LeaseFiltersFromFlags(flags *pflag.FlagSet) (v1.LeaseFilters, error) {
+	bfilters, err := BidFiltersFromFlags(flags)
+	if err != nil {
+		return v1.LeaseFilters{}, err
+	}
+	return v1.LeaseFilters(bfilters), nil
+}
diff --git a/go/cli/genesis.go b/go/cli/genesis.go
new file mode 100644
index 00000000..7302e93c
--- /dev/null
+++ b/go/cli/genesis.go
@@ -0,0 +1,37 @@
+package cli
+
+import (
+	"github.com/cosmos/cosmos-sdk/client"
+	banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+	"github.com/cosmos/cosmos-sdk/x/genutil"
+	"github.com/spf13/cobra"
+
+	"github.com/cosmos/cosmos-sdk/types/module"
+	gentypes
"github.com/cosmos/cosmos-sdk/x/genutil/types" +) + +func GetGenesisCmd( + mbm module.BasicManager, + txCfg client.TxEncodingConfig, + defaultNodeHome string, +) *cobra.Command { + cmd := &cobra.Command{ + Use: "genesis", + Short: "Genesis control commands", + DisableFlagParsing: false, + SuggestionsMinimumDistance: 2, + RunE: ValidateCmd, + } + + gentxModule := mbm[gentypes.ModuleName].(genutil.AppModuleBasic) + + cmd.AddCommand( + getGenesisValidateCmd(mbm), + GetGenesisGenTxCmd(mbm, txCfg, banktypes.GenesisBalancesIterator{}, defaultNodeHome), + GetGenesisAddAccountCmd(defaultNodeHome), + GetGenesisInitCmd(mbm, defaultNodeHome), + GetGenesisCollectCmd(banktypes.GenesisBalancesIterator{}, defaultNodeHome, gentxModule.GenTxValidator), + ) + + return cmd +} diff --git a/go/cli/genesis_collect.go b/go/cli/genesis_collect.go new file mode 100644 index 00000000..73137644 --- /dev/null +++ b/go/cli/genesis_collect.go @@ -0,0 +1,69 @@ +package cli + +import ( + "encoding/json" + "path/filepath" + + tmtypes "github.com/cometbft/cometbft/types" + "github.com/pkg/errors" + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/server" + "github.com/cosmos/cosmos-sdk/x/genutil" + "github.com/cosmos/cosmos-sdk/x/genutil/types" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetGenesisCollectCmd - return the cobra command to collect genesis transactions +func GetGenesisCollectCmd(genBalIterator types.GenesisBalancesIterator, defaultNodeHome string, validator types.MessageValidator) *cobra.Command { + cmd := &cobra.Command{ + Use: "collect", + Short: "Collect genesis txs and output a genesis.json file", + RunE: func(cmd *cobra.Command, _ []string) error { + sctx := server.GetServerContextFromCmd(cmd) + cctx := client.GetClientContextFromCmd(cmd) + + config := sctx.Config + cdc := cctx.Codec + + config.SetRoot(cctx.HomeDir) + + nodeID, valPubKey, err := genutil.InitializeNodeValidatorFiles(config) + if err != nil { + return errors.Wrap(err, "failed to initialize node validator files") + } + + genDoc, err := tmtypes.GenesisDocFromFile(config.GenesisFile()) + if err != nil { + return errors.Wrap(err, "failed to read genesis doc from file") + } + + genTxDir, _ := cmd.Flags().GetString(cflags.FlagGenTxDir) + genTxsDir := genTxDir + if genTxsDir == "" { + genTxsDir = filepath.Join(config.RootDir, "config", "gentx") + } + + toPrint := newPrintInfo(config.Moniker, genDoc.ChainID, nodeID, genTxsDir, json.RawMessage("")) + initCfg := types.NewInitConfig(genDoc.ChainID, genTxsDir, nodeID, valPubKey) + + appMessage, err := genutil.GenAppStateFromConfig(cdc, + cctx.TxConfig, + config, initCfg, *genDoc, genBalIterator, validator) + if err != nil { + return errors.Wrap(err, "failed to get genesis app state from config") + } + + toPrint.AppMessage = appMessage + + return displayInfo(toPrint) + }, + } + + cmd.Flags().String(cflags.FlagHome, defaultNodeHome, "The application home directory") + cmd.Flags().String(cflags.FlagGenTxDir, "", "override default \"gentx\" directory from which collect and execute genesis transactions; default [--home]/config/gentx/") + + return cmd +} diff --git a/go/cli/genesis_genaccount.go b/go/cli/genesis_genaccount.go new file mode 100644 index 00000000..7777ef88 --- /dev/null +++ b/go/cli/genesis_genaccount.go @@ -0,0 +1,84 @@ +package cli + +import ( + "bufio" + "fmt" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/server" + sdk 
"github.com/cosmos/cosmos-sdk/types" + auth "github.com/cosmos/cosmos-sdk/x/auth/helpers" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetGenesisAddAccountCmd returns add-genesis-account cobra Command. +// This command is provided as a default, applications are expected to provide their own command if custom genesis accounts are needed. +func GetGenesisAddAccountCmd(defaultNodeHome string) *cobra.Command { + cmd := &cobra.Command{ + Use: "add-account [address_or_key_name] [coin][,[coin]]", + Short: "Add a genesis account to genesis.json", + Long: `Add a genesis account to genesis.json. The provided account must specify +the account address or key name and a list of initial coins. If a key name is given, +the address will be looked up in the local Keybase. The list of initial tokens must +contain valid denominations. Accounts may optionally be supplied with vesting parameters. +`, + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + cctx := client.GetClientContextFromCmd(cmd) + sctx := server.GetServerContextFromCmd(cmd) + + config := sctx.Config + + config.SetRoot(cctx.HomeDir) + + var kr keyring.Keyring + addr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + inBuf := bufio.NewReader(cmd.InOrStdin()) + keyringBackend, _ := cmd.Flags().GetString(cflags.FlagKeyringBackend) + + if keyringBackend != "" && cctx.Keyring == nil { + var err error + kr, err = keyring.New(sdk.KeyringServiceName(), keyringBackend, cctx.HomeDir, inBuf, cctx.Codec) + if err != nil { + return err + } + } else { + kr = cctx.Keyring + } + + k, err := kr.Key(args[0]) + if err != nil { + return fmt.Errorf("failed to get address from Keyring: %w", err) + } + + addr, err = k.GetAddress() + if err != nil { + return err + } + } + + appendFlag, _ := cmd.Flags().GetBool(cflags.FlagAppendMode) + vestingStart, _ := cmd.Flags().GetInt64(cflags.FlagVestingStart) + vestingEnd, _ := cmd.Flags().GetInt64(cflags.FlagVestingEnd) + vestingAmtStr, _ := cmd.Flags().GetString(cflags.FlagVestingAmt) + + return auth.AddGenesisAccount(cctx.Codec, addr, appendFlag, config.GenesisFile(), args[1], vestingAmtStr, vestingStart, vestingEnd) + }, + } + + cmd.Flags().String(cflags.FlagHome, defaultNodeHome, "The application home directory") + cmd.Flags().String(cflags.FlagKeyringBackend, cflags.DefaultKeyringBackend, "Select keyring's backend (os|file|kwallet|pass|test)") + cmd.Flags().String(cflags.FlagVestingAmt, "", "amount of coins for vesting accounts") + cmd.Flags().Int64(cflags.FlagVestingStart, 0, "schedule start time (unix epoch) for vesting accounts") + cmd.Flags().Int64(cflags.FlagVestingEnd, 0, "schedule end time (unix epoch) for vesting accounts") + cmd.Flags().Bool(cflags.FlagAppendMode, false, "append the coins to an account already in the genesis.json file") + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/go/cli/genesis_genaccount_test.go b/go/cli/genesis_genaccount_test.go new file mode 100644 index 00000000..7a8c61a3 --- /dev/null +++ b/go/cli/genesis_genaccount_test.go @@ -0,0 +1,107 @@ +package cli_test + +import ( + "context" + "fmt" + "testing" + + "github.com/spf13/viper" + "github.com/stretchr/testify/require" + + "github.com/cometbft/cometbft/libs/log" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/server" + "github.com/cosmos/cosmos-sdk/testutil/testdata" + sdk "github.com/cosmos/cosmos-sdk/types" + moduletestutil 
"github.com/cosmos/cosmos-sdk/types/module/testutil" + "github.com/cosmos/cosmos-sdk/x/auth" + genutiltest "github.com/cosmos/cosmos-sdk/x/genutil/client/testutil" + + "pkg.akt.dev/go/cli" + cflags "pkg.akt.dev/go/cli/flags" +) + +func TestAddGenesisAccountCmd(t *testing.T) { + _, _, addr1 := testdata.KeyTestPubAddr() + tests := []struct { + name string + addr string + denom string + withKeyring bool + expectErr bool + }{ + { + name: "invalid address", + addr: "", + denom: "1000uakt", + withKeyring: false, + expectErr: true, + }, + { + name: "valid address", + addr: addr1.String(), + denom: "1000uakt", + withKeyring: false, + expectErr: false, + }, + { + name: "multiple denoms", + addr: addr1.String(), + denom: "1000uakt, 2000stake", + withKeyring: false, + expectErr: false, + }, + { + name: "with keyring", + addr: "ser", + denom: "1000uakt", + withKeyring: true, + expectErr: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + home := t.TempDir() + logger := log.NewNopLogger() + cfg, err := genutiltest.CreateDefaultTendermintConfig(home) + require.NoError(t, err) + + appCodec := moduletestutil.MakeTestEncodingConfig(auth.AppModuleBasic{}).Codec + err = genutiltest.ExecInitCmd(testMbm, home, appCodec) + require.NoError(t, err) + + sctx := server.NewContext(viper.New(), cfg, logger) + cctx := client.Context{}.WithCodec(appCodec).WithHomeDir(home) + + if tc.withKeyring { + path := hd.CreateHDPath(118, 0, 0).String() + kr, err := keyring.New(sdk.KeyringServiceName(), keyring.BackendMemory, home, nil, appCodec) + require.NoError(t, err) + _, _, err = kr.NewMnemonic(tc.addr, keyring.English, path, keyring.DefaultBIP39Passphrase, hd.Secp256k1) + require.NoError(t, err) + cctx = cctx.WithKeyring(kr) + } + + ctx := context.Background() + ctx = context.WithValue(ctx, cli.ClientContextKey, &cctx) + ctx = context.WithValue(ctx, server.ServerContextKey, sctx) + + cmd := cli.GetGenesisAddAccountCmd(home) + cmd.SetArgs([]string{ + tc.addr, + tc.denom, + fmt.Sprintf("--%s=home", cflags.FlagHome), + }) + + if tc.expectErr { + require.Error(t, cmd.ExecuteContext(ctx)) + } else { + require.NoError(t, cmd.ExecuteContext(ctx)) + } + }) + } +} diff --git a/go/cli/genesis_gentx.go b/go/cli/genesis_gentx.go new file mode 100644 index 00000000..ce50514f --- /dev/null +++ b/go/cli/genesis_gentx.go @@ -0,0 +1,276 @@ +package cli + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "path/filepath" + "strconv" + + tmtypes "github.com/cometbft/cometbft/types" + "github.com/pkg/errors" + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/server" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + "github.com/cosmos/cosmos-sdk/version" + authclient "github.com/cosmos/cosmos-sdk/x/auth/client" + "github.com/cosmos/cosmos-sdk/x/genutil" + "github.com/cosmos/cosmos-sdk/x/genutil/types" + "github.com/cosmos/cosmos-sdk/x/staking/client/cli" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetGenesisGenTxCmd builds the application's gentx command. 
+func GetGenesisGenTxCmd(mbm module.BasicManager, txEncCfg client.TxEncodingConfig, genBalIterator types.GenesisBalancesIterator, defaultNodeHome string) *cobra.Command { + ipDefault, _ := server.ExternalIP() + fsCreateValidator, defaultsDesc := cli.CreateValidatorMsgFlagSet(ipDefault) + + cmd := &cobra.Command{ + Use: "gentx [key_name] [amount]", + Short: "Generate a genesis tx carrying a self delegation", + Args: cobra.ExactArgs(2), + Long: fmt.Sprintf(`Generate a genesis transaction that creates a validator with a self-delegation, +that is signed by the key in the Keyring referenced by a given name. A node ID and Bech32 consensus +pubkey may optionally be provided. If they are omitted, they will be retrieved from the priv_validator.json +file. The following default parameters are included: + %s + +Example: +$ %s gentx my-key-name 1000000uakt --home=/path/to/home/dir --keyring-backend=os --chain-id=test-chain-1 \ + --moniker="myvalidator" \ + --commission-max-change-rate=0.01 \ + --commission-max-rate=1.0 \ + --commission-rate=0.07 \ + --details="..." \ + --security-contact="..." \ + --website="..." +`, defaultsDesc, version.AppName, + ), + PreRunE: func(cmd *cobra.Command, args []string) error { + gas, err := cmd.Flags().GetString(cflags.FlagGas) + if err != nil { + return err + } + + if gas == cflags.GasFlagAuto { + cmd.Flags().Set(cflags.FlagGas, strconv.Itoa(cflags.DefaultGasLimit)) + } + + return nil + }, + RunE: func(cmd *cobra.Command, args []string) error { + stcx := server.GetServerContextFromCmd(cmd) + cctx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + cdc := cctx.Codec + + config := stcx.Config + config.SetRoot(cctx.HomeDir) + + nodeID, valPubKey, err := genutil.InitializeNodeValidatorFiles(stcx.Config) + if err != nil { + return errors.Wrap(err, "failed to initialize node validator files") + } + + // read --nodeID, if empty take it from priv_validator.json + if nodeIDString, _ := cmd.Flags().GetString(cflags.FlagNodeID); nodeIDString != "" { + nodeID = nodeIDString + } + + // read --pubkey, if empty take it from priv_validator.json + if pkStr, _ := cmd.Flags().GetString(cflags.FlagPubKey); pkStr != "" { + if err := cctx.Codec.UnmarshalInterfaceJSON([]byte(pkStr), &valPubKey); err != nil { + return errors.Wrap(err, "failed to unmarshal validator public key") + } + } + + genDoc, err := tmtypes.GenesisDocFromFile(config.GenesisFile()) + if err != nil { + return errors.Wrapf(err, "failed to read genesis doc file %s", config.GenesisFile()) + } + + var genesisState map[string]json.RawMessage + if err = json.Unmarshal(genDoc.AppState, &genesisState); err != nil { + return errors.Wrap(err, "failed to unmarshal genesis state") + } + + if err = mbm.ValidateGenesis(cdc, txEncCfg, genesisState); err != nil { + return errors.Wrap(err, "failed to validate genesis state") + } + + inBuf := bufio.NewReader(cmd.InOrStdin()) + + name := args[0] + key, err := cctx.Keyring.Key(name) + if err != nil { + return errors.Wrapf(err, "failed to fetch '%s' from the keyring", name) + } + + moniker := config.Moniker + if m, _ := cmd.Flags().GetString(cflags.FlagMoniker); m != "" { + moniker = m + } + + // set flags for creating a gentx + createValCfg, err := cli.PrepareConfigForTxCreateValidator(cmd.Flags(), moniker, nodeID, genDoc.ChainID, valPubKey) + if err != nil { + return errors.Wrap(err, "error creating configuration to create validator msg") + } + + amount := args[1] + coins, err := sdk.ParseCoinsNormalized(amount) + if err != nil { + return errors.Wrap(err, "failed 
to parse coins") + } + addr, err := key.GetAddress() + if err != nil { + return err + } + err = genutil.ValidateAccountInGenesis(genesisState, genBalIterator, addr, coins, cdc) + if err != nil { + return errors.Wrap(err, "failed to validate account in genesis") + } + + txFactory, err := tx.NewFactoryCLI(cctx, cmd.Flags()) + if err != nil { + return err + } + + pub, err := key.GetAddress() + if err != nil { + return err + } + cctx = cctx.WithInput(inBuf).WithFromAddress(pub) + + // The following line comes from a discrepancy between the `gentx` + // and `create-validator` commands: + // - `gentx` expects amount as an arg, + // - `create-validator` expects amount as a required flag. + // ref: https://github.com/cosmos/cosmos-sdk/issues/8251 + // Since gentx doesn't set the amount flag (which `create-validator` + // reads from), we copy the amount arg into the valCfg directly. + // + // Ideally, the `create-validator` command should take a validator + // config file instead of so many flags. + // ref: https://github.com/cosmos/cosmos-sdk/issues/8177 + createValCfg.Amount = amount + + // create a 'create-validator' message + txBldr, msg, err := cli.BuildCreateValidatorMsg(cctx, createValCfg, txFactory, true) + if err != nil { + return errors.Wrap(err, "failed to build create-validator message") + } + + if key.GetType() == keyring.TypeOffline || key.GetType() == keyring.TypeMulti { + cmd.PrintErrln("Offline key passed in. Use `tx sign` command to sign.") + return txBldr.PrintUnsignedTx(cctx, msg) + } + + // write the unsigned transaction to the buffer + w := bytes.NewBuffer([]byte{}) + cctx = cctx.WithOutput(w) + + if err = msg.ValidateBasic(); err != nil { + return err + } + + if err = txBldr.PrintUnsignedTx(cctx, msg); err != nil { + return errors.Wrap(err, "failed to print unsigned std tx") + } + + // read the transaction + stdTx, err := readUnsignedGenTxFile(cctx, w) + if err != nil { + return errors.Wrap(err, "failed to read unsigned gen tx file") + } + + // sign the transaction and write it to the output file + txBuilder, err := cctx.TxConfig.WrapTxBuilder(stdTx) + if err != nil { + return fmt.Errorf("error creating tx builder: %w", err) + } + + err = authclient.SignTx(txFactory, cctx, name, txBuilder, true, true) + if err != nil { + return errors.Wrap(err, "failed to sign std tx") + } + + outputDocument, _ := cmd.Flags().GetString(cflags.FlagOutputDocument) + if outputDocument == "" { + outputDocument, err = makeOutputFilepath(config.RootDir, nodeID) + if err != nil { + return errors.Wrap(err, "failed to create output file path") + } + } + + if err := writeSignedGenTx(cctx, outputDocument, stdTx); err != nil { + return errors.Wrap(err, "failed to write signed gen tx") + } + + cmd.PrintErrf("Genesis transaction written to %q\n", outputDocument) + return nil + }, + } + + cmd.Flags().String(cflags.FlagHome, defaultNodeHome, "The application home directory") + cmd.Flags().String(cflags.FlagOutputDocument, "", "Write the genesis transaction JSON document to the given file instead of the default location") + cmd.Flags().AddFlagSet(fsCreateValidator) + cflags.AddTxFlagsToCmd(cmd) + _ = cmd.Flags().MarkHidden(cflags.FlagOutput) // signing makes sense to output only json + + return cmd +} + +func makeOutputFilepath(rootDir, nodeID string) (string, error) { + writePath := filepath.Join(rootDir, "config", "gentx") + if err := os.MkdirAll(writePath, 0o700); err != nil { + return "", fmt.Errorf("could not create directory %q: %w", writePath, err) + } + + return filepath.Join(writePath, 
fmt.Sprintf("gentx-%v.json", nodeID)), nil +} + +func readUnsignedGenTxFile(clientCtx client.Context, r io.Reader) (sdk.Tx, error) { + bz, err := io.ReadAll(r) + if err != nil { + return nil, err + } + + aTx, err := clientCtx.TxConfig.TxJSONDecoder()(bz) + if err != nil { + return nil, err + } + + return aTx, err +} + +func writeSignedGenTx(clientCtx client.Context, outputDocument string, tx sdk.Tx) error { + outputFile, err := os.OpenFile(outputDocument, os.O_CREATE|os.O_EXCL|os.O_WRONLY, 0o644) + if err != nil { + return err + } + defer func() { + _ = outputFile.Close() + }() + + txj, err := clientCtx.TxConfig.TxJSONEncoder()(tx) + if err != nil { + return err + } + + _, err = fmt.Fprintf(outputFile, "%s\n", txj) + + return err +} diff --git a/go/cli/genesis_gentx_test.go b/go/cli/genesis_gentx_test.go new file mode 100644 index 00000000..2c198298 --- /dev/null +++ b/go/cli/genesis_gentx_test.go @@ -0,0 +1,93 @@ +package cli_test + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/cosmos/cosmos-sdk/client" + svrcmd "github.com/cosmos/cosmos-sdk/server/cmd" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + + "pkg.akt.dev/go/cli" + cflags "pkg.akt.dev/go/cli/flags" +) + +func (s *GenesisCLITestSuite) TestGenTxCmd() { + amount := sdk.NewCoin("uakt", sdk.NewInt(12)) + + tests := []struct { + name string + args []string + expCmdOutput string + }{ + { + name: "invalid commission rate returns error", + args: []string{ + fmt.Sprintf("--%s=%s", cflags.FlagChainID, s.baseCtx.ChainID), + fmt.Sprintf("--%s=1", cflags.FlagCommissionRate), + "node0", + amount.String(), + }, + expCmdOutput: fmt.Sprintf("--%s=%s --%s=1 %s %s", cflags.FlagChainID, s.baseCtx.ChainID, cflags.FlagCommissionRate, "node0", amount.String()), + }, + { + name: "valid gentx", + args: []string{ + fmt.Sprintf("--%s=%s", cflags.FlagChainID, s.baseCtx.ChainID), + "node0", + amount.String(), + }, + expCmdOutput: fmt.Sprintf("--%s=%s %s %s", cflags.FlagChainID, s.baseCtx.ChainID, "node0", amount.String()), + }, + { + name: "invalid pubkey", + args: []string{ + fmt.Sprintf("--%s=%s", cflags.FlagChainID, "test-chain-1"), + fmt.Sprintf("--%s={\"key\":\"BOIkjkFruMpfOFC9oNPhiJGfmY2pHF/gwHdLDLnrnS0=\"}", cflags.FlagPubKey), + "node0", + amount.String(), + }, + expCmdOutput: fmt.Sprintf("--%s=test-chain-1 --%s={\"key\":\"BOIkjkFruMpfOFC9oNPhiJGfmY2pHF/gwHdLDLnrnS0=\"} %s %s ", cflags.FlagChainID, cflags.FlagPubKey, "node0", amount.String()), + }, + { + name: "valid pubkey flag", + args: []string{ + fmt.Sprintf("--%s=%s", cflags.FlagChainID, "test-chain-1"), + fmt.Sprintf("--%s={\"@type\":\"/cosmos.crypto.ed25519.PubKey\",\"key\":\"BOIkjkFruMpfOFC9oNPhiJGfmY2pHF/gwHdLDLnrnS0=\"}", cflags.FlagPubKey), + "node0", + amount.String(), + }, + expCmdOutput: fmt.Sprintf("--%s=test-chain-1 --%s={\"@type\":\"/cosmos.crypto.ed25519.PubKey\",\"key\":\"BOIkjkFruMpfOFC9oNPhiJGfmY2pHF/gwHdLDLnrnS0=\"} %s %s ", cflags.FlagChainID, cflags.FlagPubKey, "node0", amount.String()), + }, + } + + for _, tc := range tests { + dir := s.T().TempDir() + genTxFile := filepath.Join(dir, "myTx") + + tc.args = append(tc.args, fmt.Sprintf("--%s=%s", cflags.FlagOutputDocument, genTxFile)) + + s.Run(tc.name, func() { + cctx := s.cctx + ctx := svrcmd.CreateExecuteContext(context.Background()) + + cmd := cli.GetGenesisGenTxCmd( + module.NewBasicManager(), + cctx.TxConfig, + banktypes.GenesisBalancesIterator{}, + cctx.HomeDir, + ) + cmd.SetContext(ctx) + 
cmd.SetArgs(tc.args) + + s.Require().NoError(client.SetCmdClientContextHandler(cctx, cmd)) + + if len(tc.args) != 0 { + s.Require().Contains(fmt.Sprint(cmd), tc.expCmdOutput) + } + }) + } +} diff --git a/go/cli/genesis_init.go b/go/cli/genesis_init.go new file mode 100644 index 00000000..924a620f --- /dev/null +++ b/go/cli/genesis_init.go @@ -0,0 +1,168 @@ +package cli + +import ( + "bufio" + "encoding/json" + "fmt" + "os" + "path/filepath" + + cfg "github.com/cometbft/cometbft/config" + tmrand "github.com/cometbft/cometbft/libs/rand" + "github.com/cometbft/cometbft/types" + "github.com/cosmos/go-bip39" + "github.com/pkg/errors" + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/input" + "github.com/cosmos/cosmos-sdk/server" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + "github.com/cosmos/cosmos-sdk/x/genutil" + + cflags "pkg.akt.dev/go/cli/flags" +) + +type printInfo struct { + Moniker string `json:"moniker" yaml:"moniker"` + ChainID string `json:"chain_id" yaml:"chain_id"` + NodeID string `json:"node_id" yaml:"node_id"` + GenTxsDir string `json:"gentxs_dir" yaml:"gentxs_dir"` + AppMessage json.RawMessage `json:"app_message" yaml:"app_message"` +} + +func newPrintInfo(moniker, chainID, nodeID, genTxsDir string, appMessage json.RawMessage) printInfo { + return printInfo{ + Moniker: moniker, + ChainID: chainID, + NodeID: nodeID, + GenTxsDir: genTxsDir, + AppMessage: appMessage, + } +} + +func displayInfo(info printInfo) error { + out, err := json.MarshalIndent(info, "", " ") + if err != nil { + return err + } + + _, err = fmt.Fprintf(os.Stderr, "%s\n", sdk.MustSortJSON(out)) + + return err +} + +// GetGenesisInitCmd returns a command that initializes all files needed for Tendermint +// and the respective application. 
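+//
+// Example invocation, assuming the command is mounted under a `genesis`
+// subcommand of an `akash`-style binary (the binary name is an assumption):
+//
+//	akash genesis init my-node --chain-id=local-test --default-denom=uakt --initial-height=1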
+func GetGenesisInitCmd(mbm module.BasicManager, defaultNodeHome string) *cobra.Command { + cmd := &cobra.Command{ + Use: "init [moniker]", + Short: "Initialize private validator, p2p, genesis, and application configuration files", + Long: `Initialize validators's and node's configuration files.`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + clientCtx := client.GetClientContextFromCmd(cmd) + cdc := clientCtx.Codec + + serverCtx := server.GetServerContextFromCmd(cmd) + config := serverCtx.Config + config.SetRoot(clientCtx.HomeDir) + + chainID, _ := cmd.Flags().GetString(cflags.FlagChainID) + switch { + case chainID != "": + case clientCtx.ChainID != "": + chainID = clientCtx.ChainID + default: + chainID = fmt.Sprintf("test-chain-%v", tmrand.Str(6)) + } + + // Get bip39 mnemonic + var mnemonic string + if isRecover, _ := cmd.Flags().GetBool(cflags.FlagRecover); isRecover { + inBuf := bufio.NewReader(cmd.InOrStdin()) + value, err := input.GetString("Enter your bip39 mnemonic", inBuf) + if err != nil { + return err + } + + mnemonic = value + if !bip39.IsMnemonicValid(mnemonic) { + return errors.New("invalid mnemonic") + } + } + + // Get initial height + initHeight, _ := cmd.Flags().GetInt64(flags.FlagInitHeight) + if initHeight < 1 { + initHeight = 1 + } + + nodeID, _, err := genutil.InitializeNodeValidatorFilesFromMnemonic(config, mnemonic) + if err != nil { + return err + } + + config.Moniker = args[0] + + genFile := config.GenesisFile() + overwrite, _ := cmd.Flags().GetBool(cflags.FlagOverwrite) + defaultDenom, _ := cmd.Flags().GetString(cflags.FlagDefaultBondDenom) + + // use os.Stat to check if the file exists + _, err = os.Stat(genFile) + if !overwrite && !os.IsNotExist(err) { + return fmt.Errorf("genesis.json file already exists: %v", genFile) + } + + // Overwrites the SDK default denom for side-effects + if defaultDenom != "" { + sdk.DefaultBondDenom = defaultDenom + } + + appGenState := mbm.DefaultGenesis(cdc) + + appState, err := json.MarshalIndent(appGenState, "", " ") + if err != nil { + return errors.Wrap(err, "Failed to marshal default genesis state") + } + + genDoc := &types.GenesisDoc{} + if _, err := os.Stat(genFile); err != nil { + if !os.IsNotExist(err) { + return err + } + } else { + genDoc, err = types.GenesisDocFromFile(genFile) + if err != nil { + return errors.Wrap(err, "Failed to read genesis doc from file") + } + } + + genDoc.ChainID = chainID + genDoc.Validators = nil + genDoc.AppState = appState + genDoc.InitialHeight = initHeight + + if err = genutil.ExportGenesisFile(genDoc, genFile); err != nil { + return errors.Wrap(err, "Failed to export genesis file") + } + + toPrint := newPrintInfo(config.Moniker, chainID, nodeID, "", appState) + + cfg.WriteConfigFile(filepath.Join(config.RootDir, "config", "config.toml"), config) + return displayInfo(toPrint) + }, + } + + cmd.Flags().String(cflags.FlagHome, defaultNodeHome, "node's home directory") + cmd.Flags().BoolP(cflags.FlagOverwrite, "o", false, "overwrite the genesis.json file") + cmd.Flags().Bool(cflags.FlagRecover, false, "provide seed phrase to recover existing key instead of creating") + cmd.Flags().String(cflags.FlagChainID, "", "genesis file chain-id, if left blank will be randomly created") + cmd.Flags().String(cflags.FlagDefaultBondDenom, "uakt", "genesis file default denomination, if left blank default value is 'uakt'") + cmd.Flags().Int64(cflags.FlagInitHeight, 1, "specify the initial block height at genesis") + + return cmd +} diff --git a/go/cli/genesis_init_test.go 
b/go/cli/genesis_init_test.go new file mode 100644 index 00000000..4fce1600 --- /dev/null +++ b/go/cli/genesis_init_test.go @@ -0,0 +1,362 @@ +package cli_test + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "testing" + "time" + + "github.com/spf13/viper" + "github.com/stretchr/testify/require" + + abci_server "github.com/cometbft/cometbft/abci/server" + "github.com/cometbft/cometbft/libs/log" + tmtypes "github.com/cometbft/cometbft/types" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + "github.com/cosmos/cosmos-sdk/server" + "github.com/cosmos/cosmos-sdk/server/mock" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + "github.com/cosmos/cosmos-sdk/x/genutil" + genutiltest "github.com/cosmos/cosmos-sdk/x/genutil/client/testutil" + "github.com/cosmos/cosmos-sdk/x/staking" + + "pkg.akt.dev/go/cli" + cflags "pkg.akt.dev/go/cli/flags" +) + +var testMbm = module.NewBasicManager( + staking.AppModuleBasic{}, + genutil.AppModuleBasic{}, +) + +func TestInitCmd(t *testing.T) { + tests := []struct { + name string + flags func(dir string) []string + shouldErr bool + err error + }{ + { + name: "happy path", + flags: func(_ string) []string { + return []string{ + "appnode-test", + } + }, + shouldErr: false, + err: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + home := t.TempDir() + logger := log.NewNopLogger() + cfg, err := genutiltest.CreateDefaultTendermintConfig(home) + require.NoError(t, err) + + sctx := server.NewContext(viper.New(), cfg, logger) + interfaceRegistry := types.NewInterfaceRegistry() + marshaler := codec.NewProtoCodec(interfaceRegistry) + cctx := client.Context{}. + WithCodec(marshaler). + WithLegacyAmino(makeCodec()). + WithHomeDir(home) + + ctx := context.Background() + ctx = context.WithValue(ctx, cli.ClientContextKey, &cctx) + ctx = context.WithValue(ctx, server.ServerContextKey, sctx) + + cmd := cli.GetGenesisInitCmd(testMbm, home) + cmd.SetArgs( + tt.flags(home), + ) + + if tt.shouldErr { + err := cmd.ExecuteContext(ctx) + require.EqualError(t, err, tt.err.Error()) + } else { + require.NoError(t, cmd.ExecuteContext(ctx)) + } + }) + } +} + +func TestInitRecover(t *testing.T) { + home := t.TempDir() + logger := log.NewNopLogger() + cfg, err := genutiltest.CreateDefaultTendermintConfig(home) + require.NoError(t, err) + + serverCtx := server.NewContext(viper.New(), cfg, logger) + interfaceRegistry := types.NewInterfaceRegistry() + marshaler := codec.NewProtoCodec(interfaceRegistry) + clientCtx := client.Context{}. + WithCodec(marshaler). + WithLegacyAmino(makeCodec()). 
+ WithHomeDir(home) + + ctx := context.Background() + ctx = context.WithValue(ctx, cli.ClientContextKey, &clientCtx) + ctx = context.WithValue(ctx, server.ServerContextKey, serverCtx) + + cmd := cli.GetGenesisInitCmd(testMbm, home) + mockIn := testutil.ApplyMockIODiscardOutErr(cmd) + + cmd.SetArgs([]string{ + "appnode-test", + fmt.Sprintf("--%s=true", cflags.FlagRecover), + }) + + // use valid mnemonic and complete recovery key generation successfully + mockIn.Reset("decide praise business actor peasant farm drastic weather extend front hurt later song give verb rhythm worry fun pond reform school tumble august one\n") + require.NoError(t, cmd.ExecuteContext(ctx)) +} + +func TestInitDefaultBondDenom(t *testing.T) { + home := t.TempDir() + logger := log.NewNopLogger() + cfg, err := genutiltest.CreateDefaultTendermintConfig(home) + require.NoError(t, err) + + serverCtx := server.NewContext(viper.New(), cfg, logger) + interfaceRegistry := types.NewInterfaceRegistry() + marshaler := codec.NewProtoCodec(interfaceRegistry) + clientCtx := client.Context{}. + WithCodec(marshaler). + WithLegacyAmino(makeCodec()). + WithHomeDir(home) + + ctx := context.Background() + ctx = context.WithValue(ctx, cli.ClientContextKey, &clientCtx) + ctx = context.WithValue(ctx, server.ServerContextKey, serverCtx) + + cmd := cli.GetGenesisInitCmd(testMbm, home) + + cmd.SetArgs([]string{ + "appnode-test", + fmt.Sprintf("--%s=%s", cflags.FlagHome, home), + fmt.Sprintf("--%s=testtoken", cflags.FlagDefaultBondDenom), + }) + + require.NoError(t, cmd.ExecuteContext(ctx)) +} + +func TestEmptyState(t *testing.T) { + home := t.TempDir() + logger := log.NewNopLogger() + cfg, err := genutiltest.CreateDefaultTendermintConfig(home) + require.NoError(t, err) + + serverCtx := server.NewContext(viper.New(), cfg, logger) + interfaceRegistry := types.NewInterfaceRegistry() + marshaler := codec.NewProtoCodec(interfaceRegistry) + clientCtx := client.Context{}. + WithCodec(marshaler). + WithLegacyAmino(makeCodec()). 
+ WithHomeDir(home) + + ctx := context.Background() + ctx = context.WithValue(ctx, cli.ClientContextKey, &clientCtx) + ctx = context.WithValue(ctx, server.ServerContextKey, serverCtx) + + cmd := cli.GetGenesisInitCmd(testMbm, home) + cmd.SetArgs([]string{"appnode-test", fmt.Sprintf("--%s=%s", cflags.FlagHome, home)}) + + require.NoError(t, cmd.ExecuteContext(ctx)) + + old := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + cmd = server.ExportCmd(nil, home) + cmd.SetArgs([]string{fmt.Sprintf("--%s=%s", cflags.FlagHome, home)}) + require.NoError(t, cmd.ExecuteContext(ctx)) + + outC := make(chan string) + go func() { + var buf bytes.Buffer + _, _ = io.Copy(&buf, r) + outC <- buf.String() + }() + + _ = w.Close() + + os.Stdout = old + out := <-outC + + require.Contains(t, out, "genesis_time") + require.Contains(t, out, "chain_id") + require.Contains(t, out, "consensus_params") + require.Contains(t, out, "app_hash") + require.Contains(t, out, "app_state") +} + +func TestStartStandAlone(t *testing.T) { + home := t.TempDir() + logger := log.NewNopLogger() + interfaceRegistry := types.NewInterfaceRegistry() + marshaler := codec.NewProtoCodec(interfaceRegistry) + err := genutiltest.ExecInitCmd(testMbm, home, marshaler) + require.NoError(t, err) + + app, err := mock.NewApp(home, logger) + require.NoError(t, err) + + svrAddr, _, err := server.FreeTCPAddr() + require.NoError(t, err) + + svr, err := abci_server.NewServer(svrAddr, "socket", app) + require.NoError(t, err, "error creating listener") + + svr.SetLogger(logger.With("module", "abci-server")) + err = svr.Start() + require.NoError(t, err) + + timer := time.NewTimer(time.Duration(2) * time.Second) + for range timer.C { + err = svr.Stop() + require.NoError(t, err) + break + } +} + +func TestInitNodeValidatorFiles(t *testing.T) { + home := t.TempDir() + cfg, err := genutiltest.CreateDefaultTendermintConfig(home) + require.NoError(t, err) + + nodeID, valPubKey, err := genutil.InitializeNodeValidatorFiles(cfg) + require.NoError(t, err) + + require.NotEqual(t, "", nodeID) + require.NotEqual(t, 0, len(valPubKey.Bytes())) +} + +func TestInitConfig(t *testing.T) { + home := t.TempDir() + logger := log.NewNopLogger() + cfg, err := genutiltest.CreateDefaultTendermintConfig(home) + require.NoError(t, err) + + sctx := server.NewContext(viper.New(), cfg, logger) + interfaceRegistry := types.NewInterfaceRegistry() + marshaler := codec.NewProtoCodec(interfaceRegistry) + cctx := client.Context{}. + WithCodec(marshaler). + WithLegacyAmino(makeCodec()). + WithChainID("foo"). 
// add chain-id to cctx + WithHomeDir(home) + + ctx := context.Background() + ctx = context.WithValue(ctx, cli.ClientContextKey, &cctx) + ctx = context.WithValue(ctx, server.ServerContextKey, sctx) + + cmd := cli.GetGenesisInitCmd(testMbm, home) + cmd.SetArgs([]string{"testnode"}) + + require.NoError(t, cmd.ExecuteContext(ctx)) + + old := os.Stdout + r, w, _ := os.Pipe() + os.Stdout = w + + cmd = server.ExportCmd(nil, home) + require.NoError(t, cmd.ExecuteContext(ctx)) + + outC := make(chan string) + go func() { + var buf bytes.Buffer + _, _ = io.Copy(&buf, r) + outC <- buf.String() + }() + + _ = w.Close() + os.Stdout = old + out := <-outC + + require.Contains(t, out, "\"chain_id\": \"foo\"") +} + +func TestInitWithHeight(t *testing.T) { + home := t.TempDir() + logger := log.NewNopLogger() + cfg, err := genutiltest.CreateDefaultTendermintConfig(home) + require.NoError(t, err) + + sctx := server.NewContext(viper.New(), cfg, logger) + interfaceRegistry := types.NewInterfaceRegistry() + marshaler := codec.NewProtoCodec(interfaceRegistry) + cctx := client.Context{}. + WithCodec(marshaler). + WithLegacyAmino(makeCodec()). + WithChainID("foo"). // add chain-id to cctx + WithHomeDir(home) + + ctx := context.Background() + ctx = context.WithValue(ctx, cli.ClientContextKey, &cctx) + ctx = context.WithValue(ctx, server.ServerContextKey, sctx) + + testInitialHeight := int64(333) + + cmd := cli.GetGenesisInitCmd(testMbm, home) + cmd.SetArgs([]string{"init-height-test", fmt.Sprintf("--%s=%d", cflags.FlagInitHeight, testInitialHeight)}) + + require.NoError(t, cmd.ExecuteContext(ctx)) + + appGenesis, importErr := tmtypes.GenesisDocFromFile(cfg.GenesisFile()) + require.NoError(t, importErr) + + require.Equal(t, testInitialHeight, appGenesis.InitialHeight) +} + +func TestInitWithNegativeHeight(t *testing.T) { + home := t.TempDir() + logger := log.NewNopLogger() + cfg, err := genutiltest.CreateDefaultTendermintConfig(home) + require.NoError(t, err) + + sctx := server.NewContext(viper.New(), cfg, logger) + interfaceRegistry := types.NewInterfaceRegistry() + marshaler := codec.NewProtoCodec(interfaceRegistry) + cctx := client.Context{}. + WithCodec(marshaler). + WithLegacyAmino(makeCodec()). + WithChainID("foo"). 
// add chain-id to cctx + WithHomeDir(home) + + ctx := context.Background() + ctx = context.WithValue(ctx, cli.ClientContextKey, &cctx) + ctx = context.WithValue(ctx, server.ServerContextKey, sctx) + + testInitialHeight := int64(-333) + + cmd := cli.GetGenesisInitCmd(testMbm, home) + cmd.SetArgs([]string{"init-height-test", fmt.Sprintf("--%s=%d", cflags.FlagInitHeight, testInitialHeight)}) + + require.NoError(t, cmd.ExecuteContext(ctx)) + + appGenesis, importErr := tmtypes.GenesisDocFromFile(cfg.GenesisFile()) + require.NoError(t, importErr) + + require.Equal(t, int64(1), appGenesis.InitialHeight) +} + +// custom tx codec +func makeCodec() *codec.LegacyAmino { + cdc := codec.NewLegacyAmino() + + sdk.RegisterLegacyAminoCodec(cdc) + cryptocodec.RegisterCrypto(cdc) + + return cdc +} diff --git a/go/cli/genesis_migrate.go b/go/cli/genesis_migrate.go new file mode 100644 index 00000000..09200c69 --- /dev/null +++ b/go/cli/genesis_migrate.go @@ -0,0 +1,141 @@ +package cli + +import ( + "encoding/json" + "fmt" + "sort" + "time" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + "golang.org/x/exp/maps" + + tmjson "github.com/cometbft/cometbft/libs/json" + + "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + v043 "github.com/cosmos/cosmos-sdk/x/genutil/migrations/v043" + v046 "github.com/cosmos/cosmos-sdk/x/genutil/migrations/v046" + v047 "github.com/cosmos/cosmos-sdk/x/genutil/migrations/v047" + "github.com/cosmos/cosmos-sdk/x/genutil/types" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// Allow applications to extend and modify the migration process. +// +// Ref: https://github.com/cosmos/cosmos-sdk/issues/5041 +var migrationMap = types.MigrationMap{ + "v0.43": v043.Migrate, // NOTE: v0.43, v0.44 and v0.45 are genesis compatible. + "v0.46": v046.Migrate, + "v0.47": v047.Migrate, +} + +// GetMigrationCallback returns a MigrationCallback for a given version. +func GetMigrationCallback(version string) types.MigrationCallback { + return migrationMap[version] +} + +// GetMigrationVersions get all migration version in a sorted slice. +func GetMigrationVersions() []string { + versions := maps.Keys(migrationMap) + sort.Strings(versions) + + return versions +} + +// MigrateGenesisCmd returns a command to execute genesis state migration. +func MigrateGenesisCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "migrate [target-version] [genesis-file]", + Short: "Migrate genesis to a specified target version", + Long: fmt.Sprintf(`Migrate the source genesis into the target version and print to file or STDOUT. + +Example: +$ %s migrate v0.36 /path/to/genesis.json --chain-id= --genesis-time=2019-04-22T17:00:00Z +`, version.AppName), + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + return MigrateHandler(cmd, args, migrationMap) + }, + } + + cmd.Flags().String(cflags.FlagGenesisTime, "", "override genesis_time with this flag") + cmd.Flags().String(cflags.FlagChainID, "", "override chain_id with this flag") + + return cmd +} + +// MigrateHandler handles the migration command with a migration map as input, +// returning an error upon failure. 
+func MigrateHandler(cmd *cobra.Command, args []string, migrations types.MigrationMap) error { + clientCtx := client.GetClientContextFromCmd(cmd) + + var err error + + target := args[0] + importGenesis := args[1] + + genDoc, err := validateGenDoc(importGenesis) + if err != nil { + return err + } + + // Since some default values are valid values, we just print to + // make sure the user didn't forget to update these values. + if genDoc.ConsensusParams.Evidence.MaxBytes == 0 { + fmt.Printf("Warning: consensus_params.evidence.max_bytes is set to 0. If this is"+ + " deliberate, feel free to ignore this warning. If not, please have a look at the chain"+ + " upgrade guide at %s.\n", chainUpgradeGuide) + } + + var initialState types.AppMap + if err := json.Unmarshal(genDoc.AppState, &initialState); err != nil { + return errors.Wrap(err, "failed to JSON unmarshal initial genesis state") + } + + migrationFunc := migrations[target] + if migrationFunc == nil { + return fmt.Errorf("unknown migration function for version: %s", target) + } + + // TODO: handle error from migrationFunc call + newGenState := migrationFunc(initialState, clientCtx) + + genDoc.AppState, err = json.Marshal(newGenState) + if err != nil { + return errors.Wrap(err, "failed to JSON marshal migrated genesis state") + } + + genesisTime, _ := cmd.Flags().GetString(cflags.FlagGenesisTime) + if genesisTime != "" { + var t time.Time + + err := t.UnmarshalText([]byte(genesisTime)) + if err != nil { + return errors.Wrap(err, "failed to unmarshal genesis time") + } + + genDoc.GenesisTime = t + } + + chainID, _ := cmd.Flags().GetString(cflags.FlagChainID) + if chainID != "" { + genDoc.ChainID = chainID + } + + bz, err := tmjson.Marshal(genDoc) + if err != nil { + return errors.Wrap(err, "failed to marshal genesis doc") + } + + sortedBz, err := sdk.SortJSON(bz) + if err != nil { + return errors.Wrap(err, "failed to sort JSON genesis doc") + } + + cmd.Println(string(sortedBz)) + + return nil +} diff --git a/go/cli/genesis_migrate_test.go b/go/cli/genesis_migrate_test.go new file mode 100644 index 00000000..5983a29f --- /dev/null +++ b/go/cli/genesis_migrate_test.go @@ -0,0 +1,59 @@ +package cli_test + +import ( + "context" + "testing" + + "github.com/cosmos/cosmos-sdk/testutil" + "github.com/stretchr/testify/require" + + "pkg.akt.dev/go/cli" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +func TestGetMigrationCallback(t *testing.T) { + for _, version := range cli.GetMigrationVersions() { + require.NotNil(t, cli.GetMigrationCallback(version)) + } +} + +func (s *GenesisCLITestSuite) TestMigrateGenesis() { + testCases := []struct { + name string + genesis string + target string + expErr bool + expErrMsg string + check func(jsonOut string) + }{ + { + "migrate 0.37 to 0.42", + v037Exported, + "v0.42", + true, "Make sure that you have correctly migrated all Tendermint consensus params", func(_ string) {}, + }, + { + "migrate 0.42 to 0.43", + v040Valid, + "v0.43", + false, "", + func(jsonOut string) { + // Make sure the json output contains the ADR-037 gov weighted votes.
+ s.Require().Contains(jsonOut, "\"weight\":\"1.000000000000000000\"") + }, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + genesisFile := testutil.WriteToNewTempFile(s.T(), tc.genesis) + jsonOutput, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cli.MigrateGenesisCmd(), tc.target, genesisFile.Name()) + if tc.expErr { + s.Require().Contains(err.Error(), tc.expErrMsg) + } else { + s.Require().NoError(err) + tc.check(jsonOutput.String()) + } + }) + } +} diff --git a/go/cli/genesis_suite_test.go b/go/cli/genesis_suite_test.go new file mode 100644 index 00000000..91955752 --- /dev/null +++ b/go/cli/genesis_suite_test.go @@ -0,0 +1,43 @@ +package cli_test + +import ( + "bytes" + "io" + + abci "github.com/cometbft/cometbft/abci/types" + rpcclientmock "github.com/cometbft/cometbft/rpc/client/mock" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli" + sdk "github.com/cosmos/cosmos-sdk/types" + testutilmod "github.com/cosmos/cosmos-sdk/types/module/testutil" + "github.com/cosmos/cosmos-sdk/x/genutil" +) + +type GenesisCLITestSuite struct { + CLITestSuite +} + +func (s *GenesisCLITestSuite) SetupSuite() { + s.encCfg = testutilmod.MakeTestEncodingConfig(genutil.AppModuleBasic{}) + s.kr = keyring.NewInMemory(s.encCfg.Codec) + s.baseCtx = client.Context{}. + WithKeyring(s.kr). + WithTxConfig(s.encCfg.TxConfig). + WithCodec(s.encCfg.Codec). + WithLegacyAmino(s.encCfg.Amino). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). + WithChainID("test-chain") + + var outBuf bytes.Buffer + ctxGen := func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&sdk.TxResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return s.baseCtx.WithClient(c) + } + s.cctx = ctxGen().WithOutput(&outBuf) +} diff --git a/go/cli/genesis_validate.go b/go/cli/genesis_validate.go new file mode 100644 index 00000000..31f94294 --- /dev/null +++ b/go/cli/genesis_validate.go @@ -0,0 +1,72 @@ +package cli + +import ( + "encoding/json" + "fmt" + + tmtypes "github.com/cometbft/cometbft/types" + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/server" + "github.com/cosmos/cosmos-sdk/types/module" +) + +const chainUpgradeGuide = "https://github.com/cosmos/cosmos-sdk/blob/main/UPGRADING.md" + +// getGenesisValidateCmd takes a genesis file, and makes sure that it is valid. 
+func getGenesisValidateCmd(mbm module.BasicManager) *cobra.Command { + return &cobra.Command{ + Use: "validate [file]", + Args: cobra.RangeArgs(0, 1), + Short: "validates the genesis file at the default location or at the location passed as an arg", + RunE: func(cmd *cobra.Command, args []string) (err error) { + sctx := server.GetServerContextFromCmd(cmd) + cctx := client.GetClientContextFromCmd(cmd) + + cdc := cctx.Codec + + // Load default if passed no args, otherwise load passed file + var genesis string + if len(args) == 0 { + genesis = sctx.Config.GenesisFile() + } else { + genesis = args[0] + } + + genDoc, err := validateGenDoc(genesis) + if err != nil { + return err + } + + var genState map[string]json.RawMessage + if err = json.Unmarshal(genDoc.AppState, &genState); err != nil { + return fmt.Errorf("error unmarshalling genesis doc %s: %s", genesis, err.Error()) + } + + if err = mbm.ValidateGenesis(cdc, cctx.TxConfig, genState); err != nil { + return fmt.Errorf("error validating genesis file %s: %s", genesis, err.Error()) + } + + fmt.Printf("File at %s is a valid genesis file\n", genesis) + + return nil + }, + } +} + +// validateGenDoc reads a genesis file and validates that it is a correct +// Tendermint GenesisDoc. This function does not do any cosmos-related +// validation. +func validateGenDoc(importGenesisFile string) (*tmtypes.GenesisDoc, error) { + genDoc, err := tmtypes.GenesisDocFromFile(importGenesisFile) + if err != nil { + return nil, fmt.Errorf("%s. Make sure that"+ + " you have correctly migrated all Tendermint consensus params, please see the"+ + " chain migration guide at %s for more info", + err.Error(), chainUpgradeGuide, + ) + } + + return genDoc, nil +} diff --git a/go/cli/genesis_validate_test.go b/go/cli/genesis_validate_test.go new file mode 100644 index 00000000..a92c5a0f --- /dev/null +++ b/go/cli/genesis_validate_test.go @@ -0,0 +1,101 @@ +package cli_test + +import ( + "context" + + "github.com/cosmos/cosmos-sdk/testutil" + "github.com/cosmos/cosmos-sdk/x/genutil/client/cli" + + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +// An example exported genesis file from a 0.37 chain. Note that evidence +// parameters only contains `max_age`. +var v037Exported = `{ + "app_hash": "", + "app_state": {}, + "chain_id": "test", + "consensus_params": { + "block": { + "max_bytes": "22020096", + "max_gas": "-1", + "time_iota_ms": "1000" + }, + "evidence": { "max_age": "100000" }, + "validator": { "pub_key_types": ["ed25519"] } + }, + "genesis_time": "2020-09-29T20:16:29.172362037Z", + "validators": [] +}` + +// An example exported genesis file that's 0.40 compatible. +// We added the following app_state: +// +// - x/gov: added votes to test ADR-037 split votes migration. 
+var v040Valid = `{ + "app_hash": "", + "app_state": { + "gov": { + "starting_proposal_id": "0", + "deposits": [], + "votes": [ + { + "proposal_id": "5", + "voter": "cosmos1fl48vsnmsdzcv85q5d2q4z5ajdha8yu34mf0eh", + "option": "VOTE_OPTION_YES" + } + ], + "proposals": [], + "deposit_params": { "min_deposit": [], "max_deposit_period": "0s" }, + "voting_params": { "voting_period": "0s" }, + "tally_params": { "quorum": "0", "threshold": "0", "veto_threshold": "0" } + } + }, + "chain_id": "test", + "consensus_params": { + "block": { + "max_bytes": "22020096", + "max_gas": "-1", + "time_iota_ms": "1000" + }, + "evidence": { + "max_age_num_blocks": "100000", + "max_age_duration": "172800000000000", + "max_bytes": "0" + }, + "validator": { "pub_key_types": ["ed25519"] } + }, + "genesis_time": "2020-09-29T20:16:29.172362037Z", + "validators": [] +}` + +func (s *GenesisCLITestSuite) TestValidateGenesis() { + testCases := []struct { + name string + genesis string + expErr bool + }{ + { + "exported 0.37 genesis file", + v037Exported, + true, + }, + { + "valid 0.40 genesis file", + v040Valid, + false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + genesisFile := testutil.WriteToNewTempFile(s.T(), tc.genesis) + _, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cli.ValidateGenesisCmd(nil), genesisFile.Name()) + if tc.expErr { + s.Require().Contains(err.Error(), "Make sure that you have correctly migrated all Tendermint consensus params") + } else { + s.Require().NoError(err) + } + }) + } +} diff --git a/go/cli/go.mod b/go/cli/go.mod new file mode 100644 index 00000000..3dc3c6f5 --- /dev/null +++ b/go/cli/go.mod @@ -0,0 +1,208 @@ +module pkg.akt.dev/go/cli + +go 1.23.0 + +require ( + cosmossdk.io/errors v1.0.1 + cosmossdk.io/math v1.3.0 + github.com/chzyer/readline v1.5.1 + github.com/cometbft/cometbft v0.37.6 + github.com/cosmos/cosmos-sdk v0.47.16-ics-lsm + github.com/cosmos/go-bip39 v1.0.0 + github.com/cosmos/gogoproto v1.4.12 + github.com/manifoldco/promptui v0.9.0 + github.com/pkg/errors v0.9.1 + github.com/spf13/cobra v1.8.0 + github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.18.2 + github.com/stretchr/testify v1.9.0 + golang.org/x/exp v0.0.0-20231006140011-7918f672742d + google.golang.org/grpc v1.64.1 + gopkg.in/yaml.v3 v3.0.1 + pkg.akt.dev/go v0.0.2-rc3 + pkg.akt.dev/go/sdl v0.0.1-rc7 +) + +replace ( + github.com/cosmos/gogoproto => github.com/cosmos/gogoproto v1.4.10 + github.com/gogo/protobuf => github.com/cosmos/gogoproto v1.3.3-alpha.regen.1 + golang.org/x/exp => golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb + google.golang.org/genproto/googleapis/api => google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 +) + +require ( + cloud.google.com/go v0.113.0 // indirect + cloud.google.com/go/auth v0.4.1 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect + cloud.google.com/go/compute/metadata v0.3.0 // indirect + cloud.google.com/go/iam v1.1.8 // indirect + cloud.google.com/go/storage v1.40.0 // indirect + cosmossdk.io/api v0.3.1 // indirect + cosmossdk.io/core v0.5.1 // indirect + cosmossdk.io/depinject v1.0.0-alpha.4 // indirect + cosmossdk.io/log v1.3.1 // indirect + cosmossdk.io/tools/rosetta v0.2.1 // indirect + filippo.io/edwards25519 v1.1.0 // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.2 // indirect + github.com/ChainSafe/go-schnorrkel v1.0.0 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + 
github.com/aws/aws-sdk-go v1.44.203 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect + github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/boz/go-lifecycle v0.1.1 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect + github.com/cenkalti/backoff/v4 v4.1.3 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cockroachdb/apd/v2 v2.0.2 // indirect + github.com/cockroachdb/errors v1.10.0 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/coinbase/rosetta-sdk-go/types v1.0.0 // indirect + github.com/cometbft/cometbft-db v0.7.0 // indirect + github.com/confio/ics23/go v0.9.1 // indirect + github.com/cosmos/btcutil v1.0.5 // indirect + github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect + github.com/cosmos/gogogateway v1.2.0 // indirect + github.com/cosmos/iavl v0.20.1 // indirect + github.com/cosmos/ledger-cosmos-go v0.12.4 // indirect + github.com/cosmos/rosetta-sdk-go v0.10.0 // indirect + github.com/creachadair/taskgroup v0.3.2 // indirect + github.com/danieljoos/wincred v1.1.2 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f // indirect + github.com/dgraph-io/badger/v2 v2.2007.4 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect + github.com/edwingeng/deque/v2 v2.1.1 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/getsentry/sentry-go v0.23.0 // indirect + github.com/go-kit/kit v0.12.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/gogo/googleapis v1.4.1 // indirect + github.com/gogo/protobuf v1.3.3 // indirect + github.com/golang/glog v1.2.0 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect + github.com/golang/mock v1.6.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/orderedcode v0.0.1 // indirect + github.com/google/s2a-go v0.1.7 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect + github.com/googleapis/gax-go/v2 v2.12.4 // indirect + github.com/gorilla/handlers v1.5.1 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.1 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/gtank/merlin v0.1.1 // indirect + 
github.com/gtank/ristretto255 v0.1.2 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-getter v1.7.1 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-safetemp v1.0.0 // indirect + github.com/hashicorp/go-version v1.6.0 // indirect + github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect + github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c // indirect + github.com/hdevalence/ed25519consensus v0.1.0 // indirect + github.com/huandu/skiplist v1.2.0 // indirect + github.com/improbable-eng/grpc-web v0.15.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmespath/go-jmespath v0.4.0 // indirect + github.com/jmhodges/levigo v1.0.1-0.20191019112844-b572e7f4cdac // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.0 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/lib/pq v1.10.7 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect + github.com/minio/highwayhash v1.0.2 // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/go-testing-interface v1.14.1 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.16.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/rakyll/statik v0.1.7 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/rs/cors v1.8.2 // indirect + github.com/rs/zerolog v1.32.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sasha-s/go-deadlock v0.3.1 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect + github.com/tendermint/go-amino v0.16.0 // indirect + github.com/tidwall/btree v1.6.0 // indirect + github.com/ulikunitz/xz v0.5.11 // indirect + github.com/zondax/hid v0.9.2 // indirect + github.com/zondax/ledger-go v0.14.3 // indirect + go.etcd.io/bbolt v1.3.7 // indirect + go.opencensus.io v0.24.0 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // 
indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect + go.step.sm/crypto v0.45.1 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/multierr v1.9.0 // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/net v0.26.0 // indirect + golang.org/x/oauth2 v0.20.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.21.0 // indirect + golang.org/x/term v0.21.0 // indirect + golang.org/x/text v0.16.0 // indirect + golang.org/x/time v0.5.0 // indirect + google.golang.org/api v0.181.0 // indirect + google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 // indirect + google.golang.org/protobuf v1.34.1 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/api v0.30.1 // indirect + k8s.io/apimachinery v0.30.1 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + nhooyr.io/websocket v1.8.6 // indirect + pgregory.net/rapid v1.1.0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/go/cli/go.sum b/go/cli/go.sum new file mode 100644 index 00000000..73aa408d --- /dev/null +++ b/go/cli/go.sum @@ -0,0 +1,2254 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod 
h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= +cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4= +cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc= +cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA= +cloud.google.com/go v0.100.1/go.mod h1:fs4QogzfH5n2pBXBP9vRiU+eCny7lD2vmFZy79Iuw1U= +cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A= +cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc= +cloud.google.com/go v0.102.1/go.mod h1:XZ77E9qnTEnrgEOvr4xzfdX5TRo7fB4T2F4O6+34hIU= +cloud.google.com/go v0.104.0/go.mod h1:OO6xxXdJyvuJPcEPBLN9BJPD+jep5G1+2U5B5gkRYtA= +cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= +cloud.google.com/go v0.113.0 h1:g3C70mn3lWfckKBiCVsAshabrDg01pQ0pnX1MNtnMkA= +cloud.google.com/go v0.113.0/go.mod h1:glEqlogERKYeePz6ZdkcLJ28Q2I6aERgDDErBg9GzO8= +cloud.google.com/go/accessapproval v1.4.0/go.mod h1:zybIuC3KpDOvotz59lFe5qxRZx6C75OtwbisN56xYB4= +cloud.google.com/go/accessapproval v1.5.0/go.mod h1:HFy3tuiGvMdcd/u+Cu5b9NkO1pEICJ46IR82PoUdplw= +cloud.google.com/go/accessapproval v1.6.0/go.mod h1:R0EiYnwV5fsRFiKZkPHr6mwyk2wxUJ30nL4j2pcFY2E= +cloud.google.com/go/accesscontextmanager v1.3.0/go.mod h1:TgCBehyr5gNMz7ZaH9xubp+CE8dkrszb4oK9CWyvD4o= +cloud.google.com/go/accesscontextmanager v1.4.0/go.mod h1:/Kjh7BBu/Gh83sv+K60vN9QE5NJcd80sU33vIe2IFPE= +cloud.google.com/go/accesscontextmanager v1.6.0/go.mod h1:8XCvZWfYw3K/ji0iVnp+6pu7huxoQTLmxAbVjbloTtM= +cloud.google.com/go/aiplatform v1.22.0/go.mod h1:ig5Nct50bZlzV6NvKaTwmplLLddFx0YReh9WfTO5jKw= +cloud.google.com/go/aiplatform v1.24.0/go.mod h1:67UUvRBKG6GTayHKV8DBv2RtR1t93YRu5B1P3x99mYY= +cloud.google.com/go/aiplatform v1.27.0/go.mod h1:Bvxqtl40l0WImSb04d0hXFU7gDOiq9jQmorivIiWcKg= +cloud.google.com/go/aiplatform v1.35.0/go.mod h1:7MFT/vCaOyZT/4IIFfxH4ErVg/4ku6lKv3w0+tFTgXQ= +cloud.google.com/go/analytics v0.11.0/go.mod h1:DjEWCu41bVbYcKyvlws9Er60YE4a//bK6mnhWvQeFNI= +cloud.google.com/go/analytics v0.12.0/go.mod h1:gkfj9h6XRf9+TS4bmuhPEShsh3hH8PAZzm/41OOhQd4= +cloud.google.com/go/analytics v0.18.0/go.mod h1:ZkeHGQlcIPkw0R/GW+boWHhCOR43xz9RN/jn7WcqfIE= +cloud.google.com/go/apigateway v1.3.0/go.mod h1:89Z8Bhpmxu6AmUxuVRg/ECRGReEdiP3vQtk4Z1J9rJk= +cloud.google.com/go/apigateway v1.4.0/go.mod h1:pHVY9MKGaH9PQ3pJ4YLzoj6U5FUDeDFBllIz7WmzJoc= +cloud.google.com/go/apigateway v1.5.0/go.mod h1:GpnZR3Q4rR7LVu5951qfXPJCHquZt02jf7xQx7kpqN8= +cloud.google.com/go/apigeeconnect v1.3.0/go.mod h1:G/AwXFAKo0gIXkPTVfZDd2qA1TxBXJ3MgMRBQkIi9jc= +cloud.google.com/go/apigeeconnect v1.4.0/go.mod h1:kV4NwOKqjvt2JYR0AoIWo2QGfoRtn/pkS3QlHp0Ni04= +cloud.google.com/go/apigeeconnect v1.5.0/go.mod h1:KFaCqvBRU6idyhSNyn3vlHXc8VMDJdRmwDF6JyFRqZ8= +cloud.google.com/go/apigeeregistry v0.4.0/go.mod h1:EUG4PGcsZvxOXAdyEghIdXwAEi/4MEaoqLMLDMIwKXY= +cloud.google.com/go/apigeeregistry v0.5.0/go.mod h1:YR5+s0BVNZfVOUkMa5pAR2xGd0A473vA5M7j247o1wM= +cloud.google.com/go/apikeys v0.4.0/go.mod h1:XATS/yqZbaBK0HOssf+ALHp8jAlNHUgyfprvNcBIszU= +cloud.google.com/go/apikeys 
v0.5.0/go.mod h1:5aQfwY4D+ewMMWScd3hm2en3hCj+BROlyrt3ytS7KLI= +cloud.google.com/go/appengine v1.4.0/go.mod h1:CS2NhuBuDXM9f+qscZ6V86m1MIIqPj3WC/UoEuR1Sno= +cloud.google.com/go/appengine v1.5.0/go.mod h1:TfasSozdkFI0zeoxW3PTBLiNqRmzraodCWatWI9Dmak= +cloud.google.com/go/appengine v1.6.0/go.mod h1:hg6i0J/BD2cKmDJbaFSYHFyZkgBEfQrDg/X0V5fJn84= +cloud.google.com/go/area120 v0.5.0/go.mod h1:DE/n4mp+iqVyvxHN41Vf1CR602GiHQjFPusMFW6bGR4= +cloud.google.com/go/area120 v0.6.0/go.mod h1:39yFJqWVgm0UZqWTOdqkLhjoC7uFfgXRC8g/ZegeAh0= +cloud.google.com/go/area120 v0.7.1/go.mod h1:j84i4E1RboTWjKtZVWXPqvK5VHQFJRF2c1Nm69pWm9k= +cloud.google.com/go/artifactregistry v1.6.0/go.mod h1:IYt0oBPSAGYj/kprzsBjZ/4LnG/zOcHyFHjWPCi6SAQ= +cloud.google.com/go/artifactregistry v1.7.0/go.mod h1:mqTOFOnGZx8EtSqK/ZWcsm/4U8B77rbcLP6ruDU2Ixk= +cloud.google.com/go/artifactregistry v1.8.0/go.mod h1:w3GQXkJX8hiKN0v+at4b0qotwijQbYUqF2GWkZzAhC0= +cloud.google.com/go/artifactregistry v1.9.0/go.mod h1:2K2RqvA2CYvAeARHRkLDhMDJ3OXy26h3XW+3/Jh2uYc= +cloud.google.com/go/artifactregistry v1.11.2/go.mod h1:nLZns771ZGAwVLzTX/7Al6R9ehma4WUEhZGWV6CeQNQ= +cloud.google.com/go/asset v1.5.0/go.mod h1:5mfs8UvcM5wHhqtSv8J1CtxxaQq3AdBxxQi2jGW/K4o= +cloud.google.com/go/asset v1.7.0/go.mod h1:YbENsRK4+xTiL+Ofoj5Ckf+O17kJtgp3Y3nn4uzZz5s= +cloud.google.com/go/asset v1.8.0/go.mod h1:mUNGKhiqIdbr8X7KNayoYvyc4HbbFO9URsjbytpUaW0= +cloud.google.com/go/asset v1.9.0/go.mod h1:83MOE6jEJBMqFKadM9NLRcs80Gdw76qGuHn8m3h8oHQ= +cloud.google.com/go/asset v1.10.0/go.mod h1:pLz7uokL80qKhzKr4xXGvBQXnzHn5evJAEAtZiIb0wY= +cloud.google.com/go/asset v1.11.1/go.mod h1:fSwLhbRvC9p9CXQHJ3BgFeQNM4c9x10lqlrdEUYXlJo= +cloud.google.com/go/assuredworkloads v1.5.0/go.mod h1:n8HOZ6pff6re5KYfBXcFvSViQjDwxFkAkmUFffJRbbY= +cloud.google.com/go/assuredworkloads v1.6.0/go.mod h1:yo2YOk37Yc89Rsd5QMVECvjaMKymF9OP+QXWlKXUkXw= +cloud.google.com/go/assuredworkloads v1.7.0/go.mod h1:z/736/oNmtGAyU47reJgGN+KVoYoxeLBoj4XkKYscNI= +cloud.google.com/go/assuredworkloads v1.8.0/go.mod h1:AsX2cqyNCOvEQC8RMPnoc0yEarXQk6WEKkxYfL6kGIo= +cloud.google.com/go/assuredworkloads v1.9.0/go.mod h1:kFuI1P78bplYtT77Tb1hi0FMxM0vVpRC7VVoJC3ZoT0= +cloud.google.com/go/assuredworkloads v1.10.0/go.mod h1:kwdUQuXcedVdsIaKgKTp9t0UJkE5+PAVNhdQm4ZVq2E= +cloud.google.com/go/auth v0.4.1 h1:Z7YNIhlWRtrnKlZke7z3GMqzvuYzdc2z98F9D1NV5Hg= +cloud.google.com/go/auth v0.4.1/go.mod h1:QVBuVEKpCn4Zp58hzRGvL0tjRGU0YqdRTdCHM1IHnro= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/automl v1.5.0/go.mod h1:34EjfoFGMZ5sgJ9EoLsRtdPSNZLcfflJR39VbVNS2M0= +cloud.google.com/go/automl v1.6.0/go.mod h1:ugf8a6Fx+zP0D59WLhqgTDsQI9w07o64uf/Is3Nh5p8= +cloud.google.com/go/automl v1.7.0/go.mod h1:RL9MYCCsJEOmt0Wf3z9uzG0a7adTT1fe+aObgSpkCt8= +cloud.google.com/go/automl v1.8.0/go.mod h1:xWx7G/aPEe/NP+qzYXktoBSDfjO+vnKMGgsApGJJquM= +cloud.google.com/go/automl v1.12.0/go.mod h1:tWDcHDp86aMIuHmyvjuKeeHEGq76lD7ZqfGLN6B0NuU= +cloud.google.com/go/baremetalsolution v0.3.0/go.mod h1:XOrocE+pvK1xFfleEnShBlNAXf+j5blPPxrhjKgnIFc= +cloud.google.com/go/baremetalsolution v0.4.0/go.mod h1:BymplhAadOO/eBa7KewQ0Ppg4A4Wplbn+PsFKRLo0uI= +cloud.google.com/go/baremetalsolution v0.5.0/go.mod h1:dXGxEkmR9BMwxhzBhV0AioD0ULBmuLZI8CdwalUxuss= +cloud.google.com/go/batch v0.3.0/go.mod h1:TR18ZoAekj1GuirsUsR1ZTKN3FC/4UDnScjT8NXImFE= +cloud.google.com/go/batch v0.4.0/go.mod h1:WZkHnP43R/QCGQsZ+0JyG4i79ranE2u8xvjq/9+STPE= 
+cloud.google.com/go/batch v0.7.0/go.mod h1:vLZN95s6teRUqRQ4s3RLDsH8PvboqBK+rn1oevL159g= +cloud.google.com/go/beyondcorp v0.2.0/go.mod h1:TB7Bd+EEtcw9PCPQhCJtJGjk/7TC6ckmnSFS+xwTfm4= +cloud.google.com/go/beyondcorp v0.3.0/go.mod h1:E5U5lcrcXMsCuoDNyGrpyTm/hn7ne941Jz2vmksAxW8= +cloud.google.com/go/beyondcorp v0.4.0/go.mod h1:3ApA0mbhHx6YImmuubf5pyW8srKnCEPON32/5hj+RmM= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/bigquery v1.42.0/go.mod h1:8dRTJxhtG+vwBKzE5OseQn/hiydoQN3EedCaOdYmxRA= +cloud.google.com/go/bigquery v1.43.0/go.mod h1:ZMQcXHsl+xmU1z36G2jNGZmKp9zNY5BUua5wDgmNCfw= +cloud.google.com/go/bigquery v1.44.0/go.mod h1:0Y33VqXTEsbamHJvJHdFmtqHvMIY28aK1+dFsvaChGc= +cloud.google.com/go/bigquery v1.48.0/go.mod h1:QAwSz+ipNgfL5jxiaK7weyOhzdoAy1zFm0Nf1fysJac= +cloud.google.com/go/billing v1.4.0/go.mod h1:g9IdKBEFlItS8bTtlrZdVLWSSdSyFUZKXNS02zKMOZY= +cloud.google.com/go/billing v1.5.0/go.mod h1:mztb1tBc3QekhjSgmpf/CV4LzWXLzCArwpLmP2Gm88s= +cloud.google.com/go/billing v1.6.0/go.mod h1:WoXzguj+BeHXPbKfNWkqVtDdzORazmCjraY+vrxcyvI= +cloud.google.com/go/billing v1.7.0/go.mod h1:q457N3Hbj9lYwwRbnlD7vUpyjq6u5U1RAOArInEiD5Y= +cloud.google.com/go/billing v1.12.0/go.mod h1:yKrZio/eu+okO/2McZEbch17O5CB5NpZhhXG6Z766ss= +cloud.google.com/go/binaryauthorization v1.1.0/go.mod h1:xwnoWu3Y84jbuHa0zd526MJYmtnVXn0syOjaJgy4+dM= +cloud.google.com/go/binaryauthorization v1.2.0/go.mod h1:86WKkJHtRcv5ViNABtYMhhNWRrD1Vpi//uKEy7aYEfI= +cloud.google.com/go/binaryauthorization v1.3.0/go.mod h1:lRZbKgjDIIQvzYQS1p99A7/U1JqvqeZg0wiI5tp6tg0= +cloud.google.com/go/binaryauthorization v1.4.0/go.mod h1:tsSPQrBd77VLplV70GUhBf/Zm3FsKmgSqgm4UmiDItk= +cloud.google.com/go/binaryauthorization v1.5.0/go.mod h1:OSe4OU1nN/VswXKRBmciKpo9LulY41gch5c68htf3/Q= +cloud.google.com/go/certificatemanager v1.3.0/go.mod h1:n6twGDvcUBFu9uBgt4eYvvf3sQ6My8jADcOVwHmzadg= +cloud.google.com/go/certificatemanager v1.4.0/go.mod h1:vowpercVFyqs8ABSmrdV+GiFf2H/ch3KyudYQEMM590= +cloud.google.com/go/certificatemanager v1.6.0/go.mod h1:3Hh64rCKjRAX8dXgRAyOcY5vQ/fE1sh8o+Mdd6KPgY8= +cloud.google.com/go/channel v1.8.0/go.mod h1:W5SwCXDJsq/rg3tn3oG0LOxpAo6IMxNa09ngphpSlnk= +cloud.google.com/go/channel v1.9.0/go.mod h1:jcu05W0my9Vx4mt3/rEHpfxc9eKi9XwsdDL8yBMbKUk= +cloud.google.com/go/channel v1.11.0/go.mod h1:IdtI0uWGqhEeatSB62VOoJ8FSUhJ9/+iGkJVqp74CGE= +cloud.google.com/go/cloudbuild v1.3.0/go.mod h1:WequR4ULxlqvMsjDEEEFnOG5ZSRSgWOywXYDb1vPE6U= +cloud.google.com/go/cloudbuild v1.4.0/go.mod h1:5Qwa40LHiOXmz3386FrjrYM93rM/hdRr7b53sySrTqA= +cloud.google.com/go/cloudbuild v1.7.0/go.mod h1:zb5tWh2XI6lR9zQmsm1VRA+7OCuve5d8S+zJUul8KTg= +cloud.google.com/go/clouddms v1.3.0/go.mod h1:oK6XsCDdW4Ib3jCCBugx+gVjevp2TMXFtgxvPSee3OM= +cloud.google.com/go/clouddms v1.4.0/go.mod h1:Eh7sUGCC+aKry14O1NRljhjyrr0NFC0G2cjwX0cByRk= +cloud.google.com/go/clouddms v1.5.0/go.mod h1:QSxQnhikCLUw13iAbffF2CZxAER3xDGNHjsTAkQJcQA= +cloud.google.com/go/cloudtasks v1.5.0/go.mod h1:fD92REy1x5woxkKEkLdvavGnPJGEn8Uic9nWuLzqCpY= +cloud.google.com/go/cloudtasks v1.6.0/go.mod 
h1:C6Io+sxuke9/KNRkbQpihnW93SWDU3uXt92nu85HkYI= +cloud.google.com/go/cloudtasks v1.7.0/go.mod h1:ImsfdYWwlWNJbdgPIIGJWC+gemEGTBK/SunNQQNCAb4= +cloud.google.com/go/cloudtasks v1.8.0/go.mod h1:gQXUIwCSOI4yPVK7DgTVFiiP0ZW/eQkydWzwVMdHxrI= +cloud.google.com/go/cloudtasks v1.9.0/go.mod h1:w+EyLsVkLWHcOaqNEyvcKAsWp9p29dL6uL9Nst1cI7Y= +cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= +cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= +cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M= +cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s= +cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= +cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U= +cloud.google.com/go/compute v1.10.0/go.mod h1:ER5CLbMxl90o2jtNbGSbtfOpQKR0t15FOtRsugnLrlU= +cloud.google.com/go/compute v1.12.0/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.12.1/go.mod h1:e8yNOBcBONZU1vJKCvCoDw/4JQsA0dpM4x/6PIIOocU= +cloud.google.com/go/compute v1.13.0/go.mod h1:5aPTS0cUNMIc1CE546K+Th6weJUNQErARyZtRXDJ8GE= +cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.15.1/go.mod h1:bjjoF/NtFUrkD/urWfdHaKuOPDR5nWIs63rR+SXhcpA= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute/metadata v0.1.0/go.mod h1:Z1VN+bulIf6bt4P/C37K4DyZYZEXYonfTBHHFPO/4UU= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= +cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= +cloud.google.com/go/contactcenterinsights v1.3.0/go.mod h1:Eu2oemoePuEFc/xKFPjbTuPSj0fYJcPls9TFlPNnHHY= +cloud.google.com/go/contactcenterinsights v1.4.0/go.mod h1:L2YzkGbPsv+vMQMCADxJoT9YiTTnSEd6fEvCeHTYVck= +cloud.google.com/go/contactcenterinsights v1.6.0/go.mod h1:IIDlT6CLcDoyv79kDv8iWxMSTZhLxSCofVV5W6YFM/w= +cloud.google.com/go/container v1.6.0/go.mod h1:Xazp7GjJSeUYo688S+6J5V+n/t+G5sKBTFkKNudGRxg= +cloud.google.com/go/container v1.7.0/go.mod h1:Dp5AHtmothHGX3DwwIHPgq45Y8KmNsgN3amoYfxVkLo= +cloud.google.com/go/container v1.13.1/go.mod h1:6wgbMPeQRw9rSnKBCAJXnds3Pzj03C4JHamr8asWKy4= +cloud.google.com/go/containeranalysis v0.5.1/go.mod h1:1D92jd8gRR/c0fGMlymRgxWD3Qw9C1ff6/T7mLgVL8I= +cloud.google.com/go/containeranalysis v0.6.0/go.mod h1:HEJoiEIu+lEXM+k7+qLCci0h33lX3ZqoYFdmPcoO7s4= +cloud.google.com/go/containeranalysis v0.7.0/go.mod h1:9aUL+/vZ55P2CXfuZjS4UjQ9AgXoSw8Ts6lemfmxBxI= +cloud.google.com/go/datacatalog v1.3.0/go.mod h1:g9svFY6tuR+j+hrTw3J2dNcmI0dzmSiyOzm8kpLq0a0= +cloud.google.com/go/datacatalog v1.5.0/go.mod h1:M7GPLNQeLfWqeIm3iuiruhPzkt65+Bx8dAKvScX8jvs= +cloud.google.com/go/datacatalog v1.6.0/go.mod h1:+aEyF8JKg+uXcIdAmmaMUmZ3q1b/lKLtXCmXdnc0lbc= +cloud.google.com/go/datacatalog v1.7.0/go.mod h1:9mEl4AuDYWw81UGc41HonIHH7/sn52H0/tc8f8ZbZIE= +cloud.google.com/go/datacatalog v1.8.0/go.mod h1:KYuoVOv9BM8EYz/4eMFxrr4DUKhGIOXxZoKYF5wdISM= +cloud.google.com/go/datacatalog v1.8.1/go.mod 
h1:RJ58z4rMp3gvETA465Vg+ag8BGgBdnRPEMMSTr5Uv+M= +cloud.google.com/go/datacatalog v1.12.0/go.mod h1:CWae8rFkfp6LzLumKOnmVh4+Zle4A3NXLzVJ1d1mRm0= +cloud.google.com/go/dataflow v0.6.0/go.mod h1:9QwV89cGoxjjSR9/r7eFDqqjtvbKxAK2BaYU6PVk9UM= +cloud.google.com/go/dataflow v0.7.0/go.mod h1:PX526vb4ijFMesO1o202EaUmouZKBpjHsTlCtB4parQ= +cloud.google.com/go/dataflow v0.8.0/go.mod h1:Rcf5YgTKPtQyYz8bLYhFoIV/vP39eL7fWNcSOyFfLJE= +cloud.google.com/go/dataform v0.3.0/go.mod h1:cj8uNliRlHpa6L3yVhDOBrUXH+BPAO1+KFMQQNSThKo= +cloud.google.com/go/dataform v0.4.0/go.mod h1:fwV6Y4Ty2yIFL89huYlEkwUPtS7YZinZbzzj5S9FzCE= +cloud.google.com/go/dataform v0.5.0/go.mod h1:GFUYRe8IBa2hcomWplodVmUx/iTL0FrsauObOM3Ipr0= +cloud.google.com/go/dataform v0.6.0/go.mod h1:QPflImQy33e29VuapFdf19oPbE4aYTJxr31OAPV+ulA= +cloud.google.com/go/datafusion v1.4.0/go.mod h1:1Zb6VN+W6ALo85cXnM1IKiPw+yQMKMhB9TsTSRDo/38= +cloud.google.com/go/datafusion v1.5.0/go.mod h1:Kz+l1FGHB0J+4XF2fud96WMmRiq/wj8N9u007vyXZ2w= +cloud.google.com/go/datafusion v1.6.0/go.mod h1:WBsMF8F1RhSXvVM8rCV3AeyWVxcC2xY6vith3iw3S+8= +cloud.google.com/go/datalabeling v0.5.0/go.mod h1:TGcJ0G2NzcsXSE/97yWjIZO0bXj0KbVlINXMG9ud42I= +cloud.google.com/go/datalabeling v0.6.0/go.mod h1:WqdISuk/+WIGeMkpw/1q7bK/tFEZxsrFJOJdY2bXvTQ= +cloud.google.com/go/datalabeling v0.7.0/go.mod h1:WPQb1y08RJbmpM3ww0CSUAGweL0SxByuW2E+FU+wXcM= +cloud.google.com/go/dataplex v1.3.0/go.mod h1:hQuRtDg+fCiFgC8j0zV222HvzFQdRd+SVX8gdmFcZzA= +cloud.google.com/go/dataplex v1.4.0/go.mod h1:X51GfLXEMVJ6UN47ESVqvlsRplbLhcsAt0kZCCKsU0A= +cloud.google.com/go/dataplex v1.5.2/go.mod h1:cVMgQHsmfRoI5KFYq4JtIBEUbYwc3c7tXmIDhRmNNVQ= +cloud.google.com/go/dataproc v1.7.0/go.mod h1:CKAlMjII9H90RXaMpSxQ8EU6dQx6iAYNPcYPOkSbi8s= +cloud.google.com/go/dataproc v1.8.0/go.mod h1:5OW+zNAH0pMpw14JVrPONsxMQYMBqJuzORhIBfBn9uI= +cloud.google.com/go/dataproc v1.12.0/go.mod h1:zrF3aX0uV3ikkMz6z4uBbIKyhRITnxvr4i3IjKsKrw4= +cloud.google.com/go/dataqna v0.5.0/go.mod h1:90Hyk596ft3zUQ8NkFfvICSIfHFh1Bc7C4cK3vbhkeo= +cloud.google.com/go/dataqna v0.6.0/go.mod h1:1lqNpM7rqNLVgWBJyk5NF6Uen2PHym0jtVJonplVsDA= +cloud.google.com/go/dataqna v0.7.0/go.mod h1:Lx9OcIIeqCrw1a6KdO3/5KMP1wAmTc0slZWwP12Qq3c= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/datastore v1.10.0/go.mod h1:PC5UzAmDEkAmkfaknstTYbNpgE49HAgW2J1gcgUfmdM= +cloud.google.com/go/datastream v1.2.0/go.mod h1:i/uTP8/fZwgATHS/XFu0TcNUhuA0twZxxQ3EyCUQMwo= +cloud.google.com/go/datastream v1.3.0/go.mod h1:cqlOX8xlyYF/uxhiKn6Hbv6WjwPPuI9W2M9SAXwaLLQ= +cloud.google.com/go/datastream v1.4.0/go.mod h1:h9dpzScPhDTs5noEMQVWP8Wx8AFBRyS0s8KWPx/9r0g= +cloud.google.com/go/datastream v1.5.0/go.mod h1:6TZMMNPwjUqZHBKPQ1wwXpb0d5VDVPl2/XoS5yi88q4= +cloud.google.com/go/datastream v1.6.0/go.mod h1:6LQSuswqLa7S4rPAOZFVjHIG3wJIjZcZrw8JDEDJuIs= +cloud.google.com/go/deploy v1.4.0/go.mod h1:5Xghikd4VrmMLNaF6FiRFDlHb59VM59YoDQnOUdsH/c= +cloud.google.com/go/deploy v1.5.0/go.mod h1:ffgdD0B89tToyW/U/D2eL0jN2+IEV/3EMuXHA0l4r+s= +cloud.google.com/go/deploy v1.6.0/go.mod h1:f9PTHehG/DjCom3QH0cntOVRm93uGBDt2vKzAPwpXQI= +cloud.google.com/go/dialogflow v1.15.0/go.mod h1:HbHDWs33WOGJgn6rfzBW1Kv807BE3O1+xGbn59zZWI4= +cloud.google.com/go/dialogflow v1.16.1/go.mod h1:po6LlzGfK+smoSmTBnbkIZY2w8ffjz/RcGSS+sh1el0= +cloud.google.com/go/dialogflow v1.17.0/go.mod h1:YNP09C/kXA1aZdBgC/VtXX74G/TKn7XVCcVumTflA+8= +cloud.google.com/go/dialogflow v1.18.0/go.mod 
h1:trO7Zu5YdyEuR+BhSNOqJezyFQ3aUzz0njv7sMx/iek= +cloud.google.com/go/dialogflow v1.19.0/go.mod h1:JVmlG1TwykZDtxtTXujec4tQ+D8SBFMoosgy+6Gn0s0= +cloud.google.com/go/dialogflow v1.29.0/go.mod h1:b+2bzMe+k1s9V+F2jbJwpHPzrnIyHihAdRFMtn2WXuM= +cloud.google.com/go/dialogflow v1.31.0/go.mod h1:cuoUccuL1Z+HADhyIA7dci3N5zUssgpBJmCzI6fNRB4= +cloud.google.com/go/dlp v1.6.0/go.mod h1:9eyB2xIhpU0sVwUixfBubDoRwP+GjeUoxxeueZmqvmM= +cloud.google.com/go/dlp v1.7.0/go.mod h1:68ak9vCiMBjbasxeVD17hVPxDEck+ExiHavX8kiHG+Q= +cloud.google.com/go/dlp v1.9.0/go.mod h1:qdgmqgTyReTz5/YNSSuueR8pl7hO0o9bQ39ZhtgkWp4= +cloud.google.com/go/documentai v1.7.0/go.mod h1:lJvftZB5NRiFSX4moiye1SMxHx0Bc3x1+p9e/RfXYiU= +cloud.google.com/go/documentai v1.8.0/go.mod h1:xGHNEB7CtsnySCNrCFdCyyMz44RhFEEX2Q7UD0c5IhU= +cloud.google.com/go/documentai v1.9.0/go.mod h1:FS5485S8R00U10GhgBC0aNGrJxBP8ZVpEeJ7PQDZd6k= +cloud.google.com/go/documentai v1.10.0/go.mod h1:vod47hKQIPeCfN2QS/jULIvQTugbmdc0ZvxxfQY1bg4= +cloud.google.com/go/documentai v1.16.0/go.mod h1:o0o0DLTEZ+YnJZ+J4wNfTxmDVyrkzFvttBXXtYRMHkM= +cloud.google.com/go/domains v0.6.0/go.mod h1:T9Rz3GasrpYk6mEGHh4rymIhjlnIuB4ofT1wTxDeT4Y= +cloud.google.com/go/domains v0.7.0/go.mod h1:PtZeqS1xjnXuRPKE/88Iru/LdfoRyEHYA9nFQf4UKpg= +cloud.google.com/go/domains v0.8.0/go.mod h1:M9i3MMDzGFXsydri9/vW+EWz9sWb4I6WyHqdlAk0idE= +cloud.google.com/go/edgecontainer v0.1.0/go.mod h1:WgkZ9tp10bFxqO8BLPqv2LlfmQF1X8lZqwW4r1BTajk= +cloud.google.com/go/edgecontainer v0.2.0/go.mod h1:RTmLijy+lGpQ7BXuTDa4C4ssxyXT34NIuHIgKuP4s5w= +cloud.google.com/go/edgecontainer v0.3.0/go.mod h1:FLDpP4nykgwwIfcLt6zInhprzw0lEi2P1fjO6Ie0qbc= +cloud.google.com/go/errorreporting v0.3.0/go.mod h1:xsP2yaAp+OAW4OIm60An2bbLpqIhKXdWR/tawvl7QzU= +cloud.google.com/go/essentialcontacts v1.3.0/go.mod h1:r+OnHa5jfj90qIfZDO/VztSFqbQan7HV75p8sA+mdGI= +cloud.google.com/go/essentialcontacts v1.4.0/go.mod h1:8tRldvHYsmnBCHdFpvU+GL75oWiBKl80BiqlFh9tp+8= +cloud.google.com/go/essentialcontacts v1.5.0/go.mod h1:ay29Z4zODTuwliK7SnX8E86aUF2CTzdNtvv42niCX0M= +cloud.google.com/go/eventarc v1.7.0/go.mod h1:6ctpF3zTnaQCxUjHUdcfgcA1A2T309+omHZth7gDfmc= +cloud.google.com/go/eventarc v1.8.0/go.mod h1:imbzxkyAU4ubfsaKYdQg04WS1NvncblHEup4kvF+4gw= +cloud.google.com/go/eventarc v1.10.0/go.mod h1:u3R35tmZ9HvswGRBnF48IlYgYeBcPUCjkr4BTdem2Kw= +cloud.google.com/go/filestore v1.3.0/go.mod h1:+qbvHGvXU1HaKX2nD0WEPo92TP/8AQuCVEBXNY9z0+w= +cloud.google.com/go/filestore v1.4.0/go.mod h1:PaG5oDfo9r224f8OYXURtAsY+Fbyq/bLYoINEK8XQAI= +cloud.google.com/go/filestore v1.5.0/go.mod h1:FqBXDWBp4YLHqRnVGveOkHDf8svj9r5+mUDLupOWEDs= +cloud.google.com/go/firestore v1.9.0/go.mod h1:HMkjKHNTtRyZNiMzu7YAsLr9K3X2udY2AMwDaMEQiiE= +cloud.google.com/go/functions v1.6.0/go.mod h1:3H1UA3qiIPRWD7PeZKLvHZ9SaQhR26XIJcC0A5GbvAk= +cloud.google.com/go/functions v1.7.0/go.mod h1:+d+QBcWM+RsrgZfV9xo6KfA1GlzJfxcfZcRPEhDDfzg= +cloud.google.com/go/functions v1.8.0/go.mod h1:RTZ4/HsQjIqIYP9a9YPbU+QFoQsAlYgrwOXJWHn1POY= +cloud.google.com/go/functions v1.9.0/go.mod h1:Y+Dz8yGguzO3PpIjhLTbnqV1CWmgQ5UwtlpzoyquQ08= +cloud.google.com/go/functions v1.10.0/go.mod h1:0D3hEOe3DbEvCXtYOZHQZmD+SzYsi1YbI7dGvHfldXw= +cloud.google.com/go/gaming v1.5.0/go.mod h1:ol7rGcxP/qHTRQE/RO4bxkXq+Fix0j6D4LFPzYTIrDM= +cloud.google.com/go/gaming v1.6.0/go.mod h1:YMU1GEvA39Qt3zWGyAVA9bpYz/yAhTvaQ1t2sK4KPUA= +cloud.google.com/go/gaming v1.7.0/go.mod h1:LrB8U7MHdGgFG851iHAfqUdLcKBdQ55hzXy9xBJz0+w= +cloud.google.com/go/gaming v1.8.0/go.mod h1:xAqjS8b7jAVW0KFYeRUxngo9My3f33kFmua++Pi+ggM= +cloud.google.com/go/gaming v1.9.0/go.mod 
h1:Fc7kEmCObylSWLO334NcO+O9QMDyz+TKC4v1D7X+Bc0= +cloud.google.com/go/gkebackup v0.2.0/go.mod h1:XKvv/4LfG829/B8B7xRkk8zRrOEbKtEam6yNfuQNH60= +cloud.google.com/go/gkebackup v0.3.0/go.mod h1:n/E671i1aOQvUxT541aTkCwExO/bTer2HDlj4TsBRAo= +cloud.google.com/go/gkebackup v0.4.0/go.mod h1:byAyBGUwYGEEww7xsbnUTBHIYcOPy/PgUWUtOeRm9Vg= +cloud.google.com/go/gkeconnect v0.5.0/go.mod h1:c5lsNAg5EwAy7fkqX/+goqFsU1Da/jQFqArp+wGNr/o= +cloud.google.com/go/gkeconnect v0.6.0/go.mod h1:Mln67KyU/sHJEBY8kFZ0xTeyPtzbq9StAVvEULYK16A= +cloud.google.com/go/gkeconnect v0.7.0/go.mod h1:SNfmVqPkaEi3bF/B3CNZOAYPYdg7sU+obZ+QTky2Myw= +cloud.google.com/go/gkehub v0.9.0/go.mod h1:WYHN6WG8w9bXU0hqNxt8rm5uxnk8IH+lPY9J2TV7BK0= +cloud.google.com/go/gkehub v0.10.0/go.mod h1:UIPwxI0DsrpsVoWpLB0stwKCP+WFVG9+y977wO+hBH0= +cloud.google.com/go/gkehub v0.11.0/go.mod h1:JOWHlmN+GHyIbuWQPl47/C2RFhnFKH38jH9Ascu3n0E= +cloud.google.com/go/gkemulticloud v0.3.0/go.mod h1:7orzy7O0S+5kq95e4Hpn7RysVA7dPs8W/GgfUtsPbrA= +cloud.google.com/go/gkemulticloud v0.4.0/go.mod h1:E9gxVBnseLWCk24ch+P9+B2CoDFJZTyIgLKSalC7tuI= +cloud.google.com/go/gkemulticloud v0.5.0/go.mod h1:W0JDkiyi3Tqh0TJr//y19wyb1yf8llHVto2Htf2Ja3Y= +cloud.google.com/go/grafeas v0.2.0/go.mod h1:KhxgtF2hb0P191HlY5besjYm6MqTSTj3LSI+M+ByZHc= +cloud.google.com/go/gsuiteaddons v1.3.0/go.mod h1:EUNK/J1lZEZO8yPtykKxLXI6JSVN2rg9bN8SXOa0bgM= +cloud.google.com/go/gsuiteaddons v1.4.0/go.mod h1:rZK5I8hht7u7HxFQcFei0+AtfS9uSushomRlg+3ua1o= +cloud.google.com/go/gsuiteaddons v1.5.0/go.mod h1:TFCClYLd64Eaa12sFVmUyG62tk4mdIsI7pAnSXRkcFo= +cloud.google.com/go/iam v0.1.0/go.mod h1:vcUNEa0pEm0qRVpmWepWaFMIAI8/hjB9mO8rNCJtF6c= +cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= +cloud.google.com/go/iam v0.5.0/go.mod h1:wPU9Vt0P4UmCux7mqtRu6jcpPAb74cP1fh50J3QpkUc= +cloud.google.com/go/iam v0.6.0/go.mod h1:+1AH33ueBne5MzYccyMHtEKqLE4/kJOibtffMHDMFMc= +cloud.google.com/go/iam v0.7.0/go.mod h1:H5Br8wRaDGNc8XP3keLc4unfUUZeyH3Sfl9XpQEYOeg= +cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= +cloud.google.com/go/iam v0.11.0/go.mod h1:9PiLDanza5D+oWFZiH1uG+RnRCfEGKoyl6yo4cgWZGY= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= +cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= +cloud.google.com/go/iap v1.4.0/go.mod h1:RGFwRJdihTINIe4wZ2iCP0zF/qu18ZwyKxrhMhygBEc= +cloud.google.com/go/iap v1.5.0/go.mod h1:UH/CGgKd4KyohZL5Pt0jSKE4m3FR51qg6FKQ/z/Ix9A= +cloud.google.com/go/iap v1.6.0/go.mod h1:NSuvI9C/j7UdjGjIde7t7HBz+QTwBcapPE07+sSRcLk= +cloud.google.com/go/ids v1.1.0/go.mod h1:WIuwCaYVOzHIj2OhN9HAwvW+DBdmUAdcWlFxRl+KubM= +cloud.google.com/go/ids v1.2.0/go.mod h1:5WXvp4n25S0rA/mQWAg1YEEBBq6/s+7ml1RDCW1IrcY= +cloud.google.com/go/ids v1.3.0/go.mod h1:JBdTYwANikFKaDP6LtW5JAi4gubs57SVNQjemdt6xV4= +cloud.google.com/go/iot v1.3.0/go.mod h1:r7RGh2B61+B8oz0AGE+J72AhA0G7tdXItODWsaA2oLs= +cloud.google.com/go/iot v1.4.0/go.mod h1:dIDxPOn0UvNDUMD8Ger7FIaTuvMkj+aGk94RPP0iV+g= +cloud.google.com/go/iot v1.5.0/go.mod h1:mpz5259PDl3XJthEmh9+ap0affn/MqNSP4My77Qql9o= +cloud.google.com/go/kms v1.4.0/go.mod h1:fajBHndQ+6ubNw6Ss2sSd+SWvjL26RNo/dr7uxsnnOA= +cloud.google.com/go/kms v1.5.0/go.mod h1:QJS2YY0eJGBg3mnDfuaCyLauWwBJiHRboYxJ++1xJNg= +cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= 
+cloud.google.com/go/language v1.4.0/go.mod h1:F9dRpNFQmJbkaop6g0JhSBXCNlO90e1KWx5iDdxbWic= +cloud.google.com/go/language v1.6.0/go.mod h1:6dJ8t3B+lUYfStgls25GusK04NLh3eDLQnWM3mdEbhI= +cloud.google.com/go/language v1.7.0/go.mod h1:DJ6dYN/W+SQOjF8e1hLQXMF21AkH2w9wiPzPCJa2MIE= +cloud.google.com/go/language v1.8.0/go.mod h1:qYPVHf7SPoNNiCL2Dr0FfEFNil1qi3pQEyygwpgVKB8= +cloud.google.com/go/language v1.9.0/go.mod h1:Ns15WooPM5Ad/5no/0n81yUetis74g3zrbeJBE+ptUY= +cloud.google.com/go/lifesciences v0.5.0/go.mod h1:3oIKy8ycWGPUyZDR/8RNnTOYevhaMLqh5vLUXs9zvT8= +cloud.google.com/go/lifesciences v0.6.0/go.mod h1:ddj6tSX/7BOnhxCSd3ZcETvtNr8NZ6t/iPhY2Tyfu08= +cloud.google.com/go/lifesciences v0.8.0/go.mod h1:lFxiEOMqII6XggGbOnKiyZ7IBwoIqA84ClvoezaA/bo= +cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= +cloud.google.com/go/logging v1.7.0/go.mod h1:3xjP2CjkM3ZkO73aj4ASA5wRPGGCRrPIAeNqVNkzY8M= +cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= +cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= +cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= +cloud.google.com/go/managedidentities v1.3.0/go.mod h1:UzlW3cBOiPrzucO5qWkNkh0w33KFtBJU281hacNvsdE= +cloud.google.com/go/managedidentities v1.4.0/go.mod h1:NWSBYbEMgqmbZsLIyKvxrYbtqOsxY1ZrGM+9RgDqInM= +cloud.google.com/go/managedidentities v1.5.0/go.mod h1:+dWcZ0JlUmpuxpIDfyP5pP5y0bLdRwOS4Lp7gMni/LA= +cloud.google.com/go/maps v0.1.0/go.mod h1:BQM97WGyfw9FWEmQMpZ5T6cpovXXSd1cGmFma94eubI= +cloud.google.com/go/maps v0.6.0/go.mod h1:o6DAMMfb+aINHz/p/jbcY+mYeXBoZoxTfdSQ8VAJaCw= +cloud.google.com/go/mediatranslation v0.5.0/go.mod h1:jGPUhGTybqsPQn91pNXw0xVHfuJ3leR1wj37oU3y1f4= +cloud.google.com/go/mediatranslation v0.6.0/go.mod h1:hHdBCTYNigsBxshbznuIMFNe5QXEowAuNmmC7h8pu5w= +cloud.google.com/go/mediatranslation v0.7.0/go.mod h1:LCnB/gZr90ONOIQLgSXagp8XUW1ODs2UmUMvcgMfI2I= +cloud.google.com/go/memcache v1.4.0/go.mod h1:rTOfiGZtJX1AaFUrOgsMHX5kAzaTQ8azHiuDoTPzNsE= +cloud.google.com/go/memcache v1.5.0/go.mod h1:dk3fCK7dVo0cUU2c36jKb4VqKPS22BTkf81Xq617aWM= +cloud.google.com/go/memcache v1.6.0/go.mod h1:XS5xB0eQZdHtTuTF9Hf8eJkKtR3pVRCcvJwtm68T3rA= +cloud.google.com/go/memcache v1.7.0/go.mod h1:ywMKfjWhNtkQTxrWxCkCFkoPjLHPW6A7WOTVI8xy3LY= +cloud.google.com/go/memcache v1.9.0/go.mod h1:8oEyzXCu+zo9RzlEaEjHl4KkgjlNDaXbCQeQWlzNFJM= +cloud.google.com/go/metastore v1.5.0/go.mod h1:2ZNrDcQwghfdtCwJ33nM0+GrBGlVuh8rakL3vdPY3XY= +cloud.google.com/go/metastore v1.6.0/go.mod h1:6cyQTls8CWXzk45G55x57DVQ9gWg7RiH65+YgPsNh9s= +cloud.google.com/go/metastore v1.7.0/go.mod h1:s45D0B4IlsINu87/AsWiEVYbLaIMeUSoxlKKDqBGFS8= +cloud.google.com/go/metastore v1.8.0/go.mod h1:zHiMc4ZUpBiM7twCIFQmJ9JMEkDSyZS9U12uf7wHqSI= +cloud.google.com/go/metastore v1.10.0/go.mod h1:fPEnH3g4JJAk+gMRnrAnoqyv2lpUCqJPWOodSaf45Eo= +cloud.google.com/go/monitoring v1.7.0/go.mod h1:HpYse6kkGo//7p6sT0wsIC6IBDET0RhIsnmlA53dvEk= +cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= +cloud.google.com/go/networkconnectivity v1.4.0/go.mod h1:nOl7YL8odKyAOtzNX73/M5/mGZgqqMeryi6UPZTk/rA= +cloud.google.com/go/networkconnectivity v1.5.0/go.mod h1:3GzqJx7uhtlM3kln0+x5wyFvuVH1pIBJjhCpjzSt75o= +cloud.google.com/go/networkconnectivity v1.6.0/go.mod h1:OJOoEXW+0LAxHh89nXd64uGG+FbQoeH8DtxCHVOMlaM= +cloud.google.com/go/networkconnectivity 
v1.7.0/go.mod h1:RMuSbkdbPwNMQjB5HBWD5MpTBnNm39iAVpC3TmsExt8= +cloud.google.com/go/networkconnectivity v1.10.0/go.mod h1:UP4O4sWXJG13AqrTdQCD9TnLGEbtNRqjuaaA7bNjF5E= +cloud.google.com/go/networkmanagement v1.4.0/go.mod h1:Q9mdLLRn60AsOrPc8rs8iNV6OHXaGcDdsIQe1ohekq8= +cloud.google.com/go/networkmanagement v1.5.0/go.mod h1:ZnOeZ/evzUdUsnvRt792H0uYEnHQEMaz+REhhzJRcf4= +cloud.google.com/go/networkmanagement v1.6.0/go.mod h1:5pKPqyXjB/sgtvB5xqOemumoQNB7y95Q7S+4rjSOPYY= +cloud.google.com/go/networksecurity v0.5.0/go.mod h1:xS6fOCoqpVC5zx15Z/MqkfDwH4+m/61A3ODiDV1xmiQ= +cloud.google.com/go/networksecurity v0.6.0/go.mod h1:Q5fjhTr9WMI5mbpRYEbiexTzROf7ZbDzvzCrNl14nyU= +cloud.google.com/go/networksecurity v0.7.0/go.mod h1:mAnzoxx/8TBSyXEeESMy9OOYwo1v+gZ5eMRnsT5bC8k= +cloud.google.com/go/notebooks v1.2.0/go.mod h1:9+wtppMfVPUeJ8fIWPOq1UnATHISkGXGqTkxeieQ6UY= +cloud.google.com/go/notebooks v1.3.0/go.mod h1:bFR5lj07DtCPC7YAAJ//vHskFBxA5JzYlH68kXVdk34= +cloud.google.com/go/notebooks v1.4.0/go.mod h1:4QPMngcwmgb6uw7Po99B2xv5ufVoIQ7nOGDyL4P8AgA= +cloud.google.com/go/notebooks v1.5.0/go.mod h1:q8mwhnP9aR8Hpfnrc5iN5IBhrXUy8S2vuYs+kBJ/gu0= +cloud.google.com/go/notebooks v1.7.0/go.mod h1:PVlaDGfJgj1fl1S3dUwhFMXFgfYGhYQt2164xOMONmE= +cloud.google.com/go/optimization v1.1.0/go.mod h1:5po+wfvX5AQlPznyVEZjGJTMr4+CAkJf2XSTQOOl9l4= +cloud.google.com/go/optimization v1.2.0/go.mod h1:Lr7SOHdRDENsh+WXVmQhQTrzdu9ybg0NecjHidBq6xs= +cloud.google.com/go/optimization v1.3.1/go.mod h1:IvUSefKiwd1a5p0RgHDbWCIbDFgKuEdB+fPPuP0IDLI= +cloud.google.com/go/orchestration v1.3.0/go.mod h1:Sj5tq/JpWiB//X/q3Ngwdl5K7B7Y0KZ7bfv0wL6fqVA= +cloud.google.com/go/orchestration v1.4.0/go.mod h1:6W5NLFWs2TlniBphAViZEVhrXRSMgUGDfW7vrWKvsBk= +cloud.google.com/go/orchestration v1.6.0/go.mod h1:M62Bevp7pkxStDfFfTuCOaXgaaqRAga1yKyoMtEoWPQ= +cloud.google.com/go/orgpolicy v1.4.0/go.mod h1:xrSLIV4RePWmP9P3tBl8S93lTmlAxjm06NSm2UTmKvE= +cloud.google.com/go/orgpolicy v1.5.0/go.mod h1:hZEc5q3wzwXJaKrsx5+Ewg0u1LxJ51nNFlext7Tanwc= +cloud.google.com/go/orgpolicy v1.10.0/go.mod h1:w1fo8b7rRqlXlIJbVhOMPrwVljyuW5mqssvBtU18ONc= +cloud.google.com/go/osconfig v1.7.0/go.mod h1:oVHeCeZELfJP7XLxcBGTMBvRO+1nQ5tFG9VQTmYS2Fs= +cloud.google.com/go/osconfig v1.8.0/go.mod h1:EQqZLu5w5XA7eKizepumcvWx+m8mJUhEwiPqWiZeEdg= +cloud.google.com/go/osconfig v1.9.0/go.mod h1:Yx+IeIZJ3bdWmzbQU4fxNl8xsZ4amB+dygAwFPlvnNo= +cloud.google.com/go/osconfig v1.10.0/go.mod h1:uMhCzqC5I8zfD9zDEAfvgVhDS8oIjySWh+l4WK6GnWw= +cloud.google.com/go/osconfig v1.11.0/go.mod h1:aDICxrur2ogRd9zY5ytBLV89KEgT2MKB2L/n6x1ooPw= +cloud.google.com/go/oslogin v1.4.0/go.mod h1:YdgMXWRaElXz/lDk1Na6Fh5orF7gvmJ0FGLIs9LId4E= +cloud.google.com/go/oslogin v1.5.0/go.mod h1:D260Qj11W2qx/HVF29zBg+0fd6YCSjSqLUkY/qEenQU= +cloud.google.com/go/oslogin v1.6.0/go.mod h1:zOJ1O3+dTU8WPlGEkFSh7qeHPPSoxrcMbbK1Nm2iX70= +cloud.google.com/go/oslogin v1.7.0/go.mod h1:e04SN0xO1UNJ1M5GP0vzVBFicIe4O53FOfcixIqTyXo= +cloud.google.com/go/oslogin v1.9.0/go.mod h1:HNavntnH8nzrn8JCTT5fj18FuJLFJc4NaZJtBnQtKFs= +cloud.google.com/go/phishingprotection v0.5.0/go.mod h1:Y3HZknsK9bc9dMi+oE8Bim0lczMU6hrX0UpADuMefr0= +cloud.google.com/go/phishingprotection v0.6.0/go.mod h1:9Y3LBLgy0kDTcYET8ZH3bq/7qni15yVUoAxiFxnlSUA= +cloud.google.com/go/phishingprotection v0.7.0/go.mod h1:8qJI4QKHoda/sb/7/YmMQ2omRLSLYSu9bU0EKCNI+Lk= +cloud.google.com/go/policytroubleshooter v1.3.0/go.mod h1:qy0+VwANja+kKrjlQuOzmlvscn4RNsAc0e15GGqfMxg= +cloud.google.com/go/policytroubleshooter v1.4.0/go.mod h1:DZT4BcRw3QoO8ota9xw/LKtPa8lKeCByYeKTIf/vxdE= 
+cloud.google.com/go/policytroubleshooter v1.5.0/go.mod h1:Rz1WfV+1oIpPdN2VvvuboLVRsB1Hclg3CKQ53j9l8vw= +cloud.google.com/go/privatecatalog v0.5.0/go.mod h1:XgosMUvvPyxDjAVNDYxJ7wBW8//hLDDYmnsNcMGq1K0= +cloud.google.com/go/privatecatalog v0.6.0/go.mod h1:i/fbkZR0hLN29eEWiiwue8Pb+GforiEIBnV9yrRUOKI= +cloud.google.com/go/privatecatalog v0.7.0/go.mod h1:2s5ssIFO69F5csTXcwBP7NPFTZvps26xGzvQ2PQaBYg= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/pubsub v1.26.0/go.mod h1:QgBH3U/jdJy/ftjPhTkyXNj543Tin1pRYcdcPRnFIRI= +cloud.google.com/go/pubsub v1.27.1/go.mod h1:hQN39ymbV9geqBnfQq6Xf63yNhUAhv9CZhzp5O6qsW0= +cloud.google.com/go/pubsub v1.28.0/go.mod h1:vuXFpwaVoIPQMGXqRyUQigu/AX1S3IWugR9xznmcXX8= +cloud.google.com/go/pubsublite v1.5.0/go.mod h1:xapqNQ1CuLfGi23Yda/9l4bBCKz/wC3KIJ5gKcxveZg= +cloud.google.com/go/pubsublite v1.6.0/go.mod h1:1eFCS0U11xlOuMFV/0iBqw3zP12kddMeCbj/F3FSj9k= +cloud.google.com/go/recaptchaenterprise v1.3.1/go.mod h1:OdD+q+y4XGeAlxRaMn1Y7/GveP6zmq76byL6tjPE7d4= +cloud.google.com/go/recaptchaenterprise/v2 v2.1.0/go.mod h1:w9yVqajwroDNTfGuhmOjPDN//rZGySaf6PtFVcSCa7o= +cloud.google.com/go/recaptchaenterprise/v2 v2.2.0/go.mod h1:/Zu5jisWGeERrd5HnlS3EUGb/D335f9k51B/FVil0jk= +cloud.google.com/go/recaptchaenterprise/v2 v2.3.0/go.mod h1:O9LwGCjrhGHBQET5CA7dd5NwwNQUErSgEDit1DLNTdo= +cloud.google.com/go/recaptchaenterprise/v2 v2.4.0/go.mod h1:Am3LHfOuBstrLrNCBrlI5sbwx9LBg3te2N6hGvHn2mE= +cloud.google.com/go/recaptchaenterprise/v2 v2.5.0/go.mod h1:O8LzcHXN3rz0j+LBC91jrwI3R+1ZSZEWrfL7XHgNo9U= +cloud.google.com/go/recaptchaenterprise/v2 v2.6.0/go.mod h1:RPauz9jeLtB3JVzg6nCbe12qNoaa8pXc4d/YukAmcnA= +cloud.google.com/go/recommendationengine v0.5.0/go.mod h1:E5756pJcVFeVgaQv3WNpImkFP8a+RptV6dDLGPILjvg= +cloud.google.com/go/recommendationengine v0.6.0/go.mod h1:08mq2umu9oIqc7tDy8sx+MNJdLG0fUi3vaSVbztHgJ4= +cloud.google.com/go/recommendationengine v0.7.0/go.mod h1:1reUcE3GIu6MeBz/h5xZJqNLuuVjNg1lmWMPyjatzac= +cloud.google.com/go/recommender v1.5.0/go.mod h1:jdoeiBIVrJe9gQjwd759ecLJbxCDED4A6p+mqoqDvTg= +cloud.google.com/go/recommender v1.6.0/go.mod h1:+yETpm25mcoiECKh9DEScGzIRyDKpZ0cEhWGo+8bo+c= +cloud.google.com/go/recommender v1.7.0/go.mod h1:XLHs/W+T8olwlGOgfQenXBTbIseGclClff6lhFVe9Bs= +cloud.google.com/go/recommender v1.8.0/go.mod h1:PkjXrTT05BFKwxaUxQmtIlrtj0kph108r02ZZQ5FE70= +cloud.google.com/go/recommender v1.9.0/go.mod h1:PnSsnZY7q+VL1uax2JWkt/UegHssxjUVVCrX52CuEmQ= +cloud.google.com/go/redis v1.7.0/go.mod h1:V3x5Jq1jzUcg+UNsRvdmsfuFnit1cfe3Z/PGyq/lm4Y= +cloud.google.com/go/redis v1.8.0/go.mod h1:Fm2szCDavWzBk2cDKxrkmWBqoCiL1+Ctwq7EyqBCA/A= +cloud.google.com/go/redis v1.9.0/go.mod h1:HMYQuajvb2D0LvMgZmLDZW8V5aOC/WxstZHiy4g8OiA= +cloud.google.com/go/redis v1.10.0/go.mod h1:ThJf3mMBQtW18JzGgh41/Wld6vnDDc/F/F35UolRZPM= +cloud.google.com/go/redis v1.11.0/go.mod h1:/X6eicana+BWcUda5PpwZC48o37SiFVTFSs0fWAJ7uQ= +cloud.google.com/go/resourcemanager v1.3.0/go.mod h1:bAtrTjZQFJkiWTPDb1WBjzvc6/kifjj4QBYuKCCoqKA= +cloud.google.com/go/resourcemanager v1.4.0/go.mod h1:MwxuzkumyTX7/a3n37gmsT3py7LIXwrShilPh3P1tR0= +cloud.google.com/go/resourcemanager v1.5.0/go.mod h1:eQoXNAiAvCf5PXxWxXjhKQoTMaUSNrEfg+6qdf/wots= +cloud.google.com/go/resourcesettings v1.3.0/go.mod 
h1:lzew8VfESA5DQ8gdlHwMrqZs1S9V87v3oCnKCWoOuQU= +cloud.google.com/go/resourcesettings v1.4.0/go.mod h1:ldiH9IJpcrlC3VSuCGvjR5of/ezRrOxFtpJoJo5SmXg= +cloud.google.com/go/resourcesettings v1.5.0/go.mod h1:+xJF7QSG6undsQDfsCJyqWXyBwUoJLhetkRMDRnIoXA= +cloud.google.com/go/retail v1.8.0/go.mod h1:QblKS8waDmNUhghY2TI9O3JLlFk8jybHeV4BF19FrE4= +cloud.google.com/go/retail v1.9.0/go.mod h1:g6jb6mKuCS1QKnH/dpu7isX253absFl6iE92nHwlBUY= +cloud.google.com/go/retail v1.10.0/go.mod h1:2gDk9HsL4HMS4oZwz6daui2/jmKvqShXKQuB2RZ+cCc= +cloud.google.com/go/retail v1.11.0/go.mod h1:MBLk1NaWPmh6iVFSz9MeKG/Psyd7TAgm6y/9L2B4x9Y= +cloud.google.com/go/retail v1.12.0/go.mod h1:UMkelN/0Z8XvKymXFbD4EhFJlYKRx1FGhQkVPU5kF14= +cloud.google.com/go/run v0.2.0/go.mod h1:CNtKsTA1sDcnqqIFR3Pb5Tq0usWxJJvsWOCPldRU3Do= +cloud.google.com/go/run v0.3.0/go.mod h1:TuyY1+taHxTjrD0ZFk2iAR+xyOXEA0ztb7U3UNA0zBo= +cloud.google.com/go/run v0.8.0/go.mod h1:VniEnuBwqjigv0A7ONfQUaEItaiCRVujlMqerPPiktM= +cloud.google.com/go/scheduler v1.4.0/go.mod h1:drcJBmxF3aqZJRhmkHQ9b3uSSpQoltBPGPxGAWROx6s= +cloud.google.com/go/scheduler v1.5.0/go.mod h1:ri073ym49NW3AfT6DZi21vLZrG07GXr5p3H1KxN5QlI= +cloud.google.com/go/scheduler v1.6.0/go.mod h1:SgeKVM7MIwPn3BqtcBntpLyrIJftQISRrYB5ZtT+KOk= +cloud.google.com/go/scheduler v1.7.0/go.mod h1:jyCiBqWW956uBjjPMMuX09n3x37mtyPJegEWKxRsn44= +cloud.google.com/go/scheduler v1.8.0/go.mod h1:TCET+Y5Gp1YgHT8py4nlg2Sew8nUHMqcpousDgXJVQc= +cloud.google.com/go/secretmanager v1.6.0/go.mod h1:awVa/OXF6IiyaU1wQ34inzQNc4ISIDIrId8qE5QGgKA= +cloud.google.com/go/secretmanager v1.8.0/go.mod h1:hnVgi/bN5MYHd3Gt0SPuTPPp5ENina1/LxM+2W9U9J4= +cloud.google.com/go/secretmanager v1.9.0/go.mod h1:b71qH2l1yHmWQHt9LC80akm86mX8AL6X1MA01dW8ht4= +cloud.google.com/go/secretmanager v1.10.0/go.mod h1:MfnrdvKMPNra9aZtQFvBcvRU54hbPD8/HayQdlUgJpU= +cloud.google.com/go/security v1.5.0/go.mod h1:lgxGdyOKKjHL4YG3/YwIL2zLqMFCKs0UbQwgyZmfJl4= +cloud.google.com/go/security v1.7.0/go.mod h1:mZklORHl6Bg7CNnnjLH//0UlAlaXqiG7Lb9PsPXLfD0= +cloud.google.com/go/security v1.8.0/go.mod h1:hAQOwgmaHhztFhiQ41CjDODdWP0+AE1B3sX4OFlq+GU= +cloud.google.com/go/security v1.9.0/go.mod h1:6Ta1bO8LXI89nZnmnsZGp9lVoVWXqsVbIq/t9dzI+2Q= +cloud.google.com/go/security v1.10.0/go.mod h1:QtOMZByJVlibUT2h9afNDWRZ1G96gVywH8T5GUSb9IA= +cloud.google.com/go/security v1.12.0/go.mod h1:rV6EhrpbNHrrxqlvW0BWAIawFWq3X90SduMJdFwtLB8= +cloud.google.com/go/securitycenter v1.13.0/go.mod h1:cv5qNAqjY84FCN6Y9z28WlkKXyWsgLO832YiWwkCWcU= +cloud.google.com/go/securitycenter v1.14.0/go.mod h1:gZLAhtyKv85n52XYWt6RmeBdydyxfPeTrpToDPw4Auc= +cloud.google.com/go/securitycenter v1.15.0/go.mod h1:PeKJ0t8MoFmmXLXWm41JidyzI3PJjd8sXWaVqg43WWk= +cloud.google.com/go/securitycenter v1.16.0/go.mod h1:Q9GMaLQFUD+5ZTabrbujNWLtSLZIZF7SAR0wWECrjdk= +cloud.google.com/go/securitycenter v1.18.1/go.mod h1:0/25gAzCM/9OL9vVx4ChPeM/+DlfGQJDwBy/UC8AKK0= +cloud.google.com/go/servicecontrol v1.4.0/go.mod h1:o0hUSJ1TXJAmi/7fLJAedOovnujSEvjKCAFNXPQ1RaU= +cloud.google.com/go/servicecontrol v1.5.0/go.mod h1:qM0CnXHhyqKVuiZnGKrIurvVImCs8gmqWsDoqe9sU1s= +cloud.google.com/go/servicecontrol v1.11.0/go.mod h1:kFmTzYzTUIuZs0ycVqRHNaNhgR+UMUpw9n02l/pY+mc= +cloud.google.com/go/servicedirectory v1.4.0/go.mod h1:gH1MUaZCgtP7qQiI+F+A+OpeKF/HQWgtAddhTbhL2bs= +cloud.google.com/go/servicedirectory v1.5.0/go.mod h1:QMKFL0NUySbpZJ1UZs3oFAmdvVxhhxB6eJ/Vlp73dfg= +cloud.google.com/go/servicedirectory v1.6.0/go.mod h1:pUlbnWsLH9c13yGkxCmfumWEPjsRs1RlmJ4pqiNjVL4= +cloud.google.com/go/servicedirectory v1.7.0/go.mod 
h1:5p/U5oyvgYGYejufvxhgwjL8UVXjkuw7q5XcG10wx1U= +cloud.google.com/go/servicedirectory v1.8.0/go.mod h1:srXodfhY1GFIPvltunswqXpVxFPpZjf8nkKQT7XcXaY= +cloud.google.com/go/servicemanagement v1.4.0/go.mod h1:d8t8MDbezI7Z2R1O/wu8oTggo3BI2GKYbdG4y/SJTco= +cloud.google.com/go/servicemanagement v1.5.0/go.mod h1:XGaCRe57kfqu4+lRxaFEAuqmjzF0r+gWHjWqKqBvKFo= +cloud.google.com/go/servicemanagement v1.6.0/go.mod h1:aWns7EeeCOtGEX4OvZUWCCJONRZeFKiptqKf1D0l/Jc= +cloud.google.com/go/serviceusage v1.3.0/go.mod h1:Hya1cozXM4SeSKTAgGXgj97GlqUvF5JaoXacR1JTP/E= +cloud.google.com/go/serviceusage v1.4.0/go.mod h1:SB4yxXSaYVuUBYUml6qklyONXNLt83U0Rb+CXyhjEeU= +cloud.google.com/go/serviceusage v1.5.0/go.mod h1:w8U1JvqUqwJNPEOTQjrMHkw3IaIFLoLsPLvsE3xueec= +cloud.google.com/go/shell v1.3.0/go.mod h1:VZ9HmRjZBsjLGXusm7K5Q5lzzByZmJHf1d0IWHEN5X4= +cloud.google.com/go/shell v1.4.0/go.mod h1:HDxPzZf3GkDdhExzD/gs8Grqk+dmYcEjGShZgYa9URw= +cloud.google.com/go/shell v1.6.0/go.mod h1:oHO8QACS90luWgxP3N9iZVuEiSF84zNyLytb+qE2f9A= +cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= +cloud.google.com/go/speech v1.6.0/go.mod h1:79tcr4FHCimOp56lwC01xnt/WPJZc4v3gzyT7FoBkCM= +cloud.google.com/go/speech v1.7.0/go.mod h1:KptqL+BAQIhMsj1kOP2la5DSEEerPDuOP/2mmkhHhZQ= +cloud.google.com/go/speech v1.8.0/go.mod h1:9bYIl1/tjsAnMgKGHKmBZzXKEkGgtU+MpdDPTE9f7y0= +cloud.google.com/go/speech v1.9.0/go.mod h1:xQ0jTcmnRFFM2RfX/U+rk6FQNUF6DQlydUSyoooSpco= +cloud.google.com/go/speech v1.14.1/go.mod h1:gEosVRPJ9waG7zqqnsHpYTOoAS4KouMRLDFMekpJ0J0= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y= +cloud.google.com/go/storage v1.23.0/go.mod h1:vOEEDNFnciUMhBeT6hsJIn3ieU5cFRmzeLgDvXzfIXc= +cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= +cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= +cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= +cloud.google.com/go/storagetransfer v1.5.0/go.mod h1:dxNzUopWy7RQevYFHewchb29POFv3/AaBgnhqzqiK0w= +cloud.google.com/go/storagetransfer v1.6.0/go.mod h1:y77xm4CQV/ZhFZH75PLEXY0ROiS7Gh6pSKrM8dJyg6I= +cloud.google.com/go/storagetransfer v1.7.0/go.mod h1:8Giuj1QNb1kfLAiWM1bN6dHzfdlDAVC9rv9abHot2W4= +cloud.google.com/go/talent v1.1.0/go.mod h1:Vl4pt9jiHKvOgF9KoZo6Kob9oV4lwd/ZD5Cto54zDRw= +cloud.google.com/go/talent v1.2.0/go.mod h1:MoNF9bhFQbiJ6eFD3uSsg0uBALw4n4gaCaEjBw9zo8g= +cloud.google.com/go/talent v1.3.0/go.mod h1:CmcxwJ/PKfRgd1pBjQgU6W3YBwiewmUzQYH5HHmSCmM= +cloud.google.com/go/talent v1.4.0/go.mod h1:ezFtAgVuRf8jRsvyE6EwmbTK5LKciD4KVnHuDEFmOOA= +cloud.google.com/go/talent v1.5.0/go.mod h1:G+ODMj9bsasAEJkQSzO2uHQWXHHXUomArjWQQYkqK6c= +cloud.google.com/go/texttospeech v1.4.0/go.mod 
h1:FX8HQHA6sEpJ7rCMSfXuzBcysDAuWusNNNvN9FELDd8= +cloud.google.com/go/texttospeech v1.5.0/go.mod h1:oKPLhR4n4ZdQqWKURdwxMy0uiTS1xU161C8W57Wkea4= +cloud.google.com/go/texttospeech v1.6.0/go.mod h1:YmwmFT8pj1aBblQOI3TfKmwibnsfvhIBzPXcW4EBovc= +cloud.google.com/go/tpu v1.3.0/go.mod h1:aJIManG0o20tfDQlRIej44FcwGGl/cD0oiRyMKG19IQ= +cloud.google.com/go/tpu v1.4.0/go.mod h1:mjZaX8p0VBgllCzF6wcU2ovUXN9TONFLd7iz227X2Xg= +cloud.google.com/go/tpu v1.5.0/go.mod h1:8zVo1rYDFuW2l4yZVY0R0fb/v44xLh3llq7RuV61fPM= +cloud.google.com/go/trace v1.3.0/go.mod h1:FFUE83d9Ca57C+K8rDl/Ih8LwOzWIV1krKgxg6N0G28= +cloud.google.com/go/trace v1.4.0/go.mod h1:UG0v8UBqzusp+z63o7FK74SdFE+AXpCLdFb1rshXG+Y= +cloud.google.com/go/trace v1.8.0/go.mod h1:zH7vcsbAhklH8hWFig58HvxcxyQbaIqMarMg9hn5ECA= +cloud.google.com/go/translate v1.3.0/go.mod h1:gzMUwRjvOqj5i69y/LYLd8RrNQk+hOmIXTi9+nb3Djs= +cloud.google.com/go/translate v1.4.0/go.mod h1:06Dn/ppvLD6WvA5Rhdp029IX2Mi3Mn7fpMRLPvXT5Wg= +cloud.google.com/go/translate v1.6.0/go.mod h1:lMGRudH1pu7I3n3PETiOB2507gf3HnfLV8qlkHZEyos= +cloud.google.com/go/video v1.8.0/go.mod h1:sTzKFc0bUSByE8Yoh8X0mn8bMymItVGPfTuUBUyRgxk= +cloud.google.com/go/video v1.9.0/go.mod h1:0RhNKFRF5v92f8dQt0yhaHrEuH95m068JYOvLZYnJSw= +cloud.google.com/go/video v1.13.0/go.mod h1:ulzkYlYgCp15N2AokzKjy7MQ9ejuynOJdf1tR5lGthk= +cloud.google.com/go/videointelligence v1.6.0/go.mod h1:w0DIDlVRKtwPCn/C4iwZIJdvC69yInhW0cfi+p546uU= +cloud.google.com/go/videointelligence v1.7.0/go.mod h1:k8pI/1wAhjznARtVT9U1llUaFNPh7muw8QyOUpavru4= +cloud.google.com/go/videointelligence v1.8.0/go.mod h1:dIcCn4gVDdS7yte/w+koiXn5dWVplOZkE+xwG9FgK+M= +cloud.google.com/go/videointelligence v1.9.0/go.mod h1:29lVRMPDYHikk3v8EdPSaL8Ku+eMzDljjuvRs105XoU= +cloud.google.com/go/videointelligence v1.10.0/go.mod h1:LHZngX1liVtUhZvi2uNS0VQuOzNi2TkY1OakiuoUOjU= +cloud.google.com/go/vision v1.2.0/go.mod h1:SmNwgObm5DpFBme2xpyOyasvBc1aPdjvMk2bBk0tKD0= +cloud.google.com/go/vision/v2 v2.2.0/go.mod h1:uCdV4PpN1S0jyCyq8sIM42v2Y6zOLkZs+4R9LrGYwFo= +cloud.google.com/go/vision/v2 v2.3.0/go.mod h1:UO61abBx9QRMFkNBbf1D8B1LXdS2cGiiCRx0vSpZoUo= +cloud.google.com/go/vision/v2 v2.4.0/go.mod h1:VtI579ll9RpVTrdKdkMzckdnwMyX2JILb+MhPqRbPsY= +cloud.google.com/go/vision/v2 v2.5.0/go.mod h1:MmaezXOOE+IWa+cS7OhRRLK2cNv1ZL98zhqFFZaaH2E= +cloud.google.com/go/vision/v2 v2.6.0/go.mod h1:158Hes0MvOS9Z/bDMSFpjwsUrZ5fPrdwuyyvKSGAGMY= +cloud.google.com/go/vmmigration v1.2.0/go.mod h1:IRf0o7myyWFSmVR1ItrBSFLFD/rJkfDCUTO4vLlJvsE= +cloud.google.com/go/vmmigration v1.3.0/go.mod h1:oGJ6ZgGPQOFdjHuocGcLqX4lc98YQ7Ygq8YQwHh9A7g= +cloud.google.com/go/vmmigration v1.5.0/go.mod h1:E4YQ8q7/4W9gobHjQg4JJSgXXSgY21nA5r8swQV+Xxc= +cloud.google.com/go/vmwareengine v0.1.0/go.mod h1:RsdNEf/8UDvKllXhMz5J40XxDrNJNN4sagiox+OI208= +cloud.google.com/go/vmwareengine v0.2.2/go.mod h1:sKdctNJxb3KLZkE/6Oui94iw/xs9PRNC2wnNLXsHvH8= +cloud.google.com/go/vpcaccess v1.4.0/go.mod h1:aQHVbTWDYUR1EbTApSVvMq1EnT57ppDmQzZ3imqIk4w= +cloud.google.com/go/vpcaccess v1.5.0/go.mod h1:drmg4HLk9NkZpGfCmZ3Tz0Bwnm2+DKqViEpeEpOq0m8= +cloud.google.com/go/vpcaccess v1.6.0/go.mod h1:wX2ILaNhe7TlVa4vC5xce1bCnqE3AeH27RV31lnmZes= +cloud.google.com/go/webrisk v1.4.0/go.mod h1:Hn8X6Zr+ziE2aNd8SliSDWpEnSS1u4R9+xXZmFiHmGE= +cloud.google.com/go/webrisk v1.5.0/go.mod h1:iPG6fr52Tv7sGk0H6qUFzmL3HHZev1htXuWDEEsqMTg= +cloud.google.com/go/webrisk v1.6.0/go.mod h1:65sW9V9rOosnc9ZY7A7jsy1zoHS5W9IAXv6dGqhMQMc= +cloud.google.com/go/webrisk v1.7.0/go.mod h1:mVMHgEYH0r337nmt1JyLthzMr6YxwN1aAIEc2fTcq7A= +cloud.google.com/go/webrisk v1.8.0/go.mod 
h1:oJPDuamzHXgUc+b8SiHRcVInZQuybnvEW72PqTc7sSg= +cloud.google.com/go/websecurityscanner v1.3.0/go.mod h1:uImdKm2wyeXQevQJXeh8Uun/Ym1VqworNDlBXQevGMo= +cloud.google.com/go/websecurityscanner v1.4.0/go.mod h1:ebit/Fp0a+FWu5j4JOmJEV8S8CzdTkAS77oDsiSqYWQ= +cloud.google.com/go/websecurityscanner v1.5.0/go.mod h1:Y6xdCPy81yi0SQnDY1xdNTNpfY1oAgXUlcfN3B3eSng= +cloud.google.com/go/workflows v1.6.0/go.mod h1:6t9F5h/unJz41YqfBmqSASJSXccBLtD1Vwf+KmJENM0= +cloud.google.com/go/workflows v1.7.0/go.mod h1:JhSrZuVZWuiDfKEFxU0/F1PQjmpnpcoISEXH2bcHC3M= +cloud.google.com/go/workflows v1.8.0/go.mod h1:ysGhmEajwZxGn1OhGOGKsTXc5PyxOc0vfKf5Af+to4M= +cloud.google.com/go/workflows v1.9.0/go.mod h1:ZGkj1aFIOd9c8Gerkjjq7OW7I5+l6cSvT3ujaO/WwSA= +cloud.google.com/go/workflows v1.10.0/go.mod h1:fZ8LmRmZQWacon9UCX1r/g/DfAXx5VcPALq2CxzdePw= +cosmossdk.io/api v0.3.1 h1:NNiOclKRR0AOlO4KIqeaG6PS6kswOMhHD0ir0SscNXE= +cosmossdk.io/api v0.3.1/go.mod h1:DfHfMkiNA2Uhy8fj0JJlOCYOBp4eWUUJ1te5zBGNyIw= +cosmossdk.io/core v0.5.1 h1:vQVtFrIYOQJDV3f7rw4pjjVqc1id4+mE0L9hHP66pyI= +cosmossdk.io/core v0.5.1/go.mod h1:KZtwHCLjcFuo0nmDc24Xy6CRNEL9Vl/MeimQ2aC7NLE= +cosmossdk.io/depinject v1.0.0-alpha.4 h1:PLNp8ZYAMPTUKyG9IK2hsbciDWqna2z1Wsl98okJopc= +cosmossdk.io/depinject v1.0.0-alpha.4/go.mod h1:HeDk7IkR5ckZ3lMGs/o91AVUc7E596vMaOmslGFM3yU= +cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= +cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= +cosmossdk.io/log v1.3.1 h1:UZx8nWIkfbbNEWusZqzAx3ZGvu54TZacWib3EzUYmGI= +cosmossdk.io/log v1.3.1/go.mod h1:2/dIomt8mKdk6vl3OWJcPk2be3pGOS8OQaLUM/3/tCM= +cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= +cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= +cosmossdk.io/tools/rosetta v0.2.1 h1:ddOMatOH+pbxWbrGJKRAawdBkPYLfKXutK9IETnjYxw= +cosmossdk.io/tools/rosetta v0.2.1/go.mod h1:Pqdc1FdvkNV3LcNIkYWt2RQY6IP1ge6YWZk8MhhO9Hw= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +gioui.org v0.0.0-20210308172011-57750fc8a0a6/go.mod h1:RSH6KIUZ0p2xy5zHDxgAM4zumjgTw83q2ge/PI+yyw8= +git.sr.ht/~sbinet/gg v0.3.1/go.mod h1:KGYtlADtqsqANL9ueOFkWymvzUvLMQllU5Ixo+8v3pc= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= +github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/ChainSafe/go-schnorrkel v1.0.0 h1:3aDA67lAykLaG1y3AOjs88dMxC88PgUuHRrLeDnvGIM= +github.com/ChainSafe/go-schnorrkel v1.0.0/go.mod h1:dpzHYVxLZcp8pjlV+O+UR8K0Hp/z7vcchBSbMBEhCw4= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= +github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod 
h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= +github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/adlio/schema v1.3.3 h1:oBJn8I02PyTB466pZO1UZEn1TV5XLlifBSyMrmHl/1I= +github.com/adlio/schema v1.3.3/go.mod h1:1EsRssiv9/Ce2CMzq5DoL7RiMshhuigQxrR4DMV9fHg= +github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajstarks/deck v0.0.0-20200831202436-30c9fc6549a9/go.mod h1:JynElWSGnm/4RlzPXRlREEwqTHAN3T56Bv2ITsFT3gY= +github.com/ajstarks/deck/generate v0.0.0-20210309230005-c3f852c02e19/go.mod h1:T13YZdzov6OU0A1+RfKZiZN9ca6VeKdBdyDV+BY97Tk= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b/go.mod h1:1KcenG0jGWcpt8ov532z81sp/kMMUG485J2InIOyADM= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/apache/arrow/go/v10 v10.0.1/go.mod h1:YvhnlEePVnBS4+0z3fhPfUy7W1Ikj0Ih0vcRo/gZ1M0= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/apache/thrift v0.16.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2FXSqgU= +github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod 
h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= +github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= +github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.44.122/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo= +github.com/aws/aws-sdk-go v1.44.203 h1:pcsP805b9acL3wUqa4JR2vg1k2wnItkDYNvfmcy6F+U= +github.com/aws/aws-sdk-go v1.44.203/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= +github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d h1:xDfNPAt8lFiC1UJrqV3uuy861HCTo708pDMbjHHdCas= +github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4= +github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/boombuler/barcode v1.0.0/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boombuler/barcode v1.0.1/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= +github.com/boz/go-lifecycle v0.1.1 h1:tG/wff7Zxbkf19g4D4I0G8Y4sq83iT5QjD4rzEf/zrI= +github.com/boz/go-lifecycle v0.1.1/go.mod h1:zdagAUMcC2C0OmQkBlJZFV77uF4GCVaGphAexGi7oho= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ= +github.com/btcsuite/btcd/btcutil v1.1.2/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/bufbuild/protocompile v0.4.0 h1:LbFKd2XowZvQ/kajzguUp2DC9UEIQhIq77fZZlaQsNA= +github.com/bufbuild/protocompile v0.4.0/go.mod h1:3v93+mbWn/v3xzN+31nwkJfrEpAUwp+BagBSZWx+TP8= +github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto 
v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cheggaaa/pb v1.0.27/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/logex v1.2.1 h1:XHDu3E6q+gdHgsdTPH6ImJMIp436vR6MPtH8gP05QzM= +github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwysCBrQ= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04= +github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230310173818-32f1caf87195/go.mod 
h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cockroachdb/apd/v2 v2.0.2 h1:weh8u7Cneje73dDh+2tEVLUvyBc89iwepWCD8b8034E= +github.com/cockroachdb/apd/v2 v2.0.2/go.mod h1:DDxRlzC2lo3/vSlmSoS7JkqbbrARPuFOGr0B9pvN3Gw= +github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/cockroachdb/errors v1.10.0 h1:lfxS8zZz1+OjtV4MtNWgboi/W5tyLEB6VQZBXN+0VUU= +github.com/cockroachdb/errors v1.10.0/go.mod h1:lknhIsEVQ9Ss/qKDBQS/UqFSvPQjOwNq2qyKAxtHRqE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= +github.com/coinbase/rosetta-sdk-go/types v1.0.0 h1:jpVIwLcPoOeCR6o1tU+Xv7r5bMONNbHU7MuEHboiFuA= +github.com/coinbase/rosetta-sdk-go/types v1.0.0/go.mod h1:eq7W2TMRH22GTW0N0beDnN931DW0/WOI1R2sdHNHG4c= +github.com/cometbft/cometbft v0.37.6 h1:2BSD0lGPbcIyRd99Pf1zH0Sa8o0pbfqVWEDbZ4Ec2Uc= +github.com/cometbft/cometbft v0.37.6/go.mod h1:5FRkFil9uagHZogIX9x8z51c3GIPpQmdIN8Mq46HfzY= +github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= +github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= +github.com/confio/ics23/go v0.9.1 h1:3MV46eeWwO3xCauKyAtuAdJYMyPnnchW4iLr2bTw6/U= +github.com/confio/ics23/go v0.9.1/go.mod h1:4LPZ2NYqnYIVRklaozjNR1FScgDJ2s5Xrp+e/mYVRak= +github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg= +github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= +github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= +github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= +github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= +github.com/cosmos/cosmos-sdk v0.47.16-ics-lsm h1:+mlfnZ4Cs8HMw9xy7Epjv56avptYSTsX3TVlUDX3Qcs= +github.com/cosmos/cosmos-sdk v0.47.16-ics-lsm/go.mod h1:uzvMwHXmuRDSOaF8ec9HickjLHJcItWBREdkaDHcPiE= +github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= +github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= +github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= +github.com/cosmos/gogogateway v1.2.0 
h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= +github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ4GUkT+tbFI= +github.com/cosmos/gogoproto v1.3.3-alpha.regen.1 h1:Qmv/wAw4xHnjN5iZ9qHergfk1O7nnYl7ZsIY5lF+E9k= +github.com/cosmos/gogoproto v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= +github.com/cosmos/gogoproto v1.4.10 h1:QH/yT8X+c0F4ZDacDv3z+xE3WU1P1Z3wQoLMBRJoKuI= +github.com/cosmos/gogoproto v1.4.10/go.mod h1:3aAZzeRWpAwr+SS/LLkICX2/kDFyaYVzckBDzygIxek= +github.com/cosmos/iavl v0.20.1 h1:rM1kqeG3/HBT85vsZdoSNsehciqUQPWrR4BYmqE2+zg= +github.com/cosmos/iavl v0.20.1/go.mod h1:WO7FyvaZJoH65+HFOsDir7xU9FWk2w9cHXNW1XHcl7A= +github.com/cosmos/ledger-cosmos-go v0.12.4 h1:drvWt+GJP7Aiw550yeb3ON/zsrgW0jgh5saFCr7pDnw= +github.com/cosmos/ledger-cosmos-go v0.12.4/go.mod h1:fjfVWRf++Xkygt9wzCsjEBdjcf7wiiY35fv3ctT+k4M= +github.com/cosmos/rosetta-sdk-go v0.10.0 h1:E5RhTruuoA7KTIXUcMicL76cffyeoyvNybzUGSKFTcM= +github.com/cosmos/rosetta-sdk-go v0.10.0/go.mod h1:SImAZkb96YbwvoRkzSMQB6noNJXFgWl/ENIznEoYQI4= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= +github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= +github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= +github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= 
+github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= +github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/edwingeng/deque/v2 v2.1.1 h1:+xjC3TnaeMPLZMi7QQf9jN2K00MZmTwruApqplbL9IY= +github.com/edwingeng/deque/v2 v2.1.1/go.mod h1:HukI8CQe9KDmZCcURPZRYVYjH79Zy2tIjTF9sN3Bgb0= +github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= +github.com/envoyproxy/go-control-plane v0.11.0/go.mod h1:VnHyVMpzcLvCFt9yUz1UnCwHLhwx1WguiVDV7pTG/tI= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod 
h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= +github.com/envoyproxy/protoc-gen-validate v0.10.0/go.mod h1:DRjgyB0I43LtJapqN6NiRwroiAU2PaFuvk/vjgh61ss= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/getsentry/sentry-go v0.23.0 h1:dn+QRCeJv4pPt9OjVXiMcGIBIefaTJPw/h0bZWO05nE= +github.com/getsentry/sentry-go v0.23.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8= +github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-fonts/dejavu v0.1.0/go.mod h1:4Wt4I4OU2Nq9asgDCteaAaWZOV24E+0/Pwo0gppep4g= +github.com/go-fonts/latin-modern 
v0.2.0/go.mod h1:rQVLdDMK+mK1xscDwsqM5J8U2jrRa3T0ecnM9pNujks= +github.com/go-fonts/liberation v0.1.1/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/liberation v0.2.0/go.mod h1:K6qoJYypsmfVjWg8KOVDQhLc8UDgIK2HYqyqAO9z7GY= +github.com/go-fonts/stix v0.1.0/go.mod h1:w/c1f0ldAUlJmLBvlbkvVXLAD+tAMqobIIQpmnUIzUY= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-latex/latex v0.0.0-20210118124228-b3d85cf34e07/go.mod h1:CO1AlKB2CSIqUrmQPqA0gdRIlnLEY0gK5JGjh37zN5U= +github.com/go-latex/latex v0.0.0-20210823091927-c0d11ff05a81/go.mod h1:SX0U8uGpxhq9o2S/CELCSUxEWWAuoCUcVCQWv7G2OCk= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= +github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= +github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-playground/validator/v10 v10.11.1 h1:prmOlTVv+YjZjmRmNSF3VmspqJIxJWXmqUsHwfTRRkQ= +github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= +github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gobwas/httphead 
v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= +github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= +github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk= +github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= +github.com/gogo/googleapis v1.4.1-0.20201022092350-68b0159b7869/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= +github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf 
v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/flatbuffers v2.0.8+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= +github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= +github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= +github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/9KK8Lm7X9ctZC2t60YyIpYEI16jx0Qg= +github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= +github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0= +github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM= +github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM= +github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= +github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= +github.com/googleapis/gax-go/v2 v2.5.1/go.mod h1:h6B0KMMFNtI2ddbGJn3T3ZbwkeT6yqEF02fYlzkUCyo= +github.com/googleapis/gax-go/v2 v2.6.0/go.mod h1:1mjbznJAPHFpesgE5ucqfYEscaz5kMdcIDwU/6+DDoY= +github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= +github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers 
v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= +github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= +github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= +github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= +github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= +github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= +github.com/hashicorp/go-getter v1.7.1 h1:SWiSWN/42qdpR0MdhaOc/bLR48PLuP1ZQtYLRlM69uY= +github.com/hashicorp/go-getter v1.7.1/go.mod h1:W7TalhMmbPmsSMdNjD0ZskARur/9GJ17cfHTRtXV744= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix 
v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= +github.com/hashicorp/go-safetemp v1.0.0 h1:2HR189eFNrjHQyENnQMMpCiBAsRxzbTMIgBhEyExpmo= +github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoDHxNAB65b+Rj1I= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= +github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c h1:PdZEHcpa3117kJ1Wa5EYupzCzn9QlBby8Fx2YpZPYvo= +github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= +github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= +github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= +github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hdevalence/ed25519consensus v0.1.0 h1:jtBwzzcHuTmFrQN6xQZn6CQEO/V9f7HsjsjeEZ6auqU= +github.com/hdevalence/ed25519consensus v0.1.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= +github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= +github.com/huandu/skiplist v1.2.0 h1:gox56QD77HzSC0w+Ws3MH3iie755GBJU1OER3h5VsYw= +github.com/huandu/skiplist v1.2.0/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= +github.com/ianlancetaylor/demangle 
v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= +github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= +github.com/jhump/protoreflect v1.15.1 h1:HUMERORf3I3ZdX05WaQ6MIpd/NJ434hTp5YiKgfCL6c= +github.com/jhump/protoreflect v1.15.1/go.mod h1:jD/2GMKKE6OqX8qTjhADU1e6DShO+gavG9e0Q693nKo= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= +github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= +github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jmhodges/levigo v1.0.1-0.20191019112844-b572e7f4cdac h1:GcJkaxD5Wy/Ucn+L0USlpbGJy9O6+7r0nBI7ftJ7Uu0= +github.com/jmhodges/levigo v1.0.1-0.20191019112844-b572e7f4cdac/go.mod h1:dM7ihgFM8Do6WGIfOXWPgpJ+4bKGR/4ZkYh8HKDdFy4= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= 
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU= +github.com/klauspost/compress v1.15.11/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= +github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= +github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= +github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= +github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star v0.6.1/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= +github.com/lyft/protoc-gen-star/v2 v2.0.1/go.mod h1:RcCdONR2ScXaYnQC5tUzxzlpA3WVYF7/opLeUgcQs/o= +github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod 
h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-sqlite3 v1.14.14/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 h1:QRUSJEgZn2Snx0EmT/QLXibWjSUDjKWvXIT19NBVp94= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= +github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= +github.com/mitchellh/go-testing-interface v1.14.1 h1:jrgshOhYAUVNMAJiKbEu7EqAwgJJ2JqpQmpLJOu07cU= +github.com/mitchellh/go-testing-interface v1.14.1/go.mod h1:gfgS7OtZj6MA4U1UrDRp04twqAjfvlZyCfX3sDjEym8= +github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= +github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod 
h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/grpc-proxy v0.0.0-20181017164139-0f1106ef9c76/go.mod h1:x5OoJHDHqxHS801UIuhqGl6QdSAEJvtausosHSdazIo= +github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= +github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= +github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= +github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= +github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= +github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= 
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= +github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= +github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034= +github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ= +github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w= +github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg= +github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= +github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= +github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= +github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= +github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU= +github.com/petermattis/goid 
v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= +github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/phpdave11/gofpdi v1.0.13/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= +github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrec/lz4/v4 v4.1.15/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 
h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.3.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= +github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= +github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= +github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/russross/blackfriday v1.5.2/go.mod 
h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ruudk/golang-pdf417 v0.0.0-20181029194003-1af4ab5afa58/go.mod h1:6lfFZQK844Gfx8o5WFuvpxWRwnSoipWe/p622j1v06w= +github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY= +github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= +github.com/spf13/afero v1.9.2/go.mod h1:iUV7ddyEEZPO5gA3zD4fJt6iStLlL+Lg4m2cihcDf8Y= +github.com/spf13/afero v1.11.0 
h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= +github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= 
+github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= +github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= +github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= +github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= +github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= +github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= +github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/ulikunitz/xz v0.5.11 h1:kpFauv27b6ynzBNT/Xy+1k+fK4WswhN/6PN5WhFAGw8= +github.com/ulikunitz/xz v0.5.11/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= +github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= +github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= +github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= +go.etcd.io/bbolt v1.3.3/go.mod 
h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw= +go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.step.sm/crypto v0.45.1 h1:Xb8XldsbqT6pDYsg46BVPP1euASNbeNAhzrlvUP3QWo= +go.step.sm/crypto v0.45.1/go.mod h1:XtJBuMuZb11YeJpG8uP3fyBl2MerXWJ/pWQX/Au+Kt8= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod 
h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= +golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb h1:xIApU0ow1zwMa2uL1VDNeQlNVFTWMQxZUZCMDy0Q4Us= +golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= +golang.org/x/image v0.0.0-20190910094157-69e4b8554b2a/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200119044424-58c23975cae1/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200430140353-33d19683fad8/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20200618115811-c13761719519/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20201208152932-35266b937fa6/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210216034530-4410531fe030/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/image v0.0.0-20210607152325-775e3b0c77b9/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20210628002857-a66eb6448b8d/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image v0.0.0-20211028202545-6944b10bf410/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/image 
v0.0.0-20220302094943-723b81ca9867/go.mod h1:023OzeP/+EPmXeapQh35lcL3II3LrY8Ic+EFFKVhULM= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net 
v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= +golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= +golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221006150949-b44042a4b9c1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.0.0-20221014153046-6fdb5e3db783/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= +golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A= +golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.5.0/go.mod h1:9/XBHVqLaWO3/BRHs5jbpYCnOZVjj5V0ndyaAM7KB4I= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= +golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= +golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220819030929-7fc1605a5dde/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.3.8/go.mod 
h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20220922220347-f3bd1da661af/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= +golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190927191325-030b2cf1153e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU= +golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.9.3/go.mod h1:TZumC3NeyVQskjXqmyWt4S3bINhy7B4eYwW69EbyX+0= +gonum.org/v1/gonum v0.11.0/go.mod h1:fSG4YDCxxUZQJ7rKsQrj0gMOg00Il0Z96/qMA4bVQhA= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +gonum.org/v1/plot v0.9.0/go.mod h1:3Pcqqmp6RHvJI72kgb8fThyUnav364FOsdDo2aGW5lY= +gonum.org/v1/plot v0.10.1/go.mod h1:VZW5OlhkL1mysU9vaqNHnsy86inf6Ot+jB3r+BczCEo= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api 
v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= +google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI= +google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I= +google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo= +google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g= +google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA= +google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8= +google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs= +google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.77.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA= +google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw= +google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg= +google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o= +google.golang.org/api v0.85.0/go.mod h1:AqZf8Ep9uZ2pyTvgL+x0D3Zt0eoT9b5E8fmzfu6FO2g= +google.golang.org/api v0.90.0/go.mod 
h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.93.0/go.mod h1:+Sem1dnrKlrXMR/X0bPnMWyluQe4RsNoYfmNLhOIkzw= +google.golang.org/api v0.95.0/go.mod h1:eADj+UBuxkh5zlrSntJghuNeg8HwQ1w5lTKkuqaETEI= +google.golang.org/api v0.96.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.97.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.98.0/go.mod h1:w7wJQLTM+wvQpNf5JyEcBoxK0RH7EDrh/L4qfsuJ13s= +google.golang.org/api v0.99.0/go.mod h1:1YOf74vkVndF7pG6hIHuINsM7eWwpVTAfNMNiL91A08= +google.golang.org/api v0.100.0/go.mod h1:ZE3Z2+ZOr87Rx7dqFsdRQkRBk36kDtp/h+QpHbB7a70= +google.golang.org/api v0.102.0/go.mod h1:3VFl6/fzoA+qNuS1N1/VfXY4LjoXN/wzeIp7TweWwGo= +google.golang.org/api v0.103.0/go.mod h1:hGtW6nK1AC+d9si/UBhw8Xli+QMOf6xyNAyJw4qU9w0= +google.golang.org/api v0.106.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.107.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.108.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.110.0/go.mod h1:7FC4Vvx1Mooxh8C5HWjzZHcavuS2f6pmJpZx60ca7iI= +google.golang.org/api v0.181.0 h1:rPdjwnWgiPPOJx3IcSAQ2III5aX5tCer6wMpa/xmZi4= +google.golang.org/api v0.181.0/go.mod h1:MnQ+M0CFsfUwA5beZ+g/vCBCPXvtmZwRz2qzZk8ih1k= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod 
h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod 
h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI= +google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20220502173005-c8bf987b8c21/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4= +google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220628213854-d9e0b6570c03/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA= +google.golang.org/genproto v0.0.0-20220722212130-b98a9ff5e252/go.mod h1:GkXuJDJ6aQ7lnJcRF+SJVgFdQhypqgl3LB1C9vabdRE= +google.golang.org/genproto v0.0.0-20220801145646-83ce21fca29f/go.mod h1:iHe1svFLAZg9VWz891+QbRMwUv9O/1Ww+/mngYeThbc= +google.golang.org/genproto v0.0.0-20220815135757-37a418bb8959/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220817144833-d7fd3f11b9b1/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220822174746-9e6da59bd2fc/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829144015-23454907ede3/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220829175752-36a9c930ecbf/go.mod h1:dbqgFATTzChvnt+ujMdZwITVAJHFtfyN1qUhDqEiIlk= +google.golang.org/genproto v0.0.0-20220913154956-18f8339a66a5/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220914142337-ca0e39ece12f/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220915135415-7fd63a7952de/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220916172020-2692e8806bfa/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+Sk5eDzHnzlStwW3itdNaWoZA5XeSG+R3JHSo= +google.golang.org/genproto v0.0.0-20220920201722-2b89144ce006/go.mod 
h1:ht8XFiar2npT/g4vkk7O0WYS1sHOHbdujxbEp7CJWbw= +google.golang.org/genproto v0.0.0-20220926165614-551eb538f295/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20220926220553-6981cbe3cfce/go.mod h1:woMGP53BroOrRY3xTxlbr8Y3eB/nzAvvFM83q7kG2OI= +google.golang.org/genproto v0.0.0-20221010155953-15ba04fc1c0e/go.mod h1:3526vdqwhZAwq4wsRUaVG555sVgsNmIjRtO7t/JH29U= +google.golang.org/genproto v0.0.0-20221014173430-6e2ab493f96b/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221014213838-99cd37c6964a/go.mod h1:1vXfmgAz9N9Jx0QA82PqRVauvCz1SGSz739p0f183jM= +google.golang.org/genproto v0.0.0-20221024153911-1573dae28c9c/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221024183307-1bc688fe9f3e/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221025140454-527a21cfbd71/go.mod h1:9qHF0xnpdSfF6knlcsnpzUu5y+rpwgbvsyGAZPBMg4s= +google.golang.org/genproto v0.0.0-20221027153422-115e99e71e1c/go.mod h1:CGI5F/G+E5bKwmfYo09AXuVN4dD894kIKUFmVbP2/Fo= +google.golang.org/genproto v0.0.0-20221109142239-94d6d90a7d66/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221114212237-e4508ebdbee1/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221117204609-8f9c96812029/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221118155620-16455021b5e6/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201164419-0e50fba7f41c/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221201204527-e3fa12d562f3/go.mod h1:rZS5c/ZVYMaOGBfO68GWtjOw/eLaZM1X6iVtgjZ+EWg= +google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE= +google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230112194545-e10362b5ecf9/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230113154510-dbe35b8444a5/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230123190316-2c411cf9d197/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230124163310-31e0e69b6fc2/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230125152338-dcaf20b6aeaa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230127162408-596548ed4efa/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230209215440-0dfe4f8abfcc/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20230216225411-c8e22ba71e44/go.mod h1:8B0gmkoRebU8ukX6HP+4wrVQUY1+6PkQ44BSyIlflHA= +google.golang.org/genproto v0.0.0-20230222225845-10f96fb3dbec/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod 
h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 h1:mxSlqyb8ZAHsYDCfiXN1EDdNTdvjUJSLY+OnAUtYNYA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc 
v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= +google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.50.1/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= +google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.52.0/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5vorUY= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= +google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGONTY8= +google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= +google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod 
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools 
v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= +k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= +k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= +k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= +k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= +modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.2/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/cc/v3 v3.36.3/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI= +modernc.org/ccgo/v3 v3.0.0-20220428102840-41399a37e894/go.mod h1:eI31LL8EwEBKPpNpA4bU1/i+sKOwOrQy8D87zWUcRZc= +modernc.org/ccgo/v3 v3.0.0-20220430103911-bc99d88307be/go.mod h1:bwdAnOoaIt8Ax9YdWGjxWsdkPcZyRPHqrOvJxaKAKGw= +modernc.org/ccgo/v3 v3.16.4/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.6/go.mod h1:tGtX0gE9Jn7hdZFeU88slbTh1UtCYKusWOoCJuvkWsQ= +modernc.org/ccgo/v3 v3.16.8/go.mod h1:zNjwkizS+fIFDrDjIAgBSCLkWbJuHF+ar3QRn+Z9aws= +modernc.org/ccgo/v3 v3.16.9/go.mod h1:zNMzC9A9xeNUepy6KuZBbugn3c0Mc9TeiJO4lgvkJDo= +modernc.org/ccorpus v1.11.6/go.mod h1:2gEUTrWqdpH2pXsmTM1ZkjeSrUWDpjMu2T6m29L/ErQ= +modernc.org/httpfs v1.0.6/go.mod h1:7dosgurJGp0sPaRanU53W4xZYKh14wfzX420oZADeHM= +modernc.org/libc v0.0.0-20220428101251-2d5f3daf273b/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.16.0/go.mod h1:N4LD6DBE9cf+Dzf9buBlzVJndKr/iJHG97vGLHYnb5A= +modernc.org/libc v1.16.1/go.mod h1:JjJE0eu4yeK7tab2n4S1w8tlWd9MxXLRzheaRnAKymU= +modernc.org/libc v1.16.17/go.mod h1:hYIV5VZczAmGZAnG15Vdngn5HSF5cSkbvfz2B7GRuVU= +modernc.org/libc v1.16.19/go.mod h1:p7Mg4+koNjc8jkqwcoFBJx7tXkpj00G77X7A72jXPXA= +modernc.org/libc v1.17.0/go.mod h1:XsgLldpP4aWlPlsjqKRdHPqCxCjISdHfM/yeWC5GyW0= +modernc.org/libc v1.17.1/go.mod h1:FZ23b+8LjxZs7XtFMbSzL/EhPxNbfZbErxEHc7cbD9s= +modernc.org/mathutil v1.2.2/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/mathutil v1.5.0/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E= +modernc.org/memory v1.1.1/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.0/go.mod h1:/0wo5ibyrQiaoUoH7f9D8dnglAmILJ5/cxZlRECf+Nw= +modernc.org/memory v1.2.1/go.mod h1:PkUhL0Mugw21sHPeskwZW4D6VscE/GQJOnIpCnW6pSU= +modernc.org/opt v0.1.1/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sqlite v1.18.1/go.mod h1:6ho+Gow7oX5V+OiOQ6Tr4xeqbx13UZ6t+Fw9IRUG4d4= +modernc.org/strutil v1.1.1/go.mod h1:DE+MQQ/hjKBZS2zNInV5hhcipt5rLPWkmpbGeW5mmdw= +modernc.org/strutil v1.1.3/go.mod h1:MEHNA7PdEnEwLvspRMtWTNnp2nnyvMfkimT1NKNAGbw= +modernc.org/tcl v1.13.1/go.mod h1:XOLfOwzhkljL4itZkK6T72ckMgvj0BDsnKNdZVUOecw= 
+modernc.org/token v1.0.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= +modernc.org/z v1.5.1/go.mod h1:eWFB510QWW5Th9YGZT81s+LwvaAs3Q2yr4sP0rmLkv8= +nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= +nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= +pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= +pkg.akt.dev/go v0.0.2-rc3 h1:M/IGxhoVNYKEzF1QmwAh1kcVyJpYcaIUQ+UQ/HVeucM= +pkg.akt.dev/go v0.0.2-rc3/go.mod h1:rHtfieCwWNdlBVIWLGIne0PFWzBW7t2uTJ2WcB7MNJM= +pkg.akt.dev/go/sdl v0.0.1-rc7 h1:WKr1VyqjxZrfWC7S8W56YZC8QntajZsiiYcLwyKRs+M= +pkg.akt.dev/go/sdl v0.0.1-rc7/go.mod h1:R6FM2iL/8FnAzjF9Q5Zbos/27+MZcOongU0RE+bTLGE= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= +sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/go/cli/gov_prompt_test.go b/go/cli/gov_prompt_test.go new file mode 100644 index 00000000..ab741249 --- /dev/null +++ b/go/cli/gov_prompt_test.go @@ -0,0 +1,86 @@ +//go:build !race + +// Disabled -race because the package github.com/manifoldco/promptui@v0.9.0 +// has a data race and this code exposes it, but fixing it would require +// holding up the associated change to this. + +package cli_test + +import ( + "fmt" + "math" + "os" + "testing" + + "github.com/chzyer/readline" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "pkg.akt.dev/go/cli" +) + +type st struct { + I int +} + +// Tests that we successfully report overflows in parsing ints +// See https://github.com/cosmos/cosmos-sdk/issues/13346 +func TestPromptIntegerOverflow(t *testing.T) { + // Intentionally sending values out of the range of int. 
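+ // Each literal lies just outside the range of a 64-bit signed integer, so cli.Prompt is expected to reject it with an out-of-range error rather than silently wrapping.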
+ intOverflowers := []string{ + "-9223372036854775809", + "9223372036854775808", + "9923372036854775809", + "-9923372036854775809", + "18446744073709551616", + "-18446744073709551616", + } + + for _, intOverflower := range intOverflowers { + overflowStr := intOverflower + t.Run(overflowStr, func(t *testing.T) { + origStdin := readline.Stdin + defer func() { + readline.Stdin = origStdin + }() + + fin, fw := readline.NewFillableStdin(os.Stdin) + readline.Stdin = fin + _, _ = fw.Write([]byte(overflowStr + "\n")) + + v, err := cli.Prompt(st{}, "") + assert.Equal(t, st{}, v, "expected a value of zero") + require.NotNil(t, err, "expected a report of an overflow") + require.Contains(t, err.Error(), "range") + }) + } +} + +func TestPromptParseInteger(t *testing.T) { + // Values that fit within the range of int; each should parse successfully. + values := []struct { + in string + want int + }{ + {fmt.Sprintf("%d", math.MinInt), math.MinInt}, + {"19991", 19991}, + {"991000000199", 991000000199}, + } + + for _, tc := range values { + t.Run(tc.in, func(t *testing.T) { + origStdin := readline.Stdin + defer func() { + readline.Stdin = origStdin + }() + + fin, fw := readline.NewFillableStdin(os.Stdin) + readline.Stdin = fin + _, _ = fw.Write([]byte(tc.in + "\n")) + + v, err := cli.Prompt(st{}, "") + assert.Nil(t, err, "expected a nil error") + assert.Equal(t, tc.want, v.I, "expected %d = %d", tc.want, v.I) + }) + } +} diff --git a/go/cli/gov_query.go b/go/cli/gov_query.go new file mode 100644 index 00000000..49fe8459 --- /dev/null +++ b/go/cli/gov_query.go @@ -0,0 +1,631 @@ +package cli + +import ( + "fmt" + "strconv" + "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + gcutils "github.com/cosmos/cosmos-sdk/x/gov/client/utils" + "github.com/cosmos/cosmos-sdk/x/gov/types" + v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + + cflags "pkg.akt.dev/go/cli/flags" + cutils "pkg.akt.dev/go/node/utils" +) + +// GetQueryGovCmd returns the cli query commands for this module +func GetQueryGovCmd() *cobra.Command { + // Group gov queries under a subcommand + govQueryCmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Querying commands for the governance module", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + govQueryCmd.AddCommand( + GetQueryGovProposalCmd(), + GetQueryGovProposalsCmd(), + GetQueryGovVoteCmd(), + GetQueryGovVotesCmd(), + GetQueryGovQueryParamsCmd(), + GetQueryGovParamCmd(), + GetQueryGovProposerCmd(), + GetQueryGovDepositCmd(), + GetQueryGovDepositsCmd(), + GetQueryGovTallyCmd(), + ) + + return govQueryCmd +} + +// GetQueryGovProposalCmd implements the query proposal command. +func GetQueryGovProposalCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "proposal [proposal-id]", + Args: cobra.ExactArgs(1), + Short: "Query details of a single proposal", + Long: strings.TrimSpace( + fmt.Sprintf(`Query details for a proposal. You can find the +proposal-id by running "%s query gov proposals". 
+ +Example: +$ %s query gov proposal 1 +`, + version.AppName, version.AppName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + + // validate that the proposal id is a uint + proposalID, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return fmt.Errorf("proposal-id %s not a valid uint, please input a valid proposal-id", args[0]) + } + + cl := MustQueryClientFromContext(ctx) + + res, err := cl.Query().Gov().Proposal(ctx, &v1.QueryProposalRequest{ProposalId: proposalID}) + if err != nil { + return err + } + + return cl.PrintMessage(res.Proposal) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryGovProposalsCmd implements a query proposals command. Command to Get +// Proposals Information. +func GetQueryGovProposalsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "proposals", + Short: "Query proposals with optional filters", + Long: strings.TrimSpace( + fmt.Sprintf(`Query for all paginated proposals that match optional filters: + +Example: +$ %s query gov proposals --depositor akash1skjwj5whet0lpe65qaq4rpq03hjxlwd9nf39lk +$ %s query gov proposals --voter akash1skjwj5whet0lpe65qaq4rpq03hjxlwd9nf39lk +$ %s query gov proposals --status (DepositPeriod|VotingPeriod|Passed|Rejected) +$ %s query gov proposals --page=2 --limit=100 +`, + version.AppName, version.AppName, version.AppName, version.AppName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + + cl := MustQueryClientFromContext(ctx) + + bechDepositorAddr, _ := cmd.Flags().GetString(flagDepositor) + bechVoterAddr, _ := cmd.Flags().GetString(flagVoter) + strProposalStatus, _ := cmd.Flags().GetString(flagStatus) + + var proposalStatus v1.ProposalStatus + + if len(bechDepositorAddr) != 0 { + _, err := sdk.AccAddressFromBech32(bechDepositorAddr) + if err != nil { + return err + } + } + + if len(bechVoterAddr) != 0 { + _, err := sdk.AccAddressFromBech32(bechVoterAddr) + if err != nil { + return err + } + } + + if len(strProposalStatus) != 0 { + proposalStatus1, err := v1.ProposalStatusFromString(gcutils.NormalizeProposalStatus(strProposalStatus)) + proposalStatus = proposalStatus1 + if err != nil { + return err + } + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + res, err := cl.Query().Gov().Proposals(ctx, &v1.QueryProposalsRequest{ + ProposalStatus: proposalStatus, + Voter: bechVoterAddr, + Depositor: bechDepositorAddr, + Pagination: pageReq, + }) + + if err != nil { + return err + } + + if len(res.GetProposals()) == 0 { + return fmt.Errorf("no proposals found") + } + + return cl.PrintMessage(res) + }, + } + + cmd.Flags().String(flagDepositor, "", "(optional) filter by proposals deposited on by depositor") + cmd.Flags().String(flagVoter, "", "(optional) filter by proposals voted on by voter") + cmd.Flags().String(flagStatus, "", "(optional) filter proposals by proposal status, status: deposit_period/voting_period/passed/rejected") + cflags.AddPaginationFlagsToCmd(cmd, "proposals") + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryGovVoteCmd implements the query proposal vote command. Command to Get +// Vote Information. 
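+// If the requested vote is no longer present in state, the command falls back to recovering it
+// from transaction events via QueryVoteByTxQuery before printing.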
+func GetQueryGovVoteCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "vote [proposal-id] [voter-addr]", + Args: cobra.ExactArgs(2), + Short: "Query details of a single vote", + Long: strings.TrimSpace( + fmt.Sprintf(`Query details for a single vote on a proposal given its identifier. + +Example: +$ %s query gov vote 1 akash1skjwj5whet0lpe65qaq4rpq03hjxlwd9nf39lk +`, + version.AppName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + + // validate that the proposal id is a uint + proposalID, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return fmt.Errorf("proposal-id %s not a valid int, please input a valid proposal-id", args[0]) + } + + cl := MustQueryClientFromContext(ctx) + + // check to see if the proposal is in the store + _, err = cl.Query().Gov().Proposal( + ctx, + &v1.QueryProposalRequest{ProposalId: proposalID}, + ) + if err != nil { + return fmt.Errorf("failed to fetch proposal-id %d: %s", proposalID, err) + } + + voterAddr, err := sdk.AccAddressFromBech32(args[1]) + if err != nil { + return err + } + + res, err := cl.Query().Gov().Vote( + ctx, + &v1.QueryVoteRequest{ProposalId: proposalID, Voter: args[1]}, + ) + if err != nil { + return err + } + + vote := res.GetVote() + if vote.Empty() { + params := v1.NewQueryVoteParams(proposalID, voterAddr) + resByTxQuery, err := cutils.QueryVoteByTxQuery(ctx, cl.ClientContext(), params) + if err != nil { + return err + } + + if err := cl.ClientContext().Codec.UnmarshalJSON(resByTxQuery, vote); err != nil { + return err + } + } + + return cl.PrintMessage(res.Vote) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryGovVotesCmd implements the command to query for proposal votes. +func GetQueryGovVotesCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "votes [proposal-id]", + Args: cobra.ExactArgs(1), + Short: "Query votes on a proposal", + Long: strings.TrimSpace( + fmt.Sprintf(`Query vote details for a single proposal by its identifier. 
+ +Example: +$ %[1]s query gov votes 1 +$ %[1]s query gov votes 1 --page=2 --limit=100 +`, + version.AppName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + cctx := cl.ClientContext() + + // validate that the proposal id is a uint + proposalID, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return fmt.Errorf("proposal-id %s not a valid int, please input a valid proposal-id", args[0]) + } + + // check to see if the proposal is in the store + proposalRes, err := cl.Query().Gov().Proposal( + ctx, + &v1.QueryProposalRequest{ProposalId: proposalID}, + ) + if err != nil { + return fmt.Errorf("failed to fetch proposal-id %d: %s", proposalID, err) + } + + propStatus := proposalRes.GetProposal().Status + if !(propStatus == v1.StatusVotingPeriod || propStatus == v1.StatusDepositPeriod) { + page, _ := cmd.Flags().GetInt(cflags.FlagPage) + limit, _ := cmd.Flags().GetInt(cflags.FlagLimit) + + params := v1.NewQueryProposalVotesParams(proposalID, page, limit) + resByTxQuery, err := cutils.QueryVotesByTxQuery(ctx, cctx, params) + if err != nil { + return err + } + + var votes v1.Votes + // TODO migrate to use JSONCodec (implement MarshalJSONArray + // or wrap lists of proto.Message in some other message) + cctx.LegacyAmino.MustUnmarshalJSON(resByTxQuery, &votes) + return cctx.PrintObjectLegacy(votes) + + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + res, err := cl.Query().Gov().Votes( + ctx, + &v1.QueryVotesRequest{ProposalId: proposalID, Pagination: pageReq}, + ) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddPaginationFlagsToCmd(cmd, "votes") + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryGovDepositCmd implements the query proposal deposit command. Command to +// get a specific Deposit Information. +func GetQueryGovDepositCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "deposit [proposal-id] [depositer-addr]", + Args: cobra.ExactArgs(2), + Short: "Query details of a deposit", + Long: strings.TrimSpace( + fmt.Sprintf(`Query details for a single proposal deposit on a proposal by its identifier. + +Example: +$ %s query gov deposit 1 akash1skjwj5whet0lpe65qaq4rpq03hjxlwd9nf39lk +`, + version.AppName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + // validate that the proposal id is a uint + proposalID, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return fmt.Errorf("proposal-id %s not a valid uint, please input a valid proposal-id", args[0]) + } + + // check to see if the proposal is in the store + _, err = cl.Query().Gov().Proposal( + ctx, + &v1.QueryProposalRequest{ProposalId: proposalID}, + ) + if err != nil { + return fmt.Errorf("failed to fetch proposal-id %d: %s", proposalID, err) + } + + res, err := cl.Query().Gov().Deposit( + ctx, + &v1.QueryDepositRequest{ProposalId: proposalID, Depositor: args[1]}, + ) + if err != nil { + return err + } + + return cl.PrintMessage(res.Deposit) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryGovDepositsCmd implements the command to query for proposal deposits. 
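+// Results are paginated; the standard --page/--limit flags registered by AddPaginationFlagsToCmd apply.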
+func GetQueryGovDepositsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "deposits [proposal-id]", + Args: cobra.ExactArgs(1), + Short: "Query deposits on a proposal", + Long: strings.TrimSpace( + fmt.Sprintf(`Query details for all deposits on a proposal. +You can find the proposal-id by running "%s query gov proposals". + +Example: +$ %s query gov deposits 1 +`, + version.AppName, version.AppName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + // validate that the proposal id is a uint + proposalID, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return fmt.Errorf("proposal-id %s not a valid uint, please input a valid proposal-id", args[0]) + } + + // check to see if the proposal is in the store + _, err = cl.Query().Gov().Proposal( + ctx, + &v1.QueryProposalRequest{ProposalId: proposalID}, + ) + if err != nil { + return fmt.Errorf("failed to fetch proposal-id %d: %s", proposalID, err) + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + res, err := cl.Query().Gov().Deposits( + ctx, + &v1.QueryDepositsRequest{ProposalId: proposalID, Pagination: pageReq}, + ) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddPaginationFlagsToCmd(cmd, "deposits") + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryGovTallyCmd implements the command to query for proposal tally result. +func GetQueryGovTallyCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "tally [proposal-id]", + Args: cobra.ExactArgs(1), + Short: "Get the tally of a proposal vote", + Long: strings.TrimSpace( + fmt.Sprintf(`Query tally of votes on a proposal. You can find +the proposal-id by running "%s query gov proposals". + +Example: +$ %s query gov tally 1 +`, + version.AppName, version.AppName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + // validate that the proposal id is a uint + proposalID, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return fmt.Errorf("proposal-id %s not a valid int, please input a valid proposal-id", args[0]) + } + + // check to see if the proposal is in the store + _, err = cl.Query().Gov().Proposal( + ctx, + &v1.QueryProposalRequest{ProposalId: proposalID}, + ) + if err != nil { + return fmt.Errorf("failed to fetch proposal-id %d: %s", proposalID, err) + } + + // Query store + res, err := cl.Query().Gov().TallyResult( + ctx, + &v1.QueryTallyResultRequest{ProposalId: proposalID}, + ) + if err != nil { + return err + } + + return cl.PrintMessage(res.Tally) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryGovQueryParamsCmd implements the query params command. +// +// nolint:staticcheck // this function contains deprecated commands that we need. +func GetQueryGovQueryParamsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "params", + Short: "Query the parameters of the governance process", + Long: strings.TrimSpace( + fmt.Sprintf(`Query the all the parameters for the governance process. 
+ +Example: +$ %s query gov params +`, + version.AppName, + ), + ), + Args: cobra.NoArgs, + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + // Query store for all 3 params + res, err := cl.Query().Gov().Params( + ctx, + &v1.QueryParamsRequest{ParamsType: "deposit"}, + ) + if err != nil { + return err + } + + vp := v1.NewVotingParams(res.Params.VotingPeriod) + res.VotingParams = &vp + + tp := v1.NewTallyParams(res.Params.Quorum, res.Params.Threshold, res.Params.VetoThreshold) + res.TallyParams = &tp + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryGovParamCmd implements the query param command. +func GetQueryGovParamCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "param [param-type]", + Args: cobra.ExactArgs(1), + Short: "Query the parameters (voting|tallying|deposit) of the governance process", + Long: strings.TrimSpace( + fmt.Sprintf(`Query the all the parameters for the governance process. +Example: +$ %s query gov param voting +$ %s query gov param tallying +$ %s query gov param deposit +`, + version.AppName, version.AppName, version.AppName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + // Query store + res, err := cl.Query().Gov().Params( + cmd.Context(), + &v1.QueryParamsRequest{ParamsType: args[0]}, + ) + if err != nil { + return err + } + + var out fmt.Stringer + //nolint:staticcheck // this switch statement contains deprecated commands that we need. + switch args[0] { + case "voting": + out = res.GetVotingParams() + case "tallying": + out = res.GetTallyParams() + case "deposit": + out = res.GetDepositParams() + default: + return fmt.Errorf("argument must be one of (voting|tallying|deposit), was %s", args[0]) + } + + return cl.PrintMessage(out) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryGovProposerCmd implements the query proposer command. +func GetQueryGovProposerCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "proposer [proposal-id]", + Args: cobra.ExactArgs(1), + Short: "Query the proposer of a governance proposal", + Long: strings.TrimSpace( + fmt.Sprintf(`Query which address proposed a proposal with a given ID. 
+ +Example: +$ %s query gov proposer 1 +`, + version.AppName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + cctx := cl.ClientContext() + + // validate that the proposalID is a uint + proposalID, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return fmt.Errorf("proposal-id %s is not a valid uint", args[0]) + } + + prop, err := cutils.QueryProposerByTxQuery(ctx, cctx, proposalID) + if err != nil { + return err + } + + return cctx.PrintObjectLegacy(prop) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/go/cli/gov_query_test.go b/go/cli/gov_query_test.go new file mode 100644 index 00000000..9cca08e1 --- /dev/null +++ b/go/cli/gov_query_test.go @@ -0,0 +1,343 @@ +package cli_test + +import ( + "fmt" + "strings" + + "github.com/cosmos/cosmos-sdk/testutil" + + "pkg.akt.dev/go/cli" + cflags "pkg.akt.dev/go/cli/flags" +) + +func (s *GovCLITestSuite) TestCmdParams() { + testCases := []struct { + name string + args []string + expCmdOutput string + }{ + { + "json output", + []string{fmt.Sprintf("--%s=json", cflags.FlagOutput)}, + "--output=json", + }, + { + "text output", + cli.TestFlags(). + WithOutputText(), + "--output=text", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryGovParamCmd() + cmd.SetArgs(tc.args) + + s.Require().Contains(fmt.Sprint(cmd), strings.TrimSpace(tc.expCmdOutput)) + }) + } +} + +func (s *GovCLITestSuite) TestCmdParam() { + testCases := []struct { + name string + args []string + expCmdOutput string + }{ + { + "voting params", + cli.TestFlags(). + With("voting"). + WithOutputJSON(), + `voting --output=json`, + }, + { + "tally params", + cli.TestFlags(). + With("tallying"). + WithOutputJSON(), + `tallying --output=json`, + }, + { + "deposit params", + cli.TestFlags(). + With("deposit"). + WithOutputJSON(), + `deposit --output=json`, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryGovParamCmd() + cmd.SetArgs(tc.args) + s.Require().Contains(fmt.Sprint(cmd), strings.TrimSpace(tc.expCmdOutput)) + }) + } +} + +func (s *GovCLITestSuite) TestCmdProposer() { + testCases := []struct { + name string + args []string + expCmdOutput string + }{ + { + "without proposal id", + cli.TestFlags(). + WithOutputJSON(), + "--output=json", + }, + { + "with proposal id", + cli.TestFlags(). + With("1"). + WithOutputJSON(), + "1 --output=json", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryGovProposerCmd() + cmd.SetArgs(tc.args) + s.Require().Contains(fmt.Sprint(cmd), strings.TrimSpace(tc.expCmdOutput)) + }) + } +} + +func (s *GovCLITestSuite) TestCmdTally() { + testCases := []struct { + name string + args []string + expCmdOutput string + }{ + { + "without proposal id", + cli.TestFlags(). + WithOutputJSON(), + "--output=json", + }, + { + "with proposal id (json output)", + cli.TestFlags(). + With("2"). + WithOutputJSON(), + "2 --output=json", + }, + { + "with proposal id (text output)", + cli.TestFlags(). + With("1"). 
+ WithOutputText(), + "1 --output=text", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryGovTallyCmd() + cmd.SetArgs(tc.args) + s.Require().Contains(fmt.Sprint(cmd), strings.TrimSpace(tc.expCmdOutput)) + }) + } +} + +func (s *GovCLITestSuite) TestCmdGetProposal() { + testCases := []struct { + name string + args []string + expCmdOutput string + }{ + { + "get proposal with json response", + cli.TestFlags(). + With("1"). + WithOutputJSON(), + "1 --output=json", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryGovProposalCmd() + cmd.SetArgs(tc.args) + s.Require().Contains(fmt.Sprint(cmd), strings.TrimSpace(tc.expCmdOutput)) + }) + } +} + +func (s *GovCLITestSuite) TestCmdGetProposals() { + testCases := []struct { + name string + args []string + expCmdOutput string + }{ + { + "get proposals as json response", + cli.TestFlags(). + WithOutputJSON(), + "--output=json", + }, + { + "get proposals with invalid status", + cli.TestFlags(). + WithStatus("unknown"). + WithOutputJSON(), + "--status=unknown --output=json", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryGovProposalsCmd() + cmd.SetArgs(tc.args) + s.Require().Contains(fmt.Sprint(cmd), strings.TrimSpace(tc.expCmdOutput)) + }) + } +} + +func (s *GovCLITestSuite) TestCmdQueryDeposits() { + testCases := []struct { + name string + args []string + expCmdOutput string + }{ + { + "get deposits", + cli.TestFlags(). + With("10"), + "10", + }, + { + "get deposits(json output)", + cli.TestFlags(). + With("1"). + WithOutputJSON(), + "1 --output=json", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryGovDepositsCmd() + cmd.SetArgs(tc.args) + s.Require().Contains(fmt.Sprint(cmd), strings.TrimSpace(tc.expCmdOutput)) + }) + } +} + +func (s *GovCLITestSuite) TestCmdQueryDeposit() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + testCases := []struct { + name string + args []string + expCmdOutput string + }{ + { + "get deposit with no depositor", + cli.TestFlags(). + With("1"), + "1", + }, + { + "get deposit with wrong deposit address", + cli.TestFlags(). + With("1", "wrong address"), + "1 wrong address", + }, + { + "get deposit (valid req)", + cli.TestFlags(). + With("1", val[0].Address.String()). + WithOutputJSON(), + fmt.Sprintf("1 %s --output=json", val[0].Address.String()), + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryGovDepositCmd() + cmd.SetArgs(tc.args) + s.Require().Contains(fmt.Sprint(cmd), strings.TrimSpace(tc.expCmdOutput)) + }) + } +} + +func (s *GovCLITestSuite) TestCmdQueryVotes() { + testCases := []struct { + name string + args []string + expCmdOutput string + }{ + { + "get votes with no proposal id", + []string{}, + "", + }, + { + "get votes of a proposal", + cli.TestFlags(). + With("10"), + "10", + }, + { + "get votes of a proposal (json output)", + cli.TestFlags(). + With("1"). + WithOutputJSON(), + "1 --output=json", + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryGovVotesCmd() + cmd.SetArgs(tc.args) + s.Require().Contains(fmt.Sprint(cmd), strings.TrimSpace(tc.expCmdOutput)) + }) + } +} + +func (s *GovCLITestSuite) TestCmdQueryVote() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + testCases := []struct { + name string + args []string + expCmdOutput string + }{ + { + "get vote of a proposal", + cli.TestFlags(). 
+ With("10", val[0].Address.String()), + fmt.Sprintf("10 %s", val[0].Address.String()), + }, + { + "get vote by wrong voter", + cli.TestFlags(). + With("1", "wrong address"), + "1 wrong address", + }, + { + "get vote of a proposal (json output)", + cli.TestFlags(). + With("1", val[0].Address.String()). + WithOutputJSON(), + fmt.Sprintf("1 %s --output=json", val[0].Address.String()), + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryGovVoteCmd() + cmd.SetArgs(tc.args) + + if len(tc.args) != 0 { + s.Require().Contains(fmt.Sprint(cmd), strings.TrimSpace(tc.expCmdOutput)) + } + }) + } +} diff --git a/go/cli/gov_suite_test.go b/go/cli/gov_suite_test.go new file mode 100644 index 00000000..aab6b6fc --- /dev/null +++ b/go/cli/gov_suite_test.go @@ -0,0 +1,136 @@ +package cli_test + +import ( + "bytes" + "context" + "io" + + abci "github.com/cometbft/cometbft/abci/types" + rpcclientmock "github.com/cometbft/cometbft/rpc/client/mock" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + testutilmod "github.com/cosmos/cosmos-sdk/types/module/testutil" + "github.com/cosmos/cosmos-sdk/x/gov" + "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + + "pkg.akt.dev/go/cli" + cflags "pkg.akt.dev/go/cli/flags" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +type GovCLITestSuite struct { + CLITestSuite +} + +func (s *GovCLITestSuite) SetupSuite() { + s.encCfg = testutilmod.MakeTestEncodingConfig(gov.AppModuleBasic{}) + s.kr = keyring.NewInMemory(s.encCfg.Codec) + s.baseCtx = client.Context{}. + WithKeyring(s.kr). + WithTxConfig(s.encCfg.TxConfig). + WithCodec(s.encCfg.Codec). + WithLegacyAmino(s.encCfg.Amino). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). + WithChainID("test-chain"). + WithSignModeStr(cflags.SignModeDirect) + + var outBuf bytes.Buffer + ctxGen := func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&sdk.TxResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return s.baseCtx.WithClient(c) + } + s.cctx = ctxGen().WithOutput(&outBuf) + + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + // create a proposal with deposit + cmd := cli.GetTxGovSubmitLegacyProposalCmd() + + _, err := clitestutil.ExecTestCLICmd( + context.Background(), + s.cctx, + cmd, + cli.TestFlags(). + WithFrom(val[0].Address.String()). + WithTitle("Text Proposal 1"). + WithDescription("Where is the title!?"). + WithProposalType(v1beta1.ProposalTypeText). + WithDeposit(sdk.NewCoin("uakt", cli.DefaultMinDepositTokens)). + WithBroadcastModeSync(). + WithSkipConfirm()...) + s.Require().NoError(err) + + // vote for proposal + cmd = cli.GetTxGovVoteCmd() + + _, err = clitestutil.ExecTestCLICmd( + context.Background(), + s.cctx, + cmd, + cli.TestFlags(). + With( + "1", + "yes", + ). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithSkipConfirm()...) + s.Require().NoError(err) + + // create a proposal without deposit + cmd = cli.GetTxGovSubmitLegacyProposalCmd() + + _, err = clitestutil.ExecTestCLICmd( + context.Background(), + s.cctx, + cmd, + cli.TestFlags(). + WithFrom(val[0].Address.String()). + WithTitle("Text Proposal 2"). + WithDescription("Where is the title!?"). + WithProposalType(v1beta1.ProposalTypeText). + WithBroadcastModeSync(). + WithSkipConfirm()...) 
+ s.Require().NoError(err) + + // create a proposal3 with deposit + cmd = cli.GetTxGovSubmitLegacyProposalCmd() + + _, err = clitestutil.ExecTestCLICmd( + context.Background(), + s.cctx, + cmd, + cli.TestFlags(). + WithFrom(val[0].Address.String()). + WithTitle("Text Proposal 3"). + WithDescription("Where is the title!?"). + WithProposalType(v1beta1.ProposalTypeText). + WithDeposit(sdk.NewCoin("uakt", cli.DefaultMinDepositTokens)). + WithBroadcastModeSync(). + WithSkipConfirm()...) + s.Require().NoError(err) + + // vote for proposal3 as val + cmd = cli.GetTxGovWeightedVoteCmd() + + _, err = clitestutil.ExecTestCLICmd( + context.Background(), + s.cctx, + cmd, + cli.TestFlags(). + With( + "3", + "yes=0.6,no=0.3,abstain=0.05,no_with_veto=0.05", + ). + WithFrom(val[0].Address.String()). + WithBroadcastModeSync(). + WithSkipConfirm()...) + s.Require().NoError(err) +} diff --git a/go/cli/gov_tx.go b/go/cli/gov_tx.go new file mode 100644 index 00000000..387d8871 --- /dev/null +++ b/go/cli/gov_tx.go @@ -0,0 +1,838 @@ +package cli + +import ( + "encoding/json" + "fmt" + "os" + "reflect" + "sort" + "strconv" + "strings" + + "github.com/manifoldco/promptui" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + govutils "github.com/cosmos/cosmos-sdk/x/gov/client/utils" + "github.com/cosmos/cosmos-sdk/x/gov/types" + v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// Proposal flags +const ( + flagVoter = "voter" + flagDepositor = "depositor" + flagStatus = "status" +) + +// ProposalFlags defines the core required fields of a legacy proposal. It is used to +// verify that these values are not provided in conjunction with a JSON proposal +// file. +var ProposalFlags = []string{ + cflags.FlagTitle, + cflags.FlagDescription, // nolint:staticcheck + cflags.FlagProposalType, // nolint:staticcheck + cflags.FlagDeposit, +} + +const ( + proposalText = "text" + proposalOther = "other" + draftProposalFileName = "draft_proposal.json" + draftMetadataFileName = "draft_metadata.json" +) + +var suggestedProposalTypes = []proposalType{ + { + Name: proposalText, + MsgType: "", // no message for text proposal + }, + { + Name: "community-pool-spend", + MsgType: "/cosmos.distribution.v1beta1.MsgCommunityPoolSpend", + }, + { + Name: "software-upgrade", + MsgType: "/cosmos.upgrade.v1beta1.MsgSoftwareUpgrade", + }, + { + Name: "cancel-software-upgrade", + MsgType: "/cosmos.upgrade.v1beta1.MsgCancelUpgrade", + }, + { + Name: proposalOther, + MsgType: "", // user will input the message type + }, +} + +type proposalType struct { + Name string + MsgType string + Msg sdk.Msg +} + +// proposal defines the new Msg-based proposal. +type proposalMsg struct { + // Msgs defines an array of sdk.Msgs proto-JSON-encoded as Anys. 
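+ // Each entry is decoded back into an sdk.Msg by parseSubmitProposal using the client codec.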
+ Messages []json.RawMessage `json:"messages,omitempty"` + Metadata string `json:"metadata"` + Deposit string `json:"deposit"` + Title string `json:"title"` + Summary string `json:"summary"` + Expedited bool `json:"expedited"` +} + +type legacyProposal struct { + Title string + Description string + Type string + Deposit string +} + +// GetTxGovCmd returns the transaction commands for this module +// governance ModuleClient is slightly different from other ModuleClients in that +// it contains a slice of legacy "proposal" child commands. These commands are respective +// to the proposal type handlers that are implemented in other modules but are mounted +// under the governance CLI (eg. parameter change proposals). +func GetTxGovCmd(legacyPropCmds []*cobra.Command) *cobra.Command { + govTxCmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Governance transactions subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmdSubmitLegacyProp := GetTxGovSubmitLegacyProposalCmd() + for _, propCmd := range legacyPropCmds { + cflags.AddTxFlagsToCmd(propCmd) + cmdSubmitLegacyProp.AddCommand(propCmd) + } + + govTxCmd.AddCommand( + GetTxGovDepositCmd(), + GetTxGovVoteCmd(), + GetTxGovWeightedVoteCmd(), + GetTxGovSubmitProposalCmd(), + GetTxGovDraftProposalCmd(), + + // Deprecated + cmdSubmitLegacyProp, + ) + + return govTxCmd +} + +// GetTxGovSubmitProposalCmd implements submitting a proposal transaction command. +func GetTxGovSubmitProposalCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "submit-proposal [path/to/proposal.json]", + Short: "Submit a proposal along with some messages, metadata and deposit", + Args: cobra.ExactArgs(1), + Long: strings.TrimSpace( + fmt.Sprintf(`Submit a proposal along with some messages, metadata and deposit. +They should be defined in a JSON file. + +Example: +$ %s tx gov submit-proposal path/to/proposal.json + +Where proposal.json contains: + +{ + // array of proto-JSON-encoded sdk.Msgs + "messages": [ + { + "@type": "/cosmos.bank.v1beta1.MsgSend", + "from_address": "cosmos1...", + "to_address": "cosmos1...", + "amount":[{"denom": "uakt","amount": "10"}] + } + ], + // metadata can be any of base64 encoded, raw text, stringified json, IPFS link to json + // see below for example metadata + "metadata": "4pIMOgIGx1vZGU=", + "deposit": "10stake", + "title": "My proposal", + "summary": "A short summary of my proposal" + "expedited": false +} + +metadata example: +{ + "title": "", + "authors": [""], + "summary": "", + "details": "", + "proposal_forum_url": "", + "vote_option_context": "", +} +`, + version.AppName, + ), + ), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + proposal, msgs, deposit, err := parseSubmitProposal(cctx.Codec, args[0]) + if err != nil { + return err + } + + msg, err := v1.NewMsgSubmitProposal( + msgs, + deposit, + cctx.GetFromAddress().String(), + proposal.Metadata, + proposal.Title, + proposal.Summary, + proposal.Expedited, + ) + if err != nil { + return fmt.Errorf("invalid message: %w", err) + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// GetTxGovSubmitLegacyProposalCmd implements submitting a proposal transaction command. +// Deprecated: please use GetTxGovSubmitProposalCmd instead. 
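+// The proposal may be supplied either through --proposal pointing at a JSON file or through the
+// individual --title/--description/--type/--deposit flags; combining the file with those flags is rejected.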
+func GetTxGovSubmitLegacyProposalCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "submit-legacy-proposal", + Short: "Submit a legacy proposal along with an initial deposit", + Long: strings.TrimSpace( + fmt.Sprintf(`Submit a legacy proposal along with an initial deposit. +Proposal title, description, type and deposit can be given directly or through a proposal JSON file. + +Example: +$ %s tx gov submit-legacy-proposal --proposal="path/to/proposal.json" --from mykey + +Where proposal.json contains: + +{ + "title": "Test Proposal", + "description": "My awesome proposal", + "type": "Text", + "deposit": "10test" +} + +Which is equivalent to: + +$ %s tx gov submit-legacy-proposal --title="Test Proposal" --description="My awesome proposal" --type="Text" --deposit="10test" --from mykey +`, + version.AppName, version.AppName, + ), + ), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + proposal, err := parseSubmitLegacyProposal(cmd.Flags()) + if err != nil { + return fmt.Errorf("failed to parse proposal: %w", err) + } + + amount, err := sdk.ParseCoinsNormalized(proposal.Deposit) + if err != nil { + return err + } + + content, ok := v1beta1.ContentFromProposalType(proposal.Title, proposal.Description, proposal.Type) + if !ok { + return fmt.Errorf("failed to create proposal content: unknown proposal type %s", proposal.Type) + } + + msg, err := v1beta1.NewMsgSubmitProposal(content, amount, cctx.GetFromAddress()) + if err != nil { + return fmt.Errorf("invalid message: %w", err) + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cmd.Flags().String(cflags.FlagTitle, "", "The proposal title") + cmd.Flags().String(cflags.FlagDescription, "", "The proposal description") // nolint:staticcheck + cmd.Flags().String(cflags.FlagProposalType, "", "The proposal Type") // nolint:staticcheck + cmd.Flags().String(cflags.FlagDeposit, "", "The proposal deposit") + cmd.Flags().String(cflags.FlagProposal, "", "Proposal file path (if this path is given, other proposal flags are ignored)") // nolint:staticcheck + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// GetTxGovDepositCmd implements depositing tokens for an active proposal. +func GetTxGovDepositCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "deposit [proposal-id] [deposit]", + Args: cobra.ExactArgs(2), + Short: "Deposit tokens for an active proposal", + Long: strings.TrimSpace( + fmt.Sprintf(`Submit a deposit for an active proposal. You can +find the proposal-id by running "%s query gov proposals". 
+ +Example: +$ %s tx gov deposit 1 10stake --from mykey +`, + version.AppName, version.AppName, + ), + ), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + // validate that the proposal id is a uint + proposalID, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return fmt.Errorf("proposal-id %s not a valid uint, please input a valid proposal-id", args[0]) + } + + // Get depositor address + from := cctx.GetFromAddress() + + // Get amount of coins + amount, err := sdk.ParseCoinsNormalized(args[1]) + if err != nil { + return err + } + + msg := v1.NewMsgDeposit(from, proposalID, amount) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// GetTxGovVoteCmd implements creating a new vote command. +func GetTxGovVoteCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "vote [proposal-id] [option]", + Args: cobra.ExactArgs(2), + Short: "Vote for an active proposal, options: yes/no/no_with_veto/abstain", + Long: strings.TrimSpace( + fmt.Sprintf(`Submit a vote for an active proposal. You can +find the proposal-id by running "%s query gov proposals". + +Example: +$ %s tx gov vote 1 yes --from mykey +`, + version.AppName, version.AppName, + ), + ), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + // Get voting address + from := cctx.GetFromAddress() + + // validate that the proposal id is a uint + proposalID, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return fmt.Errorf("proposal-id %s not a valid int, please input a valid proposal-id", args[0]) + } + + // Find out which vote option user chose + byteVoteOption, err := v1.VoteOptionFromString(govutils.NormalizeVoteOption(args[1])) + if err != nil { + return err + } + + metadata, err := cmd.Flags().GetString(cflags.FlagMetadata) + if err != nil { + return err + } + + // Build vote message and run basic validation + msg := v1.NewMsgVote(from, proposalID, byteVoteOption, metadata) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cmd.Flags().String(cflags.FlagMetadata, "", "Specify metadata of the vote") + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// GetTxGovWeightedVoteCmd implements creating a new weighted vote command. +func GetTxGovWeightedVoteCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "weighted-vote [proposal-id] [weighted-options]", + Args: cobra.ExactArgs(2), + Short: "Vote for an active proposal, options: yes/no/no_with_veto/abstain", + Long: strings.TrimSpace( + fmt.Sprintf(`Submit a vote for an active proposal. You can +find the proposal-id by running "%s query gov proposals". 
+ +Example: +$ %s tx gov weighted-vote 1 yes=0.6,no=0.3,abstain=0.05,no_with_veto=0.05 --from mykey +`, + version.AppName, version.AppName, + ), + ), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + // Get voter address + from := cctx.GetFromAddress() + + // validate that the proposal id is a uint + proposalID, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return fmt.Errorf("proposal-id %s not a valid int, please input a valid proposal-id", args[0]) + } + + // Figure out which vote options user chose + options, err := v1.WeightedVoteOptionsFromString(govutils.NormalizeWeightedVoteOptions(args[1])) + if err != nil { + return err + } + + metadata, err := cmd.Flags().GetString(cflags.FlagMetadata) + if err != nil { + return err + } + + // Build vote message and run basic validation + msg := v1.NewMsgVoteWeighted(from, proposalID, options, metadata) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cmd.Flags().String(cflags.FlagMetadata, "", "Specify metadata of the weighted vote") + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// GetTxGovDraftProposalCmd let a user generate a draft proposal. +func GetTxGovDraftProposalCmd() *cobra.Command { + flagSkipMetadata := "skip-metadata" + + cmd := &cobra.Command{ + Use: "draft-proposal", + Short: "Generate a draft proposal json file. The generated proposal json contains only one message (skeleton).", + SilenceUsage: true, + RunE: func(cmd *cobra.Command, _ []string) error { + clientCtx, err := client.GetClientTxContext(cmd) + if err != nil { + return err + } + + // prompt proposal type + proposalTypesPrompt := promptui.Select{ + Label: "Select proposal type", + Items: getProposalSuggestions(), + } + + _, selectedProposalType, err := proposalTypesPrompt.Run() + if err != nil { + return fmt.Errorf("failed to prompt proposal types: %w", err) + } + + var proposal proposalType + for _, p := range suggestedProposalTypes { + if strings.EqualFold(p.Name, selectedProposalType) { + proposal = p + break + } + } + + // create any proposal type + if proposal.Name == proposalOther { + // prompt proposal type + msgPrompt := promptui.Select{ + Label: "Select proposal message type:", + Items: func() []string { + msgs := clientCtx.InterfaceRegistry.ListImplementations(sdk.MsgInterfaceProtoName) + sort.Strings(msgs) + return msgs + }(), + } + + _, result, err := msgPrompt.Run() + if err != nil { + return fmt.Errorf("failed to prompt proposal types: %w", err) + } + + proposal.MsgType = result + } + + if proposal.MsgType != "" { + proposal.Msg, err = sdk.GetMsgFromTypeURL(clientCtx.Codec, proposal.MsgType) + if err != nil { + // should never happen + panic(err) + } + } + + skipMetadataPrompt, _ := cmd.Flags().GetBool(flagSkipMetadata) + + result, metadata, err := proposal.Prompt(clientCtx.Codec, skipMetadataPrompt) + if err != nil { + return err + } + + if err := writeFile(draftProposalFileName, result); err != nil { + return err + } + + if !skipMetadataPrompt { + if err := writeFile(draftMetadataFileName, metadata); err != nil { + return err + } + } + + cmd.Println("The draft proposal has successfully been generated.\nProposals should contain off-chain metadata, please upload the metadata JSON to IPFS.\nThen, replace the generated metadata field with the IPFS CID.") + + return nil + }, + } + + cflags.AddTxFlagsToCmd(cmd) + 
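+ // Skipping metadata only bypasses the extended metadata prompt; a title and summary are still collected (see PromptMetadata).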
cmd.Flags().Bool(flagSkipMetadata, false, "skip metadata prompt") + + return cmd +} + +// getProposalSuggestions suggests a list of proposal types +func getProposalSuggestions() []string { + types := make([]string, len(suggestedProposalTypes)) + for i, p := range suggestedProposalTypes { + types[i] = p.Name + } + return types +} + +// Prompt the proposal type values and return the proposal and its metadata +func (p *proposalType) Prompt(cdc codec.Codec, skipMetadata bool) (*proposalMsg, types.ProposalMetadata, error) { + metadata, err := PromptMetadata(skipMetadata) + if err != nil { + return nil, metadata, fmt.Errorf("failed to set proposal metadata: %w", err) + } + + proposal := &proposalMsg{ + Metadata: "ipfs://CID", // the metadata must be saved on IPFS, set placeholder + Title: metadata.Title, + Summary: metadata.Summary, + } + + // set deposit + depositPrompt := promptui.Prompt{ + Label: "Enter proposal deposit", + Validate: client.ValidatePromptCoins, + } + proposal.Deposit, err = depositPrompt.Run() + if err != nil { + return nil, metadata, fmt.Errorf("failed to set proposal deposit: %w", err) + } + + if p.Msg == nil { + return proposal, metadata, nil + } + + // set messages field + result, err := Prompt(p.Msg, "msg") + if err != nil { + return nil, metadata, fmt.Errorf("failed to set proposal message: %w", err) + } + + message, err := cdc.MarshalInterfaceJSON(result) + if err != nil { + return nil, metadata, fmt.Errorf("failed to marshal proposal message: %w", err) + } + proposal.Messages = append(proposal.Messages, message) + + return proposal, metadata, nil +} + +// Prompt prompts the user for all values of the given type. +// data is the struct to be filled +// namePrefix is the name to be displayed as "Enter " +func Prompt[T any](data T, namePrefix string) (T, error) { + v := reflect.ValueOf(&data).Elem() + if v.Kind() == reflect.Interface { + v = reflect.ValueOf(data) + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + } + + for i := 0; i < v.NumField(); i++ { + // if the field is a struct skip or not slice of string or int then skip + switch v.Field(i).Kind() { + case reflect.Struct: + // TODO(@julienrbrt) in the future we can add a recursive call to Prompt + continue + case reflect.Slice: + if v.Field(i).Type().Elem().Kind() != reflect.String && v.Field(i).Type().Elem().Kind() != reflect.Int { + continue + } + } + + // create prompts + prompt := promptui.Prompt{ + Label: fmt.Sprintf("Enter %s %s", namePrefix, strings.ToLower(client.CamelCaseToString(v.Type().Field(i).Name))), + Validate: client.ValidatePromptNotEmpty, + } + + fieldName := strings.ToLower(v.Type().Field(i).Name) + + if strings.EqualFold(fieldName, "authority") { + // pre-fill with gov address + prompt.Default = authtypes.NewModuleAddress(types.ModuleName).String() + prompt.Validate = client.ValidatePromptAddress + } + + // TODO(@julienrbrt) use scalar annotation instead of dumb string name matching + if strings.Contains(fieldName, "addr") || + strings.Contains(fieldName, "sender") || + strings.Contains(fieldName, "voter") || + strings.Contains(fieldName, "depositor") || + strings.Contains(fieldName, "granter") || + strings.Contains(fieldName, "grantee") || + strings.Contains(fieldName, "recipient") { + prompt.Validate = client.ValidatePromptAddress + } + + result, err := prompt.Run() + if err != nil { + return data, fmt.Errorf("failed to prompt for %s: %w", fieldName, err) + } + + switch v.Field(i).Kind() { + case reflect.String: + v.Field(i).SetString(result) + case reflect.Int: + resultInt, err := 
strconv.ParseInt(result, 10, 0) + if err != nil { + return data, fmt.Errorf("invalid value for int: %w", err) + } + // If a value was successfully parsed the ranges of: + // [minInt, maxInt] + // are within the ranges of: + // [minInt64, maxInt64] + // of which on 64-bit machines, which are most common, + // int==int64 + v.Field(i).SetInt(resultInt) + case reflect.Slice: + switch v.Field(i).Type().Elem().Kind() { + case reflect.String: + v.Field(i).Set(reflect.ValueOf([]string{result})) + case reflect.Int: + resultInt, err := strconv.ParseInt(result, 10, 0) + if err != nil { + return data, fmt.Errorf("invalid value for int: %w", err) + } + + v.Field(i).Set(reflect.ValueOf([]int{int(resultInt)})) + } + default: + // skip any other types + continue + } + } + + return data, nil +} + +// PromptMetadata prompts for proposal metadata or only title and summary if skip is true +func PromptMetadata(skip bool) (types.ProposalMetadata, error) { + var ( + metadata types.ProposalMetadata + err error + ) + + if !skip { + metadata, err = Prompt(types.ProposalMetadata{}, "proposal") + if err != nil { + return metadata, fmt.Errorf("failed to set proposal metadata: %w", err) + } + } else { + // prompt for title and summary + titlePrompt := promptui.Prompt{ + Label: "Enter proposal title", + Validate: client.ValidatePromptNotEmpty, + } + + metadata.Title, err = titlePrompt.Run() + if err != nil { + return metadata, fmt.Errorf("failed to set proposal title: %w", err) + } + + summaryPrompt := promptui.Prompt{ + Label: "Enter proposal summary", + Validate: client.ValidatePromptNotEmpty, + } + + metadata.Summary, err = summaryPrompt.Run() + if err != nil { + return metadata, fmt.Errorf("failed to set proposal summary: %w", err) + } + } + + return metadata, nil +} + +// writeFile writes the input to the file +func writeFile(fileName string, input any) error { + raw, err := json.MarshalIndent(input, "", " ") + if err != nil { + return fmt.Errorf("failed to marshal proposal: %w", err) + } + + if err := os.WriteFile(fileName, raw, 0o600); err != nil { + return err + } + + return nil +} + +// parseSubmitProposal reads and parses the proposal. +func parseSubmitProposal(cdc codec.Codec, path string) (proposalMsg, []sdk.Msg, sdk.Coins, error) { + var proposal proposalMsg + + contents, err := os.ReadFile(path) + if err != nil { + return proposal, nil, nil, err + } + + err = json.Unmarshal(contents, &proposal) + if err != nil { + return proposal, nil, nil, err + } + + msgs := make([]sdk.Msg, len(proposal.Messages)) + for i, anyJSON := range proposal.Messages { + var msg sdk.Msg + err := cdc.UnmarshalInterfaceJSON(anyJSON, &msg) + if err != nil { + return proposal, nil, nil, err + } + + msgs[i] = msg + } + + deposit, err := sdk.ParseCoinsNormalized(proposal.Deposit) + if err != nil { + return proposal, nil, nil, err + } + + return proposal, msgs, deposit, nil +} + +// parseSubmitLegacyProposal reads and parses the legacy proposal. 
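+// A --proposal file takes precedence: supplying any of ProposalFlags alongside it is rejected so that flag values are never silently ignored.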
+func parseSubmitLegacyProposal(fs *pflag.FlagSet) (*legacyProposal, error) { + proposal := &legacyProposal{} + proposalFile, _ := fs.GetString(cflags.FlagProposal) // nolint:staticcheck + + if proposalFile == "" { + proposalType, _ := fs.GetString(cflags.FlagProposalType) // nolint:staticcheck + proposal.Title, _ = fs.GetString(cflags.FlagTitle) + proposal.Description, _ = fs.GetString(cflags.FlagDescription) // nolint:staticcheck + proposal.Type = govutils.NormalizeProposalType(proposalType) + proposal.Deposit, _ = fs.GetString(cflags.FlagDeposit) + if err := proposal.validate(); err != nil { + return nil, err + } + + return proposal, nil + } + + for _, flag := range ProposalFlags { + if v, _ := fs.GetString(flag); v != "" { + return nil, fmt.Errorf("--%s flag provided alongside --proposal, which is a noop", flag) + } + } + + contents, err := os.ReadFile(proposalFile) + if err != nil { + return nil, err + } + + err = json.Unmarshal(contents, proposal) + if err != nil { + return nil, err + } + + if err := proposal.validate(); err != nil { + return nil, err + } + + return proposal, nil +} + +// validate the legacyProposal +func (p legacyProposal) validate() error { + if p.Type == "" { + return fmt.Errorf("proposal type is required") + } + + if p.Title == "" { + return fmt.Errorf("proposal title is required") + } + + if p.Description == "" { + return fmt.Errorf("proposal description is required") + } + return nil +} diff --git a/go/cli/gov_tx_test.go b/go/cli/gov_tx_test.go new file mode 100644 index 00000000..1b347efa --- /dev/null +++ b/go/cli/gov_tx_test.go @@ -0,0 +1,410 @@ +package cli_test + +import ( + "context" + "encoding/base64" + "fmt" + + "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/cosmos/cosmos-sdk/x/gov/types" + "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + "github.com/cosmos/gogoproto/proto" + + "pkg.akt.dev/go/cli" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +func (s *GovCLITestSuite) TestNewCmdSubmitProposal() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + // Create a legacy proposal JSON, make sure it doesn't pass this new CLI + // command. + invalidProp := `{ + "title": "", + "description": "Where is the title!?", + "type": "Text", + "deposit": "-324foocoin" + }` + invalidPropFile := testutil.WriteToNewTempFile(s.T(), invalidProp) + defer func() { + _ = invalidPropFile.Close() + }() + + // Create a valid new proposal JSON. + propMetadata := []byte{42} + validProp := fmt.Sprintf(` + { + "messages": [ + { + "@type": "/cosmos.gov.v1.MsgExecLegacyContent", + "authority": "%s", + "content": { + "@type": "/cosmos.gov.v1beta1.TextProposal", + "title": "My awesome title", + "description": "My awesome description" + } + } + ], + "title": "My awesome title", + "summary": "My awesome description", + "metadata": "%s", + "deposit": "%s" + }`, authtypes.NewModuleAddress(types.ModuleName), base64.StdEncoding.EncodeToString(propMetadata), sdk.NewCoin("uakt", sdk.NewInt(5431))) + validPropFile := testutil.WriteToNewTempFile(s.T(), validProp) + + defer func() { + _ = validPropFile.Close() + }() + + testCases := []struct { + name string + args []string + expectErr bool + respType proto.Message + }{ + { + "invalid proposal", + cli.TestFlags(). + With(invalidPropFile.Name()). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + true, nil, + }, + { + "valid proposal", + cli.TestFlags(). + With(validPropFile.Name()). 
+ WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithSignMode("direct"). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + false, &sdk.TxResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxGovSubmitProposalCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + } + }) + } +} + +func (s *GovCLITestSuite) TestNewCmdSubmitLegacyProposal() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + invalidProp := `{ + "title": "", + "description": "Where is the title!?", + "type": "Text", + "deposit": "-324foocoin" + }` + invalidPropFile := testutil.WriteToNewTempFile(s.T(), invalidProp) + + defer func() { + _ = invalidPropFile.Close() + }() + + validProp := fmt.Sprintf(`{ + "title": "Text Proposal", + "description": "Hello, World!", + "type": "Text", + "deposit": "%s" + }`, sdk.NewCoin("uakt", sdk.NewInt(5431))) + validPropFile := testutil.WriteToNewTempFile(s.T(), validProp) + defer func() { + _ = validPropFile.Close() + }() + + testCases := []struct { + name string + args []string + expectErr bool + respType proto.Message + }{ + { + "invalid proposal (file)", + cli.TestFlags(). + WithProposal(invalidPropFile.Name()). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + true, nil, + }, + { + "invalid proposal", + cli.TestFlags(). + WithDescription("Where is the title!?"). + WithProposalType(v1beta1.ProposalTypeText). + WithDeposit(sdk.NewCoin("uakt", sdk.NewInt(5431))). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + true, nil, + }, + { + "valid transaction (file)", + cli.TestFlags(). + WithProposal(validPropFile.Name()). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithSignMode("direct"). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + false, &sdk.TxResponse{}, + }, + { + "valid transaction", + cli.TestFlags(). + WithTitle("Text Proposal"). + WithDescription("'Where is the title!?"). + WithProposalType(v1beta1.ProposalTypeText). + WithDeposit(sdk.NewCoin("uakt", sdk.NewInt(5431))). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithSignMode("direct"). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + false, &sdk.TxResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxGovSubmitLegacyProposalCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + } + }) + } +} + +func (s *GovCLITestSuite) TestNewCmdDeposit() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + testCases := []struct { + name string + args []string + expectErr bool + }{ + { + "without proposal id", + cli.TestFlags(). + With(sdk.NewCoin("uakt", sdk.NewInt(10)).String()). // 10uakt + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + true, + }, + { + "without deposit amount", + cli.TestFlags(). 
+ With("1"). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + true, + }, + { + "deposit on a proposal", + cli.TestFlags(). + With("1", sdk.NewCoin("uakt", sdk.NewInt(10)).String()). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithSignMode("direct"). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + false, + }, + } + + for _, tc := range testCases { + var resp sdk.TxResponse + + s.Run(tc.name, func() { + cmd := cli.GetTxGovDepositCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), &resp), out.String()) + } + }) + } +} + +func (s *GovCLITestSuite) TestNewCmdVote() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + testCases := []struct { + name string + args []string + expectErr bool + expectedCode uint32 + }{ + { + "invalid vote", + []string{}, + true, 0, + }, + { + "vote for invalid proposal", + cli.TestFlags(). + With("10", "yes"). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithMetadata("AQ=="). + WithSignMode("direct"). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + false, 3, + }, + { + "valid vote", + cli.TestFlags(). + With("1", "yes"). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithSignMode("direct"). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + false, 0, + }, + { + "valid vote with metadata", + cli.TestFlags(). + With("1", "yes"). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithSignMode("direct"). + WithMetadata("AQ=="). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + false, 0, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxGovVoteCmd() + var txResp sdk.TxResponse + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), &txResp), out.String()) + } + }) + } +} + +func (s *GovCLITestSuite) TestNewCmdWeightedVote() { + val := testutil.CreateKeyringAccounts(s.T(), s.kr, 1) + + testCases := []struct { + name string + args []string + expectErr bool + expectedCode uint32 + }{ + { + "invalid vote", + []string{}, + true, 0, + }, + { + "vote for invalid proposal", + cli.TestFlags(). + With("10", "yes"). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithMetadata("AQ=="). + WithSignMode("direct"). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + false, 3, + }, + { + "valid vote", + cli.TestFlags(). + With("1", "yes"). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithSignMode("direct"). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + false, 0, + }, + { + "valid vote with metadata", + cli.TestFlags(). + With("1", "yes"). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithSignMode("direct"). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + false, 0, + }, + { + "invalid valid split vote string", + cli.TestFlags(). + With("1", "yes/0.6,no/0.3,abstain/0.05,no_with_veto/0.05"). 
+ WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithSignMode("direct"). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + true, 0, + }, + { + "valid split vote", + cli.TestFlags(). + With("1", "yes=0.6,no=0.3,abstain=0.05,no_with_veto=0.05"). + WithFrom(val[0].Address.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithSignMode("direct"). + WithFees(sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10)))), + false, 0, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxGovWeightedVoteCmd() + var txResp sdk.TxResponse + + out, err := clitestutil.ExecTestCLICmd(context.Background(), s.cctx, cmd, tc.args...) + + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), &txResp), out.String()) + } + }) + } +} diff --git a/go/cli/gov_util.go b/go/cli/gov_util.go new file mode 100644 index 00000000..d24110b3 --- /dev/null +++ b/go/cli/gov_util.go @@ -0,0 +1,62 @@ +package cli + +import ( + "fmt" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + govv1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// AddGovPropFlagsToCmd adds flags for defining MsgSubmitProposal fields. +// +// See also ReadGovPropFlags. +func AddGovPropFlagsToCmd(cmd *cobra.Command) { + cmd.Flags().String(cflags.FlagDeposit, "", "The deposit to include with the governance proposal") + cmd.Flags().String(cflags.FlagMetadata, "", "The metadata to include with the governance proposal") + cmd.Flags().String(cflags.FlagTitle, "", "The title to put on the governance proposal") + cmd.Flags().String(cflags.FlagSummary, "", "The summary to include with the governance proposal") +} + +// ReadGovPropFlags parses a MsgSubmitProposal from the provided context and flags. +// Setting the messages is up to the caller. +// +// See also AddGovPropFlagsToCmd. 
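+//
+// A minimal wiring sketch (illustrative only; the command name and RunE body are
+// assumptions, while AddGovPropFlagsToCmd and ReadGovPropFlags are the helpers in this file):
+//
+//	cmd := &cobra.Command{
+//		Use: "submit-something",
+//		RunE: func(cmd *cobra.Command, _ []string) error {
+//			cctx, err := client.GetClientTxContext(cmd) // cosmos-sdk client helper
+//			if err != nil {
+//				return err
+//			}
+//			prop, err := ReadGovPropFlags(cctx, cmd.Flags())
+//			if err != nil {
+//				return err
+//			}
+//			// The caller fills prop.Messages and broadcasts the resulting MsgSubmitProposal.
+//			_ = prop
+//			return nil
+//		},
+//	}
+//	AddGovPropFlagsToCmd(cmd)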
+func ReadGovPropFlags(clientCtx client.Context, flagSet *pflag.FlagSet) (*govv1.MsgSubmitProposal, error) { + rv := &govv1.MsgSubmitProposal{} + + deposit, err := flagSet.GetString(cflags.FlagDeposit) + if err != nil { + return nil, fmt.Errorf("could not read deposit: %w", err) + } + if len(deposit) > 0 { + rv.InitialDeposit, err = sdk.ParseCoinsNormalized(deposit) + if err != nil { + return nil, fmt.Errorf("invalid deposit: %w", err) + } + } + + rv.Metadata, err = flagSet.GetString(cflags.FlagMetadata) + if err != nil { + return nil, fmt.Errorf("could not read metadata: %w", err) + } + + rv.Title, err = flagSet.GetString(cflags.FlagTitle) + if err != nil { + return nil, fmt.Errorf("could not read title: %w", err) + } + + rv.Summary, err = flagSet.GetString(cflags.FlagSummary) + if err != nil { + return nil, fmt.Errorf("could not read summary: %w", err) + } + + rv.Proposer = clientCtx.GetFromAddress().String() + + return rv, nil +} diff --git a/go/cli/gov_util_test.go b/go/cli/gov_util_test.go new file mode 100644 index 00000000..14b33711 --- /dev/null +++ b/go/cli/gov_util_test.go @@ -0,0 +1,697 @@ +package cli + +import ( + "bytes" + "encoding/base64" + "fmt" + "io" + "os" + "strings" + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/testutil" + "github.com/cosmos/cosmos-sdk/testutil/testdata" + sdk "github.com/cosmos/cosmos-sdk/types" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + cflags "pkg.akt.dev/go/cli/flags" +) + +func TestParseSubmitLegacyProposal(t *testing.T) { + okJSON := testutil.WriteToNewTempFile(t, ` +{ + "title": "Test Proposal", + "description": "My awesome proposal", + "type": "Text", + "deposit": "1000test" +} +`) + + badJSON := testutil.WriteToNewTempFile(t, "bad json") + fs := GetTxGovSubmitLegacyProposalCmd().Flags() + + // nonexistent json + err := fs.Set(cflags.FlagProposal, "fileDoesNotExist") // nolint:staticcheck + require.NoError(t, err) + _, err = parseSubmitLegacyProposal(fs) + require.Error(t, err) + + // invalid json + err = fs.Set(cflags.FlagProposal, badJSON.Name()) // nolint:staticcheck + require.NoError(t, err) + _, err = parseSubmitLegacyProposal(fs) + require.Error(t, err) + + // ok json + fs.Set(cflags.FlagProposal, okJSON.Name()) // nolint:staticcheck + proposal1, err := parseSubmitLegacyProposal(fs) + require.Nil(t, err, "unexpected error") + require.Equal(t, "Test Proposal", proposal1.Title) + require.Equal(t, "My awesome proposal", proposal1.Description) + require.Equal(t, "Text", proposal1.Type) + require.Equal(t, "1000test", proposal1.Deposit) + + // flags that can't be used with --proposal + for _, incompatibleFlag := range ProposalFlags { + fs.Set(incompatibleFlag, "some value") + _, err := parseSubmitLegacyProposal(fs) + require.Error(t, err) + fs.Set(incompatibleFlag, "") + } + + // no --proposal, only flags + err = fs.Set(cflags.FlagProposal, "") // nolint:staticcheck + require.NoError(t, err) + flagTestCases := map[string]struct { + pTitle string + pDescription string + pType string + expErr bool + errMsg string + }{ + "valid flags": { + pTitle: proposal1.Title, + pDescription: proposal1.Description, + pType: proposal1.Type, + 
}, + "empty type": { + pTitle: proposal1.Title, + pDescription: proposal1.Description, + expErr: true, + errMsg: "proposal type is required", + }, + "empty title": { + pDescription: proposal1.Description, + pType: proposal1.Type, + expErr: true, + errMsg: "proposal title is required", + }, + "empty description": { + pTitle: proposal1.Title, + pType: proposal1.Type, + expErr: true, + errMsg: "proposal description is required", + }, + } + for name, tc := range flagTestCases { + t.Run(name, func(t *testing.T) { + err = fs.Set(cflags.FlagTitle, tc.pTitle) + require.NoError(t, err) + err = fs.Set(cflags.FlagDescription, tc.pDescription) // nolint:staticcheck + require.NoError(t, err) + err = fs.Set(cflags.FlagProposalType, tc.pType) // nolint:staticcheck + require.NoError(t, err) + err = fs.Set(cflags.FlagDeposit, proposal1.Deposit) + require.NoError(t, err) + proposal2, err := parseSubmitLegacyProposal(fs) + + if tc.expErr { + require.Error(t, err) + require.Contains(t, err.Error(), tc.errMsg) + } else { + require.NoError(t, err) + require.Equal(t, proposal1.Title, proposal2.Title) + require.Equal(t, proposal1.Description, proposal2.Description) + require.Equal(t, proposal1.Type, proposal2.Type) + require.Equal(t, proposal1.Deposit, proposal2.Deposit) + } + }) + } + + err = okJSON.Close() + require.Nil(t, err, "unexpected error") + err = badJSON.Close() + require.Nil(t, err, "unexpected error") +} + +func TestParseSubmitProposal(t *testing.T) { + _, _, addr := testdata.KeyTestPubAddr() + interfaceRegistry := codectypes.NewInterfaceRegistry() + cdc := codec.NewProtoCodec(interfaceRegistry) + banktypes.RegisterInterfaces(interfaceRegistry) + stakingtypes.RegisterInterfaces(interfaceRegistry) + v1beta1.RegisterInterfaces(interfaceRegistry) + v1.RegisterInterfaces(interfaceRegistry) + expectedMetadata := []byte{42} + + okJSON := testutil.WriteToNewTempFile(t, fmt.Sprintf(` +{ + "messages": [ + { + "@type": "/cosmos.bank.v1beta1.MsgSend", + "from_address": "%s", + "to_address": "%s", + "amount":[{"denom": "uakt","amount": "10"}] + }, + { + "@type": "/cosmos.staking.v1beta1.MsgDelegate", + "delegator_address": "%s", + "validator_address": "%s", + "amount":{"denom": "uakt","amount": "10"} + }, + { + "@type": "/cosmos.gov.v1.MsgExecLegacyContent", + "authority": "%s", + "content": { + "@type": "/cosmos.gov.v1beta1.TextProposal", + "title": "My awesome title", + "description": "My awesome description" + } + } + ], + "metadata": "%s", + "title": "My awesome title", + "summary": "My awesome summary", + "deposit": "1000test", + "expedited": true +} +`, addr, addr, addr, addr, addr, base64.StdEncoding.EncodeToString(expectedMetadata))) + + badJSON := testutil.WriteToNewTempFile(t, "bad json") + + // nonexistent json + _, _, _, err := parseSubmitProposal(cdc, "fileDoesNotExist") // nolint: dogsled + require.Error(t, err) + + // invalid json + _, _, _, err = parseSubmitProposal(cdc, badJSON.Name()) // nolint: dogsled + require.Error(t, err) + + // ok json + proposal, msgs, deposit, err := parseSubmitProposal(cdc, okJSON.Name()) + require.NoError(t, err, "unexpected error") + require.Equal(t, sdk.NewCoins(sdk.NewCoin("test", sdk.NewInt(1000))), deposit) + require.Equal(t, base64.StdEncoding.EncodeToString(expectedMetadata), proposal.Metadata) + require.Len(t, msgs, 3) + msg1, ok := msgs[0].(*banktypes.MsgSend) + require.True(t, ok) + require.Equal(t, addr.String(), msg1.FromAddress) + require.Equal(t, addr.String(), msg1.ToAddress) + require.Equal(t, sdk.NewCoins(sdk.NewCoin("uakt", sdk.NewInt(10))), 
msg1.Amount) + msg2, ok := msgs[1].(*stakingtypes.MsgDelegate) + require.True(t, ok) + require.Equal(t, addr.String(), msg2.DelegatorAddress) + require.Equal(t, addr.String(), msg2.ValidatorAddress) + require.Equal(t, sdk.NewCoin("uakt", sdk.NewInt(10)), msg2.Amount) + msg3, ok := msgs[2].(*v1.MsgExecLegacyContent) + require.True(t, ok) + require.Equal(t, addr.String(), msg3.Authority) + textProp, ok := msg3.Content.GetCachedValue().(*v1beta1.TextProposal) + require.True(t, ok) + require.Equal(t, "My awesome title", textProp.Title) + require.Equal(t, "My awesome description", textProp.Description) + require.Equal(t, "My awesome title", proposal.Title) + require.Equal(t, "My awesome summary", proposal.Summary) + require.Equal(t, true, proposal.Expedited) + + err = okJSON.Close() + require.Nil(t, err, "unexpected error") + err = badJSON.Close() + require.Nil(t, err, "unexpected error") +} + +func getCommandHelp(t *testing.T, cmd *cobra.Command) string { + // Create a pipe, so we can capture the help sent to stdout. + reader, writer, err := os.Pipe() + require.NoError(t, err, "creating os.Pipe()") + outChan := make(chan string) + defer func(origCmdOut io.Writer) { + cmd.SetOut(origCmdOut) + // Ignoring these errors since we're just ensuring cleanup here, + // and they will return an error if already called (which we don't care about). + _ = reader.Close() + _ = writer.Close() + close(outChan) + }(cmd.OutOrStdout()) + cmd.SetOut(writer) + + // Do the reading in a separate goroutine from the writing (a best practice). + go func() { + var b bytes.Buffer + _, buffErr := io.Copy(&b, reader) + if buffErr != nil { + // Due to complexities of goroutines and multiple channels, I'm sticking with a + // single channel and just putting the error in there (which I'll test for later). + b.WriteString("buffer error: " + buffErr.Error()) + } + outChan <- b.String() + }() + + err = cmd.Help() + require.NoError(t, err, "cmd.Help()") + require.NoError(t, writer.Close(), "pipe writer .Close()") + rv := <-outChan + require.NotContains(t, rv, "buffer error: ", "buffer output") + return rv +} + +func TestAddGovPropFlagsToCmd(t *testing.T) { + cmd := &cobra.Command{ + Short: "Just a test command that does nothing but we can add flags to it.", + Run: func(_ *cobra.Command, args []string) { + t.Errorf("The cmd has run with the args %q, but Run shouldn't have been called.", args) + }, + } + testFunc := func() { + AddGovPropFlagsToCmd(cmd) + } + require.NotPanics(t, testFunc, "AddGovPropFlagsToCmd") + + help := getCommandHelp(t, cmd) + + expDepositDesc := "The deposit to include with the governance proposal" + expMetadataDesc := "The metadata to include with the governance proposal" + expTitleDesc := "The title to put on the governance proposal" + expSummaryDesc := "The summary to include with the governance proposal" + // Regexp notes: (?m:...) = multi-line mode so ^ and $ match the beginning and end of each line. + // Each regexp assertion checks for a line containing only a specific flag and its description. 
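+	// For instance, the --deposit flag is expected to appear in the help output roughly as:
+	//   --deposit string   The deposit to include with the governance proposal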
+ assert.Regexp(t, `(?m:^\s+--`+cflags.FlagDeposit+` string\s+`+expDepositDesc+`$)`, help, "help output") + assert.Regexp(t, `(?m:^\s+--`+cflags.FlagMetadata+` string\s+`+expMetadataDesc+`$)`, help, "help output") + assert.Regexp(t, `(?m:^\s+--`+cflags.FlagTitle+` string\s+`+expTitleDesc+`$)`, help, "help output") + assert.Regexp(t, `(?m:^\s+--`+cflags.FlagSummary+` string\s+`+expSummaryDesc+`$)`, help, "help output") +} + +func TestReadGovPropFlags(t *testing.T) { + fromAddr := sdk.AccAddress("from_addr___________") + argDeposit := "--" + cflags.FlagDeposit + argMetadata := "--" + cflags.FlagMetadata + argTitle := "--" + cflags.FlagTitle + argSummary := "--" + cflags.FlagSummary + + // cz is a shorter way to define coins objects for these tests. + cz := func(coins string) sdk.Coins { + rv, err := sdk.ParseCoinsNormalized(coins) + require.NoError(t, err, "ParseCoinsNormalized(%q)", coins) + return rv + } + + tests := []struct { + name string + fromAddr sdk.AccAddress + args []string + exp *v1.MsgSubmitProposal + expErr []string + }{ + { + name: "no args no from", + fromAddr: nil, + args: []string{}, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: nil, + Proposer: "", + Metadata: "", + Title: "", + Summary: "", + }, + }, + { + name: "only from defined", + fromAddr: fromAddr, + args: []string{}, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: nil, + Proposer: fromAddr.String(), + Metadata: "", + Title: "", + Summary: "", + }, + }, + + // only deposit tests. + { + name: "only deposit empty string", + fromAddr: nil, + args: []string{argDeposit, ""}, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: nil, + Proposer: "", + Metadata: "", + Title: "", + Summary: "", + }, + }, + { + name: "only deposit one coin", + fromAddr: nil, + args: []string{argDeposit, "1bigcoin"}, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: cz("1bigcoin"), + Proposer: "", + Metadata: "", + Title: "", + Summary: "", + }, + }, + { + name: "only deposit invalid coins", + fromAddr: nil, + args: []string{argDeposit, "not really coins"}, + expErr: []string{"invalid deposit", "invalid decimal coin expression", "not really coins"}, + }, + { + name: "only deposit two coins", + fromAddr: nil, + args: []string{argDeposit, "1acoin,2bcoin"}, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: cz("1acoin,2bcoin"), + Proposer: "", + Metadata: "", + Title: "", + Summary: "", + }, + }, + { + name: "only deposit two coins other order", + fromAddr: nil, + args: []string{argDeposit, "2bcoin,1acoin"}, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: cz("1acoin,2bcoin"), + Proposer: "", + Metadata: "", + Title: "", + Summary: "", + }, + }, + { + name: "only deposit coin 1 of 3 bad", + fromAddr: nil, + args: []string{argDeposit, "1bad^coin,2bcoin,3ccoin"}, + expErr: []string{"invalid deposit", "invalid decimal coin expression", "1bad^coin"}, + }, + { + name: "only deposit coin 2 of 3 bad", + fromAddr: nil, + args: []string{argDeposit, "1acoin,2bad^coin,3ccoin"}, + expErr: []string{"invalid deposit", "invalid decimal coin expression", "2bad^coin"}, + }, + { + name: "only deposit coin 3 of 3 bad", + fromAddr: nil, + args: []string{argDeposit, "1acoin,2bcoin,3bad^coin"}, + expErr: []string{"invalid deposit", "invalid decimal coin expression", "3bad^coin"}, + }, + // As far as I can tell, there's no way to make flagSet.GetString return an error for a defined string flag. + // So I don't have a test for the "could not read deposit" error case. + + // only metadata tests. 
+ { + name: "only metadata empty", + fromAddr: nil, + args: []string{argMetadata, ""}, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: nil, + Proposer: "", + Metadata: "", + Title: "", + Summary: "", + }, + }, + { + name: "only metadata simple", + fromAddr: nil, + args: []string{argMetadata, "just some metadata"}, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: nil, + Proposer: "", + Metadata: "just some metadata", + Title: "", + Summary: "", + }, + }, + { + name: "only metadata super long", + fromAddr: nil, + args: []string{argMetadata, strings.Repeat("Long", 1_000_000)}, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: nil, + Proposer: "", + Metadata: strings.Repeat("Long", 1_000_000), + Title: "", + Summary: "", + }, + }, + // As far as I can tell, there's no way to make flagSet.GetString return an error for a defined string flag. + // So I don't have a test for the "could not read metadata" error case. + + // only title tests. + { + name: "only title empty", + fromAddr: nil, + args: []string{argTitle, ""}, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: nil, + Proposer: "", + Metadata: "", + Title: "", + Summary: "", + }, + }, + { + name: "only title simple", + fromAddr: nil, + args: []string{argTitle, "just a title"}, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: nil, + Proposer: "", + Metadata: "", + Title: "just a title", + Summary: "", + }, + }, + { + name: "only title super long", + fromAddr: nil, + args: []string{argTitle, strings.Repeat("Long", 1_000_000)}, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: nil, + Proposer: "", + Metadata: "", + Title: strings.Repeat("Long", 1_000_000), + Summary: "", + }, + }, + // As far as I can tell, there's no way to make flagSet.GetString return an error for a defined string flag. + // So I don't have a test for the "could not read title" error case. + + // only summary tests. + { + name: "only summary empty", + fromAddr: nil, + args: []string{argSummary, ""}, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: nil, + Proposer: "", + Metadata: "", + Title: "", + Summary: "", + }, + }, + { + name: "only summary simple", + fromAddr: nil, + args: []string{argSummary, "just a short summary"}, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: nil, + Proposer: "", + Metadata: "", + Title: "", + Summary: "just a short summary", + }, + }, + { + name: "only summary super long", + fromAddr: nil, + args: []string{argSummary, strings.Repeat("Long", 1_000_000)}, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: nil, + Proposer: "", + Metadata: "", + Title: "", + Summary: strings.Repeat("Long", 1_000_000), + }, + }, + // As far as I can tell, there's no way to make flagSet.GetString return an error for a defined string flag. + // So I don't have a test for the "could not read summary" error case. + + // Combo tests. 
+ { + name: "all together order 1", + fromAddr: fromAddr, + args: []string{ + argDeposit, "56depcoin", + argMetadata, "my proposal is cool", + argTitle, "Simple Gov Prop Title", + argSummary, "This is just a test summary on a simple gov prop.", + }, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: cz("56depcoin"), + Proposer: fromAddr.String(), + Metadata: "my proposal is cool", + Title: "Simple Gov Prop Title", + Summary: "This is just a test summary on a simple gov prop.", + }, + }, + { + name: "all together order 2", + fromAddr: fromAddr, + args: []string{ + argTitle, "This title is a *bit* more complex.", + argSummary, "This\nis\na\ncrazy\nsummary", + argDeposit, "78coolcoin", + argMetadata, "this proposal is cooler", + }, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: cz("78coolcoin"), + Proposer: fromAddr.String(), + Metadata: "this proposal is cooler", + Title: "This title is a *bit* more complex.", + Summary: "This\nis\na\ncrazy\nsummary", + }, + }, + { + name: "all except proposer", + fromAddr: nil, + args: []string{ + argMetadata, "https://example.com/lucky", + argDeposit, "33luckycoin", + argSummary, "This proposal will bring you luck and good fortune in the new year.", + argTitle, "Increase Luck", + }, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: cz("33luckycoin"), + Proposer: "", + Metadata: "https://example.com/lucky", + Title: "Increase Luck", + Summary: "This proposal will bring you luck and good fortune in the new year.", + }, + }, + { + name: "all except proposer but all empty", + fromAddr: nil, + args: []string{ + argMetadata, "", + argDeposit, "", + argSummary, "", + argTitle, "", + }, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: nil, + Proposer: "", + Metadata: "", + Title: "", + Summary: "", + }, + }, + { + name: "all except deposit", + fromAddr: fromAddr, + args: []string{ + argTitle, "This is a Title", + argSummary, "This is a useless summary", + argMetadata, "worthless metadata", + }, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: nil, + Proposer: fromAddr.String(), + Metadata: "worthless metadata", + Title: "This is a Title", + Summary: "This is a useless summary", + }, + expErr: nil, + }, + { + name: "all except metadata", + fromAddr: fromAddr, + args: []string{ + argTitle, "Bland Title", + argSummary, "Boring summary", + argDeposit, "99mdcoin", + }, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: cz("99mdcoin"), + Proposer: fromAddr.String(), + Metadata: "", + Title: "Bland Title", + Summary: "Boring summary", + }, + }, + { + name: "all except title", + fromAddr: fromAddr, + args: []string{ + argMetadata, "this metadata does not have the title either", + argDeposit, "71whatcoin", + argSummary, "This is a summary on a titleless proposal.", + }, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: cz("71whatcoin"), + Proposer: fromAddr.String(), + Metadata: "this metadata does not have the title either", + Title: "", + Summary: "This is a summary on a titleless proposal.", + }, + expErr: nil, + }, + { + name: "all except summary", + fromAddr: fromAddr, + args: []string{ + argMetadata, "28", + argTitle, "Now This is What I Call A Governance Proposal 28", + argDeposit, "42musiccoin", + }, + exp: &v1.MsgSubmitProposal{ + InitialDeposit: cz("42musiccoin"), + Proposer: fromAddr.String(), + Metadata: "28", + Title: "Now This is What I Call A Governance Proposal 28", + Summary: "", + }, + expErr: nil, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + cmd := &cobra.Command{ + Short: tc.name, + Run: func(_ *cobra.Command, args []string) { + 
t.Errorf("The cmd for %q has run with the args %q, but Run shouldn't have been called.", tc.name, args) + }, + } + AddGovPropFlagsToCmd(cmd) + err := cmd.ParseFlags(tc.args) + require.NoError(t, err, "parsing test case args using cmd: %q", tc.args) + flagSet := cmd.Flags() + + clientCtx := client.Context{ + FromAddress: tc.fromAddr, + } + + var msg *v1.MsgSubmitProposal + testFunc := func() { + msg, err = ReadGovPropFlags(clientCtx, flagSet) + } + require.NotPanics(t, testFunc, "ReadGovPropFlags") + if len(tc.expErr) > 0 { + require.Error(t, err, "ReadGovPropFlags error") + for _, exp := range tc.expErr { + assert.ErrorContains(t, err, exp, "ReadGovPropFlags error") + } + } else { + require.NoError(t, err, "ReadGovPropFlags error") + } + assert.Equal(t, tc.exp, msg, "ReadGovPropFlags msg") + }) + } +} diff --git a/go/cli/init.go b/go/cli/init.go new file mode 100644 index 00000000..62ae6e06 --- /dev/null +++ b/go/cli/init.go @@ -0,0 +1,9 @@ +package cli + +import ( + "github.com/spf13/cobra" +) + +func init() { + cobra.EnableTraverseRunHooks = true +} diff --git a/go/cli/market_query.go b/go/cli/market_query.go new file mode 100644 index 00000000..8cae3e31 --- /dev/null +++ b/go/cli/market_query.go @@ -0,0 +1,291 @@ +package cli + +import ( + "github.com/spf13/cobra" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + + cflags "pkg.akt.dev/go/cli/flags" + v1 "pkg.akt.dev/go/node/market/v1" + "pkg.akt.dev/go/node/market/v1beta5" +) + +// GetQueryMarketCmds returns the transaction commands for the market module +func GetQueryMarketCmds() *cobra.Command { + cmd := &cobra.Command{ + Use: v1beta5.ModuleName, + Short: "Market query commands", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + + cmd.AddCommand( + GetQueryMarketOrderCmds(), + GetQueryMarketBidCmds(), + GetQueryMarketLeaseCmds(), + ) + + return cmd +} + +func GetQueryMarketOrderCmds() *cobra.Command { + cmd := &cobra.Command{ + Use: "order", + Short: "Order query commands", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + + cmd.AddCommand( + GetQueryMarketOrdersCmd(), + GetQueryMarketOrderCmd(), + ) + + return cmd +} + +func GetQueryMarketBidCmds() *cobra.Command { + cmd := &cobra.Command{ + Use: "bid", + Short: "Bid query commands", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + + cmd.AddCommand( + GetQueryMarketBidsCmd(), + GetQueryMarketBidCmd(), + ) + + return cmd +} + +func GetQueryMarketLeaseCmds() *cobra.Command { + cmd := &cobra.Command{ + Use: "lease", + Short: "Lease query commands", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + + cmd.AddCommand( + GetQueryMarketLeasesCmd(), + GetQueryMarketLeaseCmd(), + ) + + return cmd +} + +func GetQueryMarketOrdersCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "Query for all orders", + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + ofilters, err := cflags.OrderFiltersFromFlags(cmd.Flags()) + if err != nil { + return err + } + + pageReq, err := sdkclient.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + params := &v1beta5.QueryOrdersRequest{ + Filters: ofilters, + Pagination: pageReq, + } + + res, err := cl.Query().Market().Orders(ctx, params) + if err != nil { + return err + } + + return cl.ClientContext().PrintProto(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "orders") + 
cflags.AddOrderFilterFlags(cmd.Flags()) + + return cmd +} + +func GetQueryMarketOrderCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "get", + Short: "Query order", + Args: cobra.ExactArgs(0), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + id, err := cflags.OrderIDFromFlags(cmd.Flags()) + if err != nil { + return err + } + + res, err := cl.Query().Market().Order(ctx, &v1beta5.QueryOrderRequest{ID: id}) + + if err != nil { + return err + } + + return cl.ClientContext().PrintProto(&res.Order) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddOrderIDFlags(cmd.Flags()) + cflags.MarkReqOrderIDFlags(cmd) + + return cmd +} + +func GetQueryMarketBidsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "Query for all bids", + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + bfilters, err := cflags.BidFiltersFromFlags(cmd.Flags()) + if err != nil { + return err + } + + pageReq, err := sdkclient.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + params := &v1beta5.QueryBidsRequest{ + Filters: bfilters, + Pagination: pageReq, + } + + res, err := cl.Query().Market().Bids(ctx, params) + if err != nil { + return err + } + + return cl.ClientContext().PrintProto(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "bids") + cflags.AddBidFilterFlags(cmd.Flags()) + + return cmd +} + +func GetQueryMarketBidCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "get", + Short: "Query order", + Args: cobra.ExactArgs(0), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + bidID, err := cflags.BidIDFromFlags(cmd.Flags()) + if err != nil { + return err + } + + res, err := cl.Query().Market().Bid(ctx, &v1beta5.QueryBidRequest{ID: bidID}) + if err != nil { + return err + } + + return cl.ClientContext().PrintProto(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddQueryBidIDFlags(cmd.Flags()) + cflags.MarkReqBidIDFlags(cmd) + + return cmd +} + +func GetQueryMarketLeasesCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + PersistentPreRunE: QueryPersistentPreRunE, + Short: "Query for all leases", + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + lfilters, err := cflags.LeaseFiltersFromFlags(cmd.Flags()) + if err != nil { + return err + } + + pageReq, err := sdkclient.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + params := &v1beta5.QueryLeasesRequest{ + Filters: lfilters, + Pagination: pageReq, + } + + res, err := cl.Query().Market().Leases(ctx, params) + if err != nil { + return err + } + + return cl.ClientContext().PrintProto(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "leases") + cflags.AddLeaseFilterFlags(cmd.Flags()) + + return cmd +} + +func GetQueryMarketLeaseCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "get", + Short: "Query order", + Args: cobra.ExactArgs(0), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + bidID, err := cflags.BidIDFromFlags(cmd.Flags()) + if err != nil { + return err + } + + res, err := 
cl.Query().Market().Lease(cmd.Context(), &v1beta5.QueryLeaseRequest{ID: v1.MakeLeaseID(bidID)}) + if err != nil { + return err + } + + return cl.ClientContext().PrintProto(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddQueryBidIDFlags(cmd.Flags()) + cflags.MarkReqBidIDFlags(cmd) + + return cmd +} diff --git a/go/cli/market_tx.go b/go/cli/market_tx.go new file mode 100644 index 00000000..c1ec31da --- /dev/null +++ b/go/cli/market_tx.go @@ -0,0 +1,278 @@ +package cli + +import ( + "github.com/spf13/cobra" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + + cflags "pkg.akt.dev/go/cli/flags" + mv1beta "pkg.akt.dev/go/node/market/v1beta5" + types "pkg.akt.dev/go/node/market/v1beta5" +) + +// GetTxMarketCmds returns the transaction commands for market module +func GetTxMarketCmds() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Transaction subcommands", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + cmd.AddCommand( + GetTxMarketBidCmds(), + GetTxMarketLeaseCmds(), + ) + return cmd +} + +func GetTxMarketBidCmds() *cobra.Command { + cmd := &cobra.Command{ + Use: "bid", + Short: "Bid subcommands", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + cmd.AddCommand( + GetTxMarketBidCreateCmd(), + GetTxMarketBidCloseCmd(), + ) + return cmd +} + +func GetTxMarketBidCreateCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "create", + Short: "Create a market bid", + Args: cobra.ExactArgs(0), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + price, err := cmd.Flags().GetString("price") + if err != nil { + return err + } + + coin, err := sdk.ParseDecCoin(price) + if err != nil { + return err + } + + id, err := cflags.OrderIDFromFlags(cmd.Flags(), cflags.WithProvider(cctx.FromAddress)) + if err != nil { + return err + } + + deposit, err := DetectBidDeposit(ctx, cmd.Flags(), cl.Query()) + if err != nil { + return err + } + + msg := &mv1beta.MsgCreateBid{ + OrderID: id, + Provider: cctx.GetFromAddress().String(), + Price: coin, + Deposit: deposit, + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + cflags.AddOrderIDFlags(cmd.Flags()) + + cmd.Flags().String("price", "", "Bid Price") + cflags.AddDepositFlags(cmd.Flags()) + + return cmd +} + +func GetTxMarketBidCloseCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "close", + Short: "Close a market bid", + Args: cobra.ExactArgs(0), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + id, err := cflags.BidIDFromFlags(cmd.Flags(), cflags.WithProvider(cctx.FromAddress)) + if err != nil { + return err + } + + msg := &mv1beta.MsgCloseBid{ + ID: id, + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + cflags.AddBidIDFlags(cmd.Flags()) + + return cmd +} + +func GetTxMarketLeaseCmds() *cobra.Command { + cmd := &cobra.Command{ + Use: "lease", + Short: "Lease subcommands", + SuggestionsMinimumDistance: 
2, + RunE: sdkclient.ValidateCmd, + } + + cmd.AddCommand( + GetTxMarketLeaseCreateCmd(), + GetTxMarketLeaseWithdrawCmd(), + GetTxMarketLeaseCloseCmd(), + ) + + return cmd +} + +func GetTxMarketLeaseCreateCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "create", + Short: "Create a market lease", + Args: cobra.ExactArgs(0), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + id, err := cflags.LeaseIDFromFlags(cmd.Flags(), cflags.WithOwner(cctx.FromAddress)) + if err != nil { + return err + } + + msg := &mv1beta.MsgCreateLease{ + BidID: id.BidID(), + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + cflags.AddLeaseIDFlags(cmd.Flags()) + cflags.MarkReqLeaseIDFlags(cmd, cflags.DeploymentIDOptionNoOwner(true)) + + return cmd +} + +func GetTxMarketLeaseWithdrawCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "withdraw", + Short: "Settle and withdraw available funds from market order escrow account", + Args: cobra.ExactArgs(0), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + + cctx := cl.ClientContext() + + id, err := cflags.LeaseIDFromFlags(cmd.Flags(), cflags.WithOwner(cctx.FromAddress)) + if err != nil { + return err + } + + msg := &mv1beta.MsgWithdrawLease{ + ID: id, + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + cflags.AddLeaseIDFlags(cmd.Flags()) + cflags.MarkReqLeaseIDFlags(cmd, cflags.DeploymentIDOptionNoOwner(true)) + + return cmd +} + +func GetTxMarketLeaseCloseCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "close", + Short: "Close a market order", + Args: cobra.ExactArgs(0), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + id, err := cflags.LeaseIDFromFlags(cmd.Flags(), cflags.WithOwner(cctx.FromAddress)) + if err != nil { + return err + } + + msg := &mv1beta.MsgCloseLease{ + ID: id, + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + cflags.AddLeaseIDFlags(cmd.Flags()) + cflags.MarkReqLeaseIDFlags(cmd, cflags.DeploymentIDOptionNoOwner(true)) + + return cmd +} diff --git a/go/cli/mint_query.go b/go/cli/mint_query.go new file mode 100644 index 00000000..15cb2770 --- /dev/null +++ b/go/cli/mint_query.go @@ -0,0 +1,113 @@ +package cli + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/x/mint/types" +) + +// GetQueryMintCmd returns the cli query commands for the minting module. 
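+// It groups the params, inflation, and annual-provisions subcommands registered below,
+// which a client binary would typically expose as, e.g., "query mint inflation".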
+func GetQueryMintCmd() *cobra.Command { + mintingQueryCmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Querying commands for the minting module", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + mintingQueryCmd.AddCommand( + GetQueryMintParamsCmd(), + GetQueryMintInflationCmd(), + GetQueryMintAnnualProvisionsCmd(), + ) + + return mintingQueryCmd +} + +// GetQueryMintParamsCmd implements a command to return the current minting +// parameters. +func GetQueryMintParamsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "params", + Short: "Query the current minting parameters", + Args: cobra.NoArgs, + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + params := &types.QueryParamsRequest{} + res, err := cl.Query().Mint().Params(cmd.Context(), params) + if err != nil { + return err + } + + return cl.PrintMessage(&res.Params) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryMintInflationCmd implements a command to return the current minting +// inflation value. +func GetQueryMintInflationCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "inflation", + Short: "Query the current minting inflation value", + Args: cobra.NoArgs, + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + cctx := cl.ClientContext() + + params := &types.QueryInflationRequest{} + res, err := cl.Query().Mint().Inflation(cmd.Context(), params) + if err != nil { + return err + } + + return cctx.PrintString(fmt.Sprintf("%s\n", res.Inflation)) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryMintAnnualProvisionsCmd implements a command to return the current minting +// annual provisions value. +func GetQueryMintAnnualProvisionsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "annual-provisions", + Short: "Query the current minting annual provisions value", + Args: cobra.NoArgs, + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + cctx := cl.ClientContext() + + params := &types.QueryAnnualProvisionsRequest{} + res, err := cl.Query().Mint().AnnualProvisions(cmd.Context(), params) + if err != nil { + return err + } + + return cctx.PrintString(fmt.Sprintf("%s\n", res.AnnualProvisions)) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/go/cli/mint_query_test.go b/go/cli/mint_query_test.go new file mode 100644 index 00000000..5ab274f5 --- /dev/null +++ b/go/cli/mint_query_test.go @@ -0,0 +1,181 @@ +package cli_test + +import ( + "context" + "fmt" + "io" + "strings" + "testing" + + rpcclientmock "github.com/cometbft/cometbft/rpc/client/mock" + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + testutilmod "github.com/cosmos/cosmos-sdk/types/module/testutil" + "github.com/cosmos/cosmos-sdk/x/mint" + + "pkg.akt.dev/go/cli" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +func TestGetQueryMintParamsCmd(t *testing.T) { + encCfg := testutilmod.MakeTestEncodingConfig(mint.AppModuleBasic{}) + kr := keyring.NewInMemory(encCfg.Codec) + baseCtx := client.Context{}. + WithKeyring(kr). + WithTxConfig(encCfg.TxConfig). + WithCodec(encCfg.Codec). 
+ WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). + WithChainID("test-chain") + + testCases := []struct { + name string + flagArgs []string + expCmdOutput string + expectedOutput string + }{ + { + "json output", + cli.TestFlags(). + WithHeight(1). + WithOutputJSON(), + `[--height=1 --output=json]`, + `{"mint_denom":"","inflation_rate_change":"0","inflation_max":"0","inflation_min":"0","goal_bonded":"0","blocks_per_year":"0"}`, + }, + { + "text output", + cli.TestFlags(). + WithHeight(1). + WithOutputText(), + `[--height=1 --output=text]`, + `blocks_per_year: "0" +goal_bonded: "0" +inflation_max: "0" +inflation_min: "0" +inflation_rate_change: "0" +mint_denom: ""`, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cmd := cli.GetQueryMintParamsCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), baseCtx, cmd, tc.flagArgs...) + require.NoError(t, err) + require.Equal(t, tc.expectedOutput, strings.TrimSpace(out.String())) + + if len(tc.flagArgs) != 0 { + require.Contains(t, fmt.Sprint(cmd), "params [] [] Query the current minting parameters") + require.Contains(t, fmt.Sprint(cmd), tc.expCmdOutput) + } + }) + } +} + +func TestGetQueryMintInflationCmd(t *testing.T) { + encCfg := testutilmod.MakeTestEncodingConfig(mint.AppModuleBasic{}) + kr := keyring.NewInMemory(encCfg.Codec) + baseCtx := client.Context{}. + WithKeyring(kr). + WithTxConfig(encCfg.TxConfig). + WithCodec(encCfg.Codec). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). + WithChainID("test-chain") + + testCases := []struct { + name string + flagArgs []string + expCmdOutput string + expectedOutput string + }{ + { + "json output", + cli.TestFlags(). + WithHeight(1). + WithOutputJSON(), + `[--height=1 --output=json]`, + ``, + }, + { + "text output", + cli.TestFlags(). + WithHeight(1). + WithOutputText(), + `[--height=1 --output=text]`, + ``, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cmd := cli.GetQueryMintInflationCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), baseCtx, cmd, tc.flagArgs...) + require.NoError(t, err) + require.Equal(t, tc.expectedOutput, strings.TrimSpace(out.String())) + + if len(tc.flagArgs) != 0 { + require.Contains(t, fmt.Sprint(cmd), "inflation [] [] Query the current minting inflation value") + require.Contains(t, fmt.Sprint(cmd), tc.expCmdOutput) + } + }) + } +} + +func TestGetCmdQueryAnnualProvisions(t *testing.T) { + encCfg := testutilmod.MakeTestEncodingConfig(mint.AppModuleBasic{}) + kr := keyring.NewInMemory(encCfg.Codec) + baseCtx := client.Context{}. + WithKeyring(kr). + WithTxConfig(encCfg.TxConfig). + WithCodec(encCfg.Codec). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). + WithChainID("test-chain") + + testCases := []struct { + name string + flagArgs []string + expCmdOutput string + expectedOutput string + }{ + { + "json output", + cli.TestFlags(). + WithHeight(1). + WithOutputJSON(), + `[--height=1 --output=json]`, + ``, + }, + { + "text output", + cli.TestFlags(). + WithHeight(1). 
+ WithOutputText(), + `[--height=1 --output=text]`, + ``, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cmd := cli.GetQueryMintAnnualProvisionsCmd() + + out, err := clitestutil.ExecTestCLICmd(context.Background(), baseCtx, cmd, tc.flagArgs...) + require.NoError(t, err) + require.Equal(t, tc.expectedOutput, strings.TrimSpace(out.String())) + + if len(tc.flagArgs) != 0 { + require.Contains(t, fmt.Sprint(cmd), "annual-provisions [] [] Query the current minting annual provisions value") + require.Contains(t, fmt.Sprint(cmd), tc.expCmdOutput) + } + }) + } +} diff --git a/go/cli/params_query.go b/go/cli/params_query.go new file mode 100644 index 00000000..5ccfd8da --- /dev/null +++ b/go/cli/params_query.go @@ -0,0 +1,54 @@ +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/x/params/types" + "github.com/cosmos/cosmos-sdk/x/params/types/proposal" +) + +// GetQueryParamsCmd returns a root CLI command handler for all x/params query commands. +func GetQueryParamsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Querying commands for the params module", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetQueryParamsSubspaceCmd(), + ) + + return cmd +} + +// GetQueryParamsSubspaceCmd returns a CLI command handler for querying subspace +// parameters managed by the x/params module. +func GetQueryParamsSubspaceCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "subspace [subspace] [key]", + Short: "Query for raw parameters by subspace and key", + Args: cobra.ExactArgs(2), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + params := proposal.QueryParamsRequest{Subspace: args[0], Key: args[1]} + res, err := cl.Query().Params().Params(cmd.Context(), ¶ms) + if err != nil { + return err + } + + return cl.PrintMessage(&res.Param) + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/go/cli/params_tx.go b/go/cli/params_tx.go new file mode 100644 index 00000000..2970b4eb --- /dev/null +++ b/go/cli/params_tx.go @@ -0,0 +1,91 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + govv1beta1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + paramscutils "github.com/cosmos/cosmos-sdk/x/params/client/utils" + paramproposal "github.com/cosmos/cosmos-sdk/x/params/types/proposal" +) + +// GetTxParamsSubmitParamChangeProposalCmd returns a CLI command handler for creating +// a parameter change proposal governance transaction. +func GetTxParamsSubmitParamChangeProposalCmd() *cobra.Command { + return &cobra.Command{ + Use: "param-change [proposal-file]", + Args: cobra.ExactArgs(1), + Short: "Submit a parameter change proposal", + Long: strings.TrimSpace( + fmt.Sprintf(`Submit a parameter proposal along with an initial deposit. +The proposal details must be supplied via a JSON file. For values that contains +objects, only non-empty fields will be updated. + +IMPORTANT: Currently parameter changes are evaluated but not validated, so it is +very important that any "value" change is valid (ie. correct type and within bounds) +for its respective parameter, eg. "MaxValidators" should be an integer and not a decimal. 
+ +Proper vetting of a parameter change proposal should prevent this from happening +(no deposits should occur during the governance process), but it should be noted +regardless. + +Example: +$ %s tx gov submit-proposal param-change --from= + +Where proposal.json contains: + +{ + "title": "Staking Param Change", + "description": "Update max validators", + "changes": [ + { + "subspace": "staking", + "key": "MaxValidators", + "value": 105 + } + ], + "deposit": "1000uakt" +} +`, + version.AppName, + ), + ), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + proposal, err := paramscutils.ParseParamChangeProposalJSON(cctx.LegacyAmino, args[0]) + if err != nil { + return err + } + + from := cctx.GetFromAddress() + content := paramproposal.NewParameterChangeProposal( + proposal.Title, proposal.Description, proposal.Changes.ToParamChanges(), + ) + + deposit, err := sdk.ParseCoinsNormalized(proposal.Deposit) + if err != nil { + return err + } + + msg, err := govv1beta1.NewMsgSubmitProposal(content, deposit, from) + if err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } +} diff --git a/go/cli/params_tx_test.go b/go/cli/params_tx_test.go new file mode 100644 index 00000000..1e50b397 --- /dev/null +++ b/go/cli/params_tx_test.go @@ -0,0 +1,42 @@ +package cli + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/testutil" + "github.com/cosmos/cosmos-sdk/x/params/client/utils" +) + +func TestParamsParseProposal(t *testing.T) { + cdc := codec.NewLegacyAmino() + okJSON := testutil.WriteToNewTempFile(t, ` +{ + "title": "Staking Param Change", + "description": "Update max validators", + "changes": [ + { + "subspace": "staking", + "key": "MaxValidators", + "value": 1 + } + ], + "deposit": "1000stake" +} +`) + proposal, err := utils.ParseParamChangeProposalJSON(cdc, okJSON.Name()) + require.NoError(t, err) + + require.Equal(t, "Staking Param Change", proposal.Title) + require.Equal(t, "Update max validators", proposal.Description) + require.Equal(t, "1000stake", proposal.Deposit) + require.Equal(t, utils.ParamChangesJSON{ + { + Subspace: "staking", + Key: "MaxValidators", + Value: []byte{0x31}, + }, + }, proposal.Changes) +} diff --git a/go/cli/provider_query.go b/go/cli/provider_query.go new file mode 100644 index 00000000..08d98f90 --- /dev/null +++ b/go/cli/provider_query.go @@ -0,0 +1,90 @@ +package cli + +import ( + "github.com/spf13/cobra" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + + cflags "pkg.akt.dev/go/cli/flags" + types "pkg.akt.dev/go/node/provider/v1beta4" +) + +// GetQueryProviderCmds returns the transaction commands for the provider module +func GetQueryProviderCmds() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Provider query commands", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + + cmd.AddCommand( + GetQueryGetProvidersCmd(), + GetQueryProviderCmd(), + ) + + return cmd +} + +func GetQueryGetProvidersCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "list", + Short: "Query for all providers", + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := 
MustQueryClientFromContext(ctx) + + pageReq, err := sdkclient.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + params := &types.QueryProvidersRequest{ + Pagination: pageReq, + } + + res, err := cl.Query().Provider().Providers(ctx, params) + if err != nil { + return err + } + + return cl.ClientContext().PrintProto(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "providers") + + return cmd +} + +func GetQueryProviderCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "get [address]", + Short: "Query provider", + Args: cobra.ExactArgs(1), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + owner, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + res, err := cl.Query().Provider().Provider(cmd.Context(), &types.QueryProviderRequest{Owner: owner.String()}) + if err != nil { + return err + } + + return cl.ClientContext().PrintProto(&res.Provider) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/go/cli/provider_tx.go b/go/cli/provider_tx.go new file mode 100644 index 00000000..c2a03f4d --- /dev/null +++ b/go/cli/provider_tx.go @@ -0,0 +1,155 @@ +package cli + +import ( + "errors" + "fmt" + "os" + + "github.com/spf13/cobra" + "gopkg.in/yaml.v3" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + + cflags "pkg.akt.dev/go/cli/flags" + types "pkg.akt.dev/go/node/provider/v1beta4" + tattr "pkg.akt.dev/go/node/types/attributes/v1" +) + +var ( + ErrDuplicatedAttribute = errors.New("provider: duplicated attribute") +) + +// ProviderConfig is the struct that stores provider config +type ProviderConfig struct { + Host string `json:"host" yaml:"host"` + Info types.Info `json:"info" yaml:"info"` + Attributes tattr.Attributes `json:"attributes" yaml:"attributes"` +} + +// GetAttributes returns config attributes into key value pairs +func (c ProviderConfig) GetAttributes() tattr.Attributes { + return c.Attributes +} + +// ReadProviderConfigPath reads and parses file +func ReadProviderConfigPath(path string) (ProviderConfig, error) { + buf, err := os.ReadFile(path) + if err != nil { + return ProviderConfig{}, err + } + var val ProviderConfig + if err := yaml.Unmarshal(buf, &val); err != nil { + return ProviderConfig{}, err + } + + dups := make(map[string]string) + for _, attr := range val.Attributes { + if _, exists := dups[attr.Key]; exists { + return ProviderConfig{}, fmt.Errorf("%w: %s", ErrDuplicatedAttribute, attr.Key) + } + + dups[attr.Key] = attr.Value + } + + return val, err +} + +// GetTxProviderCmd returns the transaction commands for provider module +func GetTxProviderCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Provider transaction subcommands", + SuggestionsMinimumDistance: 2, + RunE: sdkclient.ValidateCmd, + } + cmd.AddCommand( + GetTxProviderCreateCmd(), + GetTxProviderUpdateCmd(), + ) + return cmd +} + +func GetTxProviderCreateCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "create [config-file]", + Short: "Create a provider", + Args: cobra.ExactArgs(1), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + // TODO: enable reading files with non-local URIs + cfg, err := ReadProviderConfigPath(args[0]) + if err != nil { + err = fmt.Errorf("%w: 
ReadConfigPath err: %q", err, args[0]) + return err + } + + msg := &types.MsgCreateProvider{ + Owner: cctx.GetFromAddress().String(), + HostURI: cfg.Host, + Info: cfg.Info, + Attributes: cfg.GetAttributes(), + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +func GetTxProviderUpdateCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "update [config-file]", + Short: "Update provider", + Args: cobra.ExactArgs(1), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + cfg, err := ReadProviderConfigPath(args[0]) + if err != nil { + return err + } + + msg := &types.MsgUpdateProvider{ + Owner: cctx.GetFromAddress().String(), + HostURI: cfg.Host, + Info: cfg.Info, + Attributes: cfg.GetAttributes(), + } + + if err := msg.ValidateBasic(); err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/go/cli/query.go b/go/cli/query.go new file mode 100644 index 00000000..eb5e6d60 --- /dev/null +++ b/go/cli/query.go @@ -0,0 +1,69 @@ +package cli + +import ( + "context" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client/rpc" + + cflags "pkg.akt.dev/go/cli/flags" +) + +func QueryPersistentPreRunE(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + + cctx, err := GetClientTxContext(cmd) + if err != nil { + return err + } + + cl, err := DiscoverQueryClient(ctx, cctx) + if err != nil { + return err + } + + ctx = context.WithValue(ctx, ContextTypeQueryClient, cl) + + cmd.SetContext(ctx) + + return nil +} + +func QueryCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "query", + Aliases: []string{"q"}, + Short: "Querying subcommands", + } + + cmd.AddCommand( + GetQueryAuthCmd(), + GetQueryAuthzCmd(), + GetQueryBankCmd(), + GetQueryDistributionCmd(), + GetQueryEvidenceCmd(), + GetQueryFeegrantCmd(), + GetQueryMintCmd(), + GetQueryParamsCmd(), + cflags.LineBreak, + rpc.ValidatorCommand(), + rpc.BlockCommand(), + GetQueryAuthTxsByEventsCmd(), + GetQueryAuthTxCmd(), + GetQueryGovCmd(), + GetQuerySlashingCmd(), + GetQueryStakingCmd(), + cflags.LineBreak, + GetQueryAuditCmd(), + GetQueryCertCmd(), + GetQueryDeploymentCmds(), + GetQueryMarketCmds(), + GetQueryEscrowCmd(), + GetQueryProviderCmds(), + ) + + cmd.PersistentFlags().String(cflags.FlagChainID, "", "The network chain ID") + + return cmd +} diff --git a/go/cli/slashing_query.go b/go/cli/slashing_query.go new file mode 100644 index 00000000..6b2efd77 --- /dev/null +++ b/go/cli/slashing_query.go @@ -0,0 +1,138 @@ +package cli + +import ( + "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/slashing/types" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetQuerySlashingCmd returns the cli query commands for this module +func GetQuerySlashingCmd() *cobra.Command { + // Group slashing queries under a subcommand + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Querying commands for the slashing module", + DisableFlagParsing: true, + 
SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetQuerySlashingSigningInfoCmd(), + GetQuerySlashingParamsCmd(), + GetQuerySlashingSigningInfosCmd(), + ) + + return cmd +} + +// GetQuerySlashingSigningInfoCmd implements the command to query signing info. +func GetQuerySlashingSigningInfoCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "signing-info [validator-conspub]", + Short: "Query a validator's signing information", + Long: strings.TrimSpace(`Use a validators' consensus public key to find the signing-info for that validator: + +$ query slashing signing-info '{"@type":"/cosmos.crypto.ed25519.PubKey","key":"OauFcTKbN5Lx3fJL689cikXBqe+hcp6Y+x0rYUdR9Jk="}' +`), + Args: cobra.ExactArgs(1), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + cctx := cl.ClientContext() + + var pk cryptotypes.PubKey + if err := cctx.Codec.UnmarshalInterfaceJSON([]byte(args[0]), &pk); err != nil { + return err + } + + consAddr := sdk.ConsAddress(pk.Address()) + params := &types.QuerySigningInfoRequest{ConsAddress: consAddr.String()} + res, err := cl.Query().Slashing().SigningInfo(cmd.Context(), params) + if err != nil { + return err + } + + return cl.PrintMessage(&res.ValSigningInfo) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQuerySlashingSigningInfosCmd implements the command to query signing infos. +func GetQuerySlashingSigningInfosCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "signing-infos", + Short: "Query signing information of all validators", + Long: strings.TrimSpace(`signing infos of validators: + +$ query slashing signing-infos +`), + Args: cobra.NoArgs, + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + params := &types.QuerySigningInfosRequest{Pagination: pageReq} + res, err := cl.Query().Slashing().SigningInfos(cmd.Context(), params) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "signing infos") + + return cmd +} + +// GetQuerySlashingParamsCmd implements a command to fetch slashing parameters. 
+func GetQuerySlashingParamsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "params", + Short: "Query the current slashing parameters", + Args: cobra.NoArgs, + Long: strings.TrimSpace(`Query genesis parameters for the slashing module: + +$ query slashing params +`), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + params := &types.QueryParamsRequest{} + res, err := cl.Query().Slashing().Params(cmd.Context(), params) + if err != nil { + return err + } + + return cl.PrintMessage(&res.Params) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/go/cli/slashing_query_test.go b/go/cli/slashing_query_test.go new file mode 100644 index 00000000..5dc24057 --- /dev/null +++ b/go/cli/slashing_query_test.go @@ -0,0 +1,136 @@ +package cli_test + +import ( + "bytes" + "context" + "io" + + abci "github.com/cometbft/cometbft/abci/types" + rpcclientmock "github.com/cometbft/cometbft/rpc/client/mock" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/crypto/types" + sdk "github.com/cosmos/cosmos-sdk/types" + testutilmod "github.com/cosmos/cosmos-sdk/types/module/testutil" + "github.com/cosmos/cosmos-sdk/x/nft/module" + + "pkg.akt.dev/go/cli" + cflags "pkg.akt.dev/go/cli/flags" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +type SlashingCLITestSuite struct { + CLITestSuite + + pub types.PubKey + addr sdk.AccAddress +} + +func (s *SlashingCLITestSuite) SetupSuite() { + s.encCfg = testutilmod.MakeTestEncodingConfig(module.AppModuleBasic{}) + s.kr = keyring.NewInMemory(s.encCfg.Codec) + s.baseCtx = client.Context{}. + WithKeyring(s.kr). + WithTxConfig(s.encCfg.TxConfig). + WithCodec(s.encCfg.Codec). + WithLegacyAmino(s.encCfg.Amino). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). + WithChainID("test-chain"). + WithSignModeStr(cflags.SignModeDirect) + + var outBuf bytes.Buffer + ctxGen := func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&sdk.TxResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + + return s.baseCtx.WithClient(c) + } + s.cctx = ctxGen().WithOutput(&outBuf) + + k, _, err := s.cctx.Keyring.NewMnemonic("NewValidator", keyring.English, sdk.FullFundraiserPath, keyring.DefaultBIP39Passphrase, hd.Secp256k1) + s.Require().NoError(err) + + pub, err := k.GetPubKey() + s.Require().NoError(err) + + s.pub = pub + s.addr = sdk.AccAddress(pub.Address()) +} + +func (s *SlashingCLITestSuite) TestGetCmdQuerySigningInfo() { + pubKeyBz, err := s.encCfg.Codec.MarshalInterfaceJSON(s.pub) + s.Require().NoError(err) + pubKeyStr := string(pubKeyBz) + + testCases := []struct { + name string + args []string + expectErr bool + }{ + {"invalid address", []string{"foo"}, true}, + { + "valid address (json output)", + cli.TestFlags(). + With(pubKeyStr). + WithHeight(1). + WithOutputJSON(), + false, + }, + { + "valid address (text output)", + cli.TestFlags(). + With(pubKeyStr). + WithHeight(1). + WithOutputText(), + false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQuerySlashingSigningInfoCmd() + cctx := s.cctx + + _, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) 
+ if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + } + }) + } +} + +func (s *SlashingCLITestSuite) TestGetCmdQueryParams() { + testCases := []struct { + name string + args []string + }{ + { + "json output", + cli.TestFlags(). + WithOutputJSON(), + }, + { + "text output", + cli.TestFlags(). + WithOutputText(), + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQuerySlashingParamsCmd() + cctx := s.cctx + + _, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + s.Require().NoError(err) + }) + } +} diff --git a/go/cli/slashing_tx.go b/go/cli/slashing_tx.go new file mode 100644 index 00000000..82e35f74 --- /dev/null +++ b/go/cli/slashing_tx.go @@ -0,0 +1,62 @@ +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/slashing/types" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetTxSlashingCmd returns a root CLI command handler for all x/slashing transaction commands. +func GetTxSlashingCmd() *cobra.Command { + slashingTxCmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Slashing transaction subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + slashingTxCmd.AddCommand( + GetTxSlashingUnjailCmd(), + ) + + return slashingTxCmd +} + +// GetTxSlashingUnjailCmd returns a CLI command handler for creating a MsgUnjail transaction. +func GetTxSlashingUnjailCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "unjail", + Args: cobra.NoArgs, + Short: "unjail validator previously jailed for downtime", + Long: `unjail a jailed validator: + +$ tx slashing unjail --from mykey +`, + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + valAddr := cctx.GetFromAddress() + + msg := types.NewMsgUnjail(sdk.ValAddress(valAddr)) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/go/cli/slashing_tx_test.go b/go/cli/slashing_tx_test.go new file mode 100644 index 00000000..fe059911 --- /dev/null +++ b/go/cli/slashing_tx_test.go @@ -0,0 +1,52 @@ +package cli_test + +import ( + "context" + + sdkmath "cosmossdk.io/math" + "github.com/cosmos/gogoproto/proto" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "pkg.akt.dev/go/cli" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +func (s *SlashingCLITestSuite) TestNewUnjailTxCmd() { + val := s.addr + testCases := []struct { + name string + args []string + expectErr bool + expectedCode uint32 + respType proto.Message + }{ + { + "valid transaction", + cli.TestFlags(). + WithFrom(val.String()). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10)))), + false, 0, &sdk.TxResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxSlashingUnjailCmd() + cctx := s.cctx + + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) 
+ if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + + txResp := tc.respType.(*sdk.TxResponse) + s.Require().Equal(tc.expectedCode, txResp.Code, out.String()) + } + }) + } +} diff --git a/go/cli/staking_query.go b/go/cli/staking_query.go new file mode 100644 index 00000000..7354373a --- /dev/null +++ b/go/cli/staking_query.go @@ -0,0 +1,1036 @@ +package cli + +import ( + "fmt" + "strconv" + "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/staking/types" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetQueryStakingCmd returns the cli query commands for this module +func GetQueryStakingCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Querying commands for the staking module", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetQueryStakingDelegationCmd(), + GetQueryStakingDelegationsCmd(), + GetQueryStakingUnbondingDelegationCmd(), + GetQueryStakingUnbondingDelegationsCmd(), + GetQueryStakingRedelegationCmd(), + GetQueryStakingRedelegationsCmd(), + GetQueryStakingValidatorCmd(), + GetQueryStakingValidatorsCmd(), + GetQueryStakingValidatorDelegationsCmd(), + GetQueryStakingValidatorUnbondingDelegationsCmd(), + GetQueryStakingValidatorRedelegationsCmd(), + GetQueryStakingHistoricalInfoCmd(), + GetQueryStakingParamsCmd(), + GetQueryStakingPoolCmd(), + GetQueryStakingTokenizeShareRecordByIDCmd(), + GetQueryStakingTokenizeShareRecordByDenomCmd(), + GetQueryStakingTokenizeShareRecordsOwnedCmd(), + GetQueryStakingAllTokenizeShareRecordsCmd(), + GetQueryStakingLastTokenizeShareRecordIDCmd(), + GetQueryStakingTotalTokenizeSharedAssetsCmd(), + GetQueryStakingTokenizeShareLockInfoCmd(), + GetQueryStakingTotalLiquidStakedCmd(), + ) + + return cmd +} + +// GetQueryStakingValidatorCmd implements the validator query command. +func GetQueryStakingValidatorCmd() *cobra.Command { + bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix() + + cmd := &cobra.Command{ + Use: "validator [validator-addr]", + Short: "Query a validator", + Long: strings.TrimSpace( + fmt.Sprintf(`Query details about an individual validator. + +Example: +$ %s query staking validator %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj +`, + version.AppName, bech32PrefixValAddr, + ), + ), + Args: cobra.ExactArgs(1), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + addr, err := sdk.ValAddressFromBech32(args[0]) + if err != nil { + return err + } + + params := &types.QueryValidatorRequest{ValidatorAddr: addr.String()} + res, err := cl.Query().Staking().Validator(cmd.Context(), params) + if err != nil { + return err + } + + return cl.PrintMessage(&res.Validator) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryStakingValidatorsCmd implements the query all validators command. +func GetQueryStakingValidatorsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "validators", + Short: "Query for all validators", + Args: cobra.NoArgs, + Long: strings.TrimSpace( + fmt.Sprintf(`Query details about all validators on a network. 
+
+Example:
+$ %s query staking validators
+`,
+				version.AppName,
+			),
+		),
+		PersistentPreRunE: QueryPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, _ []string) error {
+			ctx := cmd.Context()
+			cl := MustQueryClientFromContext(ctx)
+
+			pageReq, err := client.ReadPageRequest(cmd.Flags())
+			if err != nil {
+				return err
+			}
+
+			result, err := cl.Query().Staking().Validators(cmd.Context(), &types.QueryValidatorsRequest{
+				// Leaving status empty on purpose to query all validators.
+				Pagination: pageReq,
+			})
+			if err != nil {
+				return err
+			}
+
+			return cl.PrintMessage(result)
+		},
+	}
+
+	cflags.AddQueryFlagsToCmd(cmd)
+	cflags.AddPaginationFlagsToCmd(cmd, "validators")
+
+	return cmd
+}
+
+// GetQueryStakingValidatorUnbondingDelegationsCmd implements the query all unbonding delegations from a validator command.
+func GetQueryStakingValidatorUnbondingDelegationsCmd() *cobra.Command {
+	bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix()
+
+	cmd := &cobra.Command{
+		Use:   "unbonding-delegations-from [validator-addr]",
+		Short: "Query all unbonding delegations from a validator",
+		Long: strings.TrimSpace(
+			fmt.Sprintf(`Query delegations that are unbonding _from_ a validator.
+
+Example:
+$ %s query staking unbonding-delegations-from %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+`,
+				version.AppName, bech32PrefixValAddr,
+			),
+		),
+		Args:              cobra.ExactArgs(1),
+		PersistentPreRunE: QueryPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+			cl := MustQueryClientFromContext(ctx)
+
+			valAddr, err := sdk.ValAddressFromBech32(args[0])
+			if err != nil {
+				return err
+			}
+
+			pageReq, err := client.ReadPageRequest(cmd.Flags())
+			if err != nil {
+				return err
+			}
+
+			params := &types.QueryValidatorUnbondingDelegationsRequest{
+				ValidatorAddr: valAddr.String(),
+				Pagination:    pageReq,
+			}
+
+			res, err := cl.Query().Staking().ValidatorUnbondingDelegations(cmd.Context(), params)
+			if err != nil {
+				return err
+			}
+
+			return cl.PrintMessage(res)
+		},
+	}
+
+	cflags.AddQueryFlagsToCmd(cmd)
+	cflags.AddPaginationFlagsToCmd(cmd, "unbonding delegations")
+
+	return cmd
+}
+
+// GetQueryStakingValidatorRedelegationsCmd implements the query all redelegations
+// from a validator command.
+func GetQueryStakingValidatorRedelegationsCmd() *cobra.Command {
+	bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix()
+
+	cmd := &cobra.Command{
+		Use:   "redelegations-from [validator-addr]",
+		Short: "Query all outgoing redelegations from a validator",
+		Long: strings.TrimSpace(
+			fmt.Sprintf(`Query delegations that are redelegating _from_ a validator.
+ +Example: +$ %s query staking redelegations-from %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj +`, + version.AppName, bech32PrefixValAddr, + ), + ), + Args: cobra.ExactArgs(1), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + valSrcAddr, err := sdk.ValAddressFromBech32(args[0]) + if err != nil { + return err + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + params := &types.QueryRedelegationsRequest{ + SrcValidatorAddr: valSrcAddr.String(), + Pagination: pageReq, + } + + res, err := cl.Query().Staking().Redelegations(cmd.Context(), params) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "validator redelegations") + + return cmd +} + +// GetQueryStakingDelegationCmd the query delegation command. +func GetQueryStakingDelegationCmd() *cobra.Command { + bech32PrefixAccAddr := sdk.GetConfig().GetBech32AccountAddrPrefix() + bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix() + + cmd := &cobra.Command{ + Use: "delegation [delegator-addr] [validator-addr]", + Short: "Query a delegation based on address and validator address", + Long: strings.TrimSpace( + fmt.Sprintf(`Query delegations for an individual delegator on an individual validator. + +Example: +$ %s query staking delegation %s1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj +`, + version.AppName, bech32PrefixAccAddr, bech32PrefixValAddr, + ), + ), + Args: cobra.ExactArgs(2), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + delAddr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + valAddr, err := sdk.ValAddressFromBech32(args[1]) + if err != nil { + return err + } + + params := &types.QueryDelegationRequest{ + DelegatorAddr: delAddr.String(), + ValidatorAddr: valAddr.String(), + } + + res, err := cl.Query().Staking().Delegation(cmd.Context(), params) + if err != nil { + return err + } + + return cl.PrintMessage(res.DelegationResponse) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryStakingDelegationsCmd implements the command to query all the delegations +// made from one delegator. +func GetQueryStakingDelegationsCmd() *cobra.Command { + bech32PrefixAccAddr := sdk.GetConfig().GetBech32AccountAddrPrefix() + + cmd := &cobra.Command{ + Use: "delegations [delegator-addr]", + Short: "Query all delegations made by one delegator", + Long: strings.TrimSpace( + fmt.Sprintf(`Query delegations for an individual delegator on all validators. 
+ +Example: +$ %s query staking delegations %s1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p +`, + version.AppName, bech32PrefixAccAddr, + ), + ), + Args: cobra.ExactArgs(1), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + delAddr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + params := &types.QueryDelegatorDelegationsRequest{ + DelegatorAddr: delAddr.String(), + Pagination: pageReq, + } + + res, err := cl.Query().Staking().DelegatorDelegations(cmd.Context(), params) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "delegations") + + return cmd +} + +// GetQueryStakingValidatorDelegationsCmd implements the command to query all the +// delegations to a specific validator. +func GetQueryStakingValidatorDelegationsCmd() *cobra.Command { + bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix() + + cmd := &cobra.Command{ + Use: "delegations-to [validator-addr]", + Short: "Query all delegations made to one validator", + Long: strings.TrimSpace( + fmt.Sprintf(`Query delegations on an individual validator. + +Example: +$ %s query staking delegations-to %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj +`, + version.AppName, bech32PrefixValAddr, + ), + ), + Args: cobra.ExactArgs(1), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + valAddr, err := sdk.ValAddressFromBech32(args[0]) + if err != nil { + return err + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + params := &types.QueryValidatorDelegationsRequest{ + ValidatorAddr: valAddr.String(), + Pagination: pageReq, + } + + res, err := cl.Query().Staking().ValidatorDelegations(cmd.Context(), params) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "validator delegations") + + return cmd +} + +// GetQueryStakingUnbondingDelegationCmd implements the command to query a single +// unbonding-delegation record. +func GetQueryStakingUnbondingDelegationCmd() *cobra.Command { + bech32PrefixAccAddr := sdk.GetConfig().GetBech32AccountAddrPrefix() + bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix() + + cmd := &cobra.Command{ + Use: "unbonding-delegation [delegator-addr] [validator-addr]", + Short: "Query an unbonding-delegation record based on delegator and validator address", + Long: strings.TrimSpace( + fmt.Sprintf(`Query unbonding delegations for an individual delegator on an individual validator. 
+ +Example: +$ %s query staking unbonding-delegation %s1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj +`, + version.AppName, bech32PrefixAccAddr, bech32PrefixValAddr, + ), + ), + Args: cobra.ExactArgs(2), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + valAddr, err := sdk.ValAddressFromBech32(args[1]) + if err != nil { + return err + } + + delAddr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + params := &types.QueryUnbondingDelegationRequest{ + DelegatorAddr: delAddr.String(), + ValidatorAddr: valAddr.String(), + } + + res, err := cl.Query().Staking().UnbondingDelegation(cmd.Context(), params) + if err != nil { + return err + } + + return cl.PrintMessage(&res.Unbond) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryStakingUnbondingDelegationsCmd implements the command to query all the +// unbonding-delegation records for a delegator. +func GetQueryStakingUnbondingDelegationsCmd() *cobra.Command { + bech32PrefixAccAddr := sdk.GetConfig().GetBech32AccountAddrPrefix() + + cmd := &cobra.Command{ + Use: "unbonding-delegations [delegator-addr]", + Short: "Query all unbonding-delegations records for one delegator", + Long: strings.TrimSpace( + fmt.Sprintf(`Query unbonding delegations for an individual delegator. + +Example: +$ %s query staking unbonding-delegations %s1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p +`, + version.AppName, bech32PrefixAccAddr, + ), + ), + Args: cobra.ExactArgs(1), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + delegatorAddr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + params := &types.QueryDelegatorUnbondingDelegationsRequest{ + DelegatorAddr: delegatorAddr.String(), + Pagination: pageReq, + } + + res, err := cl.Query().Staking().DelegatorUnbondingDelegations(cmd.Context(), params) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "unbonding delegations") + + return cmd +} + +// GetQueryStakingRedelegationCmd implements the command to query a single +// redelegation record. +func GetQueryStakingRedelegationCmd() *cobra.Command { + bech32PrefixAccAddr := sdk.GetConfig().GetBech32AccountAddrPrefix() + bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix() + + cmd := &cobra.Command{ + Use: "redelegation [delegator-addr] [src-validator-addr] [dst-validator-addr]", + Short: "Query a redelegation record based on delegator and a source and destination validator address", + Long: strings.TrimSpace( + fmt.Sprintf(`Query a redelegation record for an individual delegator between a source and destination validator. 
+
+Example:
+$ %s query staking redelegation %s1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p %s1l2rsakp388kuv9k8qzq6lrm9taddae7fpx59wm %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj
+`,
+				version.AppName, bech32PrefixAccAddr, bech32PrefixValAddr, bech32PrefixValAddr,
+			),
+		),
+		Args:              cobra.ExactArgs(3),
+		PersistentPreRunE: QueryPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+			cl := MustQueryClientFromContext(ctx)
+
+			delAddr, err := sdk.AccAddressFromBech32(args[0])
+			if err != nil {
+				return err
+			}
+
+			valSrcAddr, err := sdk.ValAddressFromBech32(args[1])
+			if err != nil {
+				return err
+			}
+
+			valDstAddr, err := sdk.ValAddressFromBech32(args[2])
+			if err != nil {
+				return err
+			}
+
+			params := &types.QueryRedelegationsRequest{
+				DelegatorAddr:    delAddr.String(),
+				DstValidatorAddr: valDstAddr.String(),
+				SrcValidatorAddr: valSrcAddr.String(),
+			}
+
+			res, err := cl.Query().Staking().Redelegations(cmd.Context(), params)
+			if err != nil {
+				return err
+			}
+
+			return cl.PrintMessage(res)
+		},
+	}
+
+	cflags.AddQueryFlagsToCmd(cmd)
+
+	return cmd
+}
+
+// GetQueryStakingRedelegationsCmd implements the command to query all the
+// redelegation records for a delegator.
+func GetQueryStakingRedelegationsCmd() *cobra.Command {
+	bech32PrefixAccAddr := sdk.GetConfig().GetBech32AccountAddrPrefix()
+
+	cmd := &cobra.Command{
+		Use:   "redelegations [delegator-addr]",
+		Args:  cobra.ExactArgs(1),
+		Short: "Query all redelegation records for one delegator",
+		Long: strings.TrimSpace(
+			fmt.Sprintf(`Query all redelegation records for an individual delegator.
+
+Example:
+$ %s query staking redelegations %s1gghjut3ccd8ay0zduzj64hwre2fxs9ld75ru9p
+`,
+				version.AppName, bech32PrefixAccAddr,
+			),
+		),
+		PersistentPreRunE: QueryPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+			cl := MustQueryClientFromContext(ctx)
+
+			delAddr, err := sdk.AccAddressFromBech32(args[0])
+			if err != nil {
+				return err
+			}
+
+			pageReq, err := client.ReadPageRequest(cmd.Flags())
+			if err != nil {
+				return err
+			}
+
+			params := &types.QueryRedelegationsRequest{
+				DelegatorAddr: delAddr.String(),
+				Pagination:    pageReq,
+			}
+
+			res, err := cl.Query().Staking().Redelegations(cmd.Context(), params)
+			if err != nil {
+				return err
+			}
+
+			return cl.PrintMessage(res)
+		},
+	}
+
+	cflags.AddQueryFlagsToCmd(cmd)
+	cflags.AddPaginationFlagsToCmd(cmd, "delegator redelegations")
+
+	return cmd
+}
+
+// GetQueryStakingHistoricalInfoCmd implements the historical info query command
+func GetQueryStakingHistoricalInfoCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "historical-info [height]",
+		Args:  cobra.ExactArgs(1),
+		Short: "Query historical info at given height",
+		Long: strings.TrimSpace(
+			fmt.Sprintf(`Query historical info at given height.
+
+Example:
+$ %s query staking historical-info 5
+`,
+				version.AppName,
+			),
+		),
+		PersistentPreRunE: QueryPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+			cl := MustQueryClientFromContext(ctx)
+
+			height, err := strconv.ParseInt(args[0], 10, 64)
+			if err != nil || height < 0 {
+				return fmt.Errorf("height argument provided must be a non-negative integer: %v", err)
+			}
+
+			params := &types.QueryHistoricalInfoRequest{Height: height}
+			res, err := cl.Query().Staking().HistoricalInfo(cmd.Context(), params)
+			if err != nil {
+				return err
+			}
+
+			return cl.PrintMessage(res.Hist)
+		},
+	}
+
+	cflags.AddQueryFlagsToCmd(cmd)
+
+	return cmd
+}
+
+// GetQueryStakingPoolCmd implements the pool query command.
+func GetQueryStakingPoolCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "pool",
+		Args:  cobra.NoArgs,
+		Short: "Query the current staking pool values",
+		Long: strings.TrimSpace(
+			fmt.Sprintf(`Query values for amounts stored in the staking pool.
+
+Example:
+$ %s query staking pool
+`,
+				version.AppName,
+			),
+		),
+		PersistentPreRunE: QueryPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, _ []string) error {
+			ctx := cmd.Context()
+			cl := MustQueryClientFromContext(ctx)
+
+			res, err := cl.Query().Staking().Pool(cmd.Context(), &types.QueryPoolRequest{})
+			if err != nil {
+				return err
+			}
+
+			return cl.PrintMessage(&res.Pool)
+		},
+	}
+
+	cflags.AddQueryFlagsToCmd(cmd)
+
+	return cmd
+}
+
+// GetQueryStakingParamsCmd implements the params query command.
+func GetQueryStakingParamsCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "params",
+		Args:  cobra.NoArgs,
+		Short: "Query the current staking parameters information",
+		Long: strings.TrimSpace(
+			fmt.Sprintf(`Query values set as staking parameters.
+
+Example:
+$ %s query staking params
+`,
+				version.AppName,
+			),
+		),
+		PersistentPreRunE: QueryPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, _ []string) error {
+			ctx := cmd.Context()
+			cl := MustQueryClientFromContext(ctx)
+
+			res, err := cl.Query().Staking().Params(cmd.Context(), &types.QueryParamsRequest{})
+			if err != nil {
+				return err
+			}
+
+			return cl.PrintMessage(&res.Params)
+		},
+	}
+
+	cflags.AddQueryFlagsToCmd(cmd)
+
+	return cmd
+}
+
+// GetQueryStakingTokenizeShareRecordByIDCmd implements the query for individual tokenize share record information by share id
+func GetQueryStakingTokenizeShareRecordByIDCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "tokenize-share-record-by-id [id]",
+		Args:  cobra.ExactArgs(1),
+		Short: "Query individual tokenize share record information by share id",
+		Long: strings.TrimSpace(
+			fmt.Sprintf(`Query individual tokenize share record information by share id.
+
+Example:
+$ %s query staking tokenize-share-record-by-id [id]
+`,
+				version.AppName,
+			),
+		),
+		PersistentPreRunE: QueryPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+			cl := MustQueryClientFromContext(ctx)
+
+			id, err := strconv.ParseUint(args[0], 10, 64)
+			if err != nil {
+				return err
+			}
+
+			res, err := cl.Query().Staking().TokenizeShareRecordById(cmd.Context(), &types.QueryTokenizeShareRecordByIdRequest{
+				Id: id,
+			})
+			if err != nil {
+				return err
+			}
+
+			return cl.PrintMessage(res)
+		},
+	}
+
+	cflags.AddQueryFlagsToCmd(cmd)
+
+	return cmd
+}
+
+// GetQueryStakingTokenizeShareRecordByDenomCmd implements the query for individual tokenize share record information by share denom
+func GetQueryStakingTokenizeShareRecordByDenomCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "tokenize-share-record-by-denom [denom]",
+		Args:  cobra.ExactArgs(1),
+		Short: "Query individual tokenize share record information by share denom",
+		Long: strings.TrimSpace(
+			fmt.Sprintf(`Query individual tokenize share record information by share denom.
+
+Example:
+$ %s query staking tokenize-share-record-by-denom [denom]
+`,
+				version.AppName,
+			),
+		),
+		PersistentPreRunE: QueryPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+			cl := MustQueryClientFromContext(ctx)
+
+			res, err := cl.Query().Staking().TokenizeShareRecordByDenom(cmd.Context(), &types.QueryTokenizeShareRecordByDenomRequest{
+				Denom: args[0],
+			})
+			if err != nil {
+				return err
+			}
+
+			return cl.PrintMessage(res)
+		},
+	}
+
+	cflags.AddQueryFlagsToCmd(cmd)
+
+	return cmd
+}
+
+// GetQueryStakingTokenizeShareRecordsOwnedCmd implements the query tokenize share records by address
+func GetQueryStakingTokenizeShareRecordsOwnedCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "tokenize-share-records-owned [owner]",
+		Args:  cobra.ExactArgs(1),
+		Short: "Query tokenize share records by address",
+		Long: strings.TrimSpace(
+			fmt.Sprintf(`Query tokenize share records by address.
+
+Example:
+$ %s query staking tokenize-share-records-owned [owner]
+`,
+				version.AppName,
+			),
+		),
+		PersistentPreRunE: QueryPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+			cl := MustQueryClientFromContext(ctx)
+
+			owner, err := sdk.AccAddressFromBech32(args[0])
+			if err != nil {
+				return err
+			}
+
+			res, err := cl.Query().Staking().TokenizeShareRecordsOwned(cmd.Context(), &types.QueryTokenizeShareRecordsOwnedRequest{
+				Owner: owner.String(),
+			})
+			if err != nil {
+				return err
+			}
+
+			return cl.PrintMessage(res)
+		},
+	}
+
+	cflags.AddQueryFlagsToCmd(cmd)
+
+	return cmd
+}
+
+// GetQueryStakingAllTokenizeShareRecordsCmd implements the query for all tokenize share records
+func GetQueryStakingAllTokenizeShareRecordsCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "all-tokenize-share-records",
+		Args:  cobra.NoArgs,
+		Short: "Query for all tokenize share records",
+		Long: strings.TrimSpace(
+			fmt.Sprintf(`Query for all tokenize share records.
+ +Example: +$ %s query staking all-tokenize-share-records +`, + version.AppName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + pageReq, err := client.ReadPageRequest(cmd.Flags()) + if err != nil { + return err + } + + params := &types.QueryAllTokenizeShareRecordsRequest{ + Pagination: pageReq, + } + + res, err := cl.Query().Staking().AllTokenizeShareRecords(cmd.Context(), params) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + cflags.AddPaginationFlagsToCmd(cmd, "tokenize share records") + + return cmd +} + +// GetQueryStakingLastTokenizeShareRecordIDCmd implements the query for last tokenize share record id +func GetQueryStakingLastTokenizeShareRecordIDCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "last-tokenize-share-record-id", + Args: cobra.NoArgs, + Short: "Query for last tokenize share record id", + Long: strings.TrimSpace( + fmt.Sprintf(`Query for last tokenize share record id. + +Example: +$ %s query staking last-tokenize-share-record-id +`, + version.AppName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + res, err := cl.Query().Staking().LastTokenizeShareRecordId(cmd.Context(), &types.QueryLastTokenizeShareRecordIdRequest{}) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryStakingTotalTokenizeSharedAssetsCmd implements the query for total tokenized staked assets +func GetQueryStakingTotalTokenizeSharedAssetsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "total-tokenize-share-assets", + Args: cobra.NoArgs, + Short: "Query for total tokenized staked assets", + Long: strings.TrimSpace( + fmt.Sprintf(`Query for total tokenized staked assets. + +Example: +$ %s query staking total-tokenize-share-assets +`, + version.AppName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + res, err := cl.Query().Staking().TotalTokenizeSharedAssets(cmd.Context(), &types.QueryTotalTokenizeSharedAssetsRequest{}) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryStakingTotalLiquidStakedCmd implements the query for total liquid staked tokens +func GetQueryStakingTotalLiquidStakedCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "total-liquid-staked", + Args: cobra.NoArgs, + Short: "Query for total liquid staked tokens", + Long: strings.TrimSpace( + fmt.Sprintf(`Query for total number of liquid staked tokens. +Liquid staked tokens are identified as either a tokenized delegation, +or tokens owned by an interchain account. 
+Example: +$ %s query staking total-liquid-staked +`, + version.AppName, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + res, err := cl.Query().Staking().TotalLiquidStaked(cmd.Context(), &types.QueryTotalLiquidStaked{}) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryStakingTokenizeShareLockInfoCmd returns the tokenize share lock status for a user +func GetQueryStakingTokenizeShareLockInfoCmd() *cobra.Command { + bech32PrefixAccAddr := sdk.GetConfig().GetBech32AccountAddrPrefix() + + cmd := &cobra.Command{ + Use: "tokenize-share-lock-info [address]", + Args: cobra.ExactArgs(1), + Short: "Query tokenize share lock information", + Long: strings.TrimSpace( + fmt.Sprintf(`Query the status of a tokenize share lock for a given account +Example: +$ %s query staking tokenize-share-lock-info %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj +`, + version.AppName, bech32PrefixAccAddr, + ), + ), + PersistentPreRunE: QueryPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + address := args[0] + if _, err := sdk.AccAddressFromBech32(address); err != nil { + return err + } + + res, err := cl.Query().Staking().TokenizeShareLockInfo( + cmd.Context(), + &types.QueryTokenizeShareLockInfo{Address: address}, + ) + if err != nil { + return err + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/go/cli/staking_query_test.go b/go/cli/staking_query_test.go new file mode 100644 index 00000000..5eb65084 --- /dev/null +++ b/go/cli/staking_query_test.go @@ -0,0 +1,577 @@ +package cli_test + +import ( + "context" + "fmt" + "strings" + + "github.com/cosmos/gogoproto/proto" + + "github.com/cosmos/cosmos-sdk/client/flags" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/staking/types" + + "pkg.akt.dev/go/cli" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +func (s *StakingCLITestSuite) TestGetCmdQueryValidator() { + testCases := []struct { + name string + args []string + expectErr bool + }{ + { + "with invalid address ", + cli.TestFlags(). + With("bla"). + WithOutputJSON(), + true, + }, + { + "happy case", + cli.TestFlags(). + With(sdk.ValAddress(s.addrs[0]).String()). + WithOutputJSON(), + false, + }, + } + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryStakingValidatorCmd() + cctx := s.cctx + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + if tc.expectErr { + s.Require().Error(err) + s.Require().NotEqual("internal", err.Error()) + } else { + var result types.Validator + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), &result)) + } + }) + } +} + +func (s *StakingCLITestSuite) TestGetCmdQueryValidators() { + testCases := []struct { + name string + args []string + minValidatorCount int + }{ + { + "one validator case", + cli.TestFlags(). + WithLimit(1). + WithOutputJSON(), + 1, + }, + { + "multi validator case", + cli.TestFlags(). + WithOutputJSON(), + 0, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryStakingValidatorsCmd() + cctx := s.cctx + + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) 
+ s.Require().NoError(err) + + var result types.QueryValidatorsResponse + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), &result)) + }) + } +} + +func (s *StakingCLITestSuite) TestGetCmdQueryDelegation() { + testCases := []struct { + name string + args []string + expErr bool + respType proto.Message + }{ + { + "with wrong delegator address", + cli.TestFlags(). + With( + "wrongDelAddr", + s.addrs[1].String(), + ). + WithOutputJSON(), + true, nil, + }, + { + "with wrong validator address", + cli.TestFlags(). + With( + s.addrs[0].String(), + "wrongDelAddr", + ). + WithOutputJSON(), + true, nil, + }, + { + "with json output", + cli.TestFlags(). + With( + s.addrs[0].String(), + sdk.ValAddress(s.addrs[1]).String(), + ). + WithOutputJSON(), + false, + &types.DelegationResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryStakingDelegationCmd() + cctx := s.cctx + + _, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + if tc.expErr { + s.Require().Error(err) + } else { + s.Require().Contains(err.Error(), "Marshal called with nil") + } + }) + } +} + +func (s *StakingCLITestSuite) TestGetCmdQueryDelegations() { + testCases := []struct { + name string + args []string + expErr bool + respType proto.Message + }{ + { + "with no delegator address", + []string{}, + true, nil, + }, + { + "with wrong delegator address", + cli.TestFlags(). + With("wrongDelAddr"), + true, nil, + }, + { + "valid request (height specific)", + cli.TestFlags(). + With( + s.addrs[0].String(), + ). + WithOutputJSON(). + WithHeight(1), + false, + &types.QueryDelegatorDelegationsResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryStakingDelegationsCmd() + cctx := s.cctx + + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + if tc.expErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + } + }) + } +} + +func (s *StakingCLITestSuite) TestGetCmdQueryValidatorDelegations() { + testCases := []struct { + name string + args []string + expErr bool + respType proto.Message + }{ + { + "with no validator address", + []string{}, + true, nil, + }, + { + "wrong validator address", + cli.TestFlags(). + With("wrongDelAddr"), + true, nil, + }, + { + "valid request(height specific)", + cli.TestFlags(). + With( + s.addrs[0].String(), + ). + WithOutputJSON(). + WithHeight(1), + false, + &types.QueryValidatorDelegationsResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryStakingDelegationsCmd() + cctx := s.cctx + + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + if tc.expErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + s.Require().NoError(cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + } + }) + } +} + +func (s *StakingCLITestSuite) TestGetCmdQueryUnbondingDelegations() { + testCases := []struct { + name string + args []string + expErr bool + }{ + { + "wrong delegator address", + cli.TestFlags(). + With( + "wrongDelAddr", + ). + WithOutputJSON(), + true, + }, + { + "valid request", + cli.TestFlags(). + With( + s.addrs[0].String(), + ). 
+ WithOutputJSON(), + false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryStakingUnbondingDelegationsCmd() + cctx := s.cctx + + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + + if tc.expErr { + s.Require().Error(err) + } else { + var ubds types.QueryDelegatorUnbondingDelegationsResponse + err = s.cctx.Codec.UnmarshalJSON(out.Bytes(), &ubds) + + s.Require().NoError(err) + } + }) + } +} + +func (s *StakingCLITestSuite) TestGetCmdQueryUnbondingDelegation() { + testCases := []struct { + name string + args []string + expErr bool + }{ + { + "wrong delegator address", + cli.TestFlags(). + With( + "wrongDelAddr", + s.addrs[0].String(), + ). + WithOutputJSON(), + true, + }, + { + "wrong validator address", + cli.TestFlags(). + With( + s.addrs[0].String(), + "wrongValAddr", + ). + WithOutputJSON(), + true, + }, + { + "valid request", + cli.TestFlags(). + With( + s.addrs[0].String(), + sdk.ValAddress(s.addrs[1]).String(), + ). + WithOutputJSON(), + false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryStakingUnbondingDelegationCmd() + cctx := s.cctx + + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + + if tc.expErr { + s.Require().Error(err) + } else { + var ubd types.UnbondingDelegation + + err = s.cctx.Codec.UnmarshalJSON(out.Bytes(), &ubd) + s.Require().NoError(err) + } + }) + } +} + +func (s *StakingCLITestSuite) TestGetCmdQueryValidatorUnbondingDelegations() { + testCases := []struct { + name string + args []string + expErr bool + }{ + { + "wrong validator address", + cli.TestFlags(). + With( + "wrongValAddr", + ). + WithOutputJSON(), + true, + }, + { + "valid request", + cli.TestFlags(). + With( + sdk.ValAddress(s.addrs[0]).String(), + ). + WithOutputJSON(), + false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryStakingValidatorUnbondingDelegationsCmd() + cctx := s.cctx + + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + + if tc.expErr { + s.Require().Error(err) + } else { + var ubds types.QueryValidatorUnbondingDelegationsResponse + err = s.cctx.Codec.UnmarshalJSON(out.Bytes(), &ubds) + s.Require().NoError(err) + } + }) + } +} + +func (s *StakingCLITestSuite) TestGetCmdQueryRedelegations() { + testCases := []struct { + name string + args []string + expErr bool + }{ + { + "wrong delegator address", + cli.TestFlags(). + With( + "wrongdeladdr", + ). + WithOutputJSON(), + true, + }, + { + "valid request", + cli.TestFlags(). + With( + s.addrs[0].String(), + ). + WithOutputJSON(), + false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryStakingRedelegationsCmd() + cctx := s.cctx + + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + if tc.expErr { + s.Require().Error(err) + } else { + s.Require().NoError(err) + var redelegations types.QueryRedelegationsResponse + err = s.cctx.Codec.UnmarshalJSON(out.Bytes(), &redelegations) + s.Require().NoError(err) + } + }) + } +} + +func (s *StakingCLITestSuite) TestGetCmdQueryRedelegation() { + testCases := []struct { + name string + args []string + expErr bool + }{ + { + "wrong delegator address", + cli.TestFlags(). + With( + "wrongdeladdr", + sdk.ValAddress(s.addrs[0]).String(), + sdk.ValAddress(s.addrs[1]).String(), + ). + WithOutputJSON(), + true, + }, + { + "wrong source validator address address", + cli.TestFlags(). 
+ With( + s.addrs[0].String(), + "wrongSrcValAddress", + sdk.ValAddress(s.addrs[1]).String(), + ). + WithOutputJSON(), + true, + }, + { + "wrong destination validator address address", + cli.TestFlags(). + With( + s.addrs[0].String(), + sdk.ValAddress(s.addrs[0]).String(), + "wrongSrcValAddress", + ). + WithOutputJSON(), + true, + }, + { + "valid request", + cli.TestFlags(). + With( + s.addrs[0].String(), + sdk.ValAddress(s.addrs[0]).String(), + sdk.ValAddress(s.addrs[1]).String(), + ). + WithOutputJSON(), + false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryStakingRedelegationCmd() + cctx := s.cctx + + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + + if tc.expErr { + s.Require().Error(err) + } else { + var redelegations types.QueryRedelegationsResponse + + err = s.cctx.Codec.UnmarshalJSON(out.Bytes(), &redelegations) + s.Require().NoError(err) + } + }) + } +} + +func (s *StakingCLITestSuite) TestGetCmdQueryValidatorRedelegations() { + testCases := []struct { + name string + args []string + expErr bool + }{ + { + "wrong validator address", + cli.TestFlags(). + With( + "wrongValAddr", + ). + WithOutputJSON(), + true, + }, + { + "valid request", + cli.TestFlags(). + With( + sdk.ValAddress(s.addrs[0]).String(), + ). + WithOutputJSON(), + false, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryStakingValidatorRedelegationsCmd() + cctx := s.cctx + + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) + + if tc.expErr { + s.Require().Error(err) + } else { + var redelegations types.QueryRedelegationsResponse + err = s.cctx.Codec.UnmarshalJSON(out.Bytes(), &redelegations) + s.Require().NoError(err) + } + }) + } +} + +func (s *StakingCLITestSuite) TestGetCmdQueryPool() { + testCases := []struct { + name string + args []string + expectedOutput string + }{ + { + "with text", + []string{ + fmt.Sprintf("--%s=text", flags.FlagOutput), + fmt.Sprintf("--%s=1", flags.FlagHeight), + }, + `bonded_tokens: "0" +not_bonded_tokens: "0"`, + }, + { + "with json", + []string{ + fmt.Sprintf("--%s=json", flags.FlagOutput), + fmt.Sprintf("--%s=1", flags.FlagHeight), + }, + `{"not_bonded_tokens":"0","bonded_tokens":"0"}`, + }, + } + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetQueryStakingPoolCmd() + cctx := s.cctx + out, err := clitestutil.ExecTestCLICmd(context.Background(), cctx, cmd, tc.args...) 
+ s.Require().NoError(err) + s.Require().Equal(tc.expectedOutput, strings.TrimSpace(out.String())) + }) + } +} diff --git a/go/cli/staking_tx.go b/go/cli/staking_tx.go new file mode 100644 index 00000000..a6561a7b --- /dev/null +++ b/go/cli/staking_tx.go @@ -0,0 +1,961 @@ +package cli + +import ( + "errors" + "fmt" + "os" + "strconv" + "strings" + + errorsmod "cosmossdk.io/errors" + sdkmath "cosmossdk.io/math" + "github.com/spf13/cobra" + flag "github.com/spf13/pflag" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/tx" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + cclient "pkg.akt.dev/go/node/client/v1beta3" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// default values +var ( + DefaultTokens = sdk.DefaultPowerReduction.Mul(sdk.NewInt(100)) + defaultAmount = DefaultTokens.String() + sdk.DefaultBondDenom + defaultCommissionRate = "0.1" + defaultCommissionMaxRate = "0.2" + defaultCommissionMaxChangeRate = "0.01" +) + +// GetTxStakingCmd returns a root CLI command handler for all x/staking transaction commands. +func GetTxStakingCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: stakingtypes.ModuleName, + Short: "Staking transaction subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetTxStakingCreateValidatorCmd(), + GetTxStakingEditValidatorCmd(), + GetTxStakingDelegateCmd(), + GetTxStakingRedelegateCmd(), + GetTxStakingUnbondCmd(), + GetTxStakingUnbondValidatorCmd(), + GetTxStakingCancelUnbondingDelegationCmd(), + GetTxStakingTokenizeSharesCmd(), + GetTxStakingRedeemTokensCmd(), + GetTxStakingTransferTokenizeShareRecordCmd(), + GetTxStakingDisableTokenizeShares(), + GetTxStakingEnableTokenizeShares(), + GetTxStakingValidatorBondCmd(), + ) + + return cmd +} + +// GetTxStakingCreateValidatorCmd returns a CLI command handler for creating a MsgCreateValidator transaction. +func GetTxStakingCreateValidatorCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "create-validator", + Short: "create new validator", + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + msg, err := newBuildCreateValidatorMsg(cctx, cmd.Flags()) + if err != nil { + return err + } + + var opts []cclient.BroadcastOption + + genOnly, _ := cmd.Flags().GetBool(cflags.FlagGenerateOnly) + if genOnly { + ip, _ := cmd.Flags().GetString(cflags.FlagIP) + p2pPort, _ := cmd.Flags().GetUint(cflags.FlagP2PPort) + nodeID, _ := cmd.Flags().GetString(cflags.FlagNodeID) + + if nodeID != "" && ip != "" && p2pPort > 0 { + opts = append(opts, cclient.WithNote(fmt.Sprintf("%s@%s:%d", nodeID, ip, p2pPort))) + } + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}, opts...) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cmd.Flags().AddFlagSet(cflags.FlagSetPublicKey()) + cmd.Flags().AddFlagSet(cflags.FlagSetAmount()) + cmd.Flags().AddFlagSet(cflags.FlagSetDescriptionCreate()) + cmd.Flags().AddFlagSet(cflags.FlagSetCommissionCreate()) + + cmd.Flags().String(cflags.FlagIP, "", fmt.Sprintf("The node's public IP. 
It takes effect only when used in combination with --%s", cflags.FlagGenerateOnly)) + cmd.Flags().String(cflags.FlagNodeID, "", "The node's ID") + cflags.AddTxFlagsToCmd(cmd) + + _ = cmd.MarkFlagRequired(cflags.FlagFrom) + _ = cmd.MarkFlagRequired(cflags.FlagAmount) + _ = cmd.MarkFlagRequired(cflags.FlagPubKey) + _ = cmd.MarkFlagRequired(cflags.FlagMoniker) + + return cmd +} + +// GetTxStakingEditValidatorCmd returns a CLI command handler for creating a MsgEditValidator transaction. +func GetTxStakingEditValidatorCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "edit-validator", + Short: "edit an existing validator account", + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + valAddr := cctx.GetFromAddress() + moniker, _ := cmd.Flags().GetString(cflags.FlagEditMoniker) + identity, _ := cmd.Flags().GetString(cflags.FlagIdentity) + website, _ := cmd.Flags().GetString(cflags.FlagWebsite) + security, _ := cmd.Flags().GetString(cflags.FlagSecurityContact) + details, _ := cmd.Flags().GetString(cflags.FlagDetails) + description := stakingtypes.NewDescription(moniker, identity, website, security, details) + + var newRate *sdk.Dec + + commissionRate, _ := cmd.Flags().GetString(cflags.FlagCommissionRate) + if commissionRate != "" { + rate, err := sdkmath.LegacyNewDecFromStr(commissionRate) + if err != nil { + return fmt.Errorf("invalid new commission rate: %v", err) + } + + newRate = &rate + } + + msg := stakingtypes.NewMsgEditValidator(sdk.ValAddress(valAddr), description, newRate) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cmd.Flags().AddFlagSet(cflags.FlagSetDescriptionEdit()) + cmd.Flags().AddFlagSet(cflags.FlagSetCommissionUpdate()) + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// GetTxStakingDelegateCmd returns a CLI command handler for creating a MsgDelegate transaction. +func GetTxStakingDelegateCmd() *cobra.Command { + bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix() + + cmd := &cobra.Command{ + Use: "delegate [validator-addr] [amount]", + Args: cobra.ExactArgs(2), + Short: "Delegate liquid tokens to a validator", + Long: strings.TrimSpace( + fmt.Sprintf(`Delegate an amount of liquid coins to a validator from your wallet. + +Example: +$ %s tx staking delegate %s1l2rsakp388kuv9k8qzq6lrm9taddae7fpx59wm 1000stake --from mykey +`, + version.AppName, bech32PrefixValAddr, + ), + ), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + amount, err := sdk.ParseCoinNormalized(args[1]) + if err != nil { + return err + } + + delAddr := cctx.GetFromAddress() + valAddr, err := sdk.ValAddressFromBech32(args[0]) + if err != nil { + return err + } + + msg := stakingtypes.NewMsgDelegate(delAddr, valAddr, amount) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// GetTxStakingRedelegateCmd returns a CLI command handler for creating a MsgBeginRedelegate transaction. 
+func GetTxStakingRedelegateCmd() *cobra.Command { + bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix() + + cmd := &cobra.Command{ + Use: "redelegate [src-validator-addr] [dst-validator-addr] [amount]", + Short: "Redelegate illiquid tokens from one validator to another", + Args: cobra.ExactArgs(3), + Long: strings.TrimSpace( + fmt.Sprintf(`Redelegate an amount of illiquid staking tokens from one validator to another. + +Example: +$ %s tx staking redelegate %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj %s1l2rsakp388kuv9k8qzq6lrm9taddae7fpx59wm 100stake --from mykey +`, + version.AppName, bech32PrefixValAddr, bech32PrefixValAddr, + ), + ), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + delAddr := cctx.GetFromAddress() + valSrcAddr, err := sdk.ValAddressFromBech32(args[0]) + if err != nil { + return err + } + + valDstAddr, err := sdk.ValAddressFromBech32(args[1]) + if err != nil { + return err + } + + amount, err := sdk.ParseCoinNormalized(args[2]) + if err != nil { + return err + } + + msg := stakingtypes.NewMsgBeginRedelegate(delAddr, valSrcAddr, valDstAddr, amount) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// GetTxStakingUnbondCmd returns a CLI command handler for creating a MsgUndelegate transaction. +func GetTxStakingUnbondCmd() *cobra.Command { + bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix() + + cmd := &cobra.Command{ + Use: "unbond [validator-addr] [amount]", + Short: "Unbond shares from a validator", + Args: cobra.ExactArgs(2), + Long: strings.TrimSpace( + fmt.Sprintf(`Unbond an amount of bonded shares from a validator. + +Example: +$ %s tx staking unbond %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj 100stake --from mykey +`, + version.AppName, bech32PrefixValAddr, + ), + ), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + delAddr := cctx.GetFromAddress() + valAddr, err := sdk.ValAddressFromBech32(args[0]) + if err != nil { + return err + } + + amount, err := sdk.ParseCoinNormalized(args[1]) + if err != nil { + return err + } + + msg := stakingtypes.NewMsgUndelegate(delAddr, valAddr, amount) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +func GetTxStakingUnbondValidatorCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "unbond-validator", + Short: "Unbond a validator", + Args: cobra.ExactArgs(0), + Long: strings.TrimSpace( + fmt.Sprintf(`Unbond a validator. 
+ +Example: +$ %s tx staking unbond-validator --from mykey +`, + version.AppName, + ), + ), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + msg := stakingtypes.NewMsgUnbondValidator(sdk.ValAddress(cctx.GetFromAddress())) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// GetTxStakingCancelUnbondingDelegationCmd returns a CLI command handler for creating a MsgCancelUnbondingDelegation transaction. +func GetTxStakingCancelUnbondingDelegationCmd() *cobra.Command { + bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix() + + cmd := &cobra.Command{ + Use: "cancel-unbond [validator-addr] [amount] [creation-height]", + Short: "Cancel unbonding delegation and delegate back to the validator", + Args: cobra.ExactArgs(3), + Long: strings.TrimSpace( + fmt.Sprintf(`Cancel Unbonding Delegation and delegate back to the validator. + +Example: +$ %s tx staking cancel-unbond %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj 100stake 2 --from mykey +`, + version.AppName, bech32PrefixValAddr, + ), + ), + Example: fmt.Sprintf(`$ %s tx staking cancel-unbond %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj 100stake 2 --from mykey`, + version.AppName, bech32PrefixValAddr), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + delAddr := cctx.GetFromAddress() + valAddr, err := sdk.ValAddressFromBech32(args[0]) + if err != nil { + return err + } + + amount, err := sdk.ParseCoinNormalized(args[1]) + if err != nil { + return err + } + + creationHeight, err := strconv.ParseInt(args[2], 10, 64) + if err != nil { + return errorsmod.Wrap(fmt.Errorf("invalid height: %d", creationHeight), "invalid height") + } + + msg := stakingtypes.NewMsgCancelUnbondingDelegation(delAddr, valAddr, creationHeight, amount) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +func newBuildCreateValidatorMsg(clientCtx client.Context, fs *flag.FlagSet) (*stakingtypes.MsgCreateValidator, error) { + fAmount, _ := fs.GetString(cflags.FlagAmount) + amount, err := sdk.ParseCoinNormalized(fAmount) + if err != nil { + return nil, err + } + + valAddr := clientCtx.GetFromAddress() + pkStr, err := fs.GetString(cflags.FlagPubKey) + if err != nil { + return nil, err + } + + var pk cryptotypes.PubKey + if err := clientCtx.Codec.UnmarshalInterfaceJSON([]byte(pkStr), &pk); err != nil { + return nil, err + } + + moniker, _ := fs.GetString(cflags.FlagMoniker) + identity, _ := fs.GetString(cflags.FlagIdentity) + website, _ := fs.GetString(cflags.FlagWebsite) + security, _ := fs.GetString(cflags.FlagSecurityContact) + details, _ := fs.GetString(cflags.FlagDetails) + description := stakingtypes.NewDescription( + moniker, + identity, + website, + security, + details, + ) + + // get the initial validator commission parameters + rateStr, _ := fs.GetString(cflags.FlagCommissionRate) + maxRateStr, _ := fs.GetString(cflags.FlagCommissionMaxRate) + maxChangeRateStr, _ := fs.GetString(cflags.FlagCommissionMaxChangeRate) + + commissionRates, err := buildCommissionRates(rateStr, maxRateStr, maxChangeRateStr) + if err != nil { + 
return nil, err + } + + msg, err := stakingtypes.NewMsgCreateValidator( + sdk.ValAddress(valAddr), pk, amount, description, commissionRates, + ) + if err != nil { + return nil, err + } + if err := msg.ValidateBasic(); err != nil { + return nil, err + } + + return msg, nil +} + +// CreateValidatorMsgFlagSet returns the flagset, particular flags, and a description of defaults +// this is anticipated to be used with the gen-tx +func CreateValidatorMsgFlagSet(ipDefault string) (fs *flag.FlagSet, defaultsDesc string) { + fsCreateValidator := flag.NewFlagSet("", flag.ContinueOnError) + fsCreateValidator.String(cflags.FlagIP, ipDefault, "The node's public P2P IP") + fsCreateValidator.Uint(cflags.FlagP2PPort, 26656, "The node's public P2P port") + fsCreateValidator.String(cflags.FlagNodeID, "", "The node's NodeID") + fsCreateValidator.String(cflags.FlagMoniker, "", "The validator's (optional) moniker") + fsCreateValidator.String(cflags.FlagWebsite, "", "The validator's (optional) website") + fsCreateValidator.String(cflags.FlagSecurityContact, "", "The validator's (optional) security contact email") + fsCreateValidator.String(cflags.FlagDetails, "", "The validator's (optional) details") + fsCreateValidator.String(cflags.FlagIdentity, "", "The (optional) identity signature (ex. UPort or Keybase)") + fsCreateValidator.AddFlagSet(cflags.FlagSetCommissionCreate()) + fsCreateValidator.AddFlagSet(cflags.FlagSetAmount()) + fsCreateValidator.AddFlagSet(cflags.FlagSetPublicKey()) + + defaultsDesc = fmt.Sprintf(` + delegation amount: %s + commission rate: %s + commission max rate: %s + commission max change rate: %s +`, defaultAmount, defaultCommissionRate, + defaultCommissionMaxRate, defaultCommissionMaxChangeRate) + + return fsCreateValidator, defaultsDesc +} + +type TxCreateValidatorConfig struct { + ChainID string + NodeID string + Moniker string + + Amount string + + CommissionRate string + CommissionMaxRate string + CommissionMaxChangeRate string + + PubKey cryptotypes.PubKey + + IP string + P2PPort uint + Website string + SecurityContact string + Details string + Identity string +} + +func PrepareConfigForTxCreateValidator(flagSet *flag.FlagSet, moniker, nodeID, chainID string, valPubKey cryptotypes.PubKey) (TxCreateValidatorConfig, error) { + c := TxCreateValidatorConfig{} + + ip, err := flagSet.GetString(cflags.FlagIP) + if err != nil { + return c, err + } + + if ip == "" { + _, _ = fmt.Fprintf(os.Stderr, "failed to retrieve an external IP; the tx's memo field will be unset") + } + + p2pPort, err := flagSet.GetUint(cflags.FlagP2PPort) + if err != nil { + return c, err + } + + website, err := flagSet.GetString(cflags.FlagWebsite) + if err != nil { + return c, err + } + + securityContact, err := flagSet.GetString(cflags.FlagSecurityContact) + if err != nil { + return c, err + } + + details, err := flagSet.GetString(cflags.FlagDetails) + if err != nil { + return c, err + } + + identity, err := flagSet.GetString(cflags.FlagIdentity) + if err != nil { + return c, err + } + + c.Amount, err = flagSet.GetString(cflags.FlagAmount) + if err != nil { + return c, err + } + + c.CommissionRate, err = flagSet.GetString(cflags.FlagCommissionRate) + if err != nil { + return c, err + } + + c.CommissionMaxRate, err = flagSet.GetString(cflags.FlagCommissionMaxRate) + if err != nil { + return c, err + } + + c.CommissionMaxChangeRate, err = flagSet.GetString(cflags.FlagCommissionMaxChangeRate) + if err != nil { + return c, err + } + + c.IP = ip + c.P2PPort = p2pPort + c.Website = website + c.SecurityContact = 
securityContact
+	c.Identity = identity
+	c.NodeID = nodeID
+	c.PubKey = valPubKey
+	c.Details = details
+	c.ChainID = chainID
+	c.Moniker = moniker
+
+	if c.Amount == "" {
+		c.Amount = defaultAmount
+	}
+
+	if c.CommissionRate == "" {
+		c.CommissionRate = defaultCommissionRate
+	}
+
+	if c.CommissionMaxRate == "" {
+		c.CommissionMaxRate = defaultCommissionMaxRate
+	}
+
+	if c.CommissionMaxChangeRate == "" {
+		c.CommissionMaxChangeRate = defaultCommissionMaxChangeRate
+	}
+
+	return c, nil
+}
+
+// BuildCreateValidatorMsg makes a new MsgCreateValidator.
+func BuildCreateValidatorMsg(clientCtx client.Context, config TxCreateValidatorConfig, txBldr tx.Factory, generateOnly bool) (tx.Factory, sdk.Msg, error) {
+	amountStr := config.Amount
+	amount, err := sdk.ParseCoinNormalized(amountStr)
+	if err != nil {
+		return txBldr, nil, err
+	}
+
+	valAddr := clientCtx.GetFromAddress()
+	description := stakingtypes.NewDescription(
+		config.Moniker,
+		config.Identity,
+		config.Website,
+		config.SecurityContact,
+		config.Details,
+	)
+
+	// get the initial validator commission parameters
+	rateStr := config.CommissionRate
+	maxRateStr := config.CommissionMaxRate
+	maxChangeRateStr := config.CommissionMaxChangeRate
+	commissionRates, err := buildCommissionRates(rateStr, maxRateStr, maxChangeRateStr)
+	if err != nil {
+		return txBldr, nil, err
+	}
+
+	msg, err := stakingtypes.NewMsgCreateValidator(
+		sdk.ValAddress(valAddr),
+		config.PubKey,
+		amount,
+		description,
+		commissionRates,
+	)
+	if err != nil {
+		return txBldr, msg, err
+	}
+
+	if generateOnly {
+		ip := config.IP
+		p2pPort := config.P2PPort
+		nodeID := config.NodeID
+
+		if nodeID != "" && ip != "" && p2pPort > 0 {
+			txBldr = txBldr.WithMemo(fmt.Sprintf("%s@%s:%d", nodeID, ip, p2pPort))
+		}
+	}
+
+	return txBldr, msg, nil
+}
+
+// GetTxStakingTokenizeSharesCmd defines a command for tokenizing shares from a validator.
+func GetTxStakingTokenizeSharesCmd() *cobra.Command {
+	bech32PrefixValAddr := sdk.GetConfig().GetBech32ValidatorAddrPrefix()
+	bech32PrefixAccAddr := sdk.GetConfig().GetBech32AccountAddrPrefix()
+
+	cmd := &cobra.Command{
+		Use:   "tokenize-share [validator-addr] [amount] [rewardOwner]",
+		Short: "Tokenize delegation to share tokens",
+		Args:  cobra.ExactArgs(3),
+		Long: strings.TrimSpace(
+			fmt.Sprintf(`Tokenize delegation to share tokens.
+
+Example:
+$ %s tx staking tokenize-share %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj 100stake %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj --from mykey
+`,
+				version.AppName, bech32PrefixValAddr, bech32PrefixAccAddr,
+			),
+		),
+		PersistentPreRunE: TxPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+			cl := MustClientFromContext(ctx)
+			cctx := cl.ClientContext()
+
+			delAddr := cctx.GetFromAddress()
+			valAddr, err := sdk.ValAddressFromBech32(args[0])
+			if err != nil {
+				return err
+			}
+
+			amount, err := sdk.ParseCoinNormalized(args[1])
+			if err != nil {
+				return err
+			}
+
+			rewardOwner, err := sdk.AccAddressFromBech32(args[2])
+			if err != nil {
+				return err
+			}
+
+			msg := &stakingtypes.MsgTokenizeShares{
+				DelegatorAddress:    delAddr.String(),
+				ValidatorAddress:    valAddr.String(),
+				Amount:              amount,
+				TokenizedShareOwner: rewardOwner.String(),
+			}
+
+			resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg})
+			if err != nil {
+				return err
+			}
+
+			return cl.PrintMessage(resp)
+		},
+	}
+
+	cflags.AddTxFlagsToCmd(cmd)
+
+	return cmd
+}
+
+// GetTxStakingRedeemTokensCmd defines a command for redeeming tokens from a validator for shares.
+func GetTxStakingRedeemTokensCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "redeem-tokens [amount]",
+		Short: "Redeem specified amount of share tokens to delegation",
+		Args:  cobra.ExactArgs(1),
+		Long: strings.TrimSpace(
+			fmt.Sprintf(`Redeem specified amount of share tokens to delegation.
+
+Example:
+$ %s tx staking redeem-tokens 100sharetoken --from mykey
+`,
+				version.AppName,
+			),
+		),
+		PersistentPreRunE: TxPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+			cl := MustClientFromContext(ctx)
+			cctx := cl.ClientContext()
+
+			delAddr := cctx.GetFromAddress()
+
+			amount, err := sdk.ParseCoinNormalized(args[0])
+			if err != nil {
+				return err
+			}
+
+			msg := &stakingtypes.MsgRedeemTokensForShares{
+				DelegatorAddress: delAddr.String(),
+				Amount:           amount,
+			}
+
+			resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg})
+			if err != nil {
+				return err
+			}
+
+			return cl.PrintMessage(resp)
+		},
+	}
+
+	cflags.AddTxFlagsToCmd(cmd)
+
+	return cmd
+}
+
+// GetTxStakingTransferTokenizeShareRecordCmd defines a command to transfer ownership of TokenizeShareRecord
+func GetTxStakingTransferTokenizeShareRecordCmd() *cobra.Command {
+	bech32PrefixAccAddr := sdk.GetConfig().GetBech32AccountAddrPrefix()
+
+	cmd := &cobra.Command{
+		Use:   "transfer-tokenize-share-record [record-id] [new-owner]",
+		Short: "Transfer ownership of TokenizeShareRecord",
+		Args:  cobra.ExactArgs(2),
+		Long: strings.TrimSpace(
+			fmt.Sprintf(`Transfer ownership of TokenizeShareRecord.
+
+Example:
+$ %s tx staking transfer-tokenize-share-record 1 %s1gghjut3ccd8ay0zduzj64hwre2fxs9ldmqhffj --from mykey
+`,
+				version.AppName, bech32PrefixAccAddr,
+			),
+		),
+		PersistentPreRunE: TxPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			ctx := cmd.Context()
+			cl := MustClientFromContext(ctx)
+			cctx := cl.ClientContext()
+
+			recordID, err := strconv.ParseUint(args[0], 10, 64)
+			if err != nil {
+				return err
+			}
+
+			ownerAddr, err := sdk.AccAddressFromBech32(args[1])
+			if err != nil {
+				return err
+			}
+
+			msg := &stakingtypes.MsgTransferTokenizeShareRecord{
+				Sender:                cctx.GetFromAddress().String(),
+				TokenizeShareRecordId: recordID,
+				NewOwner:              ownerAddr.String(),
+			}
+
+			resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg})
+			if err != nil {
+				return err
+			}
+
+			return cl.PrintMessage(resp)
+		},
+	}
+
+	cflags.AddTxFlagsToCmd(cmd)
+
+	return cmd
+}
+
+// GetTxStakingDisableTokenizeShares defines a command to disable tokenization for an address
+func GetTxStakingDisableTokenizeShares() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "disable-tokenize-shares",
+		Short: "Disable tokenization of shares",
+		Args:  cobra.ExactArgs(0),
+		Long: strings.TrimSpace(
+			fmt.Sprintf(`Disables the tokenization of shares for an address. The account
+must explicitly re-enable tokenization if it wishes to tokenize again, at which point it must wait
+the chain's unbonding period.
+
+Example:
+$ %s tx staking disable-tokenize-shares --from mykey
+`, version.AppName),
+		),
+		PersistentPreRunE: TxPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, _ []string) error {
+			ctx := cmd.Context()
+			cl := MustClientFromContext(ctx)
+			cctx := cl.ClientContext()
+
+			msg := &stakingtypes.MsgDisableTokenizeShares{
+				DelegatorAddress: cctx.GetFromAddress().String(),
+			}
+
+			resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg})
+			if err != nil {
+				return err
+			}
+
+			return cl.PrintMessage(resp)
+		},
+	}
+
+	cflags.AddTxFlagsToCmd(cmd)
+
+	return cmd
+}
+
+// GetTxStakingEnableTokenizeShares defines a command to re-enable tokenization for an address
+func GetTxStakingEnableTokenizeShares() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "enable-tokenize-shares",
+		Short: "Enable tokenization of shares",
+		Args:  cobra.ExactArgs(0),
+		Long: strings.TrimSpace(
+			fmt.Sprintf(`Enables the tokenization of shares for an address after
+it has been disabled. This transaction queues the enablement of tokenization, but
+the address must wait 1 unbonding period from the time of this transaction before
+tokenization is permitted.
+
+Example:
+$ %s tx staking enable-tokenize-shares --from mykey
+`, version.AppName),
+		),
+		PersistentPreRunE: TxPersistentPreRunE,
+		RunE: func(cmd *cobra.Command, _ []string) error {
+			ctx := cmd.Context()
+			cl := MustClientFromContext(ctx)
+			cctx := cl.ClientContext()
+
+			msg := &stakingtypes.MsgEnableTokenizeShares{
+				DelegatorAddress: cctx.GetFromAddress().String(),
+			}
+
+			resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg})
+			if err != nil {
+				return err
+			}
+
+			return cl.PrintMessage(resp)
+		},
+	}
+
+	cflags.AddTxFlagsToCmd(cmd)
+
+	return cmd
+}
+
+// GetTxStakingValidatorBondCmd defines a command to mark a delegation as a validator self-bond
+func GetTxStakingValidatorBondCmd() *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "validator-bond [validator]",
+		Short: "Mark a delegation as a validator self-bond",
+		Args:  cobra.ExactArgs(1),
+		Long: strings.TrimSpace(
+			fmt.Sprintf(`Mark a delegation as a validator self-bond.
+ +Example: +$ %s tx staking validator-bond cosmosvaloper13h5xdxhsdaugwdrkusf8lkgu406h8t62jkqv3h --from mykey +`, + version.AppName, + ), + ), + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + msg := &stakingtypes.MsgValidatorBond{ + DelegatorAddress: cctx.GetFromAddress().String(), + ValidatorAddress: args[0], + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +func buildCommissionRates(rateStr, maxRateStr, maxChangeRateStr string) (commission stakingtypes.CommissionRates, err error) { + if rateStr == "" || maxRateStr == "" || maxChangeRateStr == "" { + return commission, errors.New("must specify all validator commission parameters") + } + + rate, err := sdkmath.LegacyNewDecFromStr(rateStr) + if err != nil { + return commission, err + } + + maxRate, err := sdkmath.LegacyNewDecFromStr(maxRateStr) + if err != nil { + return commission, err + } + + maxChangeRate, err := sdkmath.LegacyNewDecFromStr(maxChangeRateStr) + if err != nil { + return commission, err + } + + commission = stakingtypes.NewCommissionRates(rate, maxRate, maxChangeRate) + + return commission, nil +} diff --git a/go/cli/staking_tx_test.go b/go/cli/staking_tx_test.go new file mode 100644 index 00000000..55701988 --- /dev/null +++ b/go/cli/staking_tx_test.go @@ -0,0 +1,608 @@ +package cli_test + +import ( + "bytes" + "fmt" + "io" + + sdkmath "cosmossdk.io/math" + abci "github.com/cometbft/cometbft/abci/types" + rpcclientmock "github.com/cometbft/cometbft/rpc/client/mock" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/crypto/hd" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" + clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli" + simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" + sdk "github.com/cosmos/cosmos-sdk/types" + testutilmod "github.com/cosmos/cosmos-sdk/types/module/testutil" + "github.com/cosmos/cosmos-sdk/x/staking" + "github.com/cosmos/gogoproto/proto" + "github.com/spf13/pflag" + "github.com/stretchr/testify/require" + + "pkg.akt.dev/go/cli" + cflags "pkg.akt.dev/go/cli/flags" +) + +var PKs = simtestutil.CreateTestPubKeys(500) + +type StakingCLITestSuite struct { + CLITestSuite + + addrs []sdk.AccAddress +} + +func (s *StakingCLITestSuite) SetupSuite() { + s.encCfg = testutilmod.MakeTestEncodingConfig(staking.AppModuleBasic{}) + s.kr = keyring.NewInMemory(s.encCfg.Codec) + s.baseCtx = client.Context{}. + WithKeyring(s.kr). + WithTxConfig(s.encCfg.TxConfig). + WithCodec(s.encCfg.Codec). + WithLegacyAmino(s.encCfg.Amino). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). + WithChainID("test-chain"). 
+ WithSignModeStr(cflags.SignModeDirect) + + var outBuf bytes.Buffer + ctxGen := func() client.Context { + bz, _ := s.encCfg.Codec.Marshal(&sdk.TxResponse{}) + c := clitestutil.NewMockTendermintRPC(abci.ResponseQuery{ + Value: bz, + }) + return s.baseCtx.WithClient(c) + } + s.cctx = ctxGen().WithOutput(&outBuf) + + s.addrs = make([]sdk.AccAddress, 0) + for i := 0; i < 3; i++ { + k, _, err := s.cctx.Keyring.NewMnemonic("NewValidator", keyring.English, sdk.FullFundraiserPath, keyring.DefaultBIP39Passphrase, hd.Secp256k1) + s.Require().NoError(err) + + pub, err := k.GetPubKey() + s.Require().NoError(err) + + newAddr := sdk.AccAddress(pub.Address()) + s.addrs = append(s.addrs, newAddr) + } +} + +func (s *StakingCLITestSuite) TestPrepareConfigForTxCreateValidator() { + chainID := "chainID" + ip := "1.1.1.1" + nodeID := "nodeID" + privKey := ed25519.GenPrivKey() + valPubKey := privKey.PubKey() + moniker := "DefaultMoniker" + mkTxValCfg := func(amount, commission, commissionMax, commissionMaxChange string) cli.TxCreateValidatorConfig { + return cli.TxCreateValidatorConfig{ + IP: ip, + ChainID: chainID, + NodeID: nodeID, + P2PPort: 26656, + PubKey: valPubKey, + Moniker: moniker, + Amount: amount, + CommissionRate: commission, + CommissionMaxRate: commissionMax, + CommissionMaxChangeRate: commissionMaxChange, + } + } + + tests := []struct { + name string + fsModify func(fs *pflag.FlagSet) + expectedCfg cli.TxCreateValidatorConfig + }{ + { + name: "all defaults", + fsModify: func(_ *pflag.FlagSet) {}, + expectedCfg: mkTxValCfg(cli.DefaultTokens.String()+sdk.DefaultBondDenom, "0.1", "0.2", "0.01"), + }, + { + name: "Custom amount", + fsModify: func(fs *pflag.FlagSet) { + err := fs.Set(cflags.FlagAmount, "2000stake") + if err != nil { + panic(err) + } + }, + expectedCfg: mkTxValCfg("2000stake", "0.1", "0.2", "0.01"), + }, + { + name: "Custom commission rate", + fsModify: func(fs *pflag.FlagSet) { + err := fs.Set(cflags.FlagCommissionRate, "0.54") + if err != nil { + panic(err) + } + }, + expectedCfg: mkTxValCfg(cli.DefaultTokens.String()+sdk.DefaultBondDenom, "0.54", "0.2", "0.01"), + }, + { + name: "Custom commission max rate", + fsModify: func(fs *pflag.FlagSet) { + err := fs.Set(cflags.FlagCommissionMaxRate, "0.89") + if err != nil { + panic(err) + } + }, + expectedCfg: mkTxValCfg(cli.DefaultTokens.String()+sdk.DefaultBondDenom, "0.1", "0.89", "0.01"), + }, + { + name: "Custom commission max change rate", + fsModify: func(fs *pflag.FlagSet) { + err := fs.Set(cflags.FlagCommissionMaxChangeRate, "0.55") + if err != nil { + panic(err) + } + }, + expectedCfg: mkTxValCfg(cli.DefaultTokens.String()+sdk.DefaultBondDenom, "0.1", "0.2", "0.55"), + }, + } + + for _, tc := range tests { + s.Run(tc.name, func() { + fs, _ := cli.CreateValidatorMsgFlagSet(ip) + fs.String(flags.FlagName, "", "name of private key with which to sign the gentx") + + tc.fsModify(fs) + + cvCfg, err := cli.PrepareConfigForTxCreateValidator(fs, moniker, nodeID, chainID, valPubKey) + require.NoError(s.T(), err) + + require.Equal(s.T(), tc.expectedCfg, cvCfg) + }) + } +} + +func (s *StakingCLITestSuite) TestNewCreateValidatorCmd() { + args := cli.TestFlags(). + WithSkipConfirm(). + WithBroadcastModeSync(). 
+ WithFees(sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10)))) + + consPrivKey := ed25519.GenPrivKey() + consPubKeyBz, err := s.encCfg.Codec.MarshalInterfaceJSON(consPrivKey.PubKey()) + require.NoError(s.T(), err) + require.NotNil(s.T(), consPubKeyBz) + + testCases := []struct { + name string + args []string + expectErr bool + expectedCode uint32 + respType proto.Message + }{ + { + "invalid transaction (missing amount)", + cli.TestFlags(). + WithIdentity("AFAF00C4"). + WithWebsite("https://newvalidator.io"). + WithSecurityContact("contact@newvalidator.io"). + WithDetails("'Hey, I am a new validator. Please delegate!'"). + WithCommissionRate("0.5"). + WithCommissionMaxRate("1.0"). + WithCommissionMaxChangeRate("0.1"). + WithFrom(s.addrs[0].String()). + Append(args), + true, 0, nil, + }, + { + "invalid transaction (missing pubkey)", + cli.TestFlags(). + WithAmount("100uakt"). + WithIdentity("AFAF00C4"). + WithWebsite("https://newvalidator.io"). + WithSecurityContact("contact@newvalidator.io"). + WithDetails("'Hey, I am a new validator. Please delegate!'"). + WithCommissionRate("0.5"). + WithCommissionMaxRate("1.0"). + WithCommissionMaxChangeRate("0.1"). + WithFrom(s.addrs[0].String()). + Append(args), + true, 0, nil, + }, + { + "invalid transaction (missing moniker)", + cli.TestFlags(). + WithPubkey(string(consPubKeyBz)). + WithAmount("100uakt"). + WithIdentity("AFAF00C4"). + WithWebsite("https://newvalidator.io"). + WithSecurityContact("contact@newvalidator.io"). + WithDetails("'Hey, I am a new validator. Please delegate!'"). + WithCommissionRate("0.5"). + WithCommissionMaxRate("1.0"). + WithCommissionMaxChangeRate("0.1"). + WithFrom(s.addrs[0].String()). + Append(args), + true, 0, nil, + }, + { + "valid transaction", + cli.TestFlags(). + WithPubkey(string(consPubKeyBz)). + WithAmount("100uakt"). + WithMoniker("NewValidator"). + WithIdentity("AFAF00C4"). + WithWebsite("https://newvalidator.io"). + WithSecurityContact("contact@newvalidator.io"). + WithDetails("'Hey, I am a new validator. Please delegate!'"). + WithCommissionRate("0.5"). + WithCommissionMaxRate("1.0"). + WithCommissionMaxChangeRate("0.1"). + WithFrom(s.addrs[0].String()). + Append(args), + false, 0, &sdk.TxResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxStakingCreateValidatorCmd() + out, err := clitestutil.ExecTestCLICmd(s.cctx, cmd, tc.args) + if tc.expectErr { + require.Error(s.T(), err) + } else { + require.NoError(s.T(), err, "test: %s\noutput: %s", tc.name, out.String()) + err = s.cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType) + require.NoError(s.T(), err, out.String(), "test: %s, output\n:", tc.name, out.String()) + + txResp := tc.respType.(*sdk.TxResponse) + require.Equal(s.T(), tc.expectedCode, txResp.Code, + "test: %s, output\n:", tc.name, out.String()) + } + }) + } +} + +func (s *StakingCLITestSuite) TestNewEditValidatorCmd() { + details := "bio" + identity := "test identity" + securityContact := "test contact" + website := "https://test.com" + + args := cli.TestFlags(). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10)))) + + testCases := []struct { + name string + args []string + expectErr bool + expectedCode uint32 + respType proto.Message + }{ + { + "with no edit flag (since all are optional)", + cli.TestFlags(). + WithFrom("with wrong from address"). + Append(args), + true, 0, nil, + }, + { + "with no edit flag (since all are optional)", + cli.TestFlags(). 
+ WithFrom(s.addrs[0].String()). + Append(args), + false, 0, &sdk.TxResponse{}, + }, + { + "edit validator details", + cli.TestFlags(). + WithDetails(details). + WithFrom(s.addrs[0].String()). + Append(args), + false, 0, &sdk.TxResponse{}, + }, + { + "edit validator identity", + cli.TestFlags(). + WithIdentity(identity). + WithFrom(s.addrs[0].String()). + Append(args), + false, 0, &sdk.TxResponse{}, + }, + { + "edit validator security-contact", + cli.TestFlags(). + WithSecurityContact(securityContact). + WithFrom(s.addrs[0].String()). + Append(args), + false, 0, &sdk.TxResponse{}, + }, + { + "edit validator website", + cli.TestFlags(). + WithWebsite(website). + WithFrom(s.addrs[0].String()). + Append(args), + false, 0, &sdk.TxResponse{}, + }, + { + "with all edit flags", + cli.TestFlags(). + WithDetails(details). + WithIdentity(identity). + WithSecurityContact(securityContact). + WithWebsite(website). + WithFrom(s.addrs[0].String()). + Append(args), + false, 0, &sdk.TxResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxStakingEditValidatorCmd() + + out, err := clitestutil.ExecTestCLICmd(s.cctx, cmd, tc.args) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err, out.String()) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + + txResp := tc.respType.(*sdk.TxResponse) + s.Require().Equal(tc.expectedCode, txResp.Code, out.String()) + } + }) + } +} + +func (s *StakingCLITestSuite) TestNewDelegateCmd() { + args := cli.TestFlags(). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10)))) + + testCases := []struct { + name string + args []string + expectErr bool + expectedCode uint32 + respType proto.Message + }{ + { + "without delegate amount", + cli.TestFlags(). + With(sdk.ValAddress(s.addrs[0]).String()). + WithFrom(s.addrs[0].String()). + Append(args), + true, 0, nil, + }, + { + "without validator address", + cli.TestFlags(). + With(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(150)).String()). + WithFrom(s.addrs[0].String()). + Append(args), + true, 0, nil, + }, + { + "valid transaction of delegate", + cli.TestFlags(). + With( + sdk.ValAddress(s.addrs[0]).String(), + sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(150)).String(), + ). + WithFrom(s.addrs[0].String()). + Append(args), + false, 0, &sdk.TxResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxStakingDelegateCmd() + + out, err := clitestutil.ExecTestCLICmd(s.cctx, cmd, tc.args) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err, out.String()) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + + txResp := tc.respType.(*sdk.TxResponse) + s.Require().Equal(tc.expectedCode, txResp.Code, out.String()) + } + }) + } +} + +func (s *StakingCLITestSuite) TestNewRedelegateCmd() { + args := cli.TestFlags(). + WithSkipConfirm(). + WithBroadcastModeSync(). + WithFees(sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10)))) + + testCases := []struct { + name string + args []string + expectErr bool + expectedCode uint32 + respType proto.Message + }{ + { + "without amount", + cli.TestFlags(). + With( + sdk.ValAddress(s.addrs[0]).String(), // src-validator-addr + sdk.ValAddress(s.addrs[1]).String(), // dst-validator-addr + ). + WithFrom(s.addrs[0].String()). 
+ Append(args), + true, 0, nil, + }, + { + "valid transaction of delegate", + cli.TestFlags(). + With( + sdk.ValAddress(s.addrs[0]).String(), // src-validator-addr + sdk.ValAddress(s.addrs[1]).String(), // dst-validator-addr + sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(150)).String(), // amount + ). + WithFrom(s.addrs[0].String()). + Append(args), + false, 0, &sdk.TxResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxStakingRedelegateCmd() + + out, err := clitestutil.ExecTestCLICmd(s.cctx, cmd, tc.args) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err, out.String()) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + + txResp := tc.respType.(*sdk.TxResponse) + s.Require().Equal(tc.expectedCode, txResp.Code, out.String()) + } + }) + } +} + +func (s *StakingCLITestSuite) TestNewUnbondCmd() { + testCases := []struct { + name string + args []string + expectErr bool + expectedCode uint32 + respType proto.Message + }{ + { + "Without unbond amount", + []string{ + sdk.ValAddress(s.addrs[0]).String(), + fmt.Sprintf("--%s=%s", flags.FlagFrom, s.addrs[0]), + fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation), + fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastSync), + fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10))).String()), + }, + true, 0, nil, + }, + { + "Without validator address", + []string{ + sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(150)).String(), + fmt.Sprintf("--%s=%s", flags.FlagFrom, s.addrs[0]), + fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation), + fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastSync), + fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10))).String()), + }, + true, 0, nil, + }, + { + "valid transaction of unbond", + []string{ + sdk.ValAddress(s.addrs[0]).String(), + sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(150)).String(), + fmt.Sprintf("--%s=%s", flags.FlagFrom, s.addrs[0]), + fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation), + fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastSync), + fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10))).String()), + }, + false, 0, &sdk.TxResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxStakingUnbondCmd() + + out, err := clitestutil.ExecTestCLICmd(s.cctx, cmd, tc.args) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err, out.String()) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + + txResp := tc.respType.(*sdk.TxResponse) + s.Require().Equal(tc.expectedCode, txResp.Code, out.String()) + } + }) + } +} + +func (s *StakingCLITestSuite) TestNewCancelUnbondingDelegationCmd() { + testCases := []struct { + name string + args []string + expectErr bool + expectedCode uint32 + respType proto.Message + }{ + { + "Without validator address", + []string{ + fmt.Sprintf("--%s=%s", flags.FlagFrom, s.addrs[0]), + fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation), + fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastSync), + fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10))).String()), + }, + true, 0, nil, + }, + { + "Without canceling unbond delegation amount", + []string{ + 
sdk.ValAddress(s.addrs[0]).String(), + fmt.Sprintf("--%s=%s", flags.FlagFrom, s.addrs[0]), + fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation), + fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastSync), + fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10))).String()), + }, + true, 0, nil, + }, + { + "Without unbond creation height", + []string{ + sdk.ValAddress(s.addrs[0]).String(), + sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(150)).String(), + fmt.Sprintf("--%s=%s", flags.FlagFrom, s.addrs[0]), + fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation), + fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastSync), + fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10))).String()), + }, + true, 0, nil, + }, + { + "valid transaction of canceling unbonding delegation", + []string{ + sdk.ValAddress(s.addrs[0]).String(), + sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(5)).String(), + sdkmath.NewInt(10000).String(), + fmt.Sprintf("--%s=%s", flags.FlagFrom, s.addrs[0]), + fmt.Sprintf("--%s=true", flags.FlagSkipConfirmation), + fmt.Sprintf("--%s=%s", flags.FlagBroadcastMode, flags.BroadcastSync), + fmt.Sprintf("--%s=%s", flags.FlagFees, sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdkmath.NewInt(10))).String()), + }, + false, 0, &sdk.TxResponse{}, + }, + } + + for _, tc := range testCases { + s.Run(tc.name, func() { + cmd := cli.GetTxStakingCancelUnbondingDelegationCmd() + out, err := clitestutil.ExecTestCLICmd(s.cctx, cmd, tc.args) + if tc.expectErr { + s.Require().Error(err) + } else { + s.Require().NoError(err, out.String()) + s.Require().NoError(s.cctx.Codec.UnmarshalJSON(out.Bytes(), tc.respType), out.String()) + + txResp := tc.respType.(*sdk.TxResponse) + s.Require().Equal(tc.expectedCode, txResp.Code, out.String()) + } + }) + } +} diff --git a/go/cli/suite_test.go b/go/cli/suite_test.go new file mode 100644 index 00000000..bca9e6a8 --- /dev/null +++ b/go/cli/suite_test.go @@ -0,0 +1,32 @@ +package cli_test + +import ( + "testing" + + "github.com/stretchr/testify/suite" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + testutilmod "github.com/cosmos/cosmos-sdk/types/module/testutil" +) + +type CLITestSuite struct { + suite.Suite + + kr keyring.Keyring + encCfg testutilmod.TestEncodingConfig + baseCtx client.Context + cctx client.Context +} + +func TestCLITestSuite(t *testing.T) { + suite.Run(t, new(AuthCLITestSuite)) + suite.Run(t, new(AuthzCLITestSuite)) + suite.Run(t, new(BankCLITestSuite)) + suite.Run(t, new(DistributionCLITestSuite)) + suite.Run(t, new(FeegrantCLITestSuite)) + suite.Run(t, new(GovCLITestSuite)) + suite.Run(t, new(GenesisCLITestSuite)) + suite.Run(t, new(SlashingCLITestSuite)) + suite.Run(t, new(StakingCLITestSuite)) +} diff --git a/go/cli/test_helpers.go b/go/cli/test_helpers.go new file mode 100644 index 00000000..269afe28 --- /dev/null +++ b/go/cli/test_helpers.go @@ -0,0 +1,906 @@ +package cli + +import ( + "fmt" + + sdkmath "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" + govv1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + + dv1 "pkg.akt.dev/go/node/deployment/v1" + mtypes "pkg.akt.dev/go/node/market/v1" + + cflags "pkg.akt.dev/go/cli/flags" +) + +var ( + DefaultPowerReduction = sdkmath.NewIntFromUint64(sdk.DefaultPowerReduction.Uint64()) + DefaultMinDepositTokens = sdkmath.NewIntFromUint64(govv1.DefaultMinDepositTokens.Uint64()) +) + +type FlagsSet []string + +func 
TestFlags() FlagsSet { + return FlagsSet{} +} + +func (df FlagsSet) With(flags ...string) FlagsSet { + res := make([]string, len(df), len(df)+len(flags)) + + copy(res, df) + res = append(res, flags...) + + return res +} + +func (df FlagsSet) WithLimit(val int64) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagLimit, val)) + + return res +} + +func (df FlagsSet) Append(rhs FlagsSet) FlagsSet { + res := make([]string, len(df), len(df)+len(rhs)) + + copy(res, df) + res = append(res, rhs...) + + return res +} + +func (df FlagsSet) WithAllowedMsgs(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagAllowedMsgs, val)) + + return res +} + +func (df FlagsSet) WithGas(val int) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagGas, val)) + + return res +} + +func (df FlagsSet) WithGasAutoFlags() FlagsSet { + res := make([]string, len(df), len(df)+3) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagGas, cflags.GasFlagAuto)) + res = append(res, fmt.Sprintf("--%s=0.0025uakt", cflags.FlagGasPrices)) + res = append(res, fmt.Sprintf("--%s=1.5", cflags.FlagGasAdjustment)) + + return res +} + +func (df FlagsSet) WithGenerateOnly() FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=true", cflags.FlagGenerateOnly)) + + return res +} + +func (df FlagsSet) WithOverwrite() FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=true", cflags.FlagOverwrite)) + + return res +} + +func (df FlagsSet) WithOffline() FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=true", cflags.FlagOffline)) + + return res +} + +func (df FlagsSet) WithAccountNumber(val uint64) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagAccountNumber, val)) + + return res +} + +func (df FlagsSet) WithSequence(val uint64) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagSequence, val)) + + return res +} + +func (df FlagsSet) WithSkipConfirm() FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=true", cflags.FlagSkipConfirmation)) + + return res +} + +func (df FlagsSet) WithSignatureOnly() FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=true", cflags.FlagSigOnly)) + + return res +} + +func (df FlagsSet) WithNote(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagNote, val)) + + return res +} + +func (df FlagsSet) WithSpendLimit(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagSpendLimit, val)) + + return res +} + +func (df FlagsSet) WithPeriodLimit(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagPeriodLimit, val)) + + return res +} + +func (df FlagsSet) WithPeriod(val int64) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + 
res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagPeriod, val)) + + return res +} + +func (df FlagsSet) WithEvents(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagEvents, val)) + + return res +} + +func (df FlagsSet) WithDenom(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagDenom, val)) + + return res +} + +func (df FlagsSet) WithMsgType(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagMsgType, val)) + + return res +} + +func (df FlagsSet) WithBroadcastModeSync() FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagBroadcastMode, cflags.BroadcastSync)) + + return res +} + +func (df FlagsSet) WithExpiration(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagExpiration, val)) + + return res +} + +func (df FlagsSet) WithCommission() FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=true", cflags.FlagCommission)) + + return res +} + +func (df FlagsSet) WithAllowList(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagAllowList, val)) + + return res +} + +func (df FlagsSet) WithAllowedValidators(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagAllowedValidators, val)) + + return res +} + +func (df FlagsSet) WithDenyValidators(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagDenyValidators, val)) + + return res +} + +func (df FlagsSet) WithSignMode(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagSignMode, val)) + + return res +} + +func (df FlagsSet) WithTip(val sdk.Coin) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagTip, val.String())) + + return res +} + +func (df FlagsSet) WithAux() FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=true", cflags.FlagAux)) + + return res +} + +func (df FlagsSet) WithMultisig(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagMultisig, val)) + + return res +} + +func (df FlagsSet) WithMetadata(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagMetadata, val)) + + return res +} + +func (df FlagsSet) WithProposal(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagProposal, val)) // nolint:staticcheck + + return res +} + +func (df FlagsSet) WithTitle(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagTitle, val)) + + return res +} + +func (df FlagsSet) WithType(val string) FlagsSet { + res := make([]string, 
len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagType, val)) + + return res +} + +func (df FlagsSet) WithProposalType(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagProposalType, val)) // nolint:staticcheck + + return res +} + +func (df FlagsSet) WithDescription(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagDescription, val)) // nolint:staticcheck + + return res +} + +func (df FlagsSet) WithBroadcastModeBlock() FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagBroadcastMode, cflags.BroadcastBlock)) + + return res +} + +func (df FlagsSet) WithHome(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagHome, val)) + + return res +} + +func (df FlagsSet) WithChainID(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagChainID, val)) + + return res +} + +func (df FlagsSet) WithFees(coins sdk.Coins) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagFees, coins.String())) + + return res +} + +func (df FlagsSet) WithDeposit(coin sdk.Coin) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagDeposit, coin)) + + return res +} + +func (df FlagsSet) WithPrice(coin sdk.DecCoin) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagPrice, coin)) + + return res +} + +func (df FlagsSet) WithFrom(acc string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagFrom, acc)) + + return res +} + +func (df FlagsSet) WithFeeGranter(val sdk.AccAddress) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagFeeGranter, val.String())) + + return res +} + +func (df FlagsSet) WithFeePayer(val sdk.AccAddress) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagFeePayer, val.String())) + + return res +} + +func (df FlagsSet) WithDepositor(acc sdk.Address) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagDepositorAccount, acc)) + + return res +} + +func (df FlagsSet) WithOutput(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagOutput, val)) + + return res +} + +func (df FlagsSet) WithSerial(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagSerial, val)) + + return res +} + +func (df FlagsSet) WithOwner(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagOwner, val)) + + return res +} + +func (df FlagsSet) WithProvider(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", 
cflags.FlagProvider, val)) + + return res +} + +func (df FlagsSet) WithDseq(val uint64) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagDSeq, val)) + + return res +} + +func (df FlagsSet) WithGseq(val uint32) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagGSeq, val)) + + return res +} + +func (df FlagsSet) WithOseq(val uint32) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagOSeq, val)) + + return res +} + +func (df FlagsSet) WithDeploymentID(val dv1.DeploymentID) FlagsSet { + res := make([]string, len(df), len(df)+2) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagOwner, val.Owner)) + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagDSeq, val.DSeq)) + + return res +} + +func (df FlagsSet) WithGroupID(val dv1.GroupID) FlagsSet { + res := make([]string, len(df), len(df)+3) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagOwner, val.Owner)) + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagDSeq, val.DSeq)) + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagGSeq, val.GSeq)) + + return res +} + +func (df FlagsSet) WithOrderID(val mtypes.OrderID) FlagsSet { + res := make([]string, len(df), len(df)+4) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagOwner, val.Owner)) + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagDSeq, val.DSeq)) + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagGSeq, val.GSeq)) + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagOSeq, val.OSeq)) + + return res +} + +func (df FlagsSet) WithBidID(val mtypes.BidID) FlagsSet { + res := make([]string, len(df), len(df)+5) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagOwner, val.Owner)) + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagDSeq, val.DSeq)) + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagGSeq, val.GSeq)) + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagOSeq, val.OSeq)) + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagProvider, val.Provider)) + + return res +} + +func (df FlagsSet) WithLeaseID(val mtypes.LeaseID) FlagsSet { + res := make([]string, len(df), len(df)+5) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagOwner, val.Owner)) + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagDSeq, val.DSeq)) + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagGSeq, val.GSeq)) + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagOSeq, val.OSeq)) + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagProvider, val.Provider)) + + return res +} + +func (df FlagsSet) WithState(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagState, val)) + + return res +} + +func (df FlagsSet) WithStatus(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagStatus, val)) + + return res +} + +func (df FlagsSet) WithHeight(val uint64) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%d", cflags.FlagHeight, val)) + + return res +} + +func (df FlagsSet) WithOutputJSON() FlagsSet { + return df.WithOutput("json") +} + +func (df FlagsSet) WithOutputYAML() FlagsSet { + return df.WithOutput("yaml") +} + +func 
(df FlagsSet) WithOutputText() FlagsSet { + return df.WithOutput("text") +} + +func (df FlagsSet) WithIdentity(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagIdentity, val)) + + return res +} + +func (df FlagsSet) WithWebsite(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagWebsite, val)) + + return res +} + +func (df FlagsSet) WithSecurityContact(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagSecurityContact, val)) + + return res +} + +func (df FlagsSet) WithDetails(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagDetails, val)) + + return res +} + +func (df FlagsSet) WithCommissionRate(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagCommissionRate, val)) + + return res +} + +func (df FlagsSet) WithCommissionMaxRate(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagCommissionMaxRate, val)) + + return res +} + +func (df FlagsSet) WithCommissionMaxChangeRate(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagCommissionMaxChangeRate, val)) + + return res +} + +func (df FlagsSet) WithAmount(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagAmount, val)) + + return res +} + +func (df FlagsSet) WithPubkey(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagPubKey, val)) + + return res +} + +func (df FlagsSet) WithMoniker(val string) FlagsSet { + res := make([]string, len(df), len(df)+1) + + copy(res, df) + + res = append(res, fmt.Sprintf("--%s=%s", cflags.FlagMoniker, val)) + + return res +} + +// // ExecTestCLICmd builds the client context, mocks the output and executes the command. 
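+// // These local exec helpers are kept commented out; the test suites currently drive commands through
+// // the SDK's clitestutil.ExecTestCLICmd and the ExecTestCLICmd helper in go/cli/testutil (see testutil/auth.go).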
+// func ExecTestCLICmd(ctx context.Context, cctx client.Context, cmd *cobra.Command, extraArgs ...string) (sdktest.BufferWriter, error) { +// _, out := sdktest.ApplyMockIO(cmd) +// +// { +// dupFlags := make(map[string]bool) +// for _, arg := range extraArgs { +// if !strings.HasPrefix(arg, "--") { +// continue +// } +// +// arg = strings.TrimPrefix(arg, "--") +// tokens := strings.Split(arg, "=") +// +// if _, exists := dupFlags[tokens[0]]; exists { +// return out, fmt.Errorf("test: duplicated flag \"%s\"", tokens[0]) +// } +// +// dupFlags[tokens[0]] = true +// } +// } +// +// cmd.SetArgs(extraArgs) +// err := cmd.ParseFlags(extraArgs) +// +// if err != nil { +// return out, err +// } +// +// cctx = cctx.WithOutput(out) +// +// if ctx == nil { +// ctx = context.Background() +// } +// +// opts, err := cflags.ClientOptionsFromFlags(cmd.Flags()) +// if err != nil { +// return out, err +// } +// +// ctx = context.WithValue(ctx, cli.ClientContextKey, &client.Context{}) +// ctx = context.WithValue(ctx, server.ServerContextKey, server.NewDefaultContext()) +// cmd.SetContext(ctx) +// +// if err = client.SetCmdClientContextHandler(cctx, cmd); err != nil { +// return out, err +// } +// +// tctx, err := client.GetClientTxContext(cmd) +// if err != nil { +// return out, err +// } +// +// cl, err := DiscoverClient(ctx, tctx, opts...) +// if err != nil { +// return out, err +// } +// +// ctx = context.WithValue(ctx, ContextTypeClient, cl) +// +// cmd.SetContext(ctx) +// +// if err := cmd.Execute(); err != nil { +// return out, err +// } +// +// return out, nil +// } +// +// // ExecQueryTestCLICmd builds the client context, mocks the output and executes the command. +// func ExecQueryTestCLICmd(ctx context.Context, cctx client.Context, cmd *cobra.Command, extraArgs ...string) (sdktest.BufferWriter, error) { +// _, out := sdktest.ApplyMockIO(cmd) +// +// { +// dupFlags := make(map[string]bool) +// for _, arg := range extraArgs { +// if !strings.HasPrefix(arg, "--") { +// continue +// } +// +// arg = strings.TrimPrefix(arg, "--") +// tokens := strings.Split(arg, "=") +// +// if _, exists := dupFlags[tokens[0]]; exists { +// return out, fmt.Errorf("test: duplicated flag \"%s\"", tokens[0]) +// } +// +// dupFlags[tokens[0]] = true +// } +// } +// +// cmd.SetArgs(extraArgs) +// err := cmd.ParseFlags(extraArgs) +// +// if err != nil { +// return out, err +// } +// +// cctx = cctx.WithOutput(out) +// +// if ctx == nil { +// ctx = context.Background() +// } +// +// ctx = context.WithValue(ctx, cli.ClientContextKey, &client.Context{}) +// ctx = context.WithValue(ctx, server.ServerContextKey, server.NewDefaultContext()) +// cmd.SetContext(ctx) +// +// if err = client.SetCmdClientContextHandler(cctx, cmd); err != nil { +// return out, err +// } +// +// qctx, err := client.GetClientQueryContext(cmd) +// if err != nil { +// return out, err +// } +// +// qcl, err := DiscoverQueryClient(ctx, qctx) +// if err != nil { +// return out, err +// } +// +// ctx = context.WithValue(ctx, ContextTypeQueryClient, qcl) +// cmd.SetContext(ctx) +// +// if err := cmd.Execute(); err != nil { +// return out, err +// } +// +// return out, nil +// } +// +// func MsgSendExec(ctx context.Context, cctx client.Context, from, to, amount fmt.Stringer, extraArgs ...string) (sdktest.BufferWriter, error) { +// args := []string{from.String(), to.String(), amount.String()} +// args = append(args, extraArgs...) +// +// return ExecTestCLICmd(ctx, cctx, GetTxBankSendTxCmd(), args...) 
+// } +// +// func QueryBalancesExec(ctx context.Context, cctx client.Context, address fmt.Stringer, extraArgs ...string) (sdktest.BufferWriter, error) { +// args := []string{address.String(), fmt.Sprintf("--%s=json", cflags.FlagOutput)} +// args = append(args, extraArgs...) +// +// return ExecQueryTestCLICmd(ctx, cctx, cli.GetBalancesCmd(), args...) +// } diff --git a/go/cli/testutil/auth.go b/go/cli/testutil/auth.go new file mode 100644 index 00000000..4f0de83b --- /dev/null +++ b/go/cli/testutil/auth.go @@ -0,0 +1,90 @@ +package testutil + +import ( + "context" + "fmt" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + "github.com/cosmos/cosmos-sdk/testutil" + + "pkg.akt.dev/go/cli" +) + +func TxSignExec(ctx context.Context, cctx client.Context, args ...string) (testutil.BufferWriter, error) { + cmd := cli.GetSignCommand() + + return ExecTestCLICmd(ctx, cctx, cmd, + cli.TestFlags(). + WithChainID(cctx.ChainID). + Append(args)...) +} + +func TxBroadcastExec(ctx context.Context, cctx client.Context, args ...string) (testutil.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetBroadcastCommand(), args...) +} + +func TxEncodeExec(ctx context.Context, cctx client.Context, filename string, extraArgs ...string) (testutil.BufferWriter, error) { + args := []string{ + fmt.Sprintf("--%s=%s", flags.FlagKeyringBackend, keyring.BackendTest), + filename, + } + + return ExecTestCLICmd(ctx, cctx, cli.GetEncodeCommand(), append(args, extraArgs...)...) +} + +func TxValidateSignaturesExec(ctx context.Context, cctx client.Context, args ...string) (testutil.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetValidateSignaturesCommand(), + cli.TestFlags(). + WithChainID(cctx.ChainID). + Append(args)...) +} + +func TxMultiSignExec(ctx context.Context, cctx client.Context, args ...string) (testutil.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetAuthMultiSignCmd(), cli.TestFlags().Append(args).WithChainID(cctx.ChainID)...) +} + +func TxSignBatchExec(ctx context.Context, cctx client.Context, args ...string) (testutil.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetSignBatchCommand(), args...) +} + +func TxDecodeExec(ctx context.Context, cctx client.Context, encodedTx string, extraArgs ...string) (testutil.BufferWriter, error) { + args := []string{ + fmt.Sprintf("--%s=%s", flags.FlagKeyringBackend, keyring.BackendTest), + encodedTx, + } + + return ExecTestCLICmd(ctx, cctx, cli.GetDecodeCommand(), append(args, extraArgs...)...) +} + +// TxAuxToFeeExec executes `GetAuxToFeeCommand` cli command with given args. +func TxAuxToFeeExec(ctx context.Context, cctx client.Context, filename string, extraArgs ...string) (testutil.BufferWriter, error) { + args := []string{ + filename, + } + + return ExecTestCLICmd(ctx, cctx, cli.GetAuxToFeeCommand(), append(args, extraArgs...)...) +} + +// func QueryAccountExec(ctx context.Context, cctx client.Context, address fmt.Stringer, extraArgs ...string) (testutil.BufferWriter, error) { +// args := []string{address.String(), fmt.Sprintf("--%s=json", tmcli.OutputFlag)} +// +// return ExecTestCLICmd(ctx, cctx, cli.GetAuthAccountCmd(), append(args, extraArgs...)...) 
+// }
+
+func TxMultiSignBatchExec(ctx context.Context, cctx client.Context, filename string, from string, sigFile1 string, sigFile2 string, extraArgs ...string) (testutil.BufferWriter, error) {
+	args := []string{
+		fmt.Sprintf("--%s=%s", flags.FlagKeyringBackend, keyring.BackendTest),
+		filename,
+		from,
+		sigFile1,
+		sigFile2,
+	}
+
+	args = append(args, extraArgs...)
+
+	return ExecTestCLICmd(ctx, cctx, cli.GetMultiSignBatchCmd(), args...)
+}
+
+// DONTCOVER
diff --git a/go/cli/testutil/authz.go b/go/cli/testutil/authz.go
new file mode 100644
index 00000000..ec694ee5
--- /dev/null
+++ b/go/cli/testutil/authz.go
@@ -0,0 +1,15 @@
+package testutil
+
+import (
+	"context"
+
+	"github.com/cosmos/cosmos-sdk/client"
+	"github.com/cosmos/cosmos-sdk/testutil"
+
+	"pkg.akt.dev/go/cli"
+)
+
+func ExecCreateGrant(ctx context.Context, cctx client.Context, args ...string) (testutil.BufferWriter, error) {
+	cmd := cli.GetTxAuthzGrantAuthorizationCmd()
+	return ExecTestCLICmd(ctx, cctx, cmd, args...)
+}
diff --git a/go/cli/testutil/certs.go b/go/cli/testutil/certs.go
new file mode 100644
index 00000000..9a5cb6cb
--- /dev/null
+++ b/go/cli/testutil/certs.go
@@ -0,0 +1,50 @@
+package testutil
+
+import (
+	"context"
+
+	"github.com/cosmos/cosmos-sdk/client"
+	sdktest "github.com/cosmos/cosmos-sdk/testutil"
+
+	"pkg.akt.dev/go/cli"
+)
+
+// TxGenerateServerExec is used for testing generate server certificate tx
+func TxGenerateServerExec(ctx context.Context, cctx client.Context, args ...string) (sdktest.BufferWriter, error) {
+	return ExecTestCLICmd(ctx, cctx, cli.GetTxCertGenerateServerCmd(), args...)
+}
+
+// TxGenerateClientExec is used for testing generate client certificate tx
+func TxGenerateClientExec(ctx context.Context, cctx client.Context, args ...string) (sdktest.BufferWriter, error) {
+	return ExecTestCLICmd(ctx, cctx, cli.GetTxCertGenerateClientCmd(), args...)
+}
+
+// TxPublishServerExec is used for testing publish server certificate tx
+func TxPublishServerExec(ctx context.Context, cctx client.Context, args ...string) (sdktest.BufferWriter, error) {
+	return ExecTestCLICmd(ctx, cctx, cli.GetTxCertPublishServerCmd(), args...)
+}
+
+// TxPublishClientExec is used for testing publish client certificate tx
+func TxPublishClientExec(ctx context.Context, cctx client.Context, args ...string) (sdktest.BufferWriter, error) {
+	return ExecTestCLICmd(ctx, cctx, cli.GetTxCertPublishClientCmd(), args...)
+}
+
+// TxRevokeServerExec is used for testing revoke server certificate tx
+func TxRevokeServerExec(ctx context.Context, cctx client.Context, args ...string) (sdktest.BufferWriter, error) {
+	return ExecTestCLICmd(ctx, cctx, cli.GetTxCertRevokeCmd(), args...)
+}
+
+// TxRevokeClientExec is used for testing revoke client certificate tx
+func TxRevokeClientExec(ctx context.Context, cctx client.Context, args ...string) (sdktest.BufferWriter, error) {
+	return ExecTestCLICmd(ctx, cctx, cli.GetTxCertsRevokeClientCmd(), args...)
+}
+
+// QueryCertificatesExec is used for testing certificates query
+func QueryCertificatesExec(ctx context.Context, cctx client.Context, args ...string) (sdktest.BufferWriter, error) {
+	return ExecTestCLICmd(ctx, cctx, cli.GetQueryCertCertificatesCmd(), args...)
+}
+
+// QueryCertificateExec is used for testing certificate query
+func QueryCertificateExec(ctx context.Context, cctx client.Context, args ...string) (sdktest.BufferWriter, error) {
+	return ExecTestCLICmd(ctx, cctx, cli.GetQueryCertCertificatesCmd(), args...)
+} diff --git a/go/cli/testutil/cmd.go b/go/cli/testutil/cmd.go new file mode 100644 index 00000000..2004461f --- /dev/null +++ b/go/cli/testutil/cmd.go @@ -0,0 +1,77 @@ +package testutil + +import ( + "context" + "fmt" + "strings" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/server" + "github.com/cosmos/cosmos-sdk/testutil" + + "pkg.akt.dev/go/cli" +) + +func execSetContext(ctx context.Context, cctx client.Context, cmd *cobra.Command, extraArgs ...string) (testutil.BufferWriter, error) { + cmd.SetArgs(extraArgs) + err := cmd.ParseFlags(extraArgs) + if err != nil { + return nil, err + } + + _, out := testutil.ApplyMockIO(cmd) + cctx = cctx.WithOutput(out) + + ctx = context.WithValue(ctx, cli.ClientContextKey, &client.Context{}) + ctx = context.WithValue(ctx, server.ServerContextKey, server.NewDefaultContext()) + + cmd.SetContext(ctx) + + if err := cli.SetCmdClientContextHandler(cctx, cmd); err != nil { + return nil, err + } + + return out, nil +} + +// ExecTestCLICmd builds the client context, mocks the output and executes the command. +func ExecTestCLICmd(ctx context.Context, cctx client.Context, cmd *cobra.Command, extraArgs ...string) (testutil.BufferWriter, error) { + { + dupFlags := make(map[string]bool) + for _, arg := range extraArgs { + if !strings.HasPrefix(arg, "--") { + continue + } + + arg = strings.TrimPrefix(arg, "--") + tokens := strings.Split(arg, "=") + + if _, exists := dupFlags[tokens[0]]; exists { + return nil, fmt.Errorf("test: duplicated flag \"%s\"", tokens[0]) + } + + dupFlags[tokens[0]] = true + } + } + + out, err := execSetContext(ctx, cctx, cmd, extraArgs...) + if err != nil { + return nil, err + } + + if err := cmd.Execute(); err != nil { + return out, err + } + + return out, nil +} + +func ExecSend(ctx context.Context, cctx client.Context, args ...string) (testutil.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetTxBankSendTxCmd(), args...) +} + +func QueryBalancesExec(ctx context.Context, cctx client.Context, args ...string) (testutil.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetQueryBankBalancesCmd(), args...) +} diff --git a/go/cli/testutil/deployment.go b/go/cli/testutil/deployment.go new file mode 100644 index 00000000..28b895fa --- /dev/null +++ b/go/cli/testutil/deployment.go @@ -0,0 +1,93 @@ +package testutil + +import ( + "context" + + "github.com/cosmos/cosmos-sdk/client" + sdktest "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" + + "pkg.akt.dev/go/cli" + dv1beta4 "pkg.akt.dev/go/node/deployment/v1beta4" +) + +// TxCreateDeploymentExec is used for testing create deployment tx +func TxCreateDeploymentExec(ctx context.Context, cctx client.Context, filePath string, extraArgs ...string) (sdktest.BufferWriter, error) { + args := []string{ + filePath, + } + + args = append(args, extraArgs...) + + return ExecTestCLICmd(ctx, cctx, cli.GetTxDeploymentCreateCmd(), args...) +} + +// TxUpdateDeploymentExec is used for testing update deployment tx +func TxUpdateDeploymentExec(ctx context.Context, cctx client.Context, filePath string, extraArgs ...string) (sdktest.BufferWriter, error) { + args := []string{ + filePath, + } + + args = append(args, extraArgs...) + + return ExecTestCLICmd(ctx, cctx, cli.GetTxDeploymentUpdateCmd(), args...) 
+} + +// TxCloseDeploymentExec is used for testing close deployment tx +// requires --dseq, --fees +func TxCloseDeploymentExec(ctx context.Context, cctx client.Context, extraArgs ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetTxDeploymentCloseCmd(), extraArgs...) +} + +// TxDepositDeploymentExec is used for testing deposit deployment tx +func TxDepositDeploymentExec(ctx context.Context, cctx client.Context, deposit sdk.Coin, extraArgs ...string) (sdktest.BufferWriter, error) { + args := []string{ + deposit.String(), + } + + args = append(args, extraArgs...) + + return ExecTestCLICmd(ctx, cctx, cli.GetTxDeploymentDepositCmd(), args...) +} + +// TxCloseGroupExec is used for testing close group tx +func TxCloseGroupExec(ctx context.Context, cctx client.Context, extraArgs ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetTxDeploymentGroupCloseCmd(), extraArgs...) +} + +func TxGrantAuthorizationExec(ctx context.Context, cctx client.Context, grantee sdk.AccAddress, extraArgs ...string) (sdktest.BufferWriter, error) { + dmin, _ := dv1beta4.DefaultParams().MinDepositFor("uakt") + + spendLimit := sdk.NewCoin(dmin.Denom, dmin.Amount.MulRaw(3)) + args := []string{ + grantee.String(), + spendLimit.String(), + } + args = append(args, extraArgs...) + + return ExecTestCLICmd(ctx, cctx, cli.GetTxDeploymentGrantAuthorizationCmd(), args...) +} + +func TxRevokeAuthorizationExec(ctx context.Context, cctx client.Context, grantee sdk.AccAddress, extraArgs ...string) (sdktest.BufferWriter, error) { + args := []string{ + grantee.String(), + } + args = append(args, extraArgs...) + + return ExecTestCLICmd(ctx, cctx, cli.GetTxDeploymentRevokeAuthorizationCmd(), args...) +} + +// QueryDeploymentsExec is used for testing deployments query +func QueryDeploymentsExec(ctx context.Context, cctx client.Context, extraArgs ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetQueryDeploymentsCmd(), extraArgs...) +} + +// QueryDeploymentExec is used for testing deployment query +func QueryDeploymentExec(ctx context.Context, cctx client.Context, extraArgs ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetQueryDeploymentCmd(), extraArgs...) +} + +// QueryGroupExec is used for testing group query +func QueryGroupExec(ctx context.Context, cctx client.Context, extraArgs ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetQueryDeploymentGroupCmd(), extraArgs...) +} diff --git a/go/cli/testutil/gov.go b/go/cli/testutil/gov.go new file mode 100644 index 00000000..9db92da0 --- /dev/null +++ b/go/cli/testutil/gov.go @@ -0,0 +1,27 @@ +package testutil + +import ( + "context" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/testutil" + clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli" + govcli "github.com/cosmos/cosmos-sdk/x/gov/client/cli" +) + +// ExecGovSubmitLegacyProposal creates a tx for submit legacy proposal +// +//nolint:staticcheck // we are intentionally using a deprecated flag here. +func ExecGovSubmitLegacyProposal(ctx context.Context, cctx client.Context, args ...string) (testutil.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, govcli.NewCmdSubmitLegacyProposal(), args...) 
+} + +// MsgVote votes for a proposal +func MsgVote(clientCtx client.Context, args ...string) (testutil.BufferWriter, error) { + return clitestutil.ExecTestCLICmd(clientCtx, govcli.NewCmdWeightedVote(), args) +} + +// MsgDeposit deposits on a proposal +func MsgDeposit(clientCtx client.Context, args ...string) (testutil.BufferWriter, error) { + return clitestutil.ExecTestCLICmd(clientCtx, govcli.NewCmdDeposit(), args) +} diff --git a/go/cli/testutil/market.go b/go/cli/testutil/market.go new file mode 100644 index 00000000..44ce3551 --- /dev/null +++ b/go/cli/testutil/market.go @@ -0,0 +1,60 @@ +package testutil + +import ( + "context" + + "github.com/cosmos/cosmos-sdk/client" + sdktest "github.com/cosmos/cosmos-sdk/testutil" + + "pkg.akt.dev/go/cli" +) + +// TxCreateBidExec is used for testing create bid tx +func TxCreateBidExec(ctx context.Context, cctx client.Context, extraArgs ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetTxMarketBidCreateCmd(), extraArgs...) +} + +// TxCloseBidExec is used for testing close bid tx +func TxCloseBidExec(ctx context.Context, cctx client.Context, extraArgs ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetTxMarketBidCloseCmd(), extraArgs...) +} + +// TxCreateLeaseExec is used for creating a lease +func TxCreateLeaseExec(ctx context.Context, cctx client.Context, extraArgs ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetTxMarketLeaseCreateCmd(), extraArgs...) +} + +// TxCloseLeaseExec is used for testing close order tx +func TxCloseLeaseExec(ctx context.Context, cctx client.Context, extraArgs ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetTxMarketLeaseCloseCmd(), extraArgs...) +} + +// QueryOrdersExec is used for testing orders query +func QueryOrdersExec(ctx context.Context, cctx client.Context, args ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetQueryMarketOrdersCmd(), args...) +} + +// QueryOrderExec is used for testing order query +func QueryOrderExec(ctx context.Context, cctx client.Context, extraArgs ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetQueryMarketOrderCmd(), extraArgs...) +} + +// QueryBidsExec is used for testing bids query +func QueryBidsExec(ctx context.Context, cctx client.Context, args ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetQueryMarketBidsCmd(), args...) +} + +// QueryBidExec is used for testing bid query +func QueryBidExec(ctx context.Context, cctx client.Context, extraArgs ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetQueryMarketBidCmd(), extraArgs...) +} + +// QueryLeasesExec is used for testing leases query +func QueryLeasesExec(ctx context.Context, cctx client.Context, args ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetQueryMarketLeasesCmd(), args...) +} + +// QueryLeaseExec is used for testing lease query +func QueryLeaseExec(ctx context.Context, cctx client.Context, extraArgs ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetQueryMarketLeaseCmd(), extraArgs...) 
+} diff --git a/go/cli/testutil/provider.go b/go/cli/testutil/provider.go new file mode 100644 index 00000000..4c561171 --- /dev/null +++ b/go/cli/testutil/provider.go @@ -0,0 +1,42 @@ +package testutil + +import ( + "context" + + "github.com/cosmos/cosmos-sdk/client" + sdktest "github.com/cosmos/cosmos-sdk/testutil" + + "pkg.akt.dev/go/cli" +) + +// TxCreateProviderExec is used for testing create provider tx +func TxCreateProviderExec(ctx context.Context, cctx client.Context, filepath string, extraArgs ...string) (sdktest.BufferWriter, error) { + args := []string{ + filepath, + } + + args = append(args, extraArgs...) + + return ExecTestCLICmd(ctx, cctx, cli.GetTxProviderCreateCmd(), args...) +} + +// TxUpdateProviderExec is used for testing update provider tx +func TxUpdateProviderExec(ctx context.Context, cctx client.Context, filepath string, extraArgs ...string) (sdktest.BufferWriter, error) { + args := []string{ + filepath, + } + + args = append(args, extraArgs...) + + return ExecTestCLICmd(ctx, cctx, cli.GetTxProviderUpdateCmd(), args...) +} + +// QueryProvidersExec is used for testing providers query +func QueryProvidersExec(ctx context.Context, cctx client.Context, args ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetQueryGetProvidersCmd(), args...) +} + +// QueryProviderExec is used for testing provider query +func QueryProviderExec(ctx context.Context, cctx client.Context, extraArgs ...string) (sdktest.BufferWriter, error) { + return ExecTestCLICmd(ctx, cctx, cli.GetQueryProviderCmd(), extraArgs...) +} diff --git a/go/cli/testutil/tm_mocks.go b/go/cli/testutil/tm_mocks.go new file mode 100644 index 00000000..f3c96032 --- /dev/null +++ b/go/cli/testutil/tm_mocks.go @@ -0,0 +1,41 @@ +package testutil + +import ( + "context" + + abci "github.com/cometbft/cometbft/abci/types" + tmbytes "github.com/cometbft/cometbft/libs/bytes" + rpcclient "github.com/cometbft/cometbft/rpc/client" + rpcclientmock "github.com/cometbft/cometbft/rpc/client/mock" + coretypes "github.com/cometbft/cometbft/rpc/core/types" + tmtypes "github.com/cometbft/cometbft/types" + + "github.com/cosmos/cosmos-sdk/client" +) + +var _ client.TendermintRPC = (*MockTendermintRPC)(nil) + +type MockTendermintRPC struct { + rpcclientmock.Client + + responseQuery abci.ResponseQuery +} + +// NewMockTendermintRPC returns a mock TendermintRPC implementation. +// It is used for CLI testing. 
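Because the stubbed methods simply echo back the canned response, the mock's behaviour can be pinned down with a small sanity test; a sketch using the constructor defined just below (the query path and payload are arbitrary):

package testutil_test

import (
	"context"
	"testing"

	abci "github.com/cometbft/cometbft/abci/types"
	rpcclient "github.com/cometbft/cometbft/rpc/client"
	"github.com/stretchr/testify/require"

	clitestutil "pkg.akt.dev/go/cli/testutil"
)

func TestMockTendermintRPCSketch(t *testing.T) {
	resp := abci.ResponseQuery{Code: 0, Value: []byte(`{"height":"1"}`)}
	m := clitestutil.NewMockTendermintRPC(resp)

	// Every ABCI query returns the canned response, regardless of path or data.
	res, err := m.ABCIQueryWithOptions(context.Background(), "/custom/path", nil, rpcclient.ABCIQueryOptions{})
	require.NoError(t, err)
	require.Equal(t, resp.Value, res.Response.Value)

	// Broadcasts always report success (code 0).
	br, err := m.BroadcastTxSync(context.Background(), nil)
	require.NoError(t, err)
	require.Zero(t, br.Code)
}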
+func NewMockTendermintRPC(respQuery abci.ResponseQuery) MockTendermintRPC { + return MockTendermintRPC{responseQuery: respQuery} +} + +func (MockTendermintRPC) BroadcastTxSync(context.Context, tmtypes.Tx) (*coretypes.ResultBroadcastTx, error) { + return &coretypes.ResultBroadcastTx{Code: 0}, nil +} + +func (m MockTendermintRPC) ABCIQueryWithOptions( + _ context.Context, + _ string, + _ tmbytes.HexBytes, + _ rpcclient.ABCIQueryOptions, +) (*coretypes.ResultABCIQuery, error) { + return &coretypes.ResultABCIQuery{Response: m.responseQuery}, nil +} diff --git a/go/cli/testutil/validate.go b/go/cli/testutil/validate.go new file mode 100644 index 00000000..53ef284f --- /dev/null +++ b/go/cli/testutil/validate.go @@ -0,0 +1,61 @@ +package testutil + +import ( + "bytes" + "context" + "encoding/hex" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/gogoproto/jsonpb" + + nutils "pkg.akt.dev/go/node/utils" +) + +// GetTxFees is a gentle response to inappropriate approach of cli test utils +// send transaction may fail and calling cli routine won't know about it +func GetTxFees(ctx context.Context, t testing.TB, cctx client.Context, data []byte) sdk.FeeTx { + t.Helper() + + res := getTxResponse(ctx, t, cctx, data) + require.Zero(t, res.Code, res) + + var fees sdk.FeeTx + err := cctx.Codec.UnpackAny(res.Tx, &fees) + require.NoError(t, err) + + return fees +} + +// ValidateTxSuccessful is a gentle response to inappropriate approach of cli test utils +// send transaction may fail and calling cli routine won't know about it +func ValidateTxSuccessful(ctx context.Context, t testing.TB, cctx client.Context, data []byte) (*sdk.TxResponse, sdk.Tx) { + t.Helper() + + res := getTxResponse(ctx, t, cctx, data) + require.Zero(t, res.Code, res) + + var tx sdk.Tx + err := cctx.Codec.UnpackAny(res.Tx, &tx) + require.NoError(t, err) + + return res, tx +} + +func getTxResponse(ctx context.Context, t testing.TB, cctx client.Context, data []byte) *sdk.TxResponse { + var resp sdk.TxResponse + + err := jsonpb.Unmarshal(bytes.NewBuffer(data), &resp) + require.NoError(t, err) + + hash, err := hex.DecodeString(resp.TxHash) + require.NoError(t, err) + + res, err := nutils.QueryTx(ctx, cctx, hash) + require.NoError(t, err) + + return res +} diff --git a/go/cli/tx.go b/go/cli/tx.go new file mode 100644 index 00000000..28837bd5 --- /dev/null +++ b/go/cli/tx.go @@ -0,0 +1,123 @@ +package cli + +import ( + "context" + "errors" + + "github.com/spf13/cobra" + + "pkg.akt.dev/go/node/client/v1beta3" + + cflags "pkg.akt.dev/go/cli/flags" +) + +type ContextType string + +const ( + ContextTypeClient = ContextType("context-client") + ContextTypeQueryClient = ContextType("context-query-client") +) + +func TxPersistentPreRunE(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + + cctx, err := GetClientTxContext(cmd) + if err != nil { + return err + } + + if cctx.Codec == nil { + return errors.New("codec is not initialized") + } + + if cctx.LegacyAmino == nil { + return errors.New("legacy amino codec is not initialized") + } + + opts, err := cflags.ClientOptionsFromFlags(cmd.Flags()) + if err != nil { + return err + } + + cl, err := DiscoverClient(ctx, cctx, opts...) 
+ if err != nil { + return err + } + + ctx = context.WithValue(ctx, ContextTypeClient, cl) + + cmd.SetContext(ctx) + + return nil +} + +func TxCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "tx", + Short: "Transactions subcommands", + } + + cmd.AddCommand( + GetTxAuthzCmd(), + GetTxBankCmd(), + GetTxCrisisCmd(), + getTxDistributionCmd(), + GetTxFeegrantCmd(), + GetTxEvidenceCmd([]*cobra.Command{}), + GetSignCommand(), + GetSignBatchCommand(), + GetAuthMultiSignCmd(), + GetValidateSignaturesCommand(), + GetBroadcastCommand(), + GetEncodeCommand(), + GetDecodeCommand(), + GetTxVestingCmd(), + cflags.LineBreak, + GetTxAuditCmd(), + GetTxCertCmd(), + GetTxDeploymentCmds(), + GetTxMarketCmds(), + GetTxProviderCmd(), + GetTxGovCmd( + []*cobra.Command{ + GetTxParamsSubmitParamChangeProposalCmd(), + GetTxUpgradeSubmitLegacyUpgradeProposal(), + GetTxUpgradeSubmitLegacyCancelUpgradeProposal(), + }, + ), + GetTxSlashingCmd(), + GetTxStakingCmd(), + ) + + cmd.PersistentFlags().String(cflags.FlagChainID, "", "The network chain ID") + + return cmd +} + +func MustClientFromContext(ctx context.Context) v1beta3.Client { + val := ctx.Value(ContextTypeClient) + if val == nil { + panic("context does not have client set") + } + + res, valid := val.(v1beta3.Client) + if !valid { + panic("invalid context value") + } + + return res +} + +func MustQueryClientFromContext(ctx context.Context) v1beta3.LightClient { + val := ctx.Value(ContextTypeQueryClient) + if val == nil { + panic("context does not have client set") + } + + res, valid := val.(v1beta3.LightClient) + if !valid { + panic("invalid context value") + } + + return res +} diff --git a/go/cli/upgrade_parse_test.go b/go/cli/upgrade_parse_test.go new file mode 100644 index 00000000..e08fe9ae --- /dev/null +++ b/go/cli/upgrade_parse_test.go @@ -0,0 +1,42 @@ +package cli + +import ( + "strconv" + "testing" + + "github.com/cosmos/cosmos-sdk/x/upgrade/types" + "github.com/stretchr/testify/require" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// nolint: staticcheck +func TestParseArgsToContent(t *testing.T) { + fs := GetTxUpgradeSubmitLegacyUpgradeProposal().Flags() + + proposal := types.SoftwareUpgradeProposal{ + Title: "proposal title", + Description: "proposal description", + Plan: types.Plan{ + Name: "plan name", + Height: 123456, + Info: "plan info", + }, + } + + fs.Set(cflags.FlagTitle, proposal.Title) + fs.Set(cflags.FlagDescription, proposal.Description) + fs.Set(cflags.FlagUpgradeHeight, strconv.FormatInt(proposal.Plan.Height, 10)) + fs.Set(cflags.FlagUpgradeInfo, proposal.Plan.Info) + + content, err := upgradeParseArgsToContent(fs, proposal.Plan.Name) + require.NoError(t, err) + + p, ok := content.(*types.SoftwareUpgradeProposal) + require.Equal(t, ok, true) + require.Equal(t, p.Title, proposal.Title) + require.Equal(t, p.Description, proposal.Description) + require.Equal(t, p.Plan.Name, proposal.Plan.Name) + require.Equal(t, p.Plan.Height, proposal.Plan.Height) + require.Equal(t, p.Plan.Info, proposal.Plan.Info) +} diff --git a/go/cli/upgrade_query.go b/go/cli/upgrade_query.go new file mode 100644 index 00000000..1ed1078e --- /dev/null +++ b/go/cli/upgrade_query.go @@ -0,0 +1,148 @@ +package cli + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/x/upgrade/types" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetQueryUpgradeCmd returns the parent command for all x/upgrade CLI query commands. 
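The tx plumbing above is meant to be reused by every command in this package: TxPersistentPreRunE discovers a client from the flags and stores it on the command context, and each RunE recovers it with MustClientFromContext to build and broadcast messages. A condensed sketch of a hypothetical command built on that plumbing (the command name and the bank send message are placeholders, not part of this API):

package cli

import (
	"github.com/spf13/cobra"

	sdk "github.com/cosmos/cosmos-sdk/types"
	banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"

	cflags "pkg.akt.dev/go/cli/flags"
)

// getTxDemoSendCmd is illustrative only; it shows the PersistentPreRunE/RunE wiring.
func getTxDemoSendCmd() *cobra.Command {
	cmd := &cobra.Command{
		Use:               "demo-send [to] [amount]",
		Args:              cobra.ExactArgs(2),
		PersistentPreRunE: TxPersistentPreRunE,
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx := cmd.Context()
			cl := MustClientFromContext(ctx)
			cctx := cl.ClientContext()

			toAddr, err := sdk.AccAddressFromBech32(args[0])
			if err != nil {
				return err
			}

			amount, err := sdk.ParseCoinsNormalized(args[1])
			if err != nil {
				return err
			}

			msg := banktypes.NewMsgSend(cctx.GetFromAddress(), toAddr, amount)

			resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg})
			if err != nil {
				return err
			}

			return cl.PrintMessage(resp)
		},
	}

	cflags.AddTxFlagsToCmd(cmd)

	return cmd
}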
+func GetQueryUpgradeCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Querying commands for the upgrade module", + } + + cmd.AddCommand( + GetQueryUpgradeCurrentPlanCmd(), + GetQueryUpgradeAppliedPlanCmd(), + GetQueryUpgradeModuleVersionsCmd(), + ) + + return cmd +} + +// GetQueryUpgradeCurrentPlanCmd returns the query upgrade plan command. +func GetQueryUpgradeCurrentPlanCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "plan", + Short: "get upgrade plan (if one exists)", + Long: "Gets the currently scheduled upgrade plan, if one exists", + Args: cobra.ExactArgs(0), + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + params := types.QueryCurrentPlanRequest{} + res, err := cl.Query().Upgrade().CurrentPlan(cmd.Context(), ¶ms) + if err != nil { + return err + } + + if res.Plan == nil { + return fmt.Errorf("no upgrade scheduled") + } + + return cl.PrintMessage(res.GetPlan()) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetQueryUpgradeAppliedPlanCmd returns information about the block at which a completed +// upgrade was applied. +func GetQueryUpgradeAppliedPlanCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "applied [upgrade-name]", + Short: "block header for height at which a completed upgrade was applied", + Long: "If upgrade-name was previously executed on the chain, this returns the header for the block at which it was applied.\n" + + "This helps a client determine which binary was valid over a given range of blocks, as well as more context to understand past migrations.", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + cctx := cl.ClientContext() + + params := types.QueryAppliedPlanRequest{Name: args[0]} + res, err := cl.Query().Upgrade().AppliedPlan(ctx, ¶ms) + if err != nil { + return err + } + + if res.Height == 0 { + return fmt.Errorf("no upgrade found") + } + + // we got the height, now let's return the headers + node, err := cctx.GetNode() + if err != nil { + return err + } + headers, err := node.BlockchainInfo(ctx, res.Height, res.Height) + if err != nil { + return err + } + if len(headers.BlockMetas) == 0 { + return fmt.Errorf("no headers returned for height %d", res.Height) + } + + // always output json as Header is unreadable in toml ([]byte is a long list of numbers) + bz, err := cctx.LegacyAmino.MarshalJSONIndent(headers.BlockMetas[0], "", " ") + if err != nil { + return err + } + return cctx.PrintString(fmt.Sprintf("%s\n", bz)) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} + +// GetModuleVersionsCmd returns the module version list from state +func GetQueryUpgradeModuleVersionsCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "module_versions [optional module_name]", + Short: "get the list of module versions", + Long: "Gets a list of module names and their respective consensus versions.\n" + + "Following the command with a specific module name will return only\n" + + "that module's information.", + Args: cobra.MaximumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustQueryClientFromContext(ctx) + + var params types.QueryModuleVersionsRequest + + if len(args) == 1 { + params = types.QueryModuleVersionsRequest{ModuleName: args[0]} + } else { + params = types.QueryModuleVersionsRequest{} + } + + res, err := cl.Query().Upgrade().ModuleVersions(cmd.Context(), ¶ms) + if err != 
nil { + return err + } + + if res.ModuleVersions == nil { + return errors.ErrNotFound + } + + return cl.PrintMessage(res) + }, + } + + cflags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/go/cli/upgrade_query_test.go b/go/cli/upgrade_query_test.go new file mode 100644 index 00000000..1e30f450 --- /dev/null +++ b/go/cli/upgrade_query_test.go @@ -0,0 +1,174 @@ +package cli_test + +import ( + "context" + "fmt" + "io" + "testing" + + rpcclientmock "github.com/cometbft/cometbft/rpc/client/mock" + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + svrcmd "github.com/cosmos/cosmos-sdk/server/cmd" + testutilmod "github.com/cosmos/cosmos-sdk/types/module/testutil" + "github.com/cosmos/cosmos-sdk/x/upgrade" + + "pkg.akt.dev/go/cli" + clitestutil "pkg.akt.dev/go/cli/testutil" +) + +func TestGetCurrentPlanCmd(t *testing.T) { + encCfg := testutilmod.MakeTestEncodingConfig(upgrade.AppModuleBasic{}) + kr := keyring.NewInMemory(encCfg.Codec) + baseCtx := client.Context{}. + WithKeyring(kr). + WithTxConfig(encCfg.TxConfig). + WithCodec(encCfg.Codec). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). + WithChainID("test-chain") + + testCases := []struct { + name string + args []string + expCmdOutput string + }{ + { + name: "json output", + args: cli.TestFlags().WithOutputJSON(), + expCmdOutput: `[--output=json]`, + }, + { + name: "text output", + args: cli.TestFlags().WithOutputText(), + expCmdOutput: `[--output=text]`, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := svrcmd.CreateExecuteContext(context.Background()) + + cmd := cli.GetQueryUpgradeCurrentPlanCmd() + cmd.SetOut(io.Discard) + require.NotNil(t, cmd) + + cmd.SetContext(ctx) + cmd.SetArgs(tc.args) + + require.NoError(t, client.SetCmdClientContextHandler(baseCtx, cmd)) + + require.Contains(t, fmt.Sprint(cmd), "plan [] [] get upgrade plan (if one exists)") + require.Contains(t, fmt.Sprint(cmd), tc.expCmdOutput) + }) + } +} + +func TestGetAppliedPlanCmd(t *testing.T) { + encCfg := testutilmod.MakeTestEncodingConfig(upgrade.AppModuleBasic{}) + kr := keyring.NewInMemory(encCfg.Codec) + baseCtx := client.Context{}. + WithKeyring(kr). + WithTxConfig(encCfg.TxConfig). + WithCodec(encCfg.Codec). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). 
+ WithChainID("test-chain") + + testCases := []struct { + name string + args []string + expCmdOutput string + }{ + { + name: "json output", + args: cli.TestFlags().With("test-upgrade").WithOutputJSON(), + expCmdOutput: `[test-upgrade --output=json]`, + }, + { + name: "text output", + args: cli.TestFlags().With("test-upgrade").WithOutputText(), + expCmdOutput: `[test-upgrade --output=text]`, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + ctx := svrcmd.CreateExecuteContext(context.Background()) + + cmd := cli.GetQueryUpgradeAppliedPlanCmd() + cmd.SetOut(io.Discard) + require.NotNil(t, cmd) + + cmd.SetContext(ctx) + cmd.SetArgs(tc.args) + + require.NoError(t, client.SetCmdClientContextHandler(baseCtx, cmd)) + + require.Contains(t, fmt.Sprint(cmd), "applied [upgrade-name] [] [] block header for height at which a completed upgrade was applied") + require.Contains(t, fmt.Sprint(cmd), tc.expCmdOutput) + }) + } +} + +func TestGetModuleVersionsCmd(t *testing.T) { + encCfg := testutilmod.MakeTestEncodingConfig(upgrade.AppModuleBasic{}) + kr := keyring.NewInMemory(encCfg.Codec) + baseCtx := client.Context{}. + WithKeyring(kr). + WithTxConfig(encCfg.TxConfig). + WithCodec(encCfg.Codec). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). + WithChainID("test-chain") + + testCases := []struct { + msg string + args []string + expCmdOutput string + }{ + { + msg: "test full query with json output", + args: cli.TestFlags().WithHeight(1).WithOutputJSON(), + expCmdOutput: `--height=1 --output=json`, + }, + { + msg: "test full query with text output", + args: cli.TestFlags().WithHeight(1).WithOutputText(), + expCmdOutput: `--height=1 --output=text`, + }, + { + msg: "test single module", + args: cli.TestFlags().With("bank").WithHeight(1), + expCmdOutput: `bank --height=1`, + }, + { + msg: "test non-existent module", + args: cli.TestFlags().With("abcdefg").WithHeight(1), + expCmdOutput: `abcdefg --height=1`, + }, + } + + for _, tc := range testCases { + t.Run(tc.msg, func(t *testing.T) { + ctx := svrcmd.CreateExecuteContext(context.Background()) + + cmd := cli.GetQueryUpgradeModuleVersionsCmd() + cmd.SetOut(io.Discard) + require.NotNil(t, cmd) + + cmd.SetContext(ctx) + cmd.SetArgs(tc.args) + + require.NoError(t, client.SetCmdClientContextHandler(baseCtx, cmd)) + + require.Contains(t, fmt.Sprint(cmd), "module_versions [optional module_name] [] [] get the list of module versions") + require.Contains(t, fmt.Sprint(cmd), tc.expCmdOutput) + }) + } +} diff --git a/go/cli/upgrade_tx.go b/go/cli/upgrade_tx.go new file mode 100644 index 00000000..0de79491 --- /dev/null +++ b/go/cli/upgrade_tx.go @@ -0,0 +1,202 @@ +package cli + +import ( + "os" + "path/filepath" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/gov/client/cli" + gov "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + "github.com/cosmos/cosmos-sdk/x/upgrade/plan" + "github.com/cosmos/cosmos-sdk/x/upgrade/types" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetTxUpgradeCmd returns the transaction commands for this module +func GetTxUpgradeCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Upgrade transaction subcommands", + } + + return cmd +} + +// GetTxUpgradeSubmitLegacyUpgradeProposal implements a command handler for submitting a software upgrade proposal transaction. 
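For the legacy proposal command defined below, a test can drive the full flag surface through the testutil exec helpers. A rough sketch, assuming a funded key and a reachable test network behind cctx; the upgrade name, height and deposit are illustrative, and the flag spellings follow the standard Cosmos SDK names registered by the command:

package cli_test

import (
	"context"

	"github.com/cosmos/cosmos-sdk/client"
	"github.com/cosmos/cosmos-sdk/testutil"

	"pkg.akt.dev/go/cli"
	clitestutil "pkg.akt.dev/go/cli/testutil"
)

func submitUpgradeProposalSketch(ctx context.Context, cctx client.Context) (testutil.BufferWriter, error) {
	args := cli.TestFlags().
		WithChainID(cctx.ChainID).
		Append([]string{
			"v1.0.0",
			"--title=v1.0.0",
			"--description=upgrade to v1.0.0",
			"--upgrade-height=1000000",
			"--deposit=10000000uakt",
			"--no-validate",
		})

	return clitestutil.ExecTestCLICmd(ctx, cctx, cli.GetTxUpgradeSubmitLegacyUpgradeProposal(), args...)
}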
+func GetTxUpgradeSubmitLegacyUpgradeProposal() *cobra.Command { + cmd := &cobra.Command{ + Use: "software-upgrade [name] (--upgrade-height [height]) (--upgrade-info [info]) [flags]", + Args: cobra.ExactArgs(1), + Short: "Submit a software upgrade proposal", + Long: "Submit a software upgrade along with an initial deposit.\n" + + "Please specify a unique name and height for the upgrade to take effect.\n" + + "You may include info to reference a binary download link, in a format compatible with: https://github.com/cosmos/cosmos-sdk/tree/main/cosmovisor", + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + name := args[0] + content, err := upgradeParseArgsToContent(cmd.Flags(), name) + if err != nil { + return err + } + noValidate, err := cmd.Flags().GetBool(cflags.FlagNoValidate) + if err != nil { + return err + } + if !noValidate { + prop := content.(*types.SoftwareUpgradeProposal) //nolint:staticcheck // we are intentionally using a deprecated proposal type. + var daemonName string + if daemonName, err = cmd.Flags().GetString(cflags.FlagDaemonName); err != nil { + return err + } + var planInfo *plan.Info + if planInfo, err = plan.ParseInfo(prop.Plan.Info); err != nil { + return err + } + if err = planInfo.ValidateFull(daemonName); err != nil { + return err + } + } + + from := cctx.GetFromAddress() + + depositStr, err := cmd.Flags().GetString(cli.FlagDeposit) + if err != nil { + return err + } + deposit, err := sdk.ParseCoinsNormalized(depositStr) + if err != nil { + return err + } + + msg, err := gov.NewMsgSubmitProposal(content, deposit, from) + if err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cmd.Flags().String(cflags.FlagTitle, "", "title of proposal") + cmd.Flags().String(cflags.FlagDescription, "", "description of proposal") // nolint:staticcheck // we are intentionally using a deprecated flag here. + cmd.Flags().String(cflags.FlagDeposit, "", "deposit of proposal") + cmd.Flags().Int64(cflags.FlagUpgradeHeight, 0, "The height at which the upgrade must happen") // nolint:staticcheck + cmd.Flags().String(cflags.FlagUpgradeInfo, "", "Info for the upgrade plan such as new version download urls, etc.") // nolint:staticcheck + cmd.Flags().Bool(cflags.FlagNoValidate, false, "Skip validation of the upgrade info") + cmd.Flags().String(cflags.FlagDaemonName, getDefaultDaemonName(), "The name of the executable being upgraded (for upgrade-info validation). Default is the DAEMON_NAME env var if set, or else this executable") + + return cmd +} + +// GetTxUpgradeSubmitLegacyCancelUpgradeProposal implements a command handler for submitting a software upgrade cancel proposal transaction. +// Deprecated: please use NewCmdSubmitCancelUpgradeProposal instead. 
+func GetTxUpgradeSubmitLegacyCancelUpgradeProposal() *cobra.Command { + cmd := &cobra.Command{ + Use: "cancel-software-upgrade [flags]", + Args: cobra.ExactArgs(0), + Short: "Cancel the current software upgrade proposal", + Long: "Cancel a software upgrade along with an initial deposit.", + PersistentPreRunE: TxPersistentPreRunE, + RunE: func(cmd *cobra.Command, _ []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + from := cctx.GetFromAddress() + + depositStr, err := cmd.Flags().GetString(cli.FlagDeposit) + if err != nil { + return err + } + + deposit, err := sdk.ParseCoinsNormalized(depositStr) + if err != nil { + return err + } + + title, err := cmd.Flags().GetString(cli.FlagTitle) + if err != nil { + return err + } + + description, err := cmd.Flags().GetString(cli.FlagDescription) // nolint:staticcheck // we are intentionally using a deprecated flag here. + if err != nil { + return err + } + + content := types.NewCancelSoftwareUpgradeProposal(title, description) + + msg, err := gov.NewMsgSubmitProposal(content, deposit, from) + if err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cmd.Flags().String(cflags.FlagTitle, "", "title of proposal") + cmd.Flags().String(cflags.FlagDescription, "", "description of proposal") // nolint:staticcheck // we are intentionally using a deprecated flag here. + cmd.Flags().String(cflags.FlagDeposit, "", "deposit of proposal") + _ = cmd.MarkFlagRequired(cflags.FlagTitle) + _ = cmd.MarkFlagRequired(cflags.FlagDescription) // nolint:staticcheck // we are intentionally using a deprecated flag here. + + return cmd +} + +// getDefaultDaemonName gets the default name to use for the daemon. +// If a DAEMON_NAME env var is set, that is used. +// Otherwise, the last part of the currently running executable is used. +func getDefaultDaemonName() string { + // DAEMON_NAME is specifically used here to correspond with the Cosmovisor setup env vars. + name := os.Getenv("DAEMON_NAME") + if len(name) == 0 { + _, name = filepath.Split(os.Args[0]) + } + return name +} + +func upgradeParseArgsToContent(fs *pflag.FlagSet, name string) (gov.Content, error) { + title, err := fs.GetString(cli.FlagTitle) + if err != nil { + return nil, err + } + + description, err := fs.GetString(cli.FlagDescription) //nolint:staticcheck // we are intentionally using a deprecated flag here. 
+ if err != nil { + return nil, err + } + + height, err := fs.GetInt64(cflags.FlagUpgradeHeight) //nolint:staticcheck + if err != nil { + return nil, err + } + + info, err := fs.GetString(cflags.FlagUpgradeInfo) //nolint:staticcheck + if err != nil { + return nil, err + } + + plan := types.Plan{Name: name, Height: height, Info: info} + content := types.NewSoftwareUpgradeProposal(title, description, plan) + + return content, nil +} diff --git a/go/cli/upgrade_tx_test.go b/go/cli/upgrade_tx_test.go new file mode 100644 index 00000000..bae95ddc --- /dev/null +++ b/go/cli/upgrade_tx_test.go @@ -0,0 +1,83 @@ +package cli_test + +import ( + "context" + "fmt" + "io" + "testing" + + rpcclientmock "github.com/cometbft/cometbft/rpc/client/mock" + "github.com/stretchr/testify/require" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + svrcmd "github.com/cosmos/cosmos-sdk/server/cmd" + clitestutil "github.com/cosmos/cosmos-sdk/testutil/cli" + testutilmod "github.com/cosmos/cosmos-sdk/types/module/testutil" + "github.com/cosmos/cosmos-sdk/x/upgrade" + + "pkg.akt.dev/go/cli" + cflags "pkg.akt.dev/go/cli/flags" +) + +func TestModuleVersionsCLI(t *testing.T) { + encCfg := testutilmod.MakeTestEncodingConfig(upgrade.AppModuleBasic{}) + kr := keyring.NewInMemory(encCfg.Codec) + baseCtx := client.Context{}. + WithKeyring(kr). + WithTxConfig(encCfg.TxConfig). + WithCodec(encCfg.Codec). + WithLegacyAmino(encCfg.Amino). + WithClient(clitestutil.MockTendermintRPC{Client: rpcclientmock.Client{}}). + WithAccountRetriever(client.MockAccountRetriever{}). + WithOutput(io.Discard). + WithChainID("test-chain"). + WithSignModeStr(cflags.SignModeDirect) + + testCases := []struct { + msg string + args []string + expCmdOutput string + }{ + { + msg: "test full query with json output", + args: cli.TestFlags().WithHeight(1).WithOutputJSON(), + expCmdOutput: `--height=1 --output=json`, + }, + { + msg: "test full query with text output", + args: cli.TestFlags().WithHeight(1).WithOutputText(), + expCmdOutput: `--height=1 --output=text`, + }, + { + msg: "test single module", + args: cli.TestFlags().With("bank").WithHeight(1), + expCmdOutput: `bank --height=1`, + }, + { + msg: "test non-existent module", + args: cli.TestFlags().With("abcdefg").WithHeight(1), + expCmdOutput: `abcdefg --height=1`, + }, + } + + for _, tc := range testCases { + t.Run(tc.msg, func(t *testing.T) { + cmd := cli.GetQueryUpgradeModuleVersionsCmd() + + ctx := svrcmd.CreateExecuteContext(context.Background()) + + cmd.SetOut(io.Discard) + require.NotNil(t, cmd) + + cmd.SetContext(ctx) + cmd.SetArgs(tc.args) + + require.NoError(t, client.SetCmdClientContextHandler(baseCtx, cmd)) + + if len(tc.args) != 0 { + require.Contains(t, fmt.Sprint(cmd), tc.expCmdOutput) + } + }) + } +} diff --git a/go/cli/utils.go b/go/cli/utils.go new file mode 100644 index 00000000..8e4a97f1 --- /dev/null +++ b/go/cli/utils.go @@ -0,0 +1,78 @@ +package cli + +import ( + "context" + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/spf13/pflag" + + cflags "pkg.akt.dev/go/cli/flags" + client "pkg.akt.dev/go/node/client/v1beta3" + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + mtypes "pkg.akt.dev/go/node/market/v1beta5" +) + +func DetectDeploymentDeposit(ctx context.Context, flags *pflag.FlagSet, cl client.QueryClient) (sdk.Coin, error) { + var deposit sdk.Coin + var depositStr string + var err error + + if !flags.Changed(cflags.FlagDeposit) { + resp, err := cl.Deployment().Params(ctx, &dtypes.QueryParamsRequest{}) + if err != nil 
{ + return sdk.Coin{}, err + } + + // always default to AKT + for _, sCoin := range resp.Params.MinDeposits { + if sCoin.Denom == "uakt" { + depositStr = fmt.Sprintf("%s%s", sCoin.Amount, sCoin.Denom) + break + } + } + + if depositStr == "" { + return sdk.Coin{}, fmt.Errorf("couldn't query default deposit amount for uAKT") + } + } else { + depositStr, err = flags.GetString(cflags.FlagDeposit) + if err != nil { + return sdk.Coin{}, err + } + } + + deposit, err = sdk.ParseCoinNormalized(depositStr) + if err != nil { + return sdk.Coin{}, err + } + + return deposit, nil +} + +func DetectBidDeposit(ctx context.Context, flags *pflag.FlagSet, cl client.QueryClient) (sdk.Coin, error) { + var deposit sdk.Coin + var depositStr string + var err error + + if !flags.Changed(cflags.FlagDeposit) { + resp, err := cl.Market().Params(ctx, &mtypes.QueryParamsRequest{}) + if err != nil { + return sdk.Coin{}, err + } + + depositStr = resp.Params.BidMinDeposit.String() + } else { + depositStr, err = flags.GetString(cflags.FlagDeposit) + if err != nil { + return sdk.Coin{}, err + } + } + + deposit, err = sdk.ParseCoinNormalized(depositStr) + if err != nil { + return sdk.Coin{}, err + } + + return deposit, nil +} diff --git a/go/cli/validate.go b/go/cli/validate.go new file mode 100644 index 00000000..158b739f --- /dev/null +++ b/go/cli/validate.go @@ -0,0 +1,57 @@ +package cli + +import ( + "errors" + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +// ValidateCmd returns unknown command error or Help display if help flag set +func ValidateCmd(cmd *cobra.Command, args []string) error { + var unknownCmd string + var skipNext bool + + for _, arg := range args { + // search for help flag + if arg == "--help" || arg == "-h" { + return cmd.Help() + } + + // check if the current arg is a flag + switch { + case len(arg) > 0 && (arg[0] == '-'): + // the next arg should be skipped if the current arg is a + // flag and does not use "=" to assign the flag's value + if !strings.Contains(arg, "=") { + skipNext = true + } else { + skipNext = false + } + case skipNext: + // skip current arg + skipNext = false + case unknownCmd == "": + // unknown command found + // continue searching for help flag + unknownCmd = arg + } + } + + // return the help screen if no unknown command is found + if unknownCmd != "" { + err := fmt.Sprintf("unknown command \"%s\" for \"%s\"", unknownCmd, cmd.CalledAs()) + + // build suggestions for unknown argument + if suggestions := cmd.SuggestionsFor(unknownCmd); len(suggestions) > 0 { + err += "\n\nDid you mean this?\n" + for _, s := range suggestions { + err += fmt.Sprintf("\t%v\n", s) + } + } + return errors.New(err) + } + + return cmd.Help() +} diff --git a/go/cli/vesting_tx.go b/go/cli/vesting_tx.go new file mode 100644 index 00000000..5a8c8653 --- /dev/null +++ b/go/cli/vesting_tx.go @@ -0,0 +1,218 @@ +package cli + +import ( + "encoding/json" + "fmt" + "os" + "strconv" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/auth/vesting/types" + + cflags "pkg.akt.dev/go/cli/flags" +) + +// GetTxVestingCmd returns vesting module's transaction commands. 
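Stepping back to the deposit helpers in utils.go above: they let deployment and bid commands fall back to the on-chain minimum whenever --deposit is not passed explicitly. A sketch of how a RunE might use one of them; the surrounding command definition and message construction are omitted, and the full client exposing the module query client via Query() is an assumption here, mirroring the query commands in this package:

package cli

import (
	"github.com/spf13/cobra"
)

// runDepositSketch is illustrative only.
func runDepositSketch(cmd *cobra.Command) error {
	ctx := cmd.Context()
	cl := MustClientFromContext(ctx)

	// Resolve --deposit, defaulting to the deployment module's minimum uakt deposit.
	// cl.Query() returning the module query client is assumed, not shown in this file.
	deposit, err := DetectDeploymentDeposit(ctx, cmd.Flags(), cl.Query())
	if err != nil {
		return err
	}

	return cl.ClientContext().PrintString(deposit.String() + "\n")
}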
+func GetTxVestingCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: "Vesting transaction subcommands", + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand( + GetTxVestingCreateAccountCmd(), + GetTxVestingCreatePermanentLockedAccountCmd(), + GetTxVestingCreatePeriodicAccountCmd(), + ) + + return cmd +} + +// GetTxVestingCreateAccountCmd returns a CLI command handler for creating a +// MsgCreateVestingAccount transaction. +func GetTxVestingCreateAccountCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "create-vesting-account [to_address] [amount] [end_time]", + Short: "Create a new vesting account funded with an allocation of tokens.", + Long: `Create a new vesting account funded with an allocation of tokens. The +account can either be a delayed or continuous vesting account, which is determined +by the '--delayed' flag. All vesting accounts created will have their start time +set by the committed block's time. The end_time must be provided as a UNIX epoch +timestamp.`, + Args: cobra.ExactArgs(3), + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + toAddr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + amount, err := sdk.ParseCoinsNormalized(args[1]) + if err != nil { + return err + } + + endTime, err := strconv.ParseInt(args[2], 10, 64) + if err != nil { + return err + } + + delayed, _ := cmd.Flags().GetBool(cflags.FlagDelayed) + + msg := types.NewMsgCreateVestingAccount(cctx.GetFromAddress(), toAddr, amount, endTime, delayed) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cmd.Flags().Bool(cflags.FlagDelayed, false, "Create a delayed vesting account if true") + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +// GetTxVestingCreatePermanentLockedAccountCmd returns a CLI command handler for creating a +// MsgCreatePermanentLockedAccount transaction. +func GetTxVestingCreatePermanentLockedAccountCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "create-permanent-locked-account [to_address] [amount]", + Short: "Create a new permanently locked account funded with an allocation of tokens.", + Long: `Create a new account funded with an allocation of permanently locked tokens. These +tokens may be used for staking but are non-transferable. Staking rewards will acrue as liquid and transferable +tokens.`, + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + toAddr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + amount, err := sdk.ParseCoinsNormalized(args[1]) + if err != nil { + return err + } + + msg := types.NewMsgCreatePermanentLockedAccount(cctx.GetFromAddress(), toAddr, amount) + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} + +type VestingData struct { + StartTime int64 `json:"start_time"` + Periods []InputPeriod `json:"periods"` +} + +type InputPeriod struct { + Coins string `json:"coins"` + Length int64 `json:"length_seconds"` +} + +// GetTxVestingCreatePeriodicAccountCmd returns a CLI command handler for creating a +// MsgCreatePeriodicVestingAccountCmd transaction. 
+func GetTxVestingCreatePeriodicAccountCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "create-periodic-vesting-account [to_address] [periods_json_file]", + Short: "Create a new vesting account funded with an allocation of tokens.", + Long: `A sequence of coins and period length in seconds. Periods are sequential, in that the duration of of a period only starts at the end of the previous period. The duration of the first period starts upon account creation. For instance, the following periods.json file shows 20 "test" coins vesting 30 days apart from each other. + Where periods.json contains: + + An array of coin strings and unix epoch times for coins to vest +{ "start_time": 1625204910, +"periods":[ + { + "coins": "10test", + "length_seconds":2592000 //30 days + }, + { + "coins": "10test", + "length_seconds":2592000 //30 days + }, +] + } + `, + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + cl := MustClientFromContext(ctx) + cctx := cl.ClientContext() + + toAddr, err := sdk.AccAddressFromBech32(args[0]) + if err != nil { + return err + } + + contents, err := os.ReadFile(args[1]) + if err != nil { + return err + } + + var vestingData VestingData + + err = json.Unmarshal(contents, &vestingData) + if err != nil { + return err + } + + var periods []types.Period + + for i, p := range vestingData.Periods { + + amount, err := sdk.ParseCoinsNormalized(p.Coins) + if err != nil { + return err + } + + if p.Length < 0 { + return fmt.Errorf("invalid period length of %d in period %d, length must be greater than 0", p.Length, i) + } + period := types.Period{Length: p.Length, Amount: amount} + periods = append(periods, period) + } + + msg := types.NewMsgCreatePeriodicVestingAccount(cctx.GetFromAddress(), toAddr, vestingData.StartTime, periods) + if err := msg.ValidateBasic(); err != nil { + return err + } + + resp, err := cl.Tx().BroadcastMsgs(ctx, []sdk.Msg{msg}) + if err != nil { + return err + } + + return cl.PrintMessage(resp) + }, + } + + cflags.AddTxFlagsToCmd(cmd) + + return cmd +} diff --git a/go/go.mod b/go/go.mod new file mode 100644 index 00000000..175c5c12 --- /dev/null +++ b/go/go.mod @@ -0,0 +1,191 @@ +module pkg.akt.dev/go + +go 1.23.0 + +require ( + cosmossdk.io/api v0.3.1 + cosmossdk.io/errors v1.0.1 + cosmossdk.io/math v1.3.0 + github.com/99designs/keyring v1.2.2 + github.com/boz/go-lifecycle v0.1.1 + github.com/cometbft/cometbft v0.37.6 + github.com/cosmos/cosmos-proto v1.0.0-beta.5 + github.com/cosmos/cosmos-sdk v0.47.16-ics-lsm + github.com/cosmos/gogoproto v1.4.12 + github.com/edwingeng/deque/v2 v2.1.1 + github.com/gogo/protobuf v1.3.2 + github.com/golang/protobuf v1.5.4 + github.com/grpc-ecosystem/grpc-gateway v1.16.0 + github.com/pseudomuto/protoc-gen-doc v1.5.1 + github.com/stretchr/testify v1.9.0 + go.step.sm/crypto v0.45.1 + google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 + google.golang.org/grpc v1.64.0 + google.golang.org/protobuf v1.34.1 + gopkg.in/yaml.v3 v3.0.1 + k8s.io/api v0.30.1 + k8s.io/apimachinery v0.30.1 +) + +replace ( + // use cosmos fork of keyring + github.com/99designs/keyring => github.com/cosmos/keyring v1.2.0 + + // pin gogoproto version to v1.4.10 + github.com/cosmos/gogoproto => github.com/cosmos/gogoproto v1.4.10 + + // dgrijalva/jwt-go is deprecated and doesn't receive security updates. 
+ // TODO: remove it: https://github.com/cosmos/cosmos-sdk/issues/13134 + github.com/dgrijalva/jwt-go => github.com/golang-jwt/jwt/v4 v4.4.2 + + // Fix upstream GHSA-h395-qcrw-5vmq vulnerability. + // TODO Remove it: https://github.com/cosmos/cosmos-sdk/issues/10409 + github.com/gin-gonic/gin => github.com/gin-gonic/gin v1.8.1 + + // Use regen gogoproto fork + // To be removed in akash v2 release + github.com/gogo/protobuf => github.com/cosmos/gogoproto v1.3.3-alpha.regen.1 + + // replace broken goleveldb + github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 + // stick with compatible version or x/exp in v0.47.x line + // to be removed in akash v2 + golang.org/x/exp => golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb + + // ping x/sync to same version as sdk-47 + // to be removed in akash v2 + //golang.org/x/sync => golang.org/x/sync v0.6.0 + + // pin googleapis to same version as sdk-47 + // to be removed in akash v2 + google.golang.org/genproto/googleapis/api => google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 + // stick with compatible version of rapid in v0.47.x line + pgregory.net/rapid => pgregory.net/rapid v0.5.5 +) + +require ( + cosmossdk.io/core v0.5.1 // indirect + cosmossdk.io/depinject v1.0.0-alpha.4 // indirect + filippo.io/edwards25519 v1.1.0 // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/ChainSafe/go-schnorrkel v1.0.0 // indirect + github.com/Masterminds/goutils v1.1.1 // indirect + github.com/Masterminds/semver v1.5.0 // indirect + github.com/Masterminds/sprig v2.22.0+incompatible // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cockroachdb/errors v1.10.0 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cometbft/cometbft-db v0.7.0 // indirect + github.com/confio/ics23/go v0.9.1 // indirect + github.com/cosmos/btcutil v1.0.5 // indirect + github.com/cosmos/go-bip39 v1.0.0 // indirect + github.com/cosmos/iavl v0.20.1 // indirect + github.com/cosmos/ledger-cosmos-go v0.12.4 // indirect + github.com/danieljoos/wincred v1.1.2 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/dgraph-io/badger/v2 v2.2007.4 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/getsentry/sentry-go v0.23.0 // indirect + github.com/go-kit/kit v0.12.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/golang/glog v1.2.0 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + 
github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/mux v1.8.1 // indirect + github.com/gorilla/websocket v1.5.1 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/gtank/merlin v0.1.1 // indirect + github.com/gtank/ristretto255 v0.1.2 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-uuid v1.0.1 // indirect + github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect + github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c // indirect + github.com/hdevalence/ed25519consensus v0.1.0 // indirect + github.com/huandu/skiplist v1.2.0 // indirect + github.com/huandu/xstrings v1.4.0 // indirect + github.com/imdario/mergo v0.3.13 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmhodges/levigo v1.0.1-0.20191019112844-b572e7f4cdac // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.0 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect + github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mitchellh/reflectwalk v1.0.2 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.16.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/pseudomuto/protokit v0.2.0 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sasha-s/go-deadlock v0.3.1 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/cobra v1.8.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.18.2 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect + github.com/tendermint/go-amino v0.16.0 // indirect + github.com/tidwall/btree v1.6.0 // indirect + github.com/zondax/hid v0.9.2 // indirect + github.com/zondax/ledger-go v0.14.3 // indirect + go.etcd.io/bbolt v1.3.7 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/multierr v1.9.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/exp 
v0.0.0-20231006140011-7918f672742d // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/term v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect + google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + pgregory.net/rapid v1.1.0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/go/go.sum b/go/go.sum new file mode 100644 index 00000000..8877e000 --- /dev/null +++ b/go/go.sum @@ -0,0 +1,669 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cosmossdk.io/api v0.3.1 h1:NNiOclKRR0AOlO4KIqeaG6PS6kswOMhHD0ir0SscNXE= +cosmossdk.io/api v0.3.1/go.mod h1:DfHfMkiNA2Uhy8fj0JJlOCYOBp4eWUUJ1te5zBGNyIw= +cosmossdk.io/core v0.5.1 h1:vQVtFrIYOQJDV3f7rw4pjjVqc1id4+mE0L9hHP66pyI= +cosmossdk.io/core v0.5.1/go.mod h1:KZtwHCLjcFuo0nmDc24Xy6CRNEL9Vl/MeimQ2aC7NLE= +cosmossdk.io/depinject v1.0.0-alpha.4 h1:PLNp8ZYAMPTUKyG9IK2hsbciDWqna2z1Wsl98okJopc= +cosmossdk.io/depinject v1.0.0-alpha.4/go.mod h1:HeDk7IkR5ckZ3lMGs/o91AVUc7E596vMaOmslGFM3yU= +cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= +cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= +cosmossdk.io/log v1.3.1 h1:UZx8nWIkfbbNEWusZqzAx3ZGvu54TZacWib3EzUYmGI= +cosmossdk.io/log v1.3.1/go.mod h1:2/dIomt8mKdk6vl3OWJcPk2be3pGOS8OQaLUM/3/tCM= +cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= +cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= +cosmossdk.io/tools/rosetta v0.2.1 h1:ddOMatOH+pbxWbrGJKRAawdBkPYLfKXutK9IETnjYxw= +cosmossdk.io/tools/rosetta v0.2.1/go.mod h1:Pqdc1FdvkNV3LcNIkYWt2RQY6IP1ge6YWZk8MhhO9Hw= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/ChainSafe/go-schnorrkel v1.0.0 h1:3aDA67lAykLaG1y3AOjs88dMxC88PgUuHRrLeDnvGIM= +github.com/ChainSafe/go-schnorrkel v1.0.0/go.mod h1:dpzHYVxLZcp8pjlV+O+UR8K0Hp/z7vcchBSbMBEhCw4= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= +github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= +github.com/Masterminds/sprig 
v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/boz/go-lifecycle v0.1.1 h1:tG/wff7Zxbkf19g4D4I0G8Y4sq83iT5QjD4rzEf/zrI= +github.com/boz/go-lifecycle v0.1.1/go.mod h1:zdagAUMcC2C0OmQkBlJZFV77uF4GCVaGphAexGi7oho= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ= +github.com/btcsuite/btcd/btcutil v1.1.2/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/readline v1.5.1 
h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/errors v1.10.0 h1:lfxS8zZz1+OjtV4MtNWgboi/W5tyLEB6VQZBXN+0VUU= +github.com/cockroachdb/errors v1.10.0/go.mod h1:lknhIsEVQ9Ss/qKDBQS/UqFSvPQjOwNq2qyKAxtHRqE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/coinbase/rosetta-sdk-go/types v1.0.0 h1:jpVIwLcPoOeCR6o1tU+Xv7r5bMONNbHU7MuEHboiFuA= +github.com/coinbase/rosetta-sdk-go/types v1.0.0/go.mod h1:eq7W2TMRH22GTW0N0beDnN931DW0/WOI1R2sdHNHG4c= +github.com/cometbft/cometbft v0.37.6 h1:2BSD0lGPbcIyRd99Pf1zH0Sa8o0pbfqVWEDbZ4Ec2Uc= +github.com/cometbft/cometbft v0.37.6/go.mod h1:5FRkFil9uagHZogIX9x8z51c3GIPpQmdIN8Mq46HfzY= +github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= +github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= +github.com/confio/ics23/go v0.9.1 h1:3MV46eeWwO3xCauKyAtuAdJYMyPnnchW4iLr2bTw6/U= +github.com/confio/ics23/go v0.9.1/go.mod h1:4LPZ2NYqnYIVRklaozjNR1FScgDJ2s5Xrp+e/mYVRak= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= +github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= +github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= +github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= +github.com/cosmos/cosmos-sdk v0.47.16-ics-lsm h1:+mlfnZ4Cs8HMw9xy7Epjv56avptYSTsX3TVlUDX3Qcs= +github.com/cosmos/cosmos-sdk v0.47.16-ics-lsm/go.mod h1:uzvMwHXmuRDSOaF8ec9HickjLHJcItWBREdkaDHcPiE= +github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= +github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= +github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= +github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= +github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ4GUkT+tbFI= +github.com/cosmos/gogoproto v1.3.3-alpha.regen.1 h1:Qmv/wAw4xHnjN5iZ9qHergfk1O7nnYl7ZsIY5lF+E9k= +github.com/cosmos/gogoproto v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= +github.com/cosmos/gogoproto v1.4.10 h1:QH/yT8X+c0F4ZDacDv3z+xE3WU1P1Z3wQoLMBRJoKuI= +github.com/cosmos/gogoproto 
v1.4.10/go.mod h1:3aAZzeRWpAwr+SS/LLkICX2/kDFyaYVzckBDzygIxek= +github.com/cosmos/iavl v0.20.1 h1:rM1kqeG3/HBT85vsZdoSNsehciqUQPWrR4BYmqE2+zg= +github.com/cosmos/iavl v0.20.1/go.mod h1:WO7FyvaZJoH65+HFOsDir7xU9FWk2w9cHXNW1XHcl7A= +github.com/cosmos/keyring v1.2.0 h1:8C1lBP9xhImmIabyXW4c3vFjjLiBdGCmfLUfeZlV1Yo= +github.com/cosmos/keyring v1.2.0/go.mod h1:fc+wB5KTk9wQ9sDx0kFXB3A0MaeGHM9AwRStKOQ5vOA= +github.com/cosmos/ledger-cosmos-go v0.12.4 h1:drvWt+GJP7Aiw550yeb3ON/zsrgW0jgh5saFCr7pDnw= +github.com/cosmos/ledger-cosmos-go v0.12.4/go.mod h1:fjfVWRf++Xkygt9wzCsjEBdjcf7wiiY35fv3ctT+k4M= +github.com/cosmos/rosetta-sdk-go v0.10.0 h1:E5RhTruuoA7KTIXUcMicL76cffyeoyvNybzUGSKFTcM= +github.com/cosmos/rosetta-sdk-go v0.10.0/go.mod h1:SImAZkb96YbwvoRkzSMQB6noNJXFgWl/ENIznEoYQI4= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= +github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= +github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= +github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod 
h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/edwingeng/deque/v2 v2.1.1 h1:+xjC3TnaeMPLZMi7QQf9jN2K00MZmTwruApqplbL9IY= +github.com/edwingeng/deque/v2 v2.1.1/go.mod h1:HukI8CQe9KDmZCcURPZRYVYjH79Zy2tIjTF9sN3Bgb0= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/getsentry/sentry-go v0.23.0 h1:dn+QRCeJv4pPt9OjVXiMcGIBIefaTJPw/h0bZWO05nE= +github.com/getsentry/sentry-go v0.23.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod 
h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod 
h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= +github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= +github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= +github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod 
h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c h1:PdZEHcpa3117kJ1Wa5EYupzCzn9QlBby8Fx2YpZPYvo= +github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hdevalence/ed25519consensus v0.1.0 h1:jtBwzzcHuTmFrQN6xQZn6CQEO/V9f7HsjsjeEZ6auqU= +github.com/hdevalence/ed25519consensus v0.1.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= +github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= +github.com/huandu/skiplist v1.2.0 h1:gox56QD77HzSC0w+Ws3MH3iie755GBJU1OER3h5VsYw= +github.com/huandu/skiplist v1.2.0/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= +github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= +github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmhodges/levigo v1.0.1-0.20191019112844-b572e7f4cdac h1:GcJkaxD5Wy/Ucn+L0USlpbGJy9O6+7r0nBI7ftJ7Uu0= +github.com/jmhodges/levigo v1.0.1-0.20191019112844-b572e7f4cdac/go.mod h1:dM7ihgFM8Do6WGIfOXWPgpJ+4bKGR/4ZkYh8HKDdFy4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 
h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= +github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 h1:QRUSJEgZn2Snx0EmT/QLXibWjSUDjKWvXIT19NBVp94= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= +github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod 
h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= +github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 
h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/pseudomuto/protoc-gen-doc v1.5.1 h1:Ah259kcrio7Ix1Rhb6u8FCaOkzf9qRBqXnvAufg061w= +github.com/pseudomuto/protoc-gen-doc v1.5.1/go.mod h1:XpMKYg6zkcpgfpCfQ8GcWBDRtRxOmMR5w7pz4Xo+dYM= +github.com/pseudomuto/protokit v0.2.0 h1:hlnBDcy3YEDXH7kc9gV+NLaN0cDzhDvD1s7Y6FZ8RpM= +github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= +github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= +github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= +github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/zerolog v1.32.0 
h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= +github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY= +github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= 
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= +github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= +github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= +github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= +github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= +github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= +github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.step.sm/crypto v0.45.1 h1:Xb8XldsbqT6pDYsg46BVPP1euASNbeNAhzrlvUP3QWo= +go.step.sm/crypto v0.45.1/go.mod h1:XtJBuMuZb11YeJpG8uP3fyBl2MerXWJ/pWQX/Au+Kt8= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/multierr v1.1.0/go.mod 
h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb h1:xIApU0ow1zwMa2uL1VDNeQlNVFTWMQxZUZCMDy0Q4Us= +golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod 
h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools 
v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU= +google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 h1:mxSlqyb8ZAHsYDCfiXN1EDdNTdvjUJSLY+OnAUtYNYA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= +k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= +k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= +k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils 
v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= +nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +pgregory.net/rapid v0.5.5 h1:jkgx1TjbQPD/feRoK+S/mXw9e1uj6WilpHrXJowi6oA= +pgregory.net/rapid v0.5.5/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/go/grpc/gogoreflection/fix_registration.go b/go/grpc/gogoreflection/fix_registration.go index 4a97108d..3a76cc9c 100644 --- a/go/grpc/gogoreflection/fix_registration.go +++ b/go/grpc/gogoreflection/fix_registration.go @@ -6,31 +6,50 @@ import ( "fmt" "reflect" - _ "github.com/gogo/protobuf/gogoproto" // required so it does register the gogoproto file descriptor - _ "k8s.io/apimachinery/pkg/api/resource" // required so it does register the k8s resource - - gogoproto "github.com/gogo/protobuf/proto" - - // nolint: staticcheck - "github.com/golang/protobuf/proto" - dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" - _ "github.com/regen-network/cosmos-proto" // look above + _ "github.com/cosmos/gogoproto/gogoproto" // required so it does register the gogoproto file descriptor + gogoproto "github.com/cosmos/gogoproto/proto" + dpb "github.com/cosmos/gogoproto/protoc-gen-gogo/descriptor" + "github.com/golang/protobuf/proto" //nolint:staticcheck + "k8s.io/apimachinery/pkg/api/resource" // required so it does register the k8s resource + + // we need to this transfer protobuf registration to gogoproto above + kproto "github.com/gogo/protobuf/proto" ) -var importsToFix = map[string][]string{ - "gogo.proto": { - "gogoproto/gogo.proto", - "github.com/gogo/protobuf/gogoproto/gogo.proto", - }, +type registerEntryType struct { + msg kproto.Message + protoType string +} - "cosmos.proto": {"cosmos_proto/cosmos.proto"}, - "k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto": {"k8s.io/apimachinery/pkg/api/resource/generated.proto"}, +type registerEntry struct { + protoFile string + types []registerEntryType } +var ( + fixProtos = []registerEntry{ + { + protoFile: "k8s.io/apimachinery/pkg/api/resource/generated.proto", + types: []registerEntryType{ + { + msg: (*resource.Quantity)(nil), + protoType: "k8s.io.apimachinery.pkg.api.resource.Quantity", + }, + { + msg: (*resource.QuantityValue)(nil), + protoType: "k8s.io.apimachinery.pkg.api.resource.QuantityValue", + }, + }, + }, + } + + importsToFix = map[string][]string{} +) + // fixRegistration is required because certain files register themselves in a way // but are imported by other files in a different way. -// NOTE(fdymylja): This fix should not be needed and should be addressed in some CI. -// Currently every cosmos-sdk proto file is importing gogo.proto as gogoproto/gogo.proto, +// NOTE(troian): This fix should not be needed and should be addressed in some CI. 
+// Currently, every cosmos-sdk proto file is importing gogo.proto as gogoproto/gogo.proto, // but gogo.proto registers itself as gogo.proto, same goes for cosmos.proto. func fixRegistration(registeredAs, importedAs string) error { raw := gogoproto.FileDescriptor(registeredAs) @@ -58,6 +77,14 @@ func init() { // in theory this shouldn't be required, generally speaking // proto files should be imported as their registration path + for _, fix := range fixProtos { + for _, fproto := range fix.types { + gogoproto.RegisterType(fproto.msg, fproto.protoType) + } + + gogoproto.RegisterFile(fix.protoFile, kproto.FileDescriptor(fix.protoFile)) + } + for registeredAs, imports := range importsToFix { for _, importedAs := range imports { err := fixRegistration(registeredAs, importedAs) @@ -95,7 +122,7 @@ func getFileDescriptor(filePath string) []byte { if len(fd) != 0 { return fd } - // nolint: staticcheck + // nolint:staticcheck return proto.FileDescriptor(filePath) } @@ -104,7 +131,7 @@ func getMessageType(name string) reflect.Type { if typ != nil { return typ } - // nolint: staticcheck + // nolint:staticcheck return proto.MessageType(name) } @@ -115,8 +142,9 @@ func getExtension(extID int32, m proto.Message) *gogoproto.ExtensionDesc { return desc } } + // check into proto registry - // nolint: staticcheck + // nolint:staticcheck for id, desc := range proto.RegisteredExtensions(m) { if id == extID { return &gogoproto.ExtensionDesc{ @@ -142,7 +170,7 @@ func getExtensionsNumbers(m proto.Message) []int32 { if len(out) != 0 { return out } - // nolint: staticcheck + // nolint:staticcheck protoExts := proto.RegisteredExtensions(m) out = make([]int32, 0, len(protoExts)) for id := range protoExts { diff --git a/go/grpc/gogoreflection/serverreflection.go b/go/grpc/gogoreflection/serverreflection.go index 936d3df3..f5544c12 100644 --- a/go/grpc/gogoreflection/serverreflection.go +++ b/go/grpc/gogoreflection/serverreflection.go @@ -16,24 +16,22 @@ * */ -/* -Package reflection implements server reflection service. - -The service implemented is defined in: -https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. - -To register server reflection on a gRPC server: - - import "google.golang.org/grpc/reflection" - - s := grpc.NewServer() - pb.RegisterYourOwnServer(s, &server{}) - - // Register reflection service on gRPC server. - reflection.Register(s) - - s.Serve(lis) -*/ +// Package gogoreflection implements server reflection service. +// +// The service implemented is defined in: +// https://github.com/grpc/grpc/blob/master/src/proto/grpc/reflection/v1alpha/reflection.proto. +// +// To register server reflection on a gRPC server: +// +// import "google.golang.org/grpc/reflection" +// +// s := grpc.NewServer() +// pb.RegisterYourOwnServer(s, &server{}) +// +// // Register reflection service on gRPC server. 
+// reflection.Register(s) +// +// s.Serve(lis) package gogoreflection // import "google.golang.org/grpc/reflection" import ( @@ -46,31 +44,22 @@ import ( "sort" "sync" - // nolint: staticcheck - "github.com/golang/protobuf/proto" - dpb "github.com/golang/protobuf/protoc-gen-go/descriptor" + dpb "github.com/cosmos/gogoproto/protoc-gen-gogo/descriptor" + "github.com/golang/protobuf/proto" //nolint:staticcheck "google.golang.org/grpc" "google.golang.org/grpc/codes" - rpb "google.golang.org/grpc/reflection/grpc_reflection_v1alpha" + rpb "google.golang.org/grpc/reflection/grpc_reflection_v1" "google.golang.org/grpc/status" ) type serverReflectionServer struct { rpb.UnimplementedServerReflectionServer - s *grpc.Server - + s *grpc.Server initSymbols sync.Once serviceNames []string symbols map[string]*dpb.FileDescriptorProto // map of fully-qualified names to files } -// Register registers the server reflection service on the given gRPC server. -func Register(s *grpc.Server) { - rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ - s: s, - }) -} - // protoMessage is used for type assertion on proto messages. // Generated proto message implements function Descriptor(), but Descriptor() // is not part of interface proto.Message. This interface is needed to @@ -79,6 +68,13 @@ type protoMessage interface { Descriptor() ([]byte, []int) } +// Register registers the server reflection service on the given gRPC server. +func Register(s *grpc.Server) { + rpb.RegisterServerReflectionServer(s, &serverReflectionServer{ + s: s, + }) +} + func (s *serverReflectionServer) getSymbols() (svcNames []string, symbolIndex map[string]*dpb.FileDescriptorProto) { s.initSymbols.Do(func() { serviceInfo := s.s.GetServiceInfo() @@ -188,7 +184,7 @@ func fqn(prefix, name string) string { // fileDescForType gets the file descriptor for the given type. // The given type should be a proto message. func (s *serverReflectionServer) fileDescForType(st reflect.Type) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(protoMessage) + m, ok := reflect.Zero(reflect.PointerTo(st)).Interface().(protoMessage) if !ok { return nil, fmt.Errorf("failed to create message from type: %v", st) } @@ -236,7 +232,7 @@ func typeForName(name string) (reflect.Type, error) { } func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescriptorProto, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) + m, ok := reflect.Zero(reflect.PointerTo(st)).Interface().(proto.Message) if !ok { return nil, fmt.Errorf("failed to create message from type: %v", st) } @@ -251,7 +247,7 @@ func fileDescContainingExtension(st reflect.Type, ext int32) (*dpb.FileDescripto } func (s *serverReflectionServer) allExtensionNumbersForType(st reflect.Type) ([]int32, error) { - m, ok := reflect.Zero(reflect.PtrTo(st)).Interface().(proto.Message) + m, ok := reflect.Zero(reflect.PointerTo(st)).Interface().(proto.Message) if !ok { return nil, fmt.Errorf("failed to create message from type: %v", st) } @@ -376,6 +372,8 @@ func (s *serverReflectionServer) allExtensionNumbersForTypeName(name string) ([] } // ServerReflectionInfo is the reflection service handler. 
+// +//nolint:staticcheck func (s *serverReflectionServer) ServerReflectionInfo(stream rpb.ServerReflection_ServerReflectionInfoServer) error { sentFileDescriptors := make(map[string]bool) for { diff --git a/go/inventory/v1/cluster.pb.go b/go/inventory/v1/cluster.pb.go index 8b45c53d..c0eaeb19 100644 --- a/go/inventory/v1/cluster.pb.go +++ b/go/inventory/v1/cluster.pb.go @@ -5,8 +5,8 @@ package v1 import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -83,7 +83,7 @@ func init() { func init() { proto.RegisterFile("akash/inventory/v1/cluster.proto", fileDescriptor_205585ebfe7f68ce) } var fileDescriptor_205585ebfe7f68ce = []byte{ - // 292 bytes of a gzipped FileDescriptorProto + // 277 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcc, 0x4e, 0x2c, 0xce, 0xd0, 0xcf, 0xcc, 0x2b, 0x4b, 0xcd, 0x2b, 0xc9, 0x2f, 0xaa, 0xd4, 0x2f, 0x33, 0xd4, 0x4f, 0xce, 0x29, 0x2d, 0x2e, 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x02, 0xab, @@ -97,12 +97,11 @@ var fileDescriptor_205585ebfe7f68ce = []byte{ 0x2e, 0x76, 0xa8, 0xed, 0x12, 0x4c, 0x60, 0x2b, 0xa4, 0xb1, 0x59, 0x11, 0x0c, 0x51, 0xe2, 0x64, 0x07, 0xb5, 0x85, 0x1d, 0x2a, 0xf0, 0xea, 0x9e, 0x3c, 0x4c, 0xfb, 0xa7, 0x7b, 0xf2, 0x7c, 0x10, 0x9b, 0xa0, 0x02, 0x20, 0xbb, 0xf8, 0xa0, 0xfe, 0x80, 0x2a, 0x0f, 0x82, 0x29, 0xb6, 0x62, 0x79, - 0xb1, 0x40, 0x9e, 0xc1, 0xc9, 0xe7, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, + 0xb1, 0x40, 0x9e, 0xc1, 0xc9, 0xf4, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, - 0x8c, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xc1, 0xee, 0xd1, 0xcd, - 0x4b, 0x2d, 0x29, 0xcf, 0x2f, 0xca, 0x86, 0xf2, 0x12, 0x0b, 0x32, 0xf5, 0xd3, 0xf3, 0x51, 0x42, - 0x31, 0x89, 0x0d, 0x1c, 0x7c, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x38, 0x3a, 0xd9, 0x8b, - 0xcd, 0x01, 0x00, 0x00, + 0xa4, 0x0b, 0xb2, 0xd3, 0xf5, 0x12, 0xb3, 0x4b, 0xf4, 0x52, 0x52, 0xcb, 0xf4, 0xd3, 0xf3, 0x51, + 0x82, 0x2b, 0x89, 0x0d, 0x1c, 0x4e, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x58, 0xcb, 0x33, + 0xda, 0xb6, 0x01, 0x00, 0x00, } func (m *Cluster) Marshal() (dAtA []byte, err error) { diff --git a/go/inventory/v1/cpu.pb.go b/go/inventory/v1/cpu.pb.go index 22c9a5c3..ec7a53fb 100644 --- a/go/inventory/v1/cpu.pb.go +++ b/go/inventory/v1/cpu.pb.go @@ -5,8 +5,8 @@ package v1 import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -153,34 +153,33 @@ func init() { func init() { proto.RegisterFile("akash/inventory/v1/cpu.proto", fileDescriptor_ed2b8a0bd798e5a3) } var fileDescriptor_ed2b8a0bd798e5a3 = []byte{ - // 425 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0xcf, 0x6e, 0xd3, 0x30, - 0x18, 0xaf, 0xd3, 0xae, 0x74, 0x2e, 0x13, 0x52, 0xc4, 0x21, 0x0c, 0x94, 0xaf, 0xb2, 0x34, 0xa9, - 0x1c, 0x48, 0xb4, 0x72, 0x41, 0x48, 0x5c, 0x3a, 0x2e, 0x93, 0x40, 0x2a, 0x41, 0xe3, 0xc0, 0x89, - 0xac, 0x49, 0x3b, 0x6b, 0xab, 0x5d, 0xdc, 0x24, 0xa8, 0x6f, 0xc1, 0x73, 0xf0, 0x24, 0x3b, 0xee, - 0x84, 0x38, 0x19, 0x94, 0xde, 0x22, 0x4e, 0x79, 0x02, 0x94, 
0xcf, 0x26, 0x02, 0x6d, 0xb7, 0xef, - 0xf7, 0xcf, 0x3f, 0xd9, 0xfe, 0xe8, 0x93, 0xf8, 0x32, 0xde, 0x5c, 0x84, 0x5c, 0x14, 0xa9, 0xc8, - 0xa4, 0xda, 0x86, 0xc5, 0x71, 0x38, 0x5f, 0xe7, 0xc1, 0x5a, 0xc9, 0x4c, 0xba, 0x2e, 0xaa, 0x41, - 0xab, 0x06, 0xc5, 0xf1, 0xe1, 0xc3, 0xa5, 0x5c, 0x4a, 0x94, 0xc3, 0x66, 0x32, 0xce, 0xc3, 0xa3, - 0x3b, 0xce, 0x51, 0xe9, 0x46, 0xe6, 0x6a, 0x9e, 0xae, 0x63, 0xae, 0x8c, 0x8d, 0xfd, 0x26, 0xf4, - 0xde, 0xc9, 0xec, 0xec, 0x54, 0x2c, 0xa4, 0xfb, 0x94, 0x3a, 0x3c, 0xf1, 0xc8, 0x88, 0x8c, 0xf7, - 0xa7, 0x8f, 0x4a, 0x0d, 0xce, 0xe9, 0xeb, 0x4a, 0x83, 0xc3, 0x93, 0x5a, 0xc3, 0xfe, 0x36, 0x5e, - 0x5d, 0xbd, 0x64, 0x3c, 0x61, 0x91, 0xc3, 0x13, 0xf7, 0x15, 0xed, 0x17, 0xa9, 0x48, 0xa4, 0xf2, - 0x1c, 0xb4, 0x1f, 0x95, 0x1a, 0xfa, 0x1f, 0x90, 0xa9, 0x34, 0x58, 0xad, 0xd6, 0x70, 0x60, 0x62, - 0x06, 0xb3, 0xc8, 0x0a, 0xee, 0x0b, 0xba, 0xb7, 0x92, 0x49, 0x7a, 0xe5, 0x75, 0x31, 0xcd, 0x4a, - 0x0d, 0x7b, 0x6f, 0x1b, 0xa2, 0xd2, 0x60, 0x94, 0x5a, 0xc3, 0x7d, 0x93, 0x45, 0xc8, 0x22, 0x43, - 0x63, 0xf1, 0x5c, 0xaa, 0x74, 0xe3, 0xf5, 0x46, 0x64, 0x7c, 0x60, 0x8b, 0x91, 0xc1, 0x62, 0x9c, - 0xfe, 0x29, 0x46, 0xdc, 0x14, 0x9b, 0xe1, 0x3b, 0xa1, 0xdd, 0x93, 0xd9, 0x99, 0xbb, 0xa2, 0x83, - 0xcf, 0x79, 0x2c, 0x32, 0x9e, 0x6d, 0xf1, 0xc2, 0xc3, 0xc9, 0x28, 0xb8, 0xfd, 0xb4, 0x41, 0x64, - 0x1f, 0x6c, 0x16, 0x73, 0x35, 0x0d, 0xaf, 0x35, 0x74, 0x4a, 0x0d, 0x83, 0x77, 0x36, 0x59, 0x69, - 0x68, 0x4f, 0xa9, 0x35, 0x3c, 0x30, 0x95, 0x7f, 0x19, 0x16, 0xb5, 0xa2, 0xfb, 0x89, 0xf6, 0xb8, - 0x58, 0x48, 0xcf, 0x19, 0x75, 0xc7, 0xc3, 0xc9, 0xe3, 0xbb, 0xaa, 0xec, 0x27, 0xb4, 0x2d, 0xbd, - 0x06, 0x55, 0x1a, 0x30, 0x58, 0x6b, 0x18, 0xda, 0x0f, 0x10, 0x0b, 0xc9, 0xbe, 0xfd, 0x84, 0x81, - 0xf5, 0xbf, 0x8f, 0xd0, 0x30, 0x7d, 0x73, 0x5d, 0xfa, 0xe4, 0xa6, 0xf4, 0xc9, 0xaf, 0xd2, 0x27, - 0x5f, 0x77, 0x7e, 0xe7, 0x66, 0xe7, 0x77, 0x7e, 0xec, 0xfc, 0xce, 0xc7, 0xc9, 0x92, 0x67, 0x17, - 0xf9, 0x79, 0x30, 0x97, 0xab, 0x10, 0x7b, 0x9f, 0x89, 0x34, 0xfb, 0x22, 0xd5, 0xa5, 0x45, 0xf1, - 0x9a, 0x87, 0x4b, 0xf9, 0xdf, 0xa2, 0x9c, 0xf7, 0x71, 0x39, 0x9e, 0xff, 0x09, 0x00, 0x00, 0xff, - 0xff, 0xf7, 0x33, 0x13, 0x2d, 0x8d, 0x02, 0x00, 0x00, + // 412 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0xcf, 0x8a, 0xd3, 0x40, + 0x18, 0xcf, 0xa4, 0xdd, 0xda, 0x9d, 0xba, 0x08, 0xc1, 0x43, 0xdc, 0x95, 0x7c, 0x61, 0x60, 0xa1, + 0x5e, 0x12, 0xb6, 0x22, 0x88, 0xe0, 0xa5, 0xf5, 0xd2, 0x83, 0x50, 0x23, 0xf5, 0xe0, 0xc9, 0xd8, + 0xa4, 0x71, 0x68, 0x9b, 0x89, 0xd3, 0x34, 0xd0, 0xb7, 0xf0, 0x39, 0x7c, 0x92, 0x1e, 0x7b, 0x12, + 0x4f, 0xa3, 0xa4, 0xb7, 0xe0, 0x29, 0x4f, 0x20, 0x99, 0x19, 0x83, 0x62, 0x6f, 0xdf, 0xef, 0xdf, + 0xfc, 0x98, 0x99, 0x0f, 0x3f, 0x0e, 0x57, 0xe1, 0xf6, 0x93, 0x4f, 0xd3, 0x22, 0x4e, 0x73, 0xc6, + 0xf7, 0x7e, 0x71, 0xe7, 0x2f, 0xb2, 0x9d, 0x97, 0x71, 0x96, 0x33, 0xcb, 0x92, 0xaa, 0xd7, 0xaa, + 0x5e, 0x71, 0x77, 0xfd, 0x30, 0x61, 0x09, 0x93, 0xb2, 0xdf, 0x4c, 0xca, 0x79, 0x7d, 0x7b, 0xe6, + 0x1c, 0x1e, 0x6f, 0xd9, 0x8e, 0x2f, 0xe2, 0x2c, 0xa4, 0x5c, 0xd9, 0xc8, 0x2f, 0x84, 0xef, 0x4d, + 0x66, 0xf3, 0x69, 0xba, 0x64, 0xd6, 0x13, 0x6c, 0xd2, 0xc8, 0x46, 0x2e, 0x1a, 0x5e, 0x8e, 0x1f, + 0x95, 0x02, 0xcc, 0xe9, 0xab, 0x4a, 0x80, 0x49, 0xa3, 0x5a, 0xc0, 0xe5, 0x3e, 0xdc, 0xac, 0x5f, + 0x10, 0x1a, 0x91, 0xc0, 0xa4, 0x91, 0xf5, 0x12, 0xf7, 0x8a, 0x38, 0x8d, 0x18, 0xb7, 0x4d, 0x69, + 0xbf, 0x2d, 0x05, 0xf4, 0xde, 0x49, 0xa6, 0x12, 0xa0, 0xb5, 0x5a, 0xc0, 0x95, 0x8a, 0x29, 0x4c, + 0x02, 0x2d, 0x58, 0xcf, 0xf1, 0xc5, 0x86, 0x45, 0xf1, 0xda, 0xee, 0xc8, 0x34, 
0x29, 0x05, 0x5c, + 0xbc, 0x6e, 0x88, 0x4a, 0x80, 0x52, 0x6a, 0x01, 0xf7, 0x55, 0x56, 0x42, 0x12, 0x28, 0x5a, 0x16, + 0x2f, 0x18, 0x8f, 0xb7, 0x76, 0xd7, 0x45, 0xc3, 0x2b, 0x5d, 0x2c, 0x19, 0x59, 0x2c, 0xa7, 0xbf, + 0x8a, 0x25, 0x6e, 0x8a, 0xd5, 0xf0, 0x0d, 0xe1, 0xce, 0x64, 0x36, 0xb7, 0x36, 0xb8, 0xff, 0x79, + 0x17, 0xa6, 0x39, 0xcd, 0xf7, 0xf2, 0xc2, 0x83, 0x91, 0xeb, 0xfd, 0xff, 0xb4, 0x5e, 0xa0, 0x1f, + 0x6c, 0x16, 0x52, 0x3e, 0xf6, 0x0f, 0x02, 0x8c, 0x52, 0x40, 0xff, 0x8d, 0x4e, 0x56, 0x02, 0xda, + 0x53, 0x6a, 0x01, 0x0f, 0x54, 0xe5, 0x1f, 0x86, 0x04, 0xad, 0x68, 0x7d, 0xc0, 0x5d, 0x9a, 0x2e, + 0x99, 0x6d, 0xba, 0x9d, 0xe1, 0x60, 0x74, 0x73, 0xae, 0x4a, 0x7f, 0x42, 0xdb, 0xd2, 0x6d, 0x50, + 0x25, 0x40, 0x06, 0x6b, 0x01, 0x03, 0xfd, 0x01, 0xe9, 0x92, 0x91, 0xaf, 0x3f, 0xa0, 0xaf, 0xfd, + 0x6f, 0x03, 0x69, 0x18, 0x3f, 0x3b, 0x94, 0x0e, 0x3a, 0x96, 0x0e, 0xfa, 0x59, 0x3a, 0xe8, 0xcb, + 0xc9, 0x31, 0x8e, 0x27, 0xc7, 0xf8, 0x7e, 0x72, 0x8c, 0xf7, 0x37, 0xd9, 0x2a, 0xf1, 0xc2, 0x55, + 0xee, 0x45, 0x71, 0xe1, 0x27, 0xec, 0x9f, 0x8d, 0xf8, 0xd8, 0x93, 0x5b, 0xf0, 0xf4, 0x77, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x0f, 0x45, 0x13, 0x77, 0x76, 0x02, 0x00, 0x00, } func (m *CPUInfo) Marshal() (dAtA []byte, err error) { diff --git a/go/inventory/v1/gpu.pb.go b/go/inventory/v1/gpu.pb.go index f69ac814..d2992fc8 100644 --- a/go/inventory/v1/gpu.pb.go +++ b/go/inventory/v1/gpu.pb.go @@ -5,8 +5,8 @@ package v1 import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -169,39 +169,38 @@ func init() { func init() { proto.RegisterFile("akash/inventory/v1/gpu.proto", fileDescriptor_2cc01b12bd00ffcc) } var fileDescriptor_2cc01b12bd00ffcc = []byte{ - // 505 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x4f, 0x6b, 0xdb, 0x30, - 0x14, 0x8f, 0x9b, 0x2c, 0x71, 0x14, 0xc6, 0x86, 0xd8, 0xc1, 0x74, 0xc3, 0x0a, 0x82, 0x42, 0x19, - 0xd4, 0xa6, 0xc9, 0x6d, 0xb0, 0x4b, 0x28, 0x94, 0x8c, 0x76, 0x64, 0xee, 0xba, 0xc3, 0x2e, 0x99, - 0x1a, 0x2b, 0xae, 0x68, 0x6d, 0x65, 0x8a, 0x93, 0x91, 0x7e, 0x8a, 0x7d, 0x8e, 0x5d, 0xf7, 0x25, - 0x7a, 0xec, 0x69, 0xec, 0xa4, 0x0d, 0xe7, 0x96, 0x63, 0x3e, 0xc1, 0x90, 0xe4, 0x3f, 0x1d, 0xed, - 0xed, 0xbd, 0xdf, 0xbf, 0x67, 0xeb, 0x3d, 0xf0, 0x8a, 0x5c, 0x91, 0xf9, 0xa5, 0xcf, 0x92, 0x25, - 0x4d, 0x52, 0x2e, 0x56, 0xfe, 0xf2, 0xd0, 0x8f, 0x66, 0x0b, 0x6f, 0x26, 0x78, 0xca, 0x21, 0xd4, - 0xac, 0x57, 0xb2, 0xde, 0xf2, 0x70, 0xf7, 0x45, 0xc4, 0x23, 0xae, 0x69, 0x5f, 0x55, 0x46, 0xb9, - 0xbb, 0xf7, 0x48, 0x8e, 0xa0, 0x73, 0xbe, 0x10, 0x13, 0x3a, 0x23, 0x4c, 0x18, 0x19, 0xfe, 0x59, - 0x07, 0xad, 0xe3, 0xd1, 0xf9, 0x30, 0x99, 0x72, 0xf8, 0x16, 0x34, 0x97, 0x34, 0x09, 0xb9, 0x70, - 0xac, 0xae, 0xb5, 0xdf, 0x1e, 0xec, 0x65, 0x12, 0x35, 0x3f, 0x69, 0x64, 0x23, 0x51, 0xce, 0x6d, - 0x25, 0x7a, 0xba, 0x22, 0xf1, 0xf5, 0x1b, 0x6c, 0x7a, 0x1c, 0xe4, 0x04, 0x7c, 0x07, 0xda, 0xa6, - 0x1a, 0xb3, 0xd0, 0xd9, 0xd1, 0x09, 0x07, 0x99, 0x44, 0xb6, 0x49, 0x18, 0x1e, 0x6d, 0x24, 0xaa, - 0x04, 0x5b, 0x89, 0x9e, 0xdf, 0x8f, 0x19, 0xb3, 0x10, 0x07, 0xb6, 0xa9, 0x87, 0x21, 0xec, 0x83, - 0x46, 0x42, 0x62, 0xea, 0xd4, 0x75, 0x0c, 0xca, 0x24, 0x6a, 0xbc, 0x27, 0x31, 0xdd, 0x48, 0xa4, - 0xf1, 0xad, 0x44, 0x1d, 0xe3, 0x56, 0x1d, 0x0e, 0x34, 0x08, 0x8f, 0x40, 0x2b, 0xe6, 0x21, 0xbd, - 0x66, 0xa1, 0xd3, 0xd0, 0xbe, 0xd7, 0x99, 0x44, 0xad, 0x53, 0x05, 0xe9, 0xe9, 0xb6, 0x66, 0xcd, - 0xf0, 
0x67, 0xc6, 0x5e, 0x20, 0x38, 0x28, 0xac, 0xf0, 0x04, 0xb4, 0x59, 0x92, 0x52, 0x31, 0x25, - 0x13, 0xea, 0x3c, 0xd1, 0x39, 0x5e, 0x26, 0x51, 0x7b, 0x58, 0x80, 0xea, 0x3f, 0x4a, 0x45, 0xf5, - 0x1f, 0x25, 0x84, 0x83, 0x8a, 0x86, 0x1f, 0x41, 0x27, 0xa6, 0x31, 0x17, 0xab, 0xf1, 0x9c, 0xdd, - 0x50, 0xa7, 0xa9, 0xf3, 0xfa, 0x99, 0x44, 0xe0, 0x54, 0xc3, 0x67, 0xec, 0x46, 0x05, 0xde, 0x17, - 0x6d, 0x25, 0x82, 0xf9, 0xd7, 0x55, 0x20, 0x0e, 0x40, 0x5c, 0x1a, 0xf0, 0x2f, 0x0b, 0xd4, 0x8f, - 0x47, 0xe7, 0x30, 0x06, 0xf6, 0xd7, 0x05, 0x49, 0x52, 0x96, 0xae, 0xf4, 0xce, 0x3a, 0xbd, 0xae, - 0xf7, 0xf0, 0x42, 0xbc, 0x20, 0xdf, 0xfb, 0x88, 0x30, 0x31, 0xf0, 0x6f, 0x25, 0xaa, 0xa9, 0xbd, - 0x7c, 0xc8, 0x9d, 0xea, 0x65, 0x8a, 0x94, 0xea, 0x65, 0x0a, 0x04, 0x07, 0x25, 0x09, 0xbf, 0x80, - 0x06, 0x4b, 0xa6, 0xdc, 0xd9, 0xe9, 0xd6, 0xf7, 0x3b, 0xbd, 0x97, 0x8f, 0x8d, 0xca, 0x6f, 0xa9, - 0x9c, 0xd2, 0x50, 0x9d, 0x5a, 0x9b, 0x32, 0x56, 0x6b, 0x53, 0x1d, 0xfe, 0xf1, 0x07, 0xd9, 0xb9, - 0xfe, 0x2c, 0xd0, 0x82, 0xc1, 0xc9, 0x6d, 0xe6, 0x5a, 0x77, 0x99, 0x6b, 0xfd, 0xcd, 0x5c, 0xeb, - 0xfb, 0xda, 0xad, 0xdd, 0xad, 0xdd, 0xda, 0xef, 0xb5, 0x5b, 0xfb, 0xdc, 0x8b, 0x58, 0x7a, 0xb9, - 0xb8, 0xf0, 0x26, 0x3c, 0xf6, 0xf5, 0xdc, 0x83, 0x84, 0xa6, 0xdf, 0xb8, 0xb8, 0xca, 0x3b, 0x32, - 0x63, 0x7e, 0xc4, 0xff, 0xbb, 0xf7, 0x8b, 0xa6, 0xbe, 0xf1, 0xfe, 0xbf, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xed, 0x18, 0x1d, 0x0d, 0x54, 0x03, 0x00, 0x00, + // 493 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x4f, 0x6f, 0xd3, 0x30, + 0x14, 0x6f, 0xd6, 0xd0, 0xa5, 0xae, 0x10, 0xc8, 0xe2, 0x10, 0x6d, 0x28, 0xae, 0x2c, 0x4d, 0x9a, + 0x90, 0x48, 0xb4, 0x55, 0x5c, 0x90, 0xb8, 0x54, 0x93, 0xa6, 0x22, 0x86, 0x4a, 0xc6, 0x38, 0x70, + 0x29, 0x66, 0x71, 0x83, 0xd5, 0x25, 0x2e, 0x69, 0x1a, 0xa9, 0xfb, 0x14, 0x7c, 0x0e, 0xae, 0x7c, + 0x89, 0x1d, 0x77, 0x42, 0x9c, 0x0c, 0x72, 0x6f, 0x3d, 0xf6, 0x13, 0x20, 0xdb, 0xf9, 0x33, 0xc4, + 0x6e, 0xef, 0xfd, 0xfe, 0xbd, 0xc4, 0xef, 0x81, 0xa7, 0x64, 0x46, 0x16, 0x5f, 0x02, 0x96, 0x16, + 0x34, 0xcd, 0x79, 0xb6, 0x0a, 0x8a, 0xa3, 0x20, 0x9e, 0x2f, 0xfd, 0x79, 0xc6, 0x73, 0x0e, 0xa1, + 0x66, 0xfd, 0x9a, 0xf5, 0x8b, 0xa3, 0xbd, 0x27, 0x31, 0x8f, 0xb9, 0xa6, 0x03, 0x55, 0x19, 0xe5, + 0xde, 0xc1, 0x3d, 0x39, 0x19, 0x5d, 0xf0, 0x65, 0x76, 0x49, 0xe7, 0x84, 0x65, 0x46, 0x86, 0x7f, + 0xb4, 0xc1, 0xee, 0xe9, 0xf8, 0x62, 0x94, 0x4e, 0x39, 0x7c, 0x05, 0x3a, 0x05, 0x4d, 0x23, 0x9e, + 0xb9, 0x56, 0xdf, 0x3a, 0xec, 0x0e, 0x0f, 0xa4, 0x40, 0x9d, 0x0f, 0x1a, 0xd9, 0x08, 0x54, 0x72, + 0x5b, 0x81, 0x1e, 0xae, 0x48, 0x72, 0xf5, 0x12, 0x9b, 0x1e, 0x87, 0x25, 0x01, 0x5f, 0x83, 0xae, + 0xa9, 0x26, 0x2c, 0x72, 0x77, 0x74, 0xc2, 0x73, 0x29, 0x90, 0x63, 0x12, 0x46, 0x27, 0x1b, 0x81, + 0x1a, 0xc1, 0x56, 0xa0, 0xc7, 0x77, 0x63, 0x26, 0x2c, 0xc2, 0xa1, 0x63, 0xea, 0x51, 0x04, 0x07, + 0xc0, 0x4e, 0x49, 0x42, 0xdd, 0xb6, 0x8e, 0x41, 0x52, 0x20, 0xfb, 0x2d, 0x49, 0xe8, 0x46, 0x20, + 0x8d, 0x6f, 0x05, 0xea, 0x19, 0xb7, 0xea, 0x70, 0xa8, 0x41, 0x78, 0x02, 0x76, 0x13, 0x1e, 0xd1, + 0x2b, 0x16, 0xb9, 0xb6, 0xf6, 0x3d, 0x93, 0x02, 0xed, 0x9e, 0x29, 0x48, 0x4f, 0x77, 0x34, 0x6b, + 0x86, 0x3f, 0x32, 0xf6, 0x0a, 0xc1, 0x61, 0x65, 0x85, 0x6f, 0x40, 0x97, 0xa5, 0x39, 0xcd, 0xa6, + 0xe4, 0x92, 0xba, 0x0f, 0x74, 0x8e, 0x2f, 0x05, 0xea, 0x8e, 0x2a, 0x50, 0xfd, 0x47, 0xad, 0x68, + 0xfe, 0xa3, 0x86, 0x70, 0xd8, 0xd0, 0xf0, 0x3d, 0xe8, 0x25, 0x34, 0xe1, 0xd9, 0x6a, 0xb2, 0x60, + 0xd7, 0xd4, 0xed, 0xe8, 0xbc, 0x81, 0x14, 0x08, 0x9c, 0x69, 0xf8, 0x9c, 0x5d, 0xab, 0xc0, 0xbb, + 0xa2, 0xad, 0x40, 0xb0, 
0xfc, 0xba, 0x06, 0xc4, 0x21, 0x48, 0x6a, 0x03, 0xfe, 0x69, 0x81, 0xf6, + 0xe9, 0xf8, 0x02, 0x26, 0xc0, 0xf9, 0xba, 0x24, 0x69, 0xce, 0xf2, 0x95, 0xde, 0x59, 0xef, 0xb8, + 0xef, 0xff, 0x7f, 0x21, 0x7e, 0x58, 0xee, 0x7d, 0x4c, 0x58, 0x36, 0x0c, 0x6e, 0x04, 0x6a, 0xa9, + 0xbd, 0xbc, 0x2b, 0x9d, 0xea, 0x65, 0xaa, 0x94, 0xe6, 0x65, 0x2a, 0x04, 0x87, 0x35, 0x09, 0x3f, + 0x01, 0x9b, 0xa5, 0x53, 0xee, 0xee, 0xf4, 0xdb, 0x87, 0xbd, 0xe3, 0xfd, 0xfb, 0x46, 0x95, 0xb7, + 0x54, 0x4f, 0xb1, 0x55, 0xa7, 0xd6, 0xa6, 0x8c, 0xcd, 0xda, 0x54, 0x87, 0xbf, 0xff, 0x46, 0x4e, + 0xa9, 0x3f, 0x0f, 0xb5, 0x60, 0xf8, 0xe2, 0x46, 0x7a, 0xd6, 0xad, 0xf4, 0xac, 0x3f, 0xd2, 0xb3, + 0xbe, 0xad, 0xbd, 0xd6, 0xed, 0xda, 0x6b, 0xfd, 0x5a, 0x7b, 0xad, 0x8f, 0xfb, 0xf3, 0x59, 0xec, + 0x93, 0x59, 0xee, 0x47, 0xb4, 0x08, 0x62, 0xfe, 0xcf, 0x61, 0x7f, 0xee, 0xe8, 0x63, 0x1e, 0xfc, + 0x0d, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x80, 0xe9, 0xb6, 0x3d, 0x03, 0x00, 0x00, } func (m *GPUInfo) Marshal() (dAtA []byte, err error) { diff --git a/go/inventory/v1/memory.pb.go b/go/inventory/v1/memory.pb.go index 036cdeb6..4e18a669 100644 --- a/go/inventory/v1/memory.pb.go +++ b/go/inventory/v1/memory.pb.go @@ -5,8 +5,8 @@ package v1 import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -153,35 +153,34 @@ func init() { func init() { proto.RegisterFile("akash/inventory/v1/memory.proto", fileDescriptor_bf744888c94a0308) } var fileDescriptor_bf744888c94a0308 = []byte{ - // 438 bytes of a gzipped FileDescriptorProto + // 425 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0xbf, 0x6e, 0xd3, 0x40, - 0x1c, 0x8e, 0x53, 0x37, 0x6a, 0x2e, 0x20, 0xc4, 0x89, 0xc1, 0xea, 0xe0, 0x5f, 0x74, 0x52, 0xa5, - 0x2e, 0xd8, 0x34, 0x5d, 0x10, 0x12, 0x4b, 0x36, 0x24, 0x10, 0xe0, 0x54, 0x0c, 0x2c, 0xe8, 0xda, - 0x5e, 0xd2, 0x53, 0x6b, 0x9f, 0xb9, 0x5c, 0x82, 0xdc, 0x57, 0x60, 0xe1, 0x39, 0x78, 0x92, 0x8e, - 0x1d, 0x99, 0xae, 0xe8, 0xb2, 0x79, 0xf4, 0x13, 0xa0, 0xbb, 0x73, 0x1d, 0x10, 0xdd, 0xfc, 0x7d, - 0xbf, 0xef, 0x8f, 0xf4, 0xf9, 0x10, 0xd0, 0x4b, 0xba, 0xbc, 0x48, 0x79, 0xb1, 0x66, 0x85, 0x12, - 0xb2, 0x4a, 0xd7, 0x47, 0x69, 0xce, 0x72, 0x21, 0xab, 0xa4, 0x94, 0x42, 0x09, 0x8c, 0x9d, 0x20, - 0xe9, 0x04, 0xc9, 0xfa, 0x68, 0xff, 0xd9, 0x42, 0x2c, 0x84, 0x3b, 0xa7, 0xf6, 0xcb, 0x2b, 0xf7, - 0x0f, 0x1e, 0x88, 0x92, 0x6c, 0x29, 0x56, 0xf2, 0x8c, 0x95, 0x94, 0x4b, 0x2f, 0x23, 0xdf, 0xfb, - 0x08, 0xbd, 0x73, 0x0d, 0x6f, 0x8a, 0xb9, 0xc0, 0xaf, 0xd1, 0x60, 0xcd, 0x8a, 0x73, 0x21, 0xa3, - 0x60, 0x1c, 0x1c, 0x0e, 0xa7, 0x07, 0x46, 0xc3, 0xe0, 0x93, 0x63, 0x6a, 0x0d, 0xed, 0xad, 0xd1, - 0xf0, 0xb8, 0xa2, 0xf9, 0xd5, 0x2b, 0xe2, 0x31, 0xc9, 0xda, 0x03, 0x3e, 0x46, 0xa1, 0xaa, 0x4a, - 0x16, 0xf5, 0x9d, 0x19, 0x8c, 0x86, 0xf0, 0xa4, 0x2a, 0x59, 0xad, 0xc1, 0xf1, 0x8d, 0x86, 0x91, - 0x37, 0x5a, 0x44, 0x32, 0x47, 0xe2, 0xf7, 0x08, 0x29, 0xa1, 0xe8, 0xd5, 0x97, 0x25, 0xbf, 0x66, - 0xd1, 0x8e, 0xb3, 0xbe, 0x30, 0x1a, 0x86, 0x27, 0x96, 0x9d, 0xf1, 0x6b, 0xeb, 0xff, 0x4b, 0xd2, - 0x68, 0x78, 0xda, 0xa6, 0x74, 0x1c, 0xc9, 0x86, 0xea, 0x5e, 0x8d, 0x5f, 0xa2, 0xdd, 0x65, 0xc9, - 0xd8, 0x79, 0x14, 0xba, 0x2c, 0x62, 0x34, 0xec, 0xce, 0x2c, 0x51, 0x6b, 0xf0, 0x97, 0x46, 0xc3, - 0x23, 0x1f, 0xe1, 0x20, 0xc9, 0x3c, 0x4d, 0xee, 0x02, 0x34, 0xf0, 0x6b, 0xe0, 0x1c, 0xed, 0x7d, - 0x5d, 0xd1, 0x42, 0x71, 0x55, 0xb9, 0x2d, 0x46, 0x93, 0x71, 0xf2, 0xff, 0xf8, 0x49, 
0xd6, 0x4e, - 0xfa, 0x81, 0x72, 0x39, 0x4d, 0x6f, 0x34, 0xf4, 0x8c, 0x86, 0xbd, 0x8f, 0xad, 0xb3, 0xd6, 0xd0, - 0xa5, 0x34, 0x1a, 0x9e, 0xf8, 0xce, 0x7b, 0x86, 0x64, 0xdd, 0x11, 0xcf, 0x51, 0xc8, 0x8b, 0xb9, - 0x88, 0xfa, 0xe3, 0x9d, 0xc3, 0xd1, 0x24, 0x7e, 0xa8, 0x6a, 0xfb, 0x9b, 0xa6, 0x93, 0xb6, 0x28, - 0xb4, 0xc8, 0xae, 0x6b, 0xbd, 0xdb, 0x75, 0x2d, 0x22, 0x3f, 0xef, 0x60, 0xb4, 0xb5, 0xcc, 0x32, - 0xa7, 0x99, 0xbe, 0xbd, 0x31, 0x71, 0x70, 0x6b, 0xe2, 0xe0, 0xb7, 0x89, 0x83, 0x1f, 0x9b, 0xb8, - 0x77, 0xbb, 0x89, 0x7b, 0xbf, 0x36, 0x71, 0xef, 0xf3, 0x64, 0xc1, 0xd5, 0xc5, 0xea, 0x34, 0x39, - 0x13, 0x79, 0xea, 0xda, 0x9f, 0x17, 0x4c, 0x7d, 0x13, 0xf2, 0xb2, 0x45, 0xb4, 0xe4, 0xe9, 0x42, - 0xfc, 0xf3, 0xa0, 0x4e, 0x07, 0xee, 0x11, 0x1d, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x8f, 0xd0, - 0x7e, 0x48, 0xb8, 0x02, 0x00, 0x00, + 0x18, 0x8f, 0x53, 0x37, 0x6a, 0x2e, 0x20, 0xc4, 0x89, 0xc1, 0x2a, 0x92, 0xbf, 0xe8, 0xa4, 0x4a, + 0x9d, 0x6c, 0x9a, 0x0a, 0x09, 0x21, 0xb1, 0x64, 0x63, 0x40, 0x80, 0x53, 0x31, 0xb0, 0xa0, 0x83, + 0x5c, 0x82, 0x95, 0xda, 0x67, 0xce, 0x57, 0x4b, 0xee, 0x2b, 0xb0, 0xf0, 0x1c, 0x3c, 0x49, 0xc7, + 0x8e, 0x4c, 0x57, 0x74, 0xd9, 0x3c, 0xfa, 0x09, 0xd0, 0xdd, 0xb9, 0x0e, 0x88, 0x6c, 0xfe, 0xfd, + 0xfd, 0xa4, 0x9f, 0x0f, 0x01, 0xdd, 0xd0, 0xf2, 0x6b, 0x9c, 0xe6, 0x15, 0xcb, 0x25, 0x17, 0x75, + 0x5c, 0x9d, 0xc5, 0x19, 0xcb, 0xb8, 0xa8, 0xa3, 0x42, 0x70, 0xc9, 0x31, 0xb6, 0x86, 0xa8, 0x37, + 0x44, 0xd5, 0xd9, 0xf1, 0x93, 0x35, 0x5f, 0x73, 0x2b, 0xc7, 0xe6, 0xcb, 0x39, 0x8f, 0x4f, 0xf6, + 0x54, 0x09, 0x56, 0xf2, 0x2b, 0xf1, 0x85, 0x15, 0x34, 0x15, 0xce, 0x46, 0xbe, 0x0f, 0x11, 0x7a, + 0x63, 0x2f, 0xbc, 0xce, 0x57, 0x1c, 0xbf, 0x42, 0xa3, 0x8a, 0xe5, 0x4b, 0x2e, 0x02, 0x6f, 0xea, + 0x9d, 0x8e, 0xe7, 0x27, 0x5a, 0xc1, 0xe8, 0x83, 0x65, 0x1a, 0x05, 0x9d, 0xd6, 0x2a, 0x78, 0x58, + 0xd3, 0xec, 0xf2, 0x25, 0x71, 0x98, 0x24, 0x9d, 0x80, 0xcf, 0x91, 0x2f, 0xeb, 0x82, 0x05, 0x43, + 0x1b, 0x06, 0xad, 0xc0, 0xbf, 0xa8, 0x0b, 0xd6, 0x28, 0xb0, 0x7c, 0xab, 0x60, 0xe2, 0x82, 0x06, + 0x91, 0xc4, 0x92, 0xf8, 0x2d, 0x42, 0x92, 0x4b, 0x7a, 0xf9, 0xa9, 0x4c, 0xaf, 0x59, 0x70, 0x60, + 0xa3, 0xcf, 0xb4, 0x82, 0xf1, 0x85, 0x61, 0x17, 0xe9, 0xb5, 0xc9, 0xff, 0x65, 0x69, 0x15, 0x3c, + 0xee, 0x5a, 0x7a, 0x8e, 0x24, 0x63, 0x79, 0xef, 0xc6, 0x2f, 0xd0, 0x61, 0x59, 0x30, 0xb6, 0x0c, + 0x7c, 0xdb, 0x45, 0xb4, 0x82, 0xc3, 0x85, 0x21, 0x1a, 0x05, 0x4e, 0x69, 0x15, 0x3c, 0x70, 0x15, + 0x16, 0x92, 0xc4, 0xd1, 0xe4, 0xce, 0x43, 0x23, 0xb7, 0x06, 0xce, 0xd0, 0xd1, 0xb7, 0x2b, 0x9a, + 0xcb, 0x54, 0xd6, 0x76, 0x8b, 0xc9, 0x6c, 0x1a, 0xfd, 0x3f, 0x7e, 0x94, 0x74, 0x93, 0xbe, 0xa3, + 0xa9, 0x98, 0xc7, 0x37, 0x0a, 0x06, 0x5a, 0xc1, 0xd1, 0xfb, 0x2e, 0xd9, 0x28, 0xe8, 0x5b, 0x5a, + 0x05, 0x8f, 0xdc, 0xcd, 0x7b, 0x86, 0x24, 0xbd, 0x88, 0x57, 0xc8, 0x4f, 0xf3, 0x15, 0x0f, 0x86, + 0xd3, 0x83, 0xd3, 0xc9, 0x2c, 0xdc, 0x77, 0x6a, 0xf7, 0x9b, 0xe6, 0xb3, 0xee, 0x90, 0x6f, 0x90, + 0x59, 0xd7, 0x64, 0x77, 0xeb, 0x1a, 0x44, 0x7e, 0xde, 0xc1, 0x64, 0x17, 0x59, 0x24, 0xd6, 0x33, + 0x7f, 0x7e, 0xa3, 0x43, 0xef, 0x56, 0x87, 0xde, 0x6f, 0x1d, 0x7a, 0x3f, 0xb6, 0xe1, 0xe0, 0x76, + 0x1b, 0x0e, 0x7e, 0x6d, 0xc3, 0xc1, 0xc7, 0xa7, 0xc5, 0x66, 0x1d, 0xd1, 0x8d, 0x8c, 0x96, 0xac, + 0x8a, 0xd7, 0xfc, 0x9f, 0x97, 0xf3, 0x79, 0x64, 0x5f, 0xcb, 0xf9, 0x9f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x66, 0x07, 0x2b, 0x02, 0xa1, 0x02, 0x00, 0x00, } func (m *MemoryInfo) Marshal() (dAtA []byte, err error) { diff --git a/go/inventory/v1/metrics.go b/go/inventory/v1/metrics.go index bde7e3cc..41aa7f5f 100644 --- a/go/inventory/v1/metrics.go +++ 
b/go/inventory/v1/metrics.go @@ -3,7 +3,7 @@ package v1 import ( sdk "github.com/cosmos/cosmos-sdk/types" - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" ) type ResourcesMetric struct { @@ -26,11 +26,11 @@ type Metrics struct { } type MetricTotal struct { - CPU uint64 `json:"cpu"` - GPU uint64 `json:"gpu"` - Memory uint64 `json:"memory"` - StorageEphemeral uint64 `json:"storage_ephemeral"` - Storage map[string]int64 `json:"storage,omitempty"` + CPU uint64 `json:"cpu"` + GPU uint64 `json:"gpu"` + Memory uint64 `json:"memory"` + StorageEphemeral uint64 `json:"storage_ephemeral"` + Storage map[string]uint64 `json:"storage,omitempty"` } type StorageStatus struct { @@ -73,7 +73,7 @@ func (inv *MetricTotal) AddResources(res dtypes.ResourceUnit) { } else { val := sdk.NewIntFromUint64(uint64(inv.Storage[storageClass])) val = val.Add(storage.Quantity.Val.MulRaw(int64(res.Count))) - inv.Storage[storageClass] = val.Int64() + inv.Storage[storageClass] = val.Uint64() } } diff --git a/go/inventory/v1/node.pb.go b/go/inventory/v1/node.pb.go index d37c2d29..4598dff7 100644 --- a/go/inventory/v1/node.pb.go +++ b/go/inventory/v1/node.pb.go @@ -5,8 +5,8 @@ package v1 import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -137,31 +137,30 @@ func init() { func init() { proto.RegisterFile("akash/inventory/v1/node.proto", fileDescriptor_5f97c0fb35079221) } var fileDescriptor_5f97c0fb35079221 = []byte{ - // 375 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x31, 0x4f, 0xc2, 0x40, - 0x1c, 0xc5, 0x7b, 0x40, 0x4c, 0x5a, 0x08, 0x92, 0x6a, 0x4c, 0x43, 0x62, 0x0f, 0x2f, 0x0e, 0x2c, - 0xb6, 0x01, 0x36, 0x1d, 0x4c, 0xca, 0x6a, 0x18, 0xea, 0xe6, 0x62, 0x8e, 0x72, 0x29, 0x0d, 0xb4, - 0x47, 0x7a, 0x05, 0x43, 0xe2, 0xea, 0xee, 0xc7, 0xf1, 0x23, 0x30, 0x32, 0x3a, 0x5d, 0x4c, 0xd9, - 0x3a, 0xf6, 0x13, 0x98, 0xb6, 0x58, 0x5a, 0xd4, 0xad, 0xf7, 0x7e, 0xaf, 0xef, 0xdd, 0xfd, 0xf3, - 0x97, 0x2e, 0xf1, 0x0c, 0xb3, 0xa9, 0xee, 0x78, 0x2b, 0xe2, 0x05, 0xd4, 0x5f, 0xeb, 0xab, 0x9e, - 0xee, 0xd1, 0x09, 0xd1, 0x16, 0x3e, 0x0d, 0xa8, 0x2c, 0xa7, 0x58, 0xcb, 0xb1, 0xb6, 0xea, 0xb5, - 0xcf, 0x6d, 0x6a, 0xd3, 0x14, 0xeb, 0xc9, 0x57, 0xe6, 0x6c, 0xa3, 0x3f, 0x82, 0x7c, 0xc2, 0xe8, - 0xd2, 0xb7, 0x08, 0xcb, 0x3c, 0xe8, 0x55, 0x6a, 0x8d, 0xe8, 0x84, 0x0c, 0xf1, 0x02, 0x8f, 0x9d, - 0xb9, 0x13, 0x38, 0x84, 0xc9, 0x53, 0xe9, 0x94, 0x05, 0xd4, 0xc7, 0x36, 0x79, 0xb6, 0xe6, 0x98, - 0x31, 0xc2, 0x14, 0xd0, 0xa9, 0x76, 0x45, 0xe3, 0x3e, 0xe4, 0xb0, 0xf9, 0x98, 0xa1, 0x61, 0x46, - 0x22, 0x0e, 0x8f, 0xcd, 0x31, 0x87, 0x17, 0x6b, 0xec, 0xce, 0x6f, 0xd1, 0x11, 0x40, 0x66, 0x93, - 0x95, 0x7e, 0x46, 0x1f, 0x15, 0xa9, 0x96, 0xd4, 0xcb, 0x03, 0xa9, 0xe6, 0x61, 0x97, 0x28, 0xa0, - 0x03, 0xba, 0xa2, 0x01, 0x43, 0x0e, 0x6b, 0x23, 0xec, 0x92, 0x88, 0xc3, 0x54, 0x8f, 0x39, 0xac, - 0x67, 0x91, 0xc9, 0x09, 0x99, 0xa9, 0x28, 0x33, 0x49, 0xcc, 0x9f, 0xa3, 0x54, 0x3a, 0xa0, 0x5b, - 0xef, 0x5f, 0x69, 0xbf, 0xa7, 0xa3, 0x25, 0x0d, 0xe6, 0x8f, 0xd1, 0xe8, 0x6f, 0x38, 0x14, 0x42, - 0x0e, 0xc5, 0x5c, 0x8a, 0x38, 0x3c, 0x04, 0xc5, 0x1c, 0xb6, 0xb2, 0xaa, 0x5c, 0x42, 0xe6, 0x01, - 0xcb, 0x6f, 0x40, 0x6a, 0x58, 0x85, 0x69, 0x29, 0xd5, 0xb4, 0xf8, 0xfa, 0xbf, 0xe2, 0xe2, 0x64, - 0x8d, 0xbb, 0x7d, 0x77, 0xa3, 0xa8, 0x46, 0x1c, 0x96, 0x12, 0x63, 0x0e, 0xcf, 0xb2, 
0x1b, 0x14, - 0x55, 0x64, 0x96, 0x4c, 0xc6, 0xc3, 0x26, 0x54, 0xc1, 0x36, 0x54, 0xc1, 0x57, 0xa8, 0x82, 0xf7, - 0x9d, 0x2a, 0x6c, 0x77, 0xaa, 0xf0, 0xb9, 0x53, 0x85, 0xa7, 0xbe, 0xed, 0x04, 0xd3, 0xe5, 0x58, - 0xb3, 0xa8, 0xab, 0xa7, 0x97, 0xba, 0xf1, 0x48, 0xf0, 0x42, 0xfd, 0xd9, 0xfe, 0x84, 0x17, 0x8e, - 0x6e, 0xd3, 0xd2, 0x5a, 0x8c, 0x4f, 0xd2, 0x6d, 0x18, 0x7c, 0x07, 0x00, 0x00, 0xff, 0xff, 0x7f, - 0xc1, 0xd6, 0x12, 0x7c, 0x02, 0x00, 0x00, + // 364 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x31, 0x6f, 0xe2, 0x30, + 0x1c, 0xc5, 0x63, 0x40, 0x27, 0x25, 0x20, 0x0e, 0xe5, 0x4e, 0xa7, 0x88, 0x53, 0x63, 0x6a, 0x75, + 0x60, 0x72, 0x04, 0xa8, 0x4b, 0x3b, 0x54, 0x0a, 0x3b, 0x43, 0xba, 0x75, 0xa9, 0x0c, 0x58, 0x21, + 0x02, 0x62, 0x14, 0xa7, 0x91, 0x90, 0xba, 0x76, 0xef, 0xc7, 0xe9, 0x47, 0x60, 0x64, 0xec, 0x64, + 0x55, 0x61, 0xcb, 0x98, 0x4f, 0x50, 0x25, 0x6e, 0x43, 0x42, 0xdb, 0x2d, 0x79, 0xbf, 0xe7, 0xf7, + 0xec, 0xff, 0x5f, 0x3b, 0x23, 0x4b, 0xc2, 0x17, 0x96, 0xe7, 0x47, 0xd4, 0x0f, 0x59, 0xb0, 0xb5, + 0xa2, 0x81, 0xe5, 0xb3, 0x39, 0xc5, 0x9b, 0x80, 0x85, 0x4c, 0xd7, 0x73, 0x8c, 0x0b, 0x8c, 0xa3, + 0x41, 0xf7, 0xaf, 0xcb, 0x5c, 0x96, 0x63, 0x2b, 0xfb, 0x92, 0xce, 0x2e, 0xfa, 0x26, 0x28, 0xa0, + 0x9c, 0x3d, 0x04, 0x33, 0xca, 0xa5, 0x07, 0x3d, 0x6a, 0x9d, 0x09, 0x9b, 0xd3, 0x31, 0xd9, 0x90, + 0xa9, 0xb7, 0xf2, 0x42, 0x8f, 0x72, 0x7d, 0xa1, 0xfd, 0xe6, 0x21, 0x0b, 0x88, 0x4b, 0xef, 0x67, + 0x2b, 0xc2, 0x39, 0xe5, 0x06, 0xe8, 0xd5, 0xfb, 0xaa, 0x7d, 0x13, 0x0b, 0xd8, 0xbe, 0x95, 0x68, + 0x2c, 0x49, 0x22, 0xe0, 0xa9, 0x39, 0x15, 0xf0, 0xdf, 0x96, 0xac, 0x57, 0x57, 0xe8, 0x04, 0x20, + 0xa7, 0xcd, 0x2b, 0x87, 0xd1, 0x4b, 0x4d, 0x6b, 0x64, 0xf5, 0xfa, 0x48, 0x6b, 0xf8, 0x64, 0x4d, + 0x0d, 0xd0, 0x03, 0x7d, 0xd5, 0x86, 0xb1, 0x80, 0x8d, 0x09, 0x59, 0xd3, 0x44, 0xc0, 0x5c, 0x4f, + 0x05, 0x6c, 0xca, 0xc8, 0xec, 0x0f, 0x39, 0xb9, 0xa8, 0x73, 0x4d, 0x2d, 0x9e, 0x63, 0xd4, 0x7a, + 0xa0, 0xdf, 0x1c, 0x9e, 0xe3, 0xaf, 0xd3, 0xc1, 0x59, 0x83, 0xf3, 0x69, 0xb4, 0x87, 0x3b, 0x01, + 0x95, 0x58, 0x40, 0xb5, 0x90, 0x12, 0x01, 0x8f, 0x41, 0xa9, 0x80, 0x1d, 0x59, 0x55, 0x48, 0xc8, + 0x39, 0x62, 0xfd, 0x09, 0x68, 0xad, 0x59, 0x69, 0x5a, 0x46, 0x3d, 0x2f, 0xbe, 0xf8, 0xa9, 0xb8, + 0x3c, 0x59, 0xfb, 0xfa, 0xa3, 0xbb, 0x55, 0x56, 0x13, 0x01, 0x2b, 0x89, 0xa9, 0x80, 0x7f, 0xe4, + 0x0d, 0xca, 0x2a, 0x72, 0x2a, 0x26, 0xfb, 0x72, 0x17, 0x9b, 0x60, 0x1f, 0x9b, 0xe0, 0x2d, 0x36, + 0xc1, 0xf3, 0xc1, 0x54, 0xf6, 0x07, 0x53, 0x79, 0x3d, 0x98, 0xca, 0xdd, 0xff, 0xcd, 0xd2, 0xc5, + 0x64, 0x19, 0xe2, 0x39, 0x8d, 0x2c, 0x97, 0x55, 0xf6, 0x3f, 0xfd, 0x95, 0xaf, 0x7d, 0xf4, 0x1e, + 0x00, 0x00, 0xff, 0xff, 0x60, 0xd5, 0x85, 0xca, 0x65, 0x02, 0x00, 0x00, } func (m *NodeCapabilities) Marshal() (dAtA []byte, err error) { diff --git a/go/inventory/v1/resourcepair.go b/go/inventory/v1/resourcepair.go index 712c6f88..5a1aa7ec 100644 --- a/go/inventory/v1/resourcepair.go +++ b/go/inventory/v1/resourcepair.go @@ -6,7 +6,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "k8s.io/apimachinery/pkg/api/resource" - types "github.com/akash-network/akash-api/go/node/types/v1beta3" + types "pkg.akt.dev/go/node/types/resources/v1beta4" ) func NewResourcePair(allocatable, allocated int64, format resource.Format) ResourcePair { @@ -68,7 +68,7 @@ func (m *ResourcePair) SubMilliNLZ(val types.ResourceValue) bool { } allocated := m.Allocated.DeepCopy() - allocated.Add(*resource.NewMilliQuantity(int64(val.Value()), resource.DecimalSI)) + 
allocated.Add(*resource.NewMilliQuantity(int64(val.Value()), resource.DecimalSI)) // nolint: gosec allocatable := m.Allocatable.DeepCopy() @@ -91,7 +91,7 @@ func (m *ResourcePair) SubNLZ(val types.ResourceValue) bool { } allocated := m.Allocated.DeepCopy() - allocated.Add(*resource.NewQuantity(int64(val.Value()), resource.DecimalSI)) + allocated.Add(*resource.NewQuantity(int64(val.Value()), resource.DecimalSI)) // nolint: gosec allocatable := m.Allocatable.DeepCopy() diff --git a/go/inventory/v1/resourcepair.pb.go b/go/inventory/v1/resourcepair.pb.go index 34540b76..0249744e 100644 --- a/go/inventory/v1/resourcepair.pb.go +++ b/go/inventory/v1/resourcepair.pb.go @@ -5,14 +5,14 @@ package v1 import ( fmt "fmt" - github_com_akash_network_akash_api_go_node_types_v1beta3 "github.com/akash-network/akash-api/go/node/types/v1beta3" - v1beta3 "github.com/akash-network/akash-api/go/node/types/v1beta3" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" resource "k8s.io/apimachinery/pkg/api/resource" math "math" math_bits "math/bits" + pkg_akt_dev_go_node_types_attributes_v1 "pkg.akt.dev/go/node/types/attributes/v1" + v1 "pkg.akt.dev/go/node/types/attributes/v1" ) // Reference imports to suppress errors if they are not otherwise used. @@ -28,9 +28,9 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // ResourcePair to extents resource.Quantity to provide total and available units of the resource type ResourcePair struct { - Allocatable *resource.Quantity `protobuf:"bytes,1,opt,name=allocatable,proto3" json:"allocatable" yaml:"allocatable"` - Allocated *resource.Quantity `protobuf:"bytes,2,opt,name=allocated,proto3" json:"allocated" yaml:"allocated"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta3.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta3.Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` + Allocatable *resource.Quantity `protobuf:"bytes,1,opt,name=allocatable,proto3" json:"allocatable" yaml:"allocatable"` + Allocated *resource.Quantity `protobuf:"bytes,2,opt,name=allocated,proto3" json:"allocated" yaml:"allocated"` + Attributes pkg_akt_dev_go_node_types_attributes_v1.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=pkg.akt.dev/go/node/types/attributes/v1.Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` } func (m *ResourcePair) Reset() { *m = ResourcePair{} } @@ -80,7 +80,7 @@ func (m *ResourcePair) GetAllocated() *resource.Quantity { return nil } -func (m *ResourcePair) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta3.Attributes { +func (m *ResourcePair) GetAttributes() pkg_akt_dev_go_node_types_attributes_v1.Attributes { if m != nil { return m.Attributes } @@ -96,34 +96,34 @@ func init() { } var fileDescriptor_995cee7bf7b692e7 = []byte{ - // 428 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0x3f, 0x8f, 0xd3, 0x30, - 0x18, 0xc6, 0x63, 0x8a, 0x90, 0x2e, 0x65, 0x40, 0xd1, 0x0d, 0xd5, 0x21, 0xe2, 0x53, 0x24, 0xa4, - 0x1b, 0xc0, 0x56, 0x7b, 0x0c, 0xa7, 0xdb, 0x5a, 0x56, 0x06, 0xc8, 0xc8, 0xe6, 0x24, 0xaf, 0x52, - 0x2b, 0x7f, 0x1c, 0x39, 0x4e, 0x50, 0x36, 0xc4, 0xc8, 0xc4, 0x47, 0x60, 0xe6, 0x63, 0x30, 0x75, - 0xec, 0xc8, 0x64, 0x50, 0xba, 0x40, 0xc7, 0x7e, 0x02, 0x94, 0x3f, 0x6d, 0x82, 0x60, 0x40, 
0xb7, - 0xc5, 0xef, 0xfb, 0xf8, 0xf7, 0x3c, 0xca, 0x63, 0xf3, 0x29, 0x8b, 0x58, 0xbe, 0xa6, 0x3c, 0x2d, - 0x21, 0x55, 0x42, 0x56, 0xb4, 0x9c, 0x53, 0x09, 0xb9, 0x28, 0xa4, 0x0f, 0x19, 0xe3, 0x92, 0x64, - 0x52, 0x28, 0x61, 0x59, 0xad, 0x8c, 0x9c, 0x64, 0xa4, 0x9c, 0x5f, 0x9c, 0x87, 0x22, 0x14, 0xed, - 0x9a, 0x36, 0x5f, 0x9d, 0xf2, 0xc2, 0xe9, 0x80, 0x1e, 0xcb, 0x81, 0x96, 0x73, 0x0f, 0x14, 0xbb, - 0xa6, 0x4c, 0x29, 0xc9, 0xbd, 0x42, 0x41, 0xaf, 0x79, 0x11, 0xdd, 0xe4, 0x84, 0x0b, 0xca, 0x32, - 0x9e, 0x30, 0x7f, 0xcd, 0x53, 0x90, 0x15, 0xcd, 0xa2, 0xb0, 0x19, 0x9c, 0xdc, 0x69, 0x08, 0x29, - 0x48, 0xa6, 0x20, 0xe8, 0x6e, 0x39, 0xbf, 0x26, 0xe6, 0x43, 0xb7, 0x5f, 0xbe, 0x66, 0x5c, 0x5a, - 0x1f, 0x91, 0x39, 0x65, 0x71, 0x2c, 0x7c, 0xa6, 0x98, 0x17, 0xc3, 0x0c, 0x5d, 0xa2, 0xab, 0xe9, - 0x82, 0x90, 0x8e, 0x4e, 0xc6, 0x74, 0x92, 0x45, 0x61, 0x33, 0x20, 0x47, 0x3a, 0x79, 0x53, 0xb0, - 0x54, 0x71, 0x55, 0xad, 0x6e, 0x36, 0x1a, 0xa3, 0x5a, 0xe3, 0xe9, 0x72, 0x40, 0xed, 0x35, 0x1e, - 0x93, 0x0f, 0x1a, 0x5b, 0x15, 0x4b, 0xe2, 0x5b, 0x67, 0x34, 0x74, 0xdc, 0xb1, 0xc4, 0x7a, 0x8f, - 0xcc, 0xb3, 0xfe, 0x0c, 0xc1, 0xec, 0xde, 0x9d, 0xa2, 0x2c, 0xfa, 0x28, 0x67, 0xcb, 0x23, 0x68, - 0xaf, 0xf1, 0x40, 0x3d, 0x68, 0xfc, 0xe8, 0x8f, 0x18, 0x10, 0x38, 0xee, 0xb0, 0xb6, 0xbe, 0x22, - 0xd3, 0x3c, 0xfd, 0xea, 0x7c, 0x36, 0xb9, 0x9c, 0x5c, 0x4d, 0x17, 0x4f, 0x48, 0x57, 0x5d, 0x53, - 0x08, 0xe9, 0x0b, 0x21, 0xcb, 0xa3, 0x6a, 0xf5, 0x01, 0x6d, 0x34, 0x36, 0xf6, 0x1a, 0x9f, 0x0f, - 0x37, 0x9f, 0x89, 0x84, 0x2b, 0x48, 0x32, 0x55, 0x1d, 0x34, 0x7e, 0xdc, 0x3b, 0xfe, 0x63, 0xeb, - 0x7c, 0xf9, 0x8e, 0x5f, 0x86, 0x5c, 0xad, 0x0b, 0x8f, 0xf8, 0x22, 0xa1, 0xad, 0xd1, 0xf3, 0x14, - 0xd4, 0x3b, 0x21, 0xa3, 0xfe, 0xd4, 0x34, 0x1a, 0x0a, 0x9a, 0x8a, 0x00, 0xa8, 0xaa, 0x32, 0xc8, - 0xe9, 0x5f, 0x19, 0x72, 0x77, 0x94, 0xfa, 0xf6, 0xfe, 0xcf, 0xcf, 0xd8, 0x58, 0xbd, 0xda, 0xd4, - 0x36, 0xda, 0xd6, 0x36, 0xfa, 0x51, 0xdb, 0xe8, 0xd3, 0xce, 0x36, 0xb6, 0x3b, 0xdb, 0xf8, 0xb6, - 0xb3, 0x8d, 0xb7, 0x8b, 0xff, 0x33, 0x1c, 0x3f, 0x68, 0xef, 0x41, 0xfb, 0x80, 0xae, 0x7f, 0x07, - 0x00, 0x00, 0xff, 0xff, 0xf4, 0xbc, 0x8c, 0x91, 0xed, 0x02, 0x00, 0x00, + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xbf, 0x8e, 0xd3, 0x40, + 0x10, 0xc6, 0xbd, 0x04, 0x21, 0x9d, 0x43, 0x81, 0xac, 0x2b, 0xa2, 0x3b, 0xc9, 0x7b, 0x32, 0x42, + 0x4a, 0x81, 0x76, 0x15, 0x03, 0x52, 0x74, 0x5d, 0xf2, 0x04, 0xe0, 0x92, 0x6e, 0x1d, 0x8f, 0x7c, + 0x2b, 0xff, 0x59, 0xb3, 0xbb, 0xb1, 0xe4, 0x8e, 0x9a, 0x8a, 0x47, 0xa0, 0xe6, 0x01, 0x78, 0x86, + 0x94, 0x57, 0xd2, 0xb0, 0x20, 0xa7, 0x41, 0x29, 0xf3, 0x04, 0xc8, 0x7f, 0x12, 0xfb, 0x10, 0x15, + 0x9d, 0x67, 0xbe, 0xcf, 0xbf, 0xf9, 0xec, 0x19, 0xfb, 0x05, 0x4b, 0x98, 0xba, 0xa3, 0x3c, 0x2f, + 0x21, 0xd7, 0x42, 0x56, 0xb4, 0x5c, 0x50, 0x09, 0x4a, 0x6c, 0xe5, 0x06, 0x0a, 0xc6, 0x25, 0x29, + 0xa4, 0xd0, 0xc2, 0x71, 0x5a, 0x1b, 0x39, 0xdb, 0x48, 0xb9, 0xb8, 0xba, 0x8c, 0x45, 0x2c, 0x5a, + 0x99, 0x36, 0x4f, 0x9d, 0xf3, 0x6a, 0xde, 0x01, 0x43, 0xa6, 0x80, 0x32, 0xad, 0x25, 0x0f, 0xb7, + 0x1a, 0x54, 0x83, 0x3d, 0x57, 0xbd, 0xf3, 0x75, 0xb2, 0x54, 0x94, 0x0b, 0xca, 0x0a, 0x9e, 0xb1, + 0xcd, 0x1d, 0xcf, 0x41, 0x56, 0xb4, 0x48, 0xe2, 0xa6, 0x71, 0xce, 0x40, 0x63, 0xc8, 0x41, 0x32, + 0x0d, 0x51, 0xf7, 0x96, 0xf7, 0x63, 0x62, 0x3f, 0x0d, 0x7a, 0xf1, 0x2d, 0xe3, 0xd2, 0xf9, 0x84, + 0xec, 0x29, 0x4b, 0x53, 0xb1, 0x61, 0x9a, 0x85, 0x29, 0xcc, 0xd0, 0x0d, 0x9a, 0x4f, 0x7d, 0x42, + 0x92, 0xa5, 0x22, 0x5c, 0x90, 0x31, 0x9d, 0x14, 0x49, 0xdc, 0x34, 0xc8, 0x89, 0x4e, 0xde, 
0x6d, + 0x59, 0xae, 0xb9, 0xae, 0xd6, 0xcb, 0x9d, 0xc1, 0xa8, 0x36, 0x78, 0xba, 0x1a, 0x50, 0x07, 0x83, + 0xc7, 0xe4, 0xa3, 0xc1, 0x4e, 0xc5, 0xb2, 0xf4, 0xd6, 0x1b, 0x35, 0xbd, 0x60, 0x6c, 0x71, 0x3e, + 0x22, 0xfb, 0xa2, 0xaf, 0x21, 0x9a, 0x3d, 0xfa, 0xaf, 0x28, 0x7e, 0x1f, 0xe5, 0x62, 0x75, 0x02, + 0x1d, 0x0c, 0x1e, 0xa8, 0x47, 0x83, 0x9f, 0x3d, 0x88, 0x01, 0x91, 0x17, 0x0c, 0xb2, 0xf3, 0x0d, + 0xd9, 0xf6, 0xf0, 0xe3, 0x67, 0x93, 0x9b, 0xc9, 0x7c, 0xea, 0x3f, 0x27, 0xdd, 0x02, 0x9b, 0xb5, + 0x90, 0x41, 0x25, 0xe5, 0x82, 0xac, 0x4e, 0xd5, 0xfa, 0xc3, 0xce, 0x60, 0xeb, 0x60, 0xf0, 0xe5, + 0x60, 0x78, 0x29, 0x32, 0xae, 0x21, 0x2b, 0x74, 0x75, 0x34, 0xf8, 0xba, 0x1f, 0xfb, 0x0f, 0xd5, + 0xfb, 0xfa, 0x13, 0xfb, 0xed, 0x27, 0x25, 0x9a, 0x44, 0x50, 0xd2, 0x58, 0xd0, 0x5c, 0x44, 0x40, + 0x75, 0x55, 0x80, 0x7a, 0x78, 0x08, 0xc3, 0x44, 0x15, 0x8c, 0x92, 0xde, 0x3e, 0xfe, 0xfd, 0x05, + 0x5b, 0xeb, 0x37, 0xbb, 0xda, 0x45, 0xf7, 0xb5, 0x8b, 0x7e, 0xd5, 0x2e, 0xfa, 0xbc, 0x77, 0xad, + 0xfb, 0xbd, 0x6b, 0x7d, 0xdf, 0xbb, 0xd6, 0xfb, 0xeb, 0xbf, 0xf8, 0xe3, 0x9b, 0x0d, 0x9f, 0xb4, + 0xd7, 0xf1, 0xea, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd9, 0x9f, 0xb4, 0x93, 0xd0, 0x02, 0x00, + 0x00, } func (m *ResourcePair) Marshal() (dAtA []byte, err error) { @@ -357,7 +357,7 @@ func (m *ResourcePair) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Attributes = append(m.Attributes, v1beta3.Attribute{}) + m.Attributes = append(m.Attributes, v1.Attribute{}) if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } diff --git a/go/inventory/v1/resources.pb.go b/go/inventory/v1/resources.pb.go index 3f290fb9..7bb4cfd5 100644 --- a/go/inventory/v1/resources.pb.go +++ b/go/inventory/v1/resources.pb.go @@ -5,8 +5,8 @@ package v1 import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -117,38 +117,37 @@ func init() { } var fileDescriptor_f20a722bd8ee01b5 = []byte{ - // 488 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x4f, 0x6b, 0xd4, 0x40, - 0x18, 0x87, 0x37, 0x6e, 0xbb, 0x87, 0x48, 0xdb, 0x35, 0x88, 0x0d, 0x8b, 0x64, 0x96, 0x01, 0xa1, - 0x1e, 0x4c, 0x68, 0xbd, 0x79, 0x33, 0x8b, 0x2e, 0xa2, 0x95, 0x90, 0xb2, 0x1e, 0x04, 0x59, 0xa6, - 0xd9, 0x61, 0x36, 0x74, 0xb3, 0x33, 0x4c, 0x32, 0x91, 0x7e, 0x09, 0x11, 0xbc, 0xf8, 0x19, 0xfc, - 0x24, 0x3d, 0x16, 0x4f, 0x9e, 0x46, 0xd9, 0xbd, 0xe5, 0x98, 0x4f, 0x20, 0xc9, 0x4c, 0x8a, 0xfb, - 0xc7, 0xd2, 0x5b, 0xde, 0xf9, 0xbd, 0xef, 0xf3, 0x3e, 0x13, 0x18, 0x13, 0xa2, 0x0b, 0x94, 0x4e, - 0xbd, 0x78, 0x9e, 0xe3, 0x79, 0x46, 0xf9, 0xa5, 0x97, 0x1f, 0x7b, 0x1c, 0xa7, 0x54, 0xf0, 0x08, - 0xa7, 0x2e, 0xe3, 0x34, 0xa3, 0x96, 0x55, 0xf7, 0xb8, 0x37, 0x3d, 0x6e, 0x7e, 0xdc, 0x7b, 0x48, - 0x28, 0xa1, 0x75, 0xec, 0x55, 0x5f, 0xaa, 0xb3, 0xf7, 0x78, 0x0b, 0x2d, 0x62, 0xe2, 0x96, 0x94, - 0xdc, 0xa4, 0x60, 0x4b, 0x9a, 0xe0, 0xa4, 0xda, 0xa7, 0x1a, 0x9e, 0xdc, 0xa2, 0xca, 0x50, 0xcc, - 0x55, 0x1b, 0xfc, 0xb9, 0x6b, 0xee, 0xbd, 0xa7, 0x13, 0x1c, 0x36, 0xb7, 0xb0, 0xde, 0x9a, 0xed, - 0x88, 0x09, 0xdb, 0xe8, 0x1b, 0x47, 0xf7, 0x4f, 0x0e, 0xdd, 0xcd, 0xdb, 0xb8, 0x83, 0x60, 0xe4, - 0xf7, 0xaf, 0x24, 0x68, 0x2d, 0x24, 0x68, 0x0f, 0x82, 0x51, 0x21, 0x41, 0x35, 0x52, 0x4a, 0x60, - 0x5e, 0xa2, 0x64, 0xf6, 0x02, 0x46, 0x4c, 0xc0, 0xb0, 0x3a, 0xb2, 0x3e, 0x99, 0x1d, 0x65, 0x65, - 0xdf, 0xab, 
0x79, 0xbd, 0x6d, 0xbc, 0xd3, 0xba, 0xc3, 0x7f, 0xaa, 0x91, 0x1d, 0x55, 0x17, 0x12, - 0xe8, 0xd9, 0x52, 0x82, 0x3d, 0x05, 0x56, 0x35, 0x0c, 0x75, 0x60, 0x9d, 0x99, 0x6d, 0xc2, 0x84, - 0xdd, 0xfe, 0xbf, 0xeb, 0x30, 0x18, 0xf9, 0x47, 0x8d, 0xeb, 0x50, 0xb9, 0x92, 0x7f, 0x5d, 0x09, - 0x13, 0xf0, 0xc7, 0x6f, 0xb0, 0x33, 0x0c, 0x46, 0x69, 0x58, 0x45, 0xd6, 0x77, 0xc3, 0x7c, 0x80, - 0xd9, 0x14, 0x27, 0x98, 0xa3, 0xd9, 0x38, 0xcd, 0x28, 0x47, 0x04, 0xdb, 0x3b, 0xf5, 0x8e, 0xfe, - 0xb6, 0x1d, 0xcd, 0xbf, 0x0b, 0x50, 0xcc, 0xfd, 0x37, 0x7a, 0x59, 0xf7, 0x55, 0x83, 0x38, 0x53, - 0x84, 0x42, 0x82, 0x4d, 0x6c, 0x29, 0x81, 0xad, 0x3c, 0x36, 0x22, 0x18, 0x76, 0xf1, 0x1a, 0xc2, - 0xfa, 0x66, 0x98, 0xdd, 0x9c, 0xce, 0x44, 0x82, 0xd3, 0x31, 0xca, 0x32, 0x14, 0x4d, 0xf1, 0xc4, - 0xde, 0xbd, 0xa3, 0xd9, 0x6b, 0x6d, 0x76, 0xf0, 0x41, 0x11, 0x5e, 0x6a, 0x40, 0x21, 0xc1, 0x06, - 0xb4, 0x94, 0xe0, 0x50, 0x79, 0xad, 0x27, 0x30, 0x3c, 0xc8, 0x57, 0xe7, 0xad, 0x2f, 0x86, 0xd9, - 0x9c, 0x8d, 0x13, 0x2a, 0xe6, 0x19, 0x9e, 0xd8, 0x9d, 0x3b, 0x4a, 0x0d, 0xb4, 0xd4, 0xbe, 0x96, - 0x3a, 0x55, 0xf3, 0x85, 0x04, 0xeb, 0xc8, 0x52, 0x82, 0x47, 0xab, 0x4a, 0x3a, 0x80, 0xe1, 0x7e, - 0xbe, 0x32, 0xec, 0xbf, 0xbb, 0x5a, 0x38, 0xc6, 0xf5, 0xc2, 0x31, 0xfe, 0x2c, 0x1c, 0xe3, 0xeb, - 0xd2, 0x69, 0x5d, 0x2f, 0x9d, 0xd6, 0xaf, 0xa5, 0xd3, 0xfa, 0x78, 0x42, 0xe2, 0x6c, 0x2a, 0xce, - 0xdd, 0x88, 0x26, 0x5e, 0xad, 0xf6, 0x6c, 0x8e, 0xb3, 0xcf, 0x94, 0x5f, 0xe8, 0x0a, 0xb1, 0xd8, - 0x23, 0x74, 0xe5, 0xd5, 0x9c, 0x77, 0xea, 0x97, 0xf2, 0xfc, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x7c, 0x50, 0xd8, 0xc7, 0xfd, 0x03, 0x00, 0x00, + // 475 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xc1, 0x8a, 0xd3, 0x40, + 0x18, 0xc7, 0x1b, 0xbb, 0xdb, 0xc3, 0xc8, 0xee, 0xd6, 0x20, 0x6e, 0xa8, 0x92, 0x29, 0x03, 0xc2, + 0x7a, 0x49, 0x58, 0xc5, 0x8b, 0x37, 0x53, 0xb4, 0x88, 0xac, 0x84, 0x2c, 0xf5, 0x20, 0x48, 0x19, + 0xd3, 0x61, 0x5a, 0xda, 0x74, 0x86, 0x24, 0x13, 0xd8, 0x97, 0x10, 0xc1, 0x8b, 0xcf, 0xe0, 0x93, + 0xec, 0x71, 0xf1, 0xe4, 0x69, 0x94, 0xf6, 0x96, 0x63, 0x9e, 0x40, 0x92, 0x99, 0x2c, 0xb6, 0xcd, + 0x2e, 0x7b, 0x6b, 0xe6, 0xff, 0xff, 0x7e, 0xdf, 0x6f, 0x0a, 0x03, 0x10, 0x9e, 0xe3, 0x64, 0xea, + 0xce, 0x96, 0x19, 0x59, 0xa6, 0x2c, 0xbe, 0x70, 0xb3, 0x53, 0x37, 0x26, 0x09, 0x13, 0x71, 0x48, + 0x12, 0x87, 0xc7, 0x2c, 0x65, 0xa6, 0x59, 0x75, 0x9c, 0xeb, 0x8e, 0x93, 0x9d, 0xf6, 0x1e, 0x52, + 0x46, 0x59, 0x15, 0xbb, 0xe5, 0x2f, 0xd5, 0xec, 0x3d, 0x69, 0xa0, 0x85, 0x5c, 0xdc, 0x92, 0xd2, + 0xeb, 0x14, 0x36, 0xa4, 0x11, 0x89, 0xca, 0x7d, 0xaa, 0xf0, 0xf4, 0x16, 0x55, 0x8e, 0x67, 0xb1, + 0xaa, 0xa1, 0x5f, 0xfb, 0xe0, 0xe0, 0x03, 0x9b, 0x90, 0xa0, 0xbe, 0x85, 0xf9, 0x1e, 0xb4, 0x43, + 0x2e, 0x2c, 0xa3, 0x6f, 0x9c, 0xdc, 0x7f, 0x7e, 0xec, 0xec, 0xde, 0xc6, 0x19, 0xf8, 0x23, 0xaf, + 0x7f, 0x29, 0x61, 0x6b, 0x25, 0x61, 0x7b, 0xe0, 0x8f, 0x72, 0x09, 0xcb, 0x91, 0x42, 0x42, 0x70, + 0x81, 0xa3, 0xc5, 0x2b, 0x14, 0x72, 0x81, 0x82, 0xf2, 0xc8, 0xfc, 0x0c, 0x3a, 0xca, 0xca, 0xba, + 0x57, 0xf1, 0x7a, 0x4d, 0xbc, 0xb3, 0xaa, 0xe1, 0x3d, 0xd3, 0xc8, 0x8e, 0xfa, 0xce, 0x25, 0xd4, + 0xb3, 0x85, 0x84, 0x07, 0x0a, 0xac, 0xbe, 0x51, 0xa0, 0x03, 0xf3, 0x1c, 0xb4, 0x29, 0x17, 0x56, + 0xfb, 0x66, 0xd7, 0xa1, 0x3f, 0xf2, 0x4e, 0x6a, 0xd7, 0xa1, 0x72, 0xa5, 0xff, 0xbb, 0x52, 0x2e, + 0xd0, 0xcf, 0x3f, 0x70, 0x6f, 0xe8, 0x8f, 0x92, 0xa0, 0x8c, 0xcc, 0x1f, 0x06, 0x78, 0x40, 0xf8, + 0x94, 0x44, 0x24, 0xc6, 0x8b, 0x71, 0x92, 0xb2, 0x18, 0x53, 0x62, 0xed, 0x55, 0x3b, 0xfa, 0x4d, + 0x3b, 0xea, 0xff, 0xce, 0xc7, 0xb3, 
0xd8, 0x7b, 0xa7, 0x97, 0x75, 0xdf, 0xd4, 0x88, 0x73, 0x45, + 0xc8, 0x25, 0xdc, 0xc5, 0x16, 0x12, 0x5a, 0xca, 0x63, 0x27, 0x42, 0x41, 0x97, 0x6c, 0x21, 0xcc, + 0xef, 0x06, 0xe8, 0x66, 0x6c, 0x21, 0x22, 0x92, 0x8c, 0x71, 0x9a, 0xe2, 0x70, 0x4a, 0x26, 0xd6, + 0xfe, 0x1d, 0xcd, 0xde, 0x6a, 0xb3, 0xa3, 0x8f, 0x8a, 0xf0, 0x5a, 0x03, 0x72, 0x09, 0x77, 0xa0, + 0x85, 0x84, 0xc7, 0xca, 0x6b, 0x3b, 0x41, 0xc1, 0x51, 0xb6, 0x39, 0x6f, 0x7e, 0x35, 0x40, 0x7d, + 0x36, 0x8e, 0x98, 0x58, 0xa6, 0x64, 0x62, 0x75, 0xee, 0x28, 0x35, 0xd0, 0x52, 0x87, 0x5a, 0xea, + 0x4c, 0xcd, 0xe7, 0x12, 0x6e, 0x23, 0x0b, 0x09, 0x1f, 0x6d, 0x2a, 0xe9, 0x00, 0x05, 0x87, 0xd9, + 0xc6, 0xb0, 0xf7, 0xf2, 0x72, 0x65, 0x1b, 0x57, 0x2b, 0xdb, 0xf8, 0xbb, 0xb2, 0x8d, 0x6f, 0x6b, + 0xbb, 0x75, 0xb5, 0xb6, 0x5b, 0xbf, 0xd7, 0x76, 0xeb, 0xd3, 0x63, 0x3e, 0xa7, 0x0e, 0x9e, 0xa7, + 0xce, 0x84, 0x64, 0x2e, 0x65, 0x1b, 0xcf, 0xe3, 0x4b, 0xa7, 0x7a, 0x12, 0x2f, 0xfe, 0x05, 0x00, + 0x00, 0xff, 0xff, 0xca, 0x7c, 0xf6, 0x39, 0xe6, 0x03, 0x00, 0x00, } func (m *NodeResources) Marshal() (dAtA []byte, err error) { diff --git a/go/inventory/v1/service.pb.go b/go/inventory/v1/service.pb.go index 02db95d0..738b3db2 100644 --- a/go/inventory/v1/service.pb.go +++ b/go/inventory/v1/service.pb.go @@ -6,8 +6,8 @@ package v1 import ( context "context" fmt "fmt" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -30,7 +30,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("akash/inventory/v1/service.proto", fileDescriptor_19b1fad552cee5dc) } var fileDescriptor_19b1fad552cee5dc = []byte{ - // 319 bytes of a gzipped FileDescriptorProto + // 306 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcc, 0x4e, 0x2c, 0xce, 0xd0, 0xcf, 0xcc, 0x2b, 0x4b, 0xcd, 0x2b, 0xc9, 0x2f, 0xaa, 0xd4, 0x2f, 0x33, 0xd4, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x02, 0xab, @@ -41,16 +41,16 @@ var fileDescriptor_19b1fad552cee5dc = []byte{ 0x8b, 0x4b, 0x52, 0x8b, 0x20, 0x2a, 0x8c, 0xe6, 0x33, 0x72, 0xb1, 0xfb, 0xe5, 0xa7, 0xa4, 0x06, 0x05, 0x38, 0x0b, 0x05, 0x71, 0x71, 0x06, 0x96, 0xa6, 0x16, 0x55, 0x82, 0xf8, 0x42, 0x62, 0x7a, 0x10, 0x8b, 0xf5, 0x60, 0xae, 0xd2, 0x73, 0x05, 0xb9, 0x4a, 0x4a, 0x42, 0x0f, 0xd3, 0x07, 0x7a, - 0x20, 0x1d, 0x4a, 0xc2, 0x4d, 0x97, 0x9f, 0x4c, 0x66, 0xe2, 0x15, 0xe2, 0x80, 0x39, 0x22, 0x89, - 0x51, 0x4b, 0xc8, 0x81, 0x8b, 0x2b, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x97, 0x3c, 0x43, 0x0d, 0x18, + 0x20, 0x1d, 0x4a, 0xc2, 0x4d, 0x97, 0x9f, 0x4c, 0x66, 0xe2, 0x4d, 0x62, 0xd4, 0x12, 0xe2, 0x80, + 0xb9, 0x43, 0xc8, 0x81, 0x8b, 0x2b, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x97, 0x3c, 0x43, 0x0d, 0x18, 0x8d, 0x36, 0x33, 0x72, 0x71, 0x39, 0x43, 0xdc, 0x0c, 0x72, 0x64, 0x2c, 0x17, 0x0f, 0xd8, 0x91, - 0x50, 0x21, 0x9c, 0x46, 0x4a, 0x63, 0x33, 0x12, 0xaa, 0x49, 0x49, 0x02, 0xec, 0x54, 0x21, 0x21, - 0x5e, 0x90, 0x53, 0xe1, 0x2a, 0x40, 0xee, 0x75, 0xe3, 0xe2, 0x85, 0xb8, 0x97, 0x12, 0xf3, 0x0d, - 0x18, 0x9d, 0x7c, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, - 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0xca, 0x28, 0x3d, - 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f, 0x57, 0x1f, 0x6c, 0x84, 0x6e, 
0x5e, 0x6a, 0x49, - 0x79, 0x7e, 0x51, 0x36, 0x94, 0x07, 0x8a, 0xe8, 0xf4, 0x7c, 0x94, 0x38, 0x4b, 0x62, 0x03, 0x5b, - 0x6e, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x1a, 0x86, 0xd8, 0xe4, 0x60, 0x02, 0x00, 0x00, + 0x50, 0x21, 0x9c, 0x46, 0x4a, 0x63, 0x33, 0x12, 0xaa, 0x49, 0x49, 0x02, 0xec, 0x54, 0x21, 0x90, + 0x53, 0x79, 0x41, 0x4e, 0x85, 0x2b, 0x12, 0x72, 0xe3, 0xe2, 0x85, 0xb8, 0x97, 0x12, 0xf3, 0x0d, + 0x18, 0x9d, 0x4c, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, + 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0x4a, 0xba, 0x20, + 0x3b, 0x5d, 0x2f, 0x31, 0xbb, 0x44, 0x2f, 0x25, 0xb5, 0x4c, 0x3f, 0x3d, 0x1f, 0x25, 0x72, 0x92, + 0xd8, 0xc0, 0xb6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x57, 0x71, 0xf1, 0x23, 0x49, 0x02, + 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/go/inventory/v1/service.pb.gw.go b/go/inventory/v1/service.pb.gw.go index 85d71370..0b50671e 100644 --- a/go/inventory/v1/service.pb.gw.go +++ b/go/inventory/v1/service.pb.gw.go @@ -196,7 +196,7 @@ func RegisterNodeRPCHandlerClient(ctx context.Context, mux *runtime.ServeMux, cl } var ( - pattern_NodeRPC_QueryNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "node"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_NodeRPC_QueryNode_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "node"}, "", runtime.AssumeColonVerbOpt(false))) ) var ( @@ -265,7 +265,7 @@ func RegisterClusterRPCHandlerClient(ctx context.Context, mux *runtime.ServeMux, } var ( - pattern_ClusterRPC_QueryCluster_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "inventory"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_ClusterRPC_QueryCluster_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "inventory"}, "", runtime.AssumeColonVerbOpt(false))) ) var ( diff --git a/go/inventory/v1/storage.pb.go b/go/inventory/v1/storage.pb.go index bab6d6d1..dabfad20 100644 --- a/go/inventory/v1/storage.pb.go +++ b/go/inventory/v1/storage.pb.go @@ -5,8 +5,8 @@ package v1 import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -137,30 +137,29 @@ func init() { func init() { proto.RegisterFile("akash/inventory/v1/storage.proto", fileDescriptor_98ef685b221b1af7) } var fileDescriptor_98ef685b221b1af7 = []byte{ - // 355 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x31, 0x4f, 0xc2, 0x40, - 0x1c, 0xc5, 0x5b, 0x53, 0x14, 0x5b, 0x13, 0x93, 0xc6, 0x81, 0x30, 0xf4, 0x48, 0x8d, 0x89, 0x8b, - 0xbd, 0x00, 0x8b, 0x71, 0xc4, 0x89, 0xc4, 0x44, 0x2c, 0x8b, 0x71, 0x3b, 0x48, 0x5b, 0x2e, 0xc0, - 0xfd, 0xeb, 0xf5, 0xc0, 0x90, 0xf8, 0x21, 0xfc, 0x58, 0x8c, 0xc4, 0xc9, 0xe9, 0x62, 0xca, 0xc6, - 0xc8, 0x27, 0x30, 0xbd, 0xab, 0x04, 0x23, 0x5b, 0xdf, 0xfb, 0xbf, 0xff, 0xfd, 0xfa, 0xee, 0xec, - 0x06, 0x19, 0x93, 0x6c, 0x84, 0x29, 0x9b, 0x47, 0x4c, 0x00, 0x5f, 0xe0, 0x79, 0x13, 0x67, 0x02, - 0x38, 0x49, 0xa2, 0x20, 0xe5, 0x20, 0xc0, 0x75, 0x55, 0x22, 0xd8, 0x25, 0x82, 0x79, 0xb3, 0x7e, - 0x91, 0x40, 0x02, 0x6a, 0x8c, 0x8b, 0x2f, 0x9d, 0xac, 0x5f, 0x1d, 0x38, 0x8b, 0x47, 0x19, 0xcc, - 0xf8, 0x30, 0x4a, 0x09, 0xe5, 0x3a, 0xe6, 0xbf, 0xdb, 0x4e, 0x5f, 0x13, 0xba, 0x2c, 0x06, 0xf7, - 0xd6, 0xae, 0x0c, 0x27, 0x24, 0xcb, 0x6a, 
0x66, 0xc3, 0xbc, 0x3e, 0xed, 0xf8, 0xb9, 0x44, 0x95, - 0xfb, 0xc2, 0xd8, 0x48, 0xa4, 0x27, 0x5b, 0x89, 0xce, 0x16, 0x64, 0x3a, 0xb9, 0xf3, 0x95, 0xf4, - 0x43, 0x6d, 0xbb, 0x6d, 0xdb, 0xa2, 0x90, 0x66, 0xb5, 0x23, 0xb5, 0x88, 0x72, 0x89, 0xac, 0xee, - 0x63, 0xaf, 0xbf, 0x91, 0x48, 0xf9, 0x5b, 0x89, 0x1c, 0xbd, 0x56, 0x28, 0x3f, 0x54, 0xa6, 0xff, - 0x69, 0xda, 0x27, 0x25, 0xde, 0x9d, 0xda, 0xd5, 0xd7, 0x19, 0x61, 0x82, 0x8a, 0x85, 0xa2, 0x3b, - 0xad, 0x46, 0xf0, 0xbf, 0x6d, 0x10, 0x96, 0x1d, 0x7a, 0x84, 0xf2, 0x0e, 0x5e, 0x4a, 0x64, 0xe4, - 0x12, 0x55, 0x9f, 0xca, 0xcd, 0x8d, 0x44, 0xbb, 0x53, 0xb6, 0x12, 0x9d, 0x6b, 0xe4, 0xaf, 0xe3, - 0x87, 0xbb, 0xa1, 0xfb, 0x6c, 0x5b, 0x94, 0xc5, 0xa0, 0xfe, 0xd7, 0x69, 0xa1, 0x43, 0xa8, 0xbd, - 0x8b, 0xe9, 0x5c, 0x96, 0x24, 0xab, 0x50, 0xaa, 0x14, 0x8b, 0x61, 0xaf, 0x14, 0x8b, 0xa1, 0x28, - 0x55, 0x44, 0x1f, 0x96, 0xb9, 0x67, 0xae, 0x72, 0xcf, 0xfc, 0xce, 0x3d, 0xf3, 0x63, 0xed, 0x19, - 0xab, 0xb5, 0x67, 0x7c, 0xad, 0x3d, 0xe3, 0xa5, 0x95, 0x50, 0x31, 0x9a, 0x0d, 0x82, 0x21, 0x4c, - 0xb1, 0xe2, 0xdd, 0xb0, 0x48, 0xbc, 0x01, 0x1f, 0x97, 0x8a, 0xa4, 0x14, 0x27, 0xf0, 0xe7, 0xcd, - 0x06, 0xc7, 0xea, 0x9d, 0xda, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x05, 0x29, 0xcd, 0xfa, 0x1c, - 0x02, 0x00, 0x00, + // 343 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x31, 0x4b, 0xc3, 0x40, + 0x14, 0xc7, 0x13, 0x49, 0xb5, 0x5e, 0x04, 0x21, 0x38, 0x94, 0x0a, 0xb9, 0x72, 0x22, 0x38, 0x5d, + 0x68, 0x8b, 0x20, 0x8e, 0x75, 0xea, 0x64, 0x4d, 0x17, 0x71, 0x3b, 0x6b, 0x12, 0x43, 0xdb, 0x7b, + 0xf1, 0x92, 0x06, 0x0a, 0x7e, 0x08, 0x3f, 0x56, 0xc7, 0xe2, 0xe4, 0x74, 0x48, 0xba, 0x75, 0xec, + 0x27, 0x90, 0xdc, 0xc5, 0x52, 0xb1, 0x5b, 0xde, 0xff, 0xfd, 0xdf, 0xfb, 0xe5, 0x7f, 0x0f, 0xb5, + 0xd8, 0x98, 0xa5, 0xaf, 0x5e, 0xcc, 0xf3, 0x80, 0x67, 0x20, 0xe6, 0x5e, 0xde, 0xf6, 0xd2, 0x0c, + 0x04, 0x8b, 0x02, 0x9a, 0x08, 0xc8, 0xc0, 0x71, 0x94, 0x83, 0x6e, 0x1d, 0x34, 0x6f, 0x37, 0xcf, + 0x22, 0x88, 0x40, 0xb5, 0xbd, 0xf2, 0x4b, 0x3b, 0x9b, 0x97, 0x7b, 0x76, 0x89, 0x20, 0x85, 0x99, + 0x18, 0x05, 0x09, 0x8b, 0x85, 0xb6, 0x91, 0x77, 0x64, 0x0f, 0x35, 0xa1, 0xcf, 0x43, 0x70, 0x6e, + 0x50, 0x6d, 0x34, 0x61, 0x69, 0xda, 0x30, 0x5b, 0xe6, 0xd5, 0x71, 0x8f, 0x14, 0x12, 0xd7, 0xee, + 0x4a, 0x61, 0x2d, 0xb1, 0xee, 0x6c, 0x24, 0x3e, 0x99, 0xb3, 0xe9, 0xe4, 0x96, 0xa8, 0x92, 0xf8, + 0x5a, 0x76, 0xba, 0xc8, 0x8a, 0x21, 0x49, 0x1b, 0x07, 0x6a, 0x10, 0x17, 0x12, 0x5b, 0xfd, 0xfb, + 0xc1, 0x70, 0x2d, 0xb1, 0xd2, 0x37, 0x12, 0xdb, 0x7a, 0xac, 0xac, 0x88, 0xaf, 0x44, 0xf2, 0x69, + 0xa2, 0xa3, 0x0a, 0xef, 0x4c, 0x51, 0xfd, 0x6d, 0xc6, 0x78, 0x16, 0x67, 0x73, 0x45, 0xb7, 0x3b, + 0x2d, 0xfa, 0x3f, 0x2d, 0xf5, 0xab, 0x0c, 0x03, 0x16, 0x8b, 0x9e, 0xb7, 0x90, 0xd8, 0x28, 0x24, + 0xae, 0x3f, 0x54, 0x93, 0x6b, 0x89, 0xb7, 0x5b, 0x36, 0x12, 0x9f, 0x6a, 0xe4, 0xaf, 0x42, 0xfc, + 0x6d, 0xd3, 0x79, 0x44, 0x56, 0xcc, 0x43, 0x50, 0xff, 0x6b, 0x77, 0xf0, 0x3e, 0xd4, 0xce, 0xc3, + 0xf4, 0x2e, 0x2a, 0x92, 0x55, 0x56, 0x2a, 0x14, 0x0f, 0x61, 0x27, 0x14, 0x0f, 0xa1, 0x0c, 0x55, + 0x5a, 0xaf, 0x17, 0x85, 0x6b, 0x2e, 0x0b, 0xd7, 0xfc, 0x2e, 0x5c, 0xf3, 0x63, 0xe5, 0x1a, 0xcb, + 0x95, 0x6b, 0x7c, 0xad, 0x5c, 0xe3, 0xe9, 0x3c, 0x19, 0x47, 0x94, 0x8d, 0x33, 0xfa, 0x12, 0xe4, + 0x5e, 0x04, 0x7f, 0x8e, 0xf3, 0x7c, 0xa8, 0x0e, 0xd2, 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0x3b, + 0x0e, 0x7d, 0x00, 0x05, 0x02, 0x00, 0x00, } func (m *StorageInfo) Marshal() (dAtA []byte, err error) { diff --git a/go/manifest/v2beta1/group.go b/go/manifest/v2beta1/group.go deleted file mode 
100644 index 5eee487f..00000000 --- a/go/manifest/v2beta1/group.go +++ /dev/null @@ -1,23 +0,0 @@ -package v2beta1 - -import ( - types "github.com/akash-network/akash-api/go/node/types/v1beta2" -) - -// GetName returns the name of group -func (g Group) GetName() string { - return g.Name -} - -// GetResources returns list of resources in a group -func (g Group) GetResources() []types.Resources { - resources := make([]types.Resources, 0, len(g.Services)) - for _, s := range g.Services { - resources = append(resources, types.Resources{ - Resources: s.Resources, - Count: s.Count, - }) - } - - return resources -} diff --git a/go/manifest/v2beta1/group.pb.go b/go/manifest/v2beta1/group.pb.go deleted file mode 100644 index 22268f01..00000000 --- a/go/manifest/v2beta1/group.pb.go +++ /dev/null @@ -1,398 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/manifest/v2beta1/group.proto - -package v2beta1 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Group store name and list of services -type Group struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` - Services []Service `protobuf:"bytes,2,rep,name=services,proto3" json:"services" yaml:"services"` -} - -func (m *Group) Reset() { *m = Group{} } -func (*Group) ProtoMessage() {} -func (*Group) Descriptor() ([]byte, []int) { - return fileDescriptor_18ff3fe1d1a21258, []int{0} -} -func (m *Group) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Group.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Group) XXX_Merge(src proto.Message) { - xxx_messageInfo_Group.Merge(m, src) -} -func (m *Group) XXX_Size() int { - return m.Size() -} -func (m *Group) XXX_DiscardUnknown() { - xxx_messageInfo_Group.DiscardUnknown(m) -} - -var xxx_messageInfo_Group proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Group)(nil), "akash.manifest.v2beta1.Group") -} - -func init() { - proto.RegisterFile("akash/manifest/v2beta1/group.proto", fileDescriptor_18ff3fe1d1a21258) -} - -var fileDescriptor_18ff3fe1d1a21258 = []byte{ - // 268 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4a, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0xcf, 0x4d, 0xcc, 0xcb, 0x4c, 0x4b, 0x2d, 0x2e, 0xd1, 0x2f, 0x33, 0x4a, 0x4a, 0x2d, - 0x49, 0x34, 0xd4, 0x4f, 0x2f, 0xca, 0x2f, 0x2d, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, - 0x03, 0xab, 0xd1, 0x83, 0xa9, 0xd1, 0x83, 0xaa, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, - 0xd1, 0x07, 0xb1, 0x20, 0xaa, 0xa5, 0x54, 0x70, 0x98, 0x58, 0x9c, 0x5a, 0x54, 0x96, 0x99, 0x9c, - 0x0a, 0x51, 0xa5, 0x34, 0x87, 0x91, 0x8b, 0xd5, 0x1d, 0x64, 0x87, 0x90, 0x36, 0x17, 0x4b, 
0x5e, - 0x62, 0x6e, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x93, 0xf8, 0xab, 0x7b, 0xf2, 0x60, 0xfe, - 0xa7, 0x7b, 0xf2, 0xdc, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, 0x20, 0x9e, 0x52, 0x10, 0x58, 0x50, - 0x28, 0x81, 0x8b, 0x03, 0x6a, 0x4e, 0xb1, 0x04, 0x93, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0xbc, 0x1e, - 0x76, 0xd7, 0xe9, 0x05, 0x43, 0xd4, 0x39, 0x29, 0x9f, 0xb8, 0x27, 0xcf, 0xf0, 0xea, 0x9e, 0x3c, - 0x5c, 0xe3, 0xa7, 0x7b, 0xf2, 0xfc, 0x10, 0x93, 0x61, 0x22, 0x4a, 0x41, 0x70, 0x49, 0x2b, 0x96, - 0x8e, 0x05, 0xf2, 0x0c, 0x4e, 0x11, 0x37, 0x1e, 0xca, 0x31, 0x34, 0x3c, 0x92, 0x63, 0x3c, 0xf1, - 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, - 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xb3, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, - 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0x0b, 0x74, 0xf3, 0x52, 0x4b, 0xca, 0xf3, 0x8b, 0xb2, 0xa1, - 0xbc, 0xc4, 0x82, 0x4c, 0xfd, 0xf4, 0x7c, 0x8c, 0x60, 0x48, 0x62, 0x03, 0xfb, 0xdf, 0x18, 0x10, - 0x00, 0x00, 0xff, 0xff, 0xbb, 0xf1, 0x49, 0x23, 0x79, 0x01, 0x00, 0x00, -} - -func (m *Group) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Group) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Group) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Services) > 0 { - for iNdEx := len(m.Services) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Services[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroup(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGroup(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintGroup(dAtA []byte, offset int, v uint64) int { - offset -= sovGroup(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Group) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovGroup(uint64(l)) - } - if len(m.Services) > 0 { - for _, e := range m.Services { - l = e.Size() - n += 1 + l + sovGroup(uint64(l)) - } - } - return n -} - -func sovGroup(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGroup(x uint64) (n int) { - return sovGroup(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Group) String() string { - if this == nil { - return "nil" - } - repeatedStringForServices := "[]Service{" - for _, f := range this.Services { - repeatedStringForServices += fmt.Sprintf("%v", f) + "," - } - repeatedStringForServices += "}" - s := strings.Join([]string{`&Group{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Services:` + repeatedStringForServices + `,`, - `}`, - }, "") - return s -} -func valueToStringGroup(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Group) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Group: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Group: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Services = append(m.Services, Service{}) - if err := m.Services[len(m.Services)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGroup(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroup - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGroup(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroup - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroup - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroup - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGroup - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGroup - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGroup - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGroup = fmt.Errorf("proto: 
negative length found during unmarshaling") - ErrIntOverflowGroup = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGroup = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/manifest/v2beta1/httpoptions.pb.go b/go/manifest/v2beta1/httpoptions.pb.go deleted file mode 100644 index 57badd95..00000000 --- a/go/manifest/v2beta1/httpoptions.pb.go +++ /dev/null @@ -1,535 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/manifest/v2beta1/httpoptions.proto - -package v2beta1 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// ServiceExposeHTTPOptions -type ServiceExposeHTTPOptions struct { - MaxBodySize uint32 `protobuf:"varint,1,opt,name=max_body_size,json=maxBodySize,proto3" json:"maxBodySize" yaml:"maxBodySize"` - ReadTimeout uint32 `protobuf:"varint,2,opt,name=read_timeout,json=readTimeout,proto3" json:"readTimeout" yaml:"readTimeout"` - SendTimeout uint32 `protobuf:"varint,3,opt,name=send_timeout,json=sendTimeout,proto3" json:"sendTimeout" yaml:"sendTimeout"` - NextTries uint32 `protobuf:"varint,4,opt,name=next_tries,json=nextTries,proto3" json:"nextTries" yaml:"nextTries"` - NextTimeout uint32 `protobuf:"varint,5,opt,name=next_timeout,json=nextTimeout,proto3" json:"nextTimeout" yaml:"nextTimeout"` - NextCases []string `protobuf:"bytes,6,rep,name=next_cases,json=nextCases,proto3" json:"nextCases" yaml:"nextCases"` -} - -func (m *ServiceExposeHTTPOptions) Reset() { *m = ServiceExposeHTTPOptions{} } -func (*ServiceExposeHTTPOptions) ProtoMessage() {} -func (*ServiceExposeHTTPOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_5fa251bcf6bf1d0e, []int{0} -} -func (m *ServiceExposeHTTPOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceExposeHTTPOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ServiceExposeHTTPOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ServiceExposeHTTPOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceExposeHTTPOptions.Merge(m, src) -} -func (m *ServiceExposeHTTPOptions) XXX_Size() int { - return m.Size() -} -func (m *ServiceExposeHTTPOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceExposeHTTPOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceExposeHTTPOptions proto.InternalMessageInfo - -func (m *ServiceExposeHTTPOptions) GetMaxBodySize() uint32 { - if m != nil { - return m.MaxBodySize - } - return 0 -} - -func (m *ServiceExposeHTTPOptions) GetReadTimeout() uint32 { - if m != nil { - return m.ReadTimeout - } - return 0 -} - -func (m *ServiceExposeHTTPOptions) GetSendTimeout() uint32 { - if m != nil { - return m.SendTimeout - } - return 0 -} - -func (m *ServiceExposeHTTPOptions) GetNextTries() uint32 { - if m != nil { - return m.NextTries - } - 
return 0 -} - -func (m *ServiceExposeHTTPOptions) GetNextTimeout() uint32 { - if m != nil { - return m.NextTimeout - } - return 0 -} - -func (m *ServiceExposeHTTPOptions) GetNextCases() []string { - if m != nil { - return m.NextCases - } - return nil -} - -func init() { - proto.RegisterType((*ServiceExposeHTTPOptions)(nil), "akash.manifest.v2beta1.ServiceExposeHTTPOptions") -} - -func init() { - proto.RegisterFile("akash/manifest/v2beta1/httpoptions.proto", fileDescriptor_5fa251bcf6bf1d0e) -} - -var fileDescriptor_5fa251bcf6bf1d0e = []byte{ - // 377 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xb1, 0x6a, 0xdb, 0x40, - 0x1c, 0x87, 0xa5, 0xba, 0x35, 0x58, 0xad, 0xa1, 0x88, 0x52, 0x44, 0x87, 0x93, 0x2b, 0x28, 0x78, - 0xa9, 0x44, 0x12, 0xc8, 0x90, 0x29, 0x38, 0x04, 0x9c, 0x29, 0x41, 0xd6, 0x10, 0xb2, 0x88, 0x93, - 0x7d, 0x91, 0x0f, 0x47, 0x3a, 0xa1, 0x3b, 0x3b, 0xb2, 0xa7, 0x3c, 0x42, 0xde, 0x2a, 0x19, 0x3d, - 0x7a, 0x12, 0x89, 0xbc, 0x79, 0xf4, 0x13, 0x84, 0x3b, 0x29, 0xd6, 0x25, 0xde, 0x4e, 0x9f, 0x3e, - 0x3e, 0x7e, 0xc3, 0x5f, 0xeb, 0xc2, 0x09, 0xa4, 0x63, 0x27, 0x82, 0x31, 0xbe, 0x45, 0x94, 0x39, - 0xb3, 0xc3, 0x00, 0x31, 0x78, 0xe0, 0x8c, 0x19, 0x4b, 0x48, 0xc2, 0x30, 0x89, 0xa9, 0x9d, 0xa4, - 0x84, 0x11, 0xfd, 0xb7, 0x30, 0xed, 0x77, 0xd3, 0xae, 0xcc, 0x3f, 0xbf, 0x42, 0x12, 0x12, 0xa1, - 0x38, 0xfc, 0x55, 0xda, 0xd6, 0x53, 0x43, 0x33, 0x06, 0x28, 0x9d, 0xe1, 0x21, 0x3a, 0xcf, 0x12, - 0x42, 0x51, 0xdf, 0xf3, 0xae, 0x2e, 0xcb, 0xa0, 0x7e, 0xa1, 0xb5, 0x23, 0x98, 0xf9, 0x01, 0x19, - 0xcd, 0x7d, 0x8a, 0x17, 0xc8, 0x50, 0x3b, 0x6a, 0xb7, 0xdd, 0xfb, 0xb7, 0xc9, 0xcd, 0xef, 0x11, - 0xcc, 0x7a, 0x64, 0x34, 0x1f, 0xe0, 0x05, 0xda, 0xe6, 0xa6, 0x3e, 0x87, 0xd1, 0xdd, 0x89, 0x25, - 0x41, 0xcb, 0x95, 0x15, 0xbd, 0xaf, 0xfd, 0x48, 0x11, 0x1c, 0xf9, 0x0c, 0x47, 0x88, 0x4c, 0x99, - 0xf1, 0xa5, 0x2e, 0x71, 0xee, 0x95, 0xb8, 0x2e, 0x49, 0xd0, 0x72, 0x65, 0x85, 0x97, 0x28, 0x8a, - 0xeb, 0x52, 0xa3, 0x2e, 0x71, 0xbe, 0x57, 0x92, 0xa0, 0xe5, 0xca, 0x8a, 0x7e, 0xaa, 0x69, 0x31, - 0xca, 0x98, 0xcf, 0x52, 0x8c, 0xa8, 0xf1, 0x55, 0x74, 0xfe, 0x6e, 0x72, 0xb3, 0xc5, 0xa9, 0xc7, - 0xe1, 0x36, 0x37, 0x7f, 0x96, 0x95, 0x1d, 0xb2, 0xdc, 0xfa, 0x37, 0xdf, 0x52, 0x16, 0xaa, 0x2d, - 0xdf, 0xea, 0x2d, 0x42, 0xfa, 0xbc, 0x45, 0x82, 0x96, 0x2b, 0x2b, 0xbb, 0x2d, 0x43, 0x48, 0x11, - 0x35, 0x9a, 0x9d, 0x46, 0xb7, 0x55, 0x6f, 0x39, 0xe3, 0xf0, 0xe3, 0x16, 0x81, 0xaa, 0x2d, 0xe2, - 0xdd, 0xbb, 0x5e, 0xbd, 0x02, 0xe5, 0xa1, 0x00, 0xea, 0x73, 0x01, 0xd4, 0x65, 0x01, 0xd4, 0x97, - 0x02, 0xa8, 0x8f, 0x6b, 0xa0, 0x2c, 0xd7, 0x40, 0x59, 0xad, 0x81, 0x72, 0x73, 0x1c, 0x62, 0x36, - 0x9e, 0x06, 0xf6, 0x90, 0x44, 0x8e, 0x38, 0x92, 0xff, 0x31, 0x62, 0xf7, 0x24, 0x9d, 0x54, 0x5f, - 0x30, 0xc1, 0x4e, 0x48, 0xf6, 0x6e, 0x2c, 0x68, 0x8a, 0x53, 0x39, 0x7a, 0x0b, 0x00, 0x00, 0xff, - 0xff, 0x39, 0x7c, 0x4a, 0xd7, 0x84, 0x02, 0x00, 0x00, -} - -func (m *ServiceExposeHTTPOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceExposeHTTPOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceExposeHTTPOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.NextCases) > 0 { - for iNdEx := len(m.NextCases) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.NextCases[iNdEx]) - copy(dAtA[i:], 
m.NextCases[iNdEx]) - i = encodeVarintHttpoptions(dAtA, i, uint64(len(m.NextCases[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if m.NextTimeout != 0 { - i = encodeVarintHttpoptions(dAtA, i, uint64(m.NextTimeout)) - i-- - dAtA[i] = 0x28 - } - if m.NextTries != 0 { - i = encodeVarintHttpoptions(dAtA, i, uint64(m.NextTries)) - i-- - dAtA[i] = 0x20 - } - if m.SendTimeout != 0 { - i = encodeVarintHttpoptions(dAtA, i, uint64(m.SendTimeout)) - i-- - dAtA[i] = 0x18 - } - if m.ReadTimeout != 0 { - i = encodeVarintHttpoptions(dAtA, i, uint64(m.ReadTimeout)) - i-- - dAtA[i] = 0x10 - } - if m.MaxBodySize != 0 { - i = encodeVarintHttpoptions(dAtA, i, uint64(m.MaxBodySize)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintHttpoptions(dAtA []byte, offset int, v uint64) int { - offset -= sovHttpoptions(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ServiceExposeHTTPOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MaxBodySize != 0 { - n += 1 + sovHttpoptions(uint64(m.MaxBodySize)) - } - if m.ReadTimeout != 0 { - n += 1 + sovHttpoptions(uint64(m.ReadTimeout)) - } - if m.SendTimeout != 0 { - n += 1 + sovHttpoptions(uint64(m.SendTimeout)) - } - if m.NextTries != 0 { - n += 1 + sovHttpoptions(uint64(m.NextTries)) - } - if m.NextTimeout != 0 { - n += 1 + sovHttpoptions(uint64(m.NextTimeout)) - } - if len(m.NextCases) > 0 { - for _, s := range m.NextCases { - l = len(s) - n += 1 + l + sovHttpoptions(uint64(l)) - } - } - return n -} - -func sovHttpoptions(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozHttpoptions(x uint64) (n int) { - return sovHttpoptions(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ServiceExposeHTTPOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceExposeHTTPOptions{`, - `MaxBodySize:` + fmt.Sprintf("%v", this.MaxBodySize) + `,`, - `ReadTimeout:` + fmt.Sprintf("%v", this.ReadTimeout) + `,`, - `SendTimeout:` + fmt.Sprintf("%v", this.SendTimeout) + `,`, - `NextTries:` + fmt.Sprintf("%v", this.NextTries) + `,`, - `NextTimeout:` + fmt.Sprintf("%v", this.NextTimeout) + `,`, - `NextCases:` + fmt.Sprintf("%v", this.NextCases) + `,`, - `}`, - }, "") - return s -} -func valueToStringHttpoptions(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ServiceExposeHTTPOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceExposeHTTPOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceExposeHTTPOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxBodySize", wireType) - } - m.MaxBodySize = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - m.MaxBodySize |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadTimeout", wireType) - } - m.ReadTimeout = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadTimeout |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SendTimeout", wireType) - } - m.SendTimeout = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SendTimeout |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NextTries", wireType) - } - m.NextTries = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NextTries |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NextTimeout", wireType) - } - m.NextTimeout = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NextTimeout |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NextCases", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHttpoptions - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHttpoptions - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NextCases = append(m.NextCases, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipHttpoptions(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthHttpoptions - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipHttpoptions(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return 
0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthHttpoptions - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupHttpoptions - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthHttpoptions - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthHttpoptions = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowHttpoptions = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupHttpoptions = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/manifest/v2beta1/manifest.go b/go/manifest/v2beta1/manifest.go deleted file mode 100644 index 0bd0d701..00000000 --- a/go/manifest/v2beta1/manifest.go +++ /dev/null @@ -1,300 +0,0 @@ -package v2beta1 - -import ( - "fmt" - "math" - "regexp" - "strings" - - "github.com/pkg/errors" - - k8svalidation "k8s.io/apimachinery/pkg/util/validation" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta2" - types "github.com/akash-network/akash-api/go/node/types/v1beta2" -) - -var ( - serviceNameValidationRegex = regexp.MustCompile(`^[a-z]([-a-z0-9]*[a-z0-9])?$`) - hostnameMaxLen = 255 -) - -// Manifest store list of groups -type Manifest []Group - -// GetGroups returns a manifest with groups list -func (m Manifest) GetGroups() []Group { - return m -} - -// ValidateManifest does validation for manifest -func ValidateManifest(m Manifest) error { - if len(m) == 0 { - return fmt.Errorf("%w: manifest is empty", ErrInvalidManifest) - } - return validateManifestGroups(m.GetGroups()) -} - -type validateManifestGroupsHelper struct { - hostnames map[string]int // used as a set - globalServiceCount uint -} - -func validateManifestGroups(groups []Group) error { - helper := validateManifestGroupsHelper{ - hostnames: make(map[string]int), - } - names := make(map[string]int) // used as a set - for _, group := range groups { - if err := validateManifestGroup(group, &helper); err != nil { - return err - } - if _, exists := names[group.GetName()]; exists { - return fmt.Errorf("%w: duplicate group %q", ErrInvalidManifest, group.GetName()) - } - - names[group.GetName()] = 0 // Value stored is not used - } - if helper.globalServiceCount == 0 { - return fmt.Errorf("%w: zero global services", ErrInvalidManifest) - } - return nil -} - -func validateManifestGroup(group Group, helper *validateManifestGroupsHelper) error { - if 0 == len(group.Services) { - return fmt.Errorf("%w: group %q contains no services", ErrInvalidManifest, group.GetName()) - } - - if err := dtypes.ValidateResourceList(&group); err != nil { - return err - } - for _, s := range group.Services { - if err := validateManifestService(s, helper); err != nil { - return err - } - } - return nil -} - -func validateManifestService(service Service, helper *validateManifestGroupsHelper) error { - if len(service.Name) == 0 { - return fmt.Errorf("%w: service name is empty", ErrInvalidManifest) - } - - serviceNameValid := serviceNameValidationRegex.MatchString(service.Name) - if !serviceNameValid { - return fmt.Errorf("%w: service %q name is invalid", ErrInvalidManifest, service.Name) - } - - if len(service.Image) == 0 { - return fmt.Errorf("%w: service %q has empty image name", ErrInvalidManifest, service.Name) - } - - for _, envVar 
:= range service.Env { - idx := strings.Index(envVar, "=") - if idx == 0 { - return fmt.Errorf("%w: service %q defines an env. var. with an empty name", ErrInvalidManifest, service.Name) - } - - var envVarName string - if idx > 0 { - envVarName = envVar[0:idx] - } else { - envVarName = envVar - } - - if 0 != len(k8svalidation.IsEnvVarName(envVarName)) { - return fmt.Errorf("%w: service %q defines an env. var. with an invalid name %q", ErrInvalidManifest, service.Name, envVarName) - } - - } - - for _, serviceExpose := range service.Expose { - if err := validateServiceExpose(service.Name, serviceExpose, helper); err != nil { - return err - } - } - - return nil -} - -func validateServiceExpose(serviceName string, serviceExpose ServiceExpose, helper *validateManifestGroupsHelper) error { - if serviceExpose.Port == 0 || serviceExpose.Port > math.MaxUint16 { - return fmt.Errorf("%w: service %q port value must be 0 < value <= 65535 ", ErrInvalidManifest, serviceName) - } - - switch serviceExpose.Proto { - case TCP, UDP: - break - default: - return fmt.Errorf("%w: service %q protocol %q unknown", ErrInvalidManifest, serviceName, serviceExpose.Proto) - } - - if serviceExpose.Global { - helper.globalServiceCount++ - } - - for _, host := range serviceExpose.Hosts { - if !isValidHostname(host) { - return fmt.Errorf("%w: service %q has invalid hostname %q", ErrInvalidManifest, serviceName, host) - } - - _, exists := helper.hostnames[host] - if exists { - return errors.Errorf("hostname %q is duplicated, this is not allowed", host) - } - helper.hostnames[host] = 0 // Value stored does not matter - } - - return nil -} - -func isValidHostname(hostname string) bool { - return len(hostname) <= hostnameMaxLen && 0 == len(k8svalidation.IsDNS1123Subdomain(hostname)) -} - -func ValidateManifestWithGroupSpecs(m *Manifest, gspecs []*dtypes.GroupSpec) error { - rlists := make([]types.ResourceGroup, 0, len(gspecs)) - for _, gspec := range gspecs { - rlists = append(rlists, gspec) - } - return validateManifestDeploymentGroups(m.GetGroups(), rlists) -} - -func ValidateManifestWithDeployment(m *Manifest, dgroups []dtypes.Group) error { - rgroups := make([]types.ResourceGroup, 0, len(dgroups)) - for _, dgroup := range dgroups { - rgroups = append(rgroups, dgroup) - } - - return validateManifestDeploymentGroups(m.GetGroups(), rgroups) -} - -func validateManifestDeploymentGroups(mgroups []Group, dgroups []types.ResourceGroup) error { - if len(mgroups) != len(dgroups) { - return errors.Errorf("invalid manifest: group count mismatch (%v != %v)", len(mgroups), len(dgroups)) - } - - dgroupByName := make(map[string]types.ResourceGroup) - - for _, dgroup := range dgroups { - dgroupByName[dgroup.GetName()] = dgroup - } - - for _, mgroup := range mgroups { - dgroup, dgroupExists := dgroupByName[mgroup.GetName()] - - if !dgroupExists { - return errors.Errorf("invalid manifest: unknown deployment group ('%v')", mgroup.GetName()) - } - - if err := validateManifestDeploymentGroup(mgroup, dgroup); err != nil { - return err - } - } - - return nil -} - -func validateManifestDeploymentGroup(mgroup Group, dgroup types.ResourceGroup) error { - mlist := make([]types.Resources, len(mgroup.GetResources())) - copy(mlist, mgroup.GetResources()) - - httpOnlyEndpointsCountForDeploymentGroup := 0 - otherEndpointsCountForDeploymentGroup := 0 - - // Iterate over all deployment groups -deploymentGroupLoop: - for _, drec := range dgroup.GetResources() { - for _, endpoint := range drec.Resources.Endpoints { - switch endpoint.Kind { - case 
types.Endpoint_SHARED_HTTP: - httpOnlyEndpointsCountForDeploymentGroup++ - case types.Endpoint_RANDOM_PORT: - otherEndpointsCountForDeploymentGroup++ - } - } - // Find a matching manifest group - for idx := range mlist { - mrec := mlist[idx] - - // Check that this manifest group is not yet exhausted - if mrec.Count == 0 { - continue - } - - if !drec.Resources.CPU.Equal(mrec.Resources.CPU) || - !drec.Resources.Memory.Equal(mrec.Resources.Memory) || - !drec.Resources.Storage.Equal(mrec.Resources.Storage) { - continue - } - - // If the manifest group contains more resources than the deployment group, then - // fulfill the deployment group entirely - if mrec.Count >= drec.Count { - mrec.Count -= drec.Count - drec.Count = 0 - } else { - // Partially fulfill the deployment group since the manifest group contains less - drec.Count -= mrec.Count - mrec.Count = 0 - } - - // Update the value stored in the list - mlist[idx] = mrec - - // If the deployment group is fulfilled then break out and - // move to the next deployment - if drec.Count == 0 { - continue deploymentGroupLoop - } - } - // If this point is reached then the deployment group cannot be fully matched - // against the given manifest groups - return fmt.Errorf("%w: underutilized deployment group %q", ErrManifestCrossValidation, dgroup.GetName()) - } - - // Search for any manifest groups which are not fully satisfied - for _, mrec := range mlist { - if mrec.Count > 0 { - return fmt.Errorf("%w: manifest resources %q is not fully matched with deployment groups", ErrManifestCrossValidation, mgroup.GetName()) - } - } - - httpOnlyEndpointCount := 0 - otherEndpointCount := 0 - - for _, service := range mgroup.Services { - for _, serviceExpose := range service.Expose { - if serviceExpose.Global { - if IsIngress(serviceExpose) { - httpOnlyEndpointCount++ - } else { - otherEndpointCount++ - } - } - } - } - - if otherEndpointCount != otherEndpointsCountForDeploymentGroup { - return errors.Errorf("invalid manifest: mismatch on number of endpoints %d != %d", otherEndpointCount, otherEndpointsCountForDeploymentGroup) - } - - if httpOnlyEndpointCount != httpOnlyEndpointsCountForDeploymentGroup { - return errors.Errorf("invalid manifest: mismatch on number of HTTP only endpoints %d != %d", httpOnlyEndpointCount, httpOnlyEndpointsCountForDeploymentGroup) - } - - return nil -} - -func IsIngress(expose ServiceExpose) bool { - return expose.Proto == TCP && expose.Global && 80 == ExposeExternalPort(expose) -} - -func ExposeExternalPort(expose ServiceExpose) int32 { - if expose.ExternalPort == 0 { - return int32(expose.Port) - } - return int32(expose.ExternalPort) -} diff --git a/go/manifest/v2beta1/manifest_cross_validation_test.go b/go/manifest/v2beta1/manifest_cross_validation_test.go deleted file mode 100644 index 94843272..00000000 --- a/go/manifest/v2beta1/manifest_cross_validation_test.go +++ /dev/null @@ -1,168 +0,0 @@ -package v2beta1_test - -import ( - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/stretchr/testify/require" - - manifest "github.com/akash-network/akash-api/go/manifest/v2beta1" - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta2" - akashtypes "github.com/akash-network/akash-api/go/node/types/v1beta2" - tutil "github.com/akash-network/akash-api/go/testutil" - "github.com/akash-network/akash-api/go/testutil/v1beta2" -) - -func TestManifestWithEmptyDeployment(t *testing.T) { - m := simpleManifest() - deployment := make([]dtypes.Group, 0) - err := manifest.ValidateManifestWithDeployment(&m, 
deployment) - require.Error(t, err) -} - -func simpleDeployment(t *testing.T) []dtypes.Group { - deployment := make([]dtypes.Group, 1) - gid := testutil.GroupID(t) - resources := make([]dtypes.Resource, 1) - resources[0] = dtypes.Resource{ - Resources: simpleResourceUnits(), - Count: 1, - Price: sdk.NewInt64DecCoin(tutil.CoinDenom, 1), - } - deployment[0] = dtypes.Group{ - GroupID: gid, - State: 0, - GroupSpec: dtypes.GroupSpec{ - Name: nameOfTestGroup, - Requirements: akashtypes.PlacementRequirements{}, - Resources: resources, - }, - } - - return deployment -} - -func TestManifestWithDeployment(t *testing.T) { - m := simpleManifest() - deployment := simpleDeployment(t) - err := manifest.ValidateManifestWithDeployment(&m, deployment) - require.NoError(t, err) -} - -func TestManifestWithDeploymentMultipleCount(t *testing.T) { - addl := uint32(tutil.RandRangeInt(1, 20)) - m := simpleManifest() - m[0].Services[0].Count += addl - deployment := simpleDeployment(t) - deployment[0].GroupSpec.Resources[0].Count += addl - err := manifest.ValidateManifestWithDeployment(&m, deployment) - require.NoError(t, err) -} - -func TestManifestWithDeploymentMultiple(t *testing.T) { - cpu := int64(tutil.RandRangeInt(1024, 2000)) - storage := int64(tutil.RandRangeInt(2000, 3000)) - memory := int64(tutil.RandRangeInt(3001, 4000)) - - m := make(manifest.Manifest, 3) - m[0] = simpleManifest()[0] - m[0].Services[0].Resources.CPU.Units.Val = sdk.NewInt(cpu) - m[0].Name = "testgroup-2" - - m[1] = simpleManifest()[0] - m[1].Services[0].Resources.Storage[0].Quantity.Val = sdk.NewInt(storage) - m[1].Name = "testgroup-1" - - m[2] = simpleManifest()[0] - m[2].Services[0].Resources.Memory.Quantity.Val = sdk.NewInt(memory) - m[2].Name = "testgroup-0" - - deployment := make([]dtypes.Group, 3) - deployment[0] = simpleDeployment(t)[0] - deployment[0].GroupSpec.Resources[0].Resources.Memory.Quantity.Val = sdk.NewInt(memory) - deployment[0].GroupSpec.Name = "testgroup-0" - - deployment[1] = simpleDeployment(t)[0] - deployment[1].GroupSpec.Resources[0].Resources.Storage[0].Quantity.Val = sdk.NewInt(storage) - deployment[1].GroupSpec.Name = "testgroup-1" - - deployment[2] = simpleDeployment(t)[0] - deployment[2].GroupSpec.Resources[0].Resources.CPU.Units.Val = sdk.NewInt(cpu) - deployment[2].GroupSpec.Name = "testgroup-2" - - err := manifest.ValidateManifestWithDeployment(&m, deployment) - require.NoError(t, err) -} - -func TestManifestWithDeploymentCPUMismatch(t *testing.T) { - m := simpleManifest() - deployment := simpleDeployment(t) - deployment[0].GroupSpec.Resources[0].Resources.CPU.Units.Val = sdk.NewInt(999) - err := manifest.ValidateManifestWithDeployment(&m, deployment) - require.Error(t, err) - require.Regexp(t, "^.*underutilized deployment group.+$", err) -} - -func TestManifestWithDeploymentMemoryMismatch(t *testing.T) { - m := simpleManifest() - deployment := simpleDeployment(t) - deployment[0].GroupSpec.Resources[0].Resources.Memory.Quantity.Val = sdk.NewInt(99999) - err := manifest.ValidateManifestWithDeployment(&m, deployment) - require.Error(t, err) - require.Regexp(t, "^.*underutilized deployment group.+$", err) -} - -func TestManifestWithDeploymentStorageMismatch(t *testing.T) { - m := simpleManifest() - deployment := simpleDeployment(t) - deployment[0].GroupSpec.Resources[0].Resources.Storage[0].Quantity.Val = sdk.NewInt(99999) - err := manifest.ValidateManifestWithDeployment(&m, deployment) - require.Error(t, err) - require.Regexp(t, "^.*underutilized deployment group.+$", err) -} - -func 
TestManifestWithDeploymentCountMismatch(t *testing.T) { - m := simpleManifest() - deployment := simpleDeployment(t) - deployment[0].GroupSpec.Resources[0].Count++ - err := manifest.ValidateManifestWithDeployment(&m, deployment) - require.Error(t, err) - require.Regexp(t, "^.*underutilized deployment group.+$", err) -} - -func TestManifestWithManifestGroupMismatch(t *testing.T) { - m := simpleManifest() - deployment := simpleDeployment(t) - m[0].Services[0].Count++ - err := manifest.ValidateManifestWithDeployment(&m, deployment) - require.Error(t, err) - require.Regexp(t, "^.*manifest resources .+ not fully matched.+$", err) -} - -func TestManifestWithEndpointMismatchA(t *testing.T) { - m := simpleManifest() - - // Make this require an endpoint - m[0].Services[0].Expose[0] = manifest.ServiceExpose{ - Port: 2000, - ExternalPort: 0, - Proto: manifest.TCP, - Service: "", - Global: true, - Hosts: nil, - } - deployment := simpleDeployment(t) - err := manifest.ValidateManifestWithDeployment(&m, deployment) - require.Error(t, err) - require.Regexp(t, "^.*mismatch on number of endpoints.+$", err) -} - -func TestManifestWithEndpointMismatchB(t *testing.T) { - m := simpleManifest() - deployment := simpleDeployment(t) - // Add an endpoint where the manifest doesn't call for it - deployment[0].GroupSpec.Resources[0].Resources.Endpoints = append(deployment[0].GroupSpec.Resources[0].Resources.Endpoints, akashtypes.Endpoint{}) - err := manifest.ValidateManifestWithDeployment(&m, deployment) - require.Error(t, err) - require.Regexp(t, "^.*mismatch on number of HTTP only endpoints.+$", err) -} diff --git a/go/manifest/v2beta1/manifest_test.go b/go/manifest/v2beta1/manifest_test.go deleted file mode 100644 index a9b8d28e..00000000 --- a/go/manifest/v2beta1/manifest_test.go +++ /dev/null @@ -1,465 +0,0 @@ -package v2beta1_test - -import ( - "bytes" - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/stretchr/testify/require" - - "github.com/stretchr/testify/assert" - - manifest "github.com/akash-network/akash-api/go/manifest/v2beta1" - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta2" - akashtypes "github.com/akash-network/akash-api/go/node/types/v1beta2" - "github.com/akash-network/akash-api/go/testutil/v1beta2" -) - -var ( - randCPU1 = uint64(testutil.RandCPUUnits()) - randCPU2 = randCPU1 + 1 - randMemory = testutil.RandMemoryQuantity() - randStorage = testutil.RandStorageQuantity() -) - -var randUnits1 = akashtypes.ResourceUnits{ - CPU: &akashtypes.CPU{ - Units: akashtypes.NewResourceValue(randCPU1), - }, - Memory: &akashtypes.Memory{ - Quantity: akashtypes.NewResourceValue(randMemory), - }, - Storage: akashtypes.Volumes{ - akashtypes.Storage{ - Quantity: akashtypes.NewResourceValue(randStorage), - }, - }, -} - -var randUnits2 = akashtypes.ResourceUnits{ - CPU: &akashtypes.CPU{ - Units: akashtypes.NewResourceValue(randCPU2), - }, - Memory: &akashtypes.Memory{ - Quantity: akashtypes.NewResourceValue(randMemory), - }, - Storage: akashtypes.Volumes{ - akashtypes.Storage{ - Quantity: akashtypes.NewResourceValue(randStorage), - }, - }, -} - -func Test_ValidateManifest(t *testing.T) { - tests := []struct { - name string - ok bool - mgroups []manifest.Group - dgroups []*dtypes.GroupSpec - }{ - { - name: "empty", - ok: true, - }, - - { - name: "single", - ok: true, - mgroups: []manifest.Group{ - { - Name: "foo", - Services: []manifest.Service{ - { - Name: "svc1", - Resources: randUnits1, - Count: 3, - }, - }, - }, - }, - dgroups: []*dtypes.GroupSpec{ - { - Name: "foo", - 
Resources: []dtypes.Resource{ - { - Resources: randUnits1, - Count: 3, - }, - }, - }, - }, - }, - - { - name: "multi-mgroup", - ok: true, - mgroups: []manifest.Group{ - { - Name: "foo", - Services: []manifest.Service{ - { - Name: "svc1", - Resources: randUnits1, - Count: 1, - }, - { - Name: "svc1", - Resources: randUnits1, - Count: 2, - }, - }, - }, - }, - dgroups: []*dtypes.GroupSpec{ - { - Name: "foo", - Resources: []dtypes.Resource{ - { - Resources: randUnits1, - Count: 3, - }, - }, - }, - }, - }, - - { - name: "multi-dgroup", - ok: true, - mgroups: []manifest.Group{ - { - Name: "foo", - Services: []manifest.Service{ - { - Name: "svc1", - Resources: randUnits1, - Count: 3, - }, - }, - }, - }, - dgroups: []*dtypes.GroupSpec{ - { - Name: "foo", - Resources: []dtypes.Resource{ - { - Resources: randUnits1, - Count: 2, - }, - { - Resources: randUnits1, - Count: 1, - }, - }, - }, - }, - }, - - { - name: "mismatch-name", - ok: false, - mgroups: []manifest.Group{ - { - Name: "foo-bad", - Services: []manifest.Service{ - { - Name: "svc1", - Resources: randUnits1, - Count: 3, - }, - }, - }, - }, - dgroups: []*dtypes.GroupSpec{ - { - Name: "foo", - Resources: []dtypes.Resource{ - { - Resources: randUnits1, - Count: 3, - }, - }, - }, - }, - }, - - { - name: "mismatch-cpu", - ok: false, - mgroups: []manifest.Group{ - { - Name: "foo", - Services: []manifest.Service{ - { - Name: "svc1", - Resources: randUnits2, - Count: 3, - }, - }, - }, - }, - dgroups: []*dtypes.GroupSpec{ - { - Name: "foo", - Resources: []dtypes.Resource{ - { - Resources: randUnits1, - Count: 3, - }, - }, - }, - }, - }, - - { - name: "mismatch-group-count", - ok: false, - mgroups: []manifest.Group{ - { - Name: "foo", - Services: []manifest.Service{ - { - Name: "svc1", - Resources: randUnits2, - Count: 3, - }, - }, - }, - }, - dgroups: []*dtypes.GroupSpec{}, - }, - } - - for _, test := range tests { - m := manifest.Manifest(test.mgroups) - err := manifest.ValidateManifestWithGroupSpecs(&m, test.dgroups) - if test.ok { - assert.NoError(t, err, test.name) - } else { - assert.Error(t, err, test.name) - } - } -} - -func TestNilManifestIsInvalid(t *testing.T) { - err := manifest.ValidateManifest(nil) - require.Error(t, err) - require.Regexp(t, "^.*manifest is empty.*$", err) -} - -const nameOfTestService = "test-service" -const nameOfTestGroup = "testGroup" - -func simpleResourceUnits() akashtypes.ResourceUnits { - return akashtypes.ResourceUnits{ - CPU: &akashtypes.CPU{ - Units: akashtypes.ResourceValue{ - Val: sdk.NewIntFromUint64(randCPU1), - }, - Attributes: nil, - }, - Memory: &akashtypes.Memory{ - Quantity: akashtypes.ResourceValue{ - Val: sdk.NewIntFromUint64(randMemory), - }, - Attributes: nil, - }, - Storage: akashtypes.Volumes{ - akashtypes.Storage{ - Name: "default", - Quantity: akashtypes.ResourceValue{ - Val: sdk.NewIntFromUint64(randStorage), - }, - }, - }, - Endpoints: []akashtypes.Endpoint{ - { - Kind: akashtypes.Endpoint_SHARED_HTTP, - }, - }, - } -} - -func simpleManifest() manifest.Manifest { - expose := make([]manifest.ServiceExpose, 1) - expose[0].Global = true - expose[0].Port = 80 - expose[0].Proto = manifest.TCP - expose[0].Hosts = make([]string, 1) - expose[0].Hosts[0] = "host.test" - services := make([]manifest.Service, 1) - services[0] = manifest.Service{ - Name: nameOfTestService, - Image: "test/image:1.0", - Command: nil, - Args: nil, - Env: nil, - Resources: simpleResourceUnits(), - Count: 1, - Expose: expose, - } - m := make(manifest.Manifest, 1) - m[0] = manifest.Group{ - Name: nameOfTestGroup, - Services: 
-		services,
-	}
-
-	return m
-}
-
-func TestSimpleManifestIsValid(t *testing.T) {
-	m := simpleManifest()
-	err := manifest.ValidateManifest(m)
-	require.NoError(t, err)
-}
-
-func TestManifestWithNoGlobalServicesIsInvalid(t *testing.T) {
-	m := simpleManifest()
-	m[0].Services[0].Expose[0].Global = false
-	err := manifest.ValidateManifest(m)
-	require.Error(t, err)
-	require.Regexp(t, "^.*zero global services.*$", err)
-}
-
-func TestManifestWithBadServiceNameIsInvalid(t *testing.T) {
-	m := simpleManifest()
-	m[0].Services[0].Name = "a_bad_service_name" // should not contain underscores
-	err := manifest.ValidateManifest(m)
-	require.Error(t, err)
-	require.Regexp(t, "^.*name is invalid.*$", err)
-
-	m[0].Services[0].Name = "a-name-" // should not end with dash
-	err = manifest.ValidateManifest(m)
-	require.Error(t, err)
-	require.Regexp(t, "^.*name is invalid.*$", err)
-}
-
-func TestManifestWithServiceNameIsValid(t *testing.T) {
-	m := simpleManifest()
-
-	m[0].Services[0].Name = "9aaa-bar" // does not allow starting with a number
-	err := manifest.ValidateManifest(m)
-	require.ErrorIs(t, err, manifest.ErrInvalidManifest)
-	require.Regexp(t, "^.*name is invalid.*$", err)
-}
-
-func TestManifestWithDuplicateHostIsInvalid(t *testing.T) {
-	m := simpleManifest()
-	hosts := make([]string, 2)
-	const hostname = "a.test"
-	hosts[0] = hostname
-	hosts[1] = hostname
-	m[0].Services[0].Expose[0].Hosts = hosts
-	err := manifest.ValidateManifest(m)
-	require.Error(t, err)
-	require.Regexp(t, "^.*hostname.+is duplicated.*$", err)
-}
-
-func TestManifestWithDashInHostname(t *testing.T) {
-	m := simpleManifest()
-	hosts := make([]string, 1)
-	hosts[0] = "a-test.com"
-	m[0].Services[0].Expose[0].Hosts = hosts
-	err := manifest.ValidateManifest(m)
-	require.NoError(t, err)
-}
-
-func TestManifestWithBadHostIsInvalid(t *testing.T) {
-	m := simpleManifest()
-	hosts := make([]string, 2)
-	hosts[0] = "bob.test" // valid
-	hosts[1] = "-bob"     // invalid
-	m[0].Services[0].Expose[0].Hosts = hosts
-	err := manifest.ValidateManifest(m)
-	require.Error(t, err)
-	require.Regexp(t, "^.*invalid hostname.*$", err)
-}
-
-func TestManifestWithLongHostIsInvalid(t *testing.T) {
-	m := simpleManifest()
-	hosts := make([]string, 1)
-	buf := &bytes.Buffer{}
-	for i := 0; i != 255; i++ {
-		_, err := buf.WriteRune('a')
-		require.NoError(t, err)
-	}
-	_, err := buf.WriteString(".com")
-	require.NoError(t, err)
-
-	hosts[0] = buf.String()
-	m[0].Services[0].Expose[0].Hosts = hosts
-	err = manifest.ValidateManifest(m)
-	require.Error(t, err)
-	require.Regexp(t, "^.*invalid hostname.*$", err)
-}
-
-func TestManifestWithDuplicateGroupIsInvalid(t *testing.T) {
-	mDuplicate := make(manifest.Manifest, 2)
-	mDuplicate[0] = simpleManifest()[0]
-	mDuplicate[1] = simpleManifest()[0]
-	mDuplicate[1].Services[0].Expose[0].Hosts[0] = "anotherhost.test"
-	err := manifest.ValidateManifest(mDuplicate)
-	require.Error(t, err)
-	require.Regexp(t, "^.*duplicate group.*$", err)
-}
-
-func TestManifestWithNoServicesInvalid(t *testing.T) {
-	m := simpleManifest()
-	m[0].Services = nil
-	err := manifest.ValidateManifest(m)
-	require.Error(t, err)
-	require.Regexp(t, "^.*contains no services.*$", err)
-}
-
-func TestManifestWithEmptyServiceNameInvalid(t *testing.T) {
-	m := simpleManifest()
-	m[0].Services[0].Name = ""
-	err := manifest.ValidateManifest(m)
-	require.Error(t, err)
-	require.Regexp(t, "^.*service name is empty.*$", err)
-}
-
-func TestManifestWithEmptyImageNameInvalid(t *testing.T) {
-	m := simpleManifest()
-	m[0].Services[0].Image = ""
-	err := manifest.ValidateManifest(m)
-	require.Error(t, err)
-	require.Regexp(t, "^.*service.+has empty image name.*$", err)
-}
-
-func TestManifestWithEmptyEnvValueIsValid(t *testing.T) {
-	m := simpleManifest()
-	envVars := make([]string, 1)
-	envVars[0] = "FOO=" // sets FOO to empty string
-	m[0].Services[0].Env = envVars
-	err := manifest.ValidateManifest(m)
-	require.NoError(t, err)
-}
-
-func TestManifestWithEmptyEnvNameIsInvalid(t *testing.T) {
-	m := simpleManifest()
-	envVars := make([]string, 1)
-	envVars[0] = "=FOO" // invalid
-	m[0].Services[0].Env = envVars
-	err := manifest.ValidateManifest(m)
-	require.Error(t, err)
-	require.Regexp(t, `^.*var\. with an empty name.*$`, err)
-}
-
-func TestManifestWithBadEnvNameIsInvalid(t *testing.T) {
-	m := simpleManifest()
-	envVars := make([]string, 1)
-	envVars[0] = "9VAR=FOO" // invalid because it starts with a digit
-	m[0].Services[0].Env = envVars
-	err := manifest.ValidateManifest(m)
-	require.Error(t, err)
-	require.Regexp(t, `^.*var\. with an invalid name.*$`, err)
-}
-
-func TestManifestServiceUnknownProtocolIsInvalid(t *testing.T) {
-	m := simpleManifest()
-	m[0].Services[0].Expose[0].Proto = "ICMP"
-	err := manifest.ValidateManifest(m)
-	require.Error(t, err)
-	require.Regexp(t, `^.*protocol .+ unknown.*$`, err)
-}
diff --git a/go/manifest/v2beta1/manifest_validation_errors.go b/go/manifest/v2beta1/manifest_validation_errors.go
deleted file mode 100644
index 2046c98e..00000000
--- a/go/manifest/v2beta1/manifest_validation_errors.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package v2beta1
-
-import (
-	"errors"
-)
-
-var (
-	ErrInvalidManifest         = errors.New("invalid manifest")
-	ErrManifestCrossValidation = errors.New("manifest cross validation error")
-)
diff --git a/go/manifest/v2beta1/parse.go b/go/manifest/v2beta1/parse.go
deleted file mode 100644
index 78feb106..00000000
--- a/go/manifest/v2beta1/parse.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package v2beta1
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-
-	corev1 "k8s.io/api/core/v1"
-)
-
-var (
-	errUnknownServiceProtocol     = errors.New("unknown service protocol")
-	ErrUnsupportedServiceProtocol = errors.New("unsupported service protocol")
-)
-
-type ServiceProtocol string
-
-const (
-	TCP = ServiceProtocol("TCP")
-	UDP = ServiceProtocol("UDP")
-)
-
-func (sp ServiceProtocol) ToString() string {
-	return string(sp)
-}
-
-func (sp ServiceProtocol) ToKube() (corev1.Protocol, error) {
-	switch sp {
-	case TCP:
-		return corev1.ProtocolTCP, nil
-	case UDP:
-		return corev1.ProtocolUDP, nil
-	}
-
-	return corev1.Protocol(""), fmt.Errorf("%w: %v", errUnknownServiceProtocol, sp)
-}
-
-func ServiceProtocolFromKube(proto corev1.Protocol) (ServiceProtocol, error) {
-	switch proto {
-	case corev1.ProtocolTCP:
-		return TCP, nil
-	case corev1.ProtocolUDP:
-		return UDP, nil
-	}
-
-	return ServiceProtocol(""), fmt.Errorf("%w: %v", errUnknownServiceProtocol, proto)
-}
-
-func ParseServiceProtocol(input string) (ServiceProtocol, error) {
-	var result ServiceProtocol
-
-	// This is not a case-sensitive parse, so make all input uppercase
-	input = strings.ToUpper(input)
-
-	switch input {
-	case "TCP", "": // The empty string (no input) implies TCP
-		result = TCP
-	case "UDP":
-		result = UDP
-	default:
-		return result, ErrUnsupportedServiceProtocol
-	}
-
-	return result, nil
-}
diff --git a/go/manifest/v2beta1/service.go b/go/manifest/v2beta1/service.go
deleted file mode 100644
index 4086c8a2..00000000
--- a/go/manifest/v2beta1/service.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package v2beta1
-
-type Services []Service
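The parse.go deletion above removes the v2beta1 protocol helpers (ParseServiceProtocol, ToKube, ServiceProtocolFromKube) that callers used alongside ValidateManifest. A minimal usage sketch follows, assuming the semantics shown in the deleted file (case-insensitive parsing, empty input defaulting to TCP) and the pre-change import path github.com/akash-network/akash-api/go/manifest/v2beta1; it only builds against a revision that still ships these helpers.

```go
package main

import (
	"fmt"

	manifest "github.com/akash-network/akash-api/go/manifest/v2beta1"
)

func main() {
	// Parsing is case-insensitive and the empty string implies TCP,
	// mirroring the switch in the deleted ParseServiceProtocol.
	for _, in := range []string{"", "tcp", "UDP", "icmp"} {
		proto, err := manifest.ParseServiceProtocol(in)
		if err != nil {
			// "icmp" is rejected with ErrUnsupportedServiceProtocol.
			fmt.Printf("%q: %v\n", in, err)
			continue
		}

		// ToKube maps the manifest protocol onto the Kubernetes corev1.Protocol type.
		kubeProto, err := proto.ToKube()
		fmt.Printf("%q -> %s (kube: %v, err: %v)\n", in, proto.ToString(), kubeProto, err)
	}
}
```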
diff --git a/go/manifest/v2beta1/service.pb.go b/go/manifest/v2beta1/service.pb.go deleted file mode 100644 index ce8aa646..00000000 --- a/go/manifest/v2beta1/service.pb.go +++ /dev/null @@ -1,1274 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/manifest/v2beta1/service.proto - -package v2beta1 - -import ( - fmt "fmt" - v1beta2 "github.com/akash-network/akash-api/go/node/types/v1beta2" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// StorageParams -type StorageParams struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` - Mount string `protobuf:"bytes,2,opt,name=mount,proto3" json:"mount" yaml:"mount"` - ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"readOnly" yaml:"readOnly"` -} - -func (m *StorageParams) Reset() { *m = StorageParams{} } -func (*StorageParams) ProtoMessage() {} -func (*StorageParams) Descriptor() ([]byte, []int) { - return fileDescriptor_fa76aee1c3e9a132, []int{0} -} -func (m *StorageParams) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StorageParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StorageParams.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StorageParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_StorageParams.Merge(m, src) -} -func (m *StorageParams) XXX_Size() int { - return m.Size() -} -func (m *StorageParams) XXX_DiscardUnknown() { - xxx_messageInfo_StorageParams.DiscardUnknown(m) -} - -var xxx_messageInfo_StorageParams proto.InternalMessageInfo - -func (m *StorageParams) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *StorageParams) GetMount() string { - if m != nil { - return m.Mount - } - return "" -} - -func (m *StorageParams) GetReadOnly() bool { - if m != nil { - return m.ReadOnly - } - return false -} - -// ServiceParams -type ServiceParams struct { - Storage []StorageParams `protobuf:"bytes,1,rep,name=storage,proto3" json:"storage" yaml:"storage"` -} - -func (m *ServiceParams) Reset() { *m = ServiceParams{} } -func (*ServiceParams) ProtoMessage() {} -func (*ServiceParams) Descriptor() ([]byte, []int) { - return fileDescriptor_fa76aee1c3e9a132, []int{1} -} -func (m *ServiceParams) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ServiceParams.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ServiceParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceParams.Merge(m, src) -} -func (m *ServiceParams) XXX_Size() int { - return m.Size() -} -func (m 
*ServiceParams) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceParams.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceParams proto.InternalMessageInfo - -func (m *ServiceParams) GetStorage() []StorageParams { - if m != nil { - return m.Storage - } - return nil -} - -// Service stores name, image, args, env, unit, count and expose list of service -type Service struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` - Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image" yaml:"image"` - Command []string `protobuf:"bytes,3,rep,name=command,proto3" json:"command" yaml:"command"` - Args []string `protobuf:"bytes,4,rep,name=args,proto3" json:"args" yaml:"args"` - Env []string `protobuf:"bytes,5,rep,name=env,proto3" json:"env" yaml:"env"` - Resources v1beta2.ResourceUnits `protobuf:"bytes,6,opt,name=resources,proto3" json:"resources" yaml:"resources"` - Count uint32 `protobuf:"varint,7,opt,name=count,proto3" json:"count" yaml:"count"` - Expose []ServiceExpose `protobuf:"bytes,8,rep,name=expose,proto3" json:"expose" yaml:"expose"` - Params *ServiceParams `protobuf:"bytes,9,opt,name=params,proto3" json:"params" yaml:"params"` -} - -func (m *Service) Reset() { *m = Service{} } -func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_fa76aee1c3e9a132, []int{2} -} -func (m *Service) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Service.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Service) XXX_Merge(src proto.Message) { - xxx_messageInfo_Service.Merge(m, src) -} -func (m *Service) XXX_Size() int { - return m.Size() -} -func (m *Service) XXX_DiscardUnknown() { - xxx_messageInfo_Service.DiscardUnknown(m) -} - -var xxx_messageInfo_Service proto.InternalMessageInfo - -func (m *Service) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Service) GetImage() string { - if m != nil { - return m.Image - } - return "" -} - -func (m *Service) GetCommand() []string { - if m != nil { - return m.Command - } - return nil -} - -func (m *Service) GetArgs() []string { - if m != nil { - return m.Args - } - return nil -} - -func (m *Service) GetEnv() []string { - if m != nil { - return m.Env - } - return nil -} - -func (m *Service) GetResources() v1beta2.ResourceUnits { - if m != nil { - return m.Resources - } - return v1beta2.ResourceUnits{} -} - -func (m *Service) GetCount() uint32 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *Service) GetExpose() []ServiceExpose { - if m != nil { - return m.Expose - } - return nil -} - -func (m *Service) GetParams() *ServiceParams { - if m != nil { - return m.Params - } - return nil -} - -func init() { - proto.RegisterType((*StorageParams)(nil), "akash.manifest.v2beta1.StorageParams") - proto.RegisterType((*ServiceParams)(nil), "akash.manifest.v2beta1.ServiceParams") - proto.RegisterType((*Service)(nil), "akash.manifest.v2beta1.Service") -} - -func init() { - proto.RegisterFile("akash/manifest/v2beta1/service.proto", fileDescriptor_fa76aee1c3e9a132) -} - -var fileDescriptor_fa76aee1c3e9a132 = []byte{ - // 590 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0xcd, 0x7c, 0x69, 0x9b, 
0x66, 0xfa, 0x05, 0x90, 0x85, 0xc0, 0xad, 0x84, 0xc7, 0x1d, 0x51, - 0x64, 0x51, 0x61, 0xab, 0x41, 0x02, 0x09, 0xb1, 0xb2, 0xc4, 0x1a, 0x34, 0x15, 0x12, 0x62, 0x01, - 0x9a, 0xb8, 0x53, 0xd7, 0x6a, 0xec, 0x89, 0x3c, 0x4e, 0x20, 0x3b, 0x1e, 0x81, 0xf7, 0xe0, 0x45, - 0xb2, 0xac, 0xc4, 0xa6, 0xab, 0x11, 0x24, 0xbb, 0x2c, 0xfd, 0x04, 0x68, 0x7e, 0x8c, 0xa9, 0x40, - 0x54, 0xac, 0x92, 0x7b, 0xee, 0xb9, 0x77, 0xce, 0x9c, 0x39, 0x09, 0xbc, 0x4f, 0xcf, 0xa9, 0x38, - 0x8b, 0x72, 0x5a, 0x64, 0xa7, 0x4c, 0x54, 0xd1, 0x6c, 0x38, 0x62, 0x15, 0x3d, 0x8a, 0x04, 0x2b, - 0x67, 0x59, 0xc2, 0xc2, 0x49, 0xc9, 0x2b, 0xee, 0xdc, 0xd1, 0xac, 0xb0, 0x61, 0x85, 0x96, 0xb5, - 0x77, 0x3b, 0xe5, 0x29, 0xd7, 0x94, 0x48, 0x7d, 0x33, 0xec, 0xbd, 0x87, 0x7f, 0xdf, 0xc9, 0x3e, - 0x4e, 0xb8, 0xb0, 0x9b, 0xf7, 0x1e, 0x18, 0xee, 0x88, 0x0a, 0x16, 0xcd, 0x8e, 0x14, 0x6f, 0x18, - 0x95, 0x4c, 0xf0, 0x69, 0x99, 0xb0, 0x69, 0x91, 0x55, 0xc2, 0xf0, 0xf0, 0x17, 0x00, 0x07, 0xc7, - 0x15, 0x2f, 0x69, 0xca, 0x5e, 0xd1, 0x92, 0xe6, 0xc2, 0x39, 0x84, 0x1b, 0x05, 0xcd, 0x99, 0x0b, - 0x7c, 0x10, 0xf4, 0xe3, 0xbb, 0x6b, 0x89, 0x74, 0x5d, 0x4b, 0xb4, 0x33, 0xa7, 0xf9, 0xf8, 0x19, - 0x56, 0x15, 0x26, 0x1a, 0x74, 0x22, 0xb8, 0x99, 0xf3, 0x69, 0x51, 0xb9, 0xff, 0x69, 0xf6, 0xee, - 0x5a, 0x22, 0x03, 0xd4, 0x12, 0xfd, 0x6f, 0xe8, 0xba, 0xc4, 0xc4, 0xc0, 0xce, 0x73, 0xd8, 0x2f, - 0x19, 0x3d, 0x79, 0xcf, 0x8b, 0xf1, 0xdc, 0xed, 0xfa, 0x20, 0xd8, 0x8e, 0xd1, 0x5a, 0xa2, 0x6d, - 0x05, 0xbe, 0x2c, 0xc6, 0xf3, 0x5a, 0xa2, 0x9b, 0x66, 0xae, 0x41, 0x30, 0xf9, 0xd9, 0xc4, 0x02, - 0x0e, 0x8e, 0xcd, 0x65, 0xad, 0xd8, 0x11, 0xec, 0x09, 0xa3, 0xde, 0x05, 0x7e, 0x37, 0xd8, 0x19, - 0x1e, 0x84, 0x7f, 0xb6, 0x34, 0xbc, 0x72, 0xc9, 0x78, 0x7f, 0x21, 0x51, 0x67, 0x2d, 0x51, 0x33, - 0x5d, 0x4b, 0x74, 0xc3, 0x1c, 0x6b, 0x01, 0x4c, 0x9a, 0x16, 0xfe, 0xba, 0x01, 0x7b, 0xf6, 0xd4, - 0x7f, 0x36, 0x27, 0xcb, 0x95, 0xb4, 0x5f, 0xcc, 0xd1, 0x40, 0x6b, 0x8e, 0x2e, 0x31, 0x31, 0xb0, - 0xf3, 0x14, 0xf6, 0x12, 0x9e, 0xe7, 0xb4, 0x38, 0x71, 0xbb, 0x7e, 0x37, 0xe8, 0xc7, 0xf7, 0x94, - 0x44, 0x0b, 0xb5, 0x12, 0x2d, 0x80, 0x49, 0xd3, 0x52, 0xb2, 0x68, 0x99, 0x0a, 0x77, 0x43, 0x4f, - 0x69, 0x59, 0xaa, 0x6e, 0x65, 0xa9, 0x0a, 0x13, 0x0d, 0x3a, 0x87, 0xb0, 0xcb, 0x8a, 0x99, 0xbb, - 0xa9, 0xb9, 0xbb, 0x0b, 0x89, 0xc0, 0x5a, 0x22, 0x05, 0xd5, 0x12, 0x41, 0x43, 0x67, 0xc5, 0x0c, - 0x13, 0x05, 0x39, 0xa7, 0xea, 0xbd, 0x4c, 0x6c, 0x84, 0xbb, 0xe5, 0x83, 0x60, 0x67, 0xb8, 0x6f, - 0x2d, 0x56, 0xd9, 0x0a, 0x6d, 0xb6, 0x42, 0x62, 0x49, 0xaf, 0x55, 0xb6, 0xe2, 0x03, 0x6b, 0x6f, - 0x3b, 0x5b, 0x4b, 0x74, 0xab, 0x79, 0x57, 0x0b, 0x61, 0xd2, 0xb6, 0x95, 0x57, 0x89, 0x0e, 0x52, - 0xcf, 0x07, 0xc1, 0xc0, 0x78, 0x95, 0x5c, 0x0d, 0x52, 0x62, 0x83, 0xa4, 0x3f, 0x9d, 0x77, 0x70, - 0xcb, 0x04, 0xde, 0xdd, 0xbe, 0xe6, 0xe1, 0xcd, 0xd3, 0xbd, 0xd0, 0xe4, 0x18, 0x59, 0x65, 0x76, - 0xb8, 0x96, 0x68, 0x60, 0xaf, 0xac, 0x6b, 0x4c, 0x6c, 0x43, 0xed, 0x9f, 0xe8, 0xac, 0xb8, 0x7d, - 0x7d, 0xeb, 0xeb, 0xf6, 0xdb, 0x60, 0x21, 0xeb, 0xa7, 0x1d, 0x6e, 0xf7, 0x9b, 0x1a, 0x13, 0xdb, - 0x88, 0xdf, 0x5c, 0x7e, 0xf7, 0x3a, 0x9f, 0x96, 0x1e, 0x58, 0x2c, 0x3d, 0x70, 0xb1, 0xf4, 0xc0, - 0xb7, 0xa5, 0x07, 0x3e, 0xaf, 0xbc, 0xce, 0xc5, 0xca, 0xeb, 0x5c, 0xae, 0xbc, 0xce, 0xdb, 0x27, - 0x69, 0x56, 0x9d, 0x4d, 0x47, 0x61, 0xc2, 0xf3, 0x48, 0x9f, 0xfd, 0xa8, 0x60, 0xd5, 0x07, 0x5e, - 0x9e, 0xdb, 0x8a, 0x4e, 0xb2, 0x28, 0xe5, 0xbf, 0xfd, 0x1d, 0x8c, 0xb6, 0xf4, 0x2f, 0xfb, 0xf1, - 0x8f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x74, 0xed, 0xf0, 0x71, 0x83, 0x04, 0x00, 0x00, -} - -func (m *StorageParams) Marshal() (dAtA []byte, err error) { - 
size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StorageParams) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StorageParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ReadOnly { - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.Mount) > 0 { - i -= len(m.Mount) - copy(dAtA[i:], m.Mount) - i = encodeVarintService(dAtA, i, uint64(len(m.Mount))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintService(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ServiceParams) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceParams) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Storage) > 0 { - for iNdEx := len(m.Storage) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Storage[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Service) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Service) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Service) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Params != nil { - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if len(m.Expose) > 0 { - for iNdEx := len(m.Expose) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Expose[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if m.Count != 0 { - i = encodeVarintService(dAtA, i, uint64(m.Count)) - i-- - dAtA[i] = 0x38 - } - { - size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - if len(m.Env) > 0 { - for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Env[iNdEx]) - copy(dAtA[i:], m.Env[iNdEx]) - i = encodeVarintService(dAtA, i, uint64(len(m.Env[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if len(m.Args) > 0 { - for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Args[iNdEx]) - copy(dAtA[i:], m.Args[iNdEx]) - i = encodeVarintService(dAtA, i, uint64(len(m.Args[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Command) > 0 { - for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Command[iNdEx]) - copy(dAtA[i:], m.Command[iNdEx]) - i = encodeVarintService(dAtA, i, 
uint64(len(m.Command[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Image) > 0 { - i -= len(m.Image) - copy(dAtA[i:], m.Image) - i = encodeVarintService(dAtA, i, uint64(len(m.Image))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintService(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintService(dAtA []byte, offset int, v uint64) int { - offset -= sovService(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *StorageParams) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.Mount) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - if m.ReadOnly { - n += 2 - } - return n -} - -func (m *ServiceParams) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Storage) > 0 { - for _, e := range m.Storage { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - return n -} - -func (m *Service) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.Image) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovService(uint64(l)) - } - } - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sovService(uint64(l)) - } - } - if len(m.Env) > 0 { - for _, s := range m.Env { - l = len(s) - n += 1 + l + sovService(uint64(l)) - } - } - l = m.Resources.Size() - n += 1 + l + sovService(uint64(l)) - if m.Count != 0 { - n += 1 + sovService(uint64(m.Count)) - } - if len(m.Expose) > 0 { - for _, e := range m.Expose { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - if m.Params != nil { - l = m.Params.Size() - n += 1 + l + sovService(uint64(l)) - } - return n -} - -func sovService(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozService(x uint64) (n int) { - return sovService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *StorageParams) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StorageParams{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Mount:` + fmt.Sprintf("%v", this.Mount) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceParams) String() string { - if this == nil { - return "nil" - } - repeatedStringForStorage := "[]StorageParams{" - for _, f := range this.Storage { - repeatedStringForStorage += strings.Replace(strings.Replace(f.String(), "StorageParams", "StorageParams", 1), `&`, ``, 1) + "," - } - repeatedStringForStorage += "}" - s := strings.Join([]string{`&ServiceParams{`, - `Storage:` + repeatedStringForStorage + `,`, - `}`, - }, "") - return s -} -func (this *Service) String() string { - if this == nil { - return "nil" - } - repeatedStringForExpose := "[]ServiceExpose{" - for _, f := range this.Expose { - repeatedStringForExpose += fmt.Sprintf("%v", f) + "," - } - repeatedStringForExpose += "}" - s := strings.Join([]string{`&Service{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `Args:` + fmt.Sprintf("%v", this.Args) + `,`, - `Env:` + fmt.Sprintf("%v", this.Env) + 
`,`, - `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "ResourceUnits", "v1beta2.ResourceUnits", 1), `&`, ``, 1) + `,`, - `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `Expose:` + repeatedStringForExpose + `,`, - `Params:` + strings.Replace(this.Params.String(), "ServiceParams", "ServiceParams", 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringService(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *StorageParams) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageParams: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StorageParams: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mount", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Mount = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceParams) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceParams: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceParams: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Storage = append(m.Storage, StorageParams{}) - if err := m.Storage[len(m.Storage)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Service) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Service: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Image = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expose", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Expose = append(m.Expose, ServiceExpose{}) - if err := m.Expose[len(m.Expose)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Params == nil { - m.Params = &ServiceParams{} - } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipService(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthService - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupService - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthService - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthService = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowService = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupService = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/manifest/v2beta1/serviceexpose.pb.go b/go/manifest/v2beta1/serviceexpose.pb.go deleted file mode 100644 index 7eca5b66..00000000 --- a/go/manifest/v2beta1/serviceexpose.pb.go +++ /dev/null @@ -1,729 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: akash/manifest/v2beta1/serviceexpose.proto - -package v2beta1 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// ServiceExpose stores exposed ports and hosts details -type ServiceExpose struct { - // port on the container - Port uint32 `protobuf:"varint,1,opt,name=port,proto3" json:"port" yaml:"port"` - // port on the service definition - ExternalPort uint32 `protobuf:"varint,2,opt,name=external_port,json=externalPort,proto3" json:"externalPort" yaml:"externalPort"` - Proto ServiceProtocol `protobuf:"bytes,3,opt,name=proto,proto3,casttype=ServiceProtocol" json:"proto" yaml:"proto"` - Service string `protobuf:"bytes,4,opt,name=service,proto3" json:"service" yaml:"service"` - Global bool `protobuf:"varint,5,opt,name=global,proto3" json:"global" yaml:"global"` - Hosts []string `protobuf:"bytes,6,rep,name=hosts,proto3" json:"hosts" yaml:"hosts"` - HTTPOptions ServiceExposeHTTPOptions `protobuf:"bytes,7,opt,name=http_options,json=httpOptions,proto3" json:"httpOptions" yaml:"httpOptions"` - // The name of the IP address associated with this, if any - IP string `protobuf:"bytes,8,opt,name=ip,proto3" json:"ip" yaml:"ip"` - // The sequence number of the associated endpoint in the on-chain data - EndpointSequenceNumber uint32 `protobuf:"varint,9,opt,name=endpoint_sequence_number,json=endpointSequenceNumber,proto3" json:"endpointSequenceNumber" yaml:"endpointSequenceNumber"` -} - -func (m *ServiceExpose) Reset() { *m = ServiceExpose{} } -func (*ServiceExpose) ProtoMessage() {} -func (*ServiceExpose) Descriptor() ([]byte, []int) { - return fileDescriptor_a282b25ed8ea7811, []int{0} -} -func (m *ServiceExpose) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceExpose) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ServiceExpose.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ServiceExpose) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceExpose.Merge(m, src) -} -func (m *ServiceExpose) XXX_Size() int { - return m.Size() -} -func (m *ServiceExpose) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceExpose.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceExpose proto.InternalMessageInfo - -func (m *ServiceExpose) GetPort() uint32 { - if m != nil { - return m.Port - } - return 0 -} - -func (m *ServiceExpose) GetExternalPort() uint32 { - if m != nil { - return m.ExternalPort - } - return 0 -} - -func (m *ServiceExpose) GetProto() ServiceProtocol { - if m != nil { - return m.Proto - } - return "" -} - -func (m *ServiceExpose) GetService() string { - if m != nil { - return m.Service - } - return "" -} - -func (m *ServiceExpose) GetGlobal() bool { - if m != nil { - return m.Global - } - return false -} - -func (m *ServiceExpose) GetHosts() []string { - if m != nil { - return 
m.Hosts - } - return nil -} - -func (m *ServiceExpose) GetHTTPOptions() ServiceExposeHTTPOptions { - if m != nil { - return m.HTTPOptions - } - return ServiceExposeHTTPOptions{} -} - -func (m *ServiceExpose) GetIP() string { - if m != nil { - return m.IP - } - return "" -} - -func (m *ServiceExpose) GetEndpointSequenceNumber() uint32 { - if m != nil { - return m.EndpointSequenceNumber - } - return 0 -} - -func init() { - proto.RegisterType((*ServiceExpose)(nil), "akash.manifest.v2beta1.ServiceExpose") -} - -func init() { - proto.RegisterFile("akash/manifest/v2beta1/serviceexpose.proto", fileDescriptor_a282b25ed8ea7811) -} - -var fileDescriptor_a282b25ed8ea7811 = []byte{ - // 534 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0x4f, 0x6f, 0xd3, 0x3e, - 0x18, 0xc7, 0xeb, 0x6e, 0xed, 0x56, 0xb7, 0xfd, 0xfd, 0x24, 0x83, 0x46, 0x36, 0xb4, 0xb8, 0xf2, - 0x85, 0xf0, 0x2f, 0x81, 0x4d, 0x02, 0x04, 0xb7, 0x48, 0x48, 0x20, 0x21, 0xa8, 0xb2, 0x1d, 0x10, - 0x97, 0x2a, 0x2d, 0xa6, 0xb5, 0xd6, 0xc6, 0x26, 0x71, 0xc7, 0xb8, 0xf1, 0x06, 0x90, 0x78, 0x07, - 0xbc, 0x9d, 0x1d, 0x77, 0xdc, 0xc9, 0x82, 0xf4, 0x96, 0x63, 0x8e, 0x9c, 0x50, 0x6c, 0x57, 0xeb, - 0xc4, 0x76, 0xcb, 0xf3, 0x7d, 0x3e, 0xdf, 0xc7, 0x5f, 0xf9, 0x71, 0xe0, 0xbd, 0xf8, 0x28, 0xce, - 0x26, 0xc1, 0x2c, 0x4e, 0xd8, 0x27, 0x9a, 0xc9, 0xe0, 0x78, 0x6f, 0x48, 0x65, 0xfc, 0x38, 0xc8, - 0x68, 0x7a, 0xcc, 0x46, 0x94, 0x9e, 0x08, 0x9e, 0x51, 0x5f, 0xa4, 0x5c, 0x72, 0xb4, 0xa5, 0x59, - 0x7f, 0xc9, 0xfa, 0x96, 0xdd, 0xb9, 0x39, 0xe6, 0x63, 0xae, 0x91, 0xa0, 0xfa, 0x32, 0xf4, 0x8e, - 0x77, 0xcd, 0xe4, 0x89, 0x94, 0x82, 0x0b, 0xc9, 0x78, 0x92, 0x19, 0x92, 0xfc, 0x6c, 0xc0, 0xee, - 0x81, 0x39, 0xef, 0xa5, 0x3e, 0x0f, 0xdd, 0x87, 0xeb, 0x82, 0xa7, 0xd2, 0x01, 0x3d, 0xe0, 0x75, - 0xc3, 0x5b, 0x85, 0xc2, 0xba, 0x2e, 0x15, 0x6e, 0x7f, 0x8d, 0x67, 0xd3, 0xe7, 0xa4, 0xaa, 0x48, - 0xa4, 0x45, 0xf4, 0x06, 0x76, 0xe9, 0x89, 0xa4, 0x69, 0x12, 0x4f, 0x07, 0xda, 0x55, 0xd7, 0xae, - 0x3b, 0x85, 0xc2, 0x9d, 0x65, 0xa3, 0x6f, 0xdc, 0x37, 0x8c, 0x7b, 0x55, 0x25, 0xd1, 0x25, 0x08, - 0x85, 0xb0, 0xa1, 0x53, 0x39, 0x6b, 0x3d, 0xe0, 0xb5, 0xc2, 0x07, 0x85, 0xc2, 0x46, 0x28, 0x15, - 0xee, 0xd8, 0xc3, 0x75, 0xea, 0x3f, 0x0a, 0xff, 0x6f, 0x53, 0xf7, 0x2b, 0x61, 0xc4, 0xa7, 0x91, - 0x21, 0xd1, 0x53, 0xb8, 0x61, 0xef, 0xcf, 0x59, 0xd7, 0x53, 0x76, 0x0b, 0x85, 0x97, 0x52, 0xa9, - 0xf0, 0x7f, 0x66, 0x8e, 0x15, 0x48, 0xb4, 0x6c, 0xa1, 0x7d, 0xd8, 0x1c, 0x4f, 0xf9, 0x30, 0x9e, - 0x3a, 0x8d, 0x1e, 0xf0, 0x36, 0xc3, 0xdb, 0x85, 0xc2, 0x56, 0x29, 0x15, 0xee, 0x1a, 0x9b, 0xa9, - 0x49, 0x64, 0x1b, 0x28, 0x80, 0x8d, 0x09, 0xcf, 0x64, 0xe6, 0x34, 0x7b, 0x6b, 0x5e, 0x2b, 0xdc, - 0xae, 0x12, 0x6b, 0xe1, 0x22, 0xb1, 0x2e, 0x49, 0x64, 0x64, 0xf4, 0x1d, 0xc0, 0x4e, 0xb5, 0x85, - 0x81, 0x5d, 0x83, 0xb3, 0xd1, 0x03, 0x5e, 0x7b, 0xef, 0x91, 0x7f, 0xf5, 0x7e, 0xfd, 0x4b, 0xbb, - 0x79, 0x75, 0x78, 0xd8, 0x7f, 0x67, 0x7c, 0xe1, 0xb3, 0x53, 0x85, 0x6b, 0xb9, 0xc2, 0xed, 0x15, - 0xb1, 0x50, 0xb8, 0x5d, 0x0d, 0xb7, 0x65, 0xa9, 0x30, 0xb2, 0x19, 0x2e, 0x44, 0x12, 0xad, 0x22, - 0xe8, 0x2e, 0xac, 0x33, 0xe1, 0x6c, 0xea, 0x9b, 0xda, 0xce, 0x15, 0xae, 0xbf, 0xee, 0x17, 0x0a, - 0xd7, 0x99, 0x28, 0x15, 0x6e, 0x19, 0x33, 0x13, 0x24, 0xaa, 0x33, 0x81, 0xe6, 0xd0, 0xa1, 0xc9, - 0x47, 0xc1, 0x59, 0x22, 0x07, 0x19, 0xfd, 0x3c, 0xa7, 0xc9, 0x88, 0x0e, 0x92, 0xf9, 0x6c, 0x48, - 0x53, 0xa7, 0xa5, 0xd7, 0xfe, 0xa2, 0x50, 0x78, 0x6b, 0xc9, 0x1c, 0x58, 0xe4, 0xad, 0x26, 0x4a, - 0x85, 0x77, 0xed, 0x03, 0xb8, 0xb2, 0x4f, 0xa2, 0x6b, 0x8c, 0xe1, 0xfb, 0xf3, 
0xdf, 0x6e, 0xed, - 0x5b, 0xee, 0x82, 0xd3, 0xdc, 0x05, 0x67, 0xb9, 0x0b, 0x7e, 0xe5, 0x2e, 0xf8, 0xb1, 0x70, 0x6b, - 0x67, 0x0b, 0xb7, 0x76, 0xbe, 0x70, 0x6b, 0x1f, 0x9e, 0x8c, 0x99, 0x9c, 0xcc, 0x87, 0xfe, 0x88, - 0xcf, 0x02, 0x7d, 0x8d, 0x0f, 0x13, 0x2a, 0xbf, 0xf0, 0xf4, 0xc8, 0x56, 0xb1, 0x60, 0xc1, 0x98, - 0xff, 0xf3, 0x37, 0x0c, 0x9b, 0xfa, 0xc5, 0xec, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xaa, 0x35, - 0xfb, 0x0f, 0x88, 0x03, 0x00, 0x00, -} - -func (m *ServiceExpose) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceExpose) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceExpose) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.EndpointSequenceNumber != 0 { - i = encodeVarintServiceexpose(dAtA, i, uint64(m.EndpointSequenceNumber)) - i-- - dAtA[i] = 0x48 - } - if len(m.IP) > 0 { - i -= len(m.IP) - copy(dAtA[i:], m.IP) - i = encodeVarintServiceexpose(dAtA, i, uint64(len(m.IP))) - i-- - dAtA[i] = 0x42 - } - { - size, err := m.HTTPOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintServiceexpose(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - if len(m.Hosts) > 0 { - for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Hosts[iNdEx]) - copy(dAtA[i:], m.Hosts[iNdEx]) - i = encodeVarintServiceexpose(dAtA, i, uint64(len(m.Hosts[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if m.Global { - i-- - if m.Global { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if len(m.Service) > 0 { - i -= len(m.Service) - copy(dAtA[i:], m.Service) - i = encodeVarintServiceexpose(dAtA, i, uint64(len(m.Service))) - i-- - dAtA[i] = 0x22 - } - if len(m.Proto) > 0 { - i -= len(m.Proto) - copy(dAtA[i:], m.Proto) - i = encodeVarintServiceexpose(dAtA, i, uint64(len(m.Proto))) - i-- - dAtA[i] = 0x1a - } - if m.ExternalPort != 0 { - i = encodeVarintServiceexpose(dAtA, i, uint64(m.ExternalPort)) - i-- - dAtA[i] = 0x10 - } - if m.Port != 0 { - i = encodeVarintServiceexpose(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintServiceexpose(dAtA []byte, offset int, v uint64) int { - offset -= sovServiceexpose(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ServiceExpose) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Port != 0 { - n += 1 + sovServiceexpose(uint64(m.Port)) - } - if m.ExternalPort != 0 { - n += 1 + sovServiceexpose(uint64(m.ExternalPort)) - } - l = len(m.Proto) - if l > 0 { - n += 1 + l + sovServiceexpose(uint64(l)) - } - l = len(m.Service) - if l > 0 { - n += 1 + l + sovServiceexpose(uint64(l)) - } - if m.Global { - n += 2 - } - if len(m.Hosts) > 0 { - for _, s := range m.Hosts { - l = len(s) - n += 1 + l + sovServiceexpose(uint64(l)) - } - } - l = m.HTTPOptions.Size() - n += 1 + l + sovServiceexpose(uint64(l)) - l = len(m.IP) - if l > 0 { - n += 1 + l + sovServiceexpose(uint64(l)) - } - if m.EndpointSequenceNumber != 0 { - n += 1 + sovServiceexpose(uint64(m.EndpointSequenceNumber)) - } - return n -} - -func sovServiceexpose(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozServiceexpose(x uint64) (n int) { - return 
sovServiceexpose(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ServiceExpose) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceExpose{`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `ExternalPort:` + fmt.Sprintf("%v", this.ExternalPort) + `,`, - `Proto:` + fmt.Sprintf("%v", this.Proto) + `,`, - `Service:` + fmt.Sprintf("%v", this.Service) + `,`, - `Global:` + fmt.Sprintf("%v", this.Global) + `,`, - `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, - `HTTPOptions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HTTPOptions), "ServiceExposeHTTPOptions", "ServiceExposeHTTPOptions", 1), `&`, ``, 1) + `,`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `EndpointSequenceNumber:` + fmt.Sprintf("%v", this.EndpointSequenceNumber) + `,`, - `}`, - }, "") - return s -} -func valueToStringServiceexpose(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ServiceExpose) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceExpose: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceExpose: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalPort", wireType) - } - m.ExternalPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExternalPort |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proto", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthServiceexpose - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthServiceexpose - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Proto = ServiceProtocol(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - 
if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthServiceexpose - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthServiceexpose - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Service = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Global", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Global = bool(v != 0) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthServiceexpose - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthServiceexpose - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTPOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthServiceexpose - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthServiceexpose - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.HTTPOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthServiceexpose - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthServiceexpose - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndpointSequenceNumber", wireType) - } - m.EndpointSequenceNumber = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndpointSequenceNumber |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipServiceexpose(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthServiceexpose - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } 
- - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipServiceexpose(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthServiceexpose - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupServiceexpose - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthServiceexpose - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthServiceexpose = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowServiceexpose = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupServiceexpose = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/manifest/v2beta2/errors.go b/go/manifest/v2beta2/errors.go deleted file mode 100644 index 2f1abada..00000000 --- a/go/manifest/v2beta2/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package v2beta2 - -import ( - "errors" -) - -var ( - ErrInvalidManifest = errors.New("invalid manifest") - ErrManifestCrossValidation = errors.New("manifest cross-validation error") -) diff --git a/go/manifest/v2beta2/group.go b/go/manifest/v2beta2/group.go deleted file mode 100644 index 09ee3e1c..00000000 --- a/go/manifest/v2beta2/group.go +++ /dev/null @@ -1,79 +0,0 @@ -package v2beta2 - -import ( - "fmt" - "sort" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" -) - -var _ dtypes.ResourceGroup = (*Group)(nil) - -// GetName returns the name of group -func (g Group) GetName() string { - return g.Name -} - -func (g Group) GetResourceUnits() dtypes.ResourceUnits { - groups := make(map[uint32]*dtypes.ResourceUnit) - - for _, svc := range g.Services { - if _, exists := groups[svc.Resources.ID]; !exists { - groups[svc.Resources.ID] = &dtypes.ResourceUnit{ - Resources: svc.Resources, - Count: svc.Count, - } - } else { - groups[svc.Resources.ID].Count += svc.Count - } - } - - units := make(dtypes.ResourceUnits, 0, len(groups)) - - for i := range groups { - units = append(units, *groups[i]) - } - - return units -} - -func (g Group) AllHostnames() []string { - allHostnames := make([]string, 0) - for _, service := range g.Services { - for _, expose := range service.Expose { - allHostnames = append(allHostnames, expose.Hosts...) 
- } - } - - return allHostnames -} - -func (g *Group) Validate(helper *validateManifestGroupsHelper) error { - if 0 == len(g.Services) { - return fmt.Errorf("%w: group %q contains no services", ErrInvalidManifest, g.GetName()) - } - - if !sort.IsSorted(g.Services) { - return fmt.Errorf("%w: group %q services is not sorted", ErrInvalidManifest, g.GetName()) - } - - for _, s := range g.Services { - if err := s.validate(helper); err != nil { - return err - } - } - - return nil -} - -// checkAgainstGSpec check if manifest group is within GroupSpec resources -// NOTE: it modifies caller's gspec -func (g *Group) checkAgainstGSpec(gspec *groupSpec) error { - for _, svc := range g.Services { - if err := svc.checkAgainstGSpec(gspec); err != nil { - return fmt.Errorf("%w: group %q: %w", ErrManifestCrossValidation, g.Name, err) - } - } - - return nil -} diff --git a/go/manifest/v2beta2/group.pb.go b/go/manifest/v2beta2/group.pb.go deleted file mode 100644 index e1a23a1a..00000000 --- a/go/manifest/v2beta2/group.pb.go +++ /dev/null @@ -1,399 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/manifest/v2beta2/group.proto - -package v2beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Group store name and list of services -type Group struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` - Services Services `protobuf:"bytes,2,rep,name=services,proto3,castrepeated=Services" json:"services" yaml:"services"` -} - -func (m *Group) Reset() { *m = Group{} } -func (*Group) ProtoMessage() {} -func (*Group) Descriptor() ([]byte, []int) { - return fileDescriptor_9735f16570903042, []int{0} -} -func (m *Group) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Group.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Group) XXX_Merge(src proto.Message) { - xxx_messageInfo_Group.Merge(m, src) -} -func (m *Group) XXX_Size() int { - return m.Size() -} -func (m *Group) XXX_DiscardUnknown() { - xxx_messageInfo_Group.DiscardUnknown(m) -} - -var xxx_messageInfo_Group proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Group)(nil), "akash.manifest.v2beta2.Group") -} - -func init() { - proto.RegisterFile("akash/manifest/v2beta2/group.proto", fileDescriptor_9735f16570903042) -} - -var fileDescriptor_9735f16570903042 = []byte{ - // 276 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4a, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0xcf, 0x4d, 0xcc, 0xcb, 0x4c, 0x4b, 0x2d, 0x2e, 0xd1, 0x2f, 0x33, 0x4a, 0x4a, 0x2d, - 0x49, 0x34, 0xd2, 0x4f, 0x2f, 0xca, 0x2f, 0x2d, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, - 0x03, 0xab, 0xd1, 0x83, 0xa9, 
0xd1, 0x83, 0xaa, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, - 0xd1, 0x07, 0xb1, 0x20, 0xaa, 0xa5, 0x54, 0x70, 0x98, 0x58, 0x9c, 0x5a, 0x54, 0x96, 0x99, 0x9c, - 0x0a, 0x51, 0xa5, 0xb4, 0x82, 0x91, 0x8b, 0xd5, 0x1d, 0x64, 0x87, 0x90, 0x36, 0x17, 0x4b, 0x5e, - 0x62, 0x6e, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x93, 0xf8, 0xab, 0x7b, 0xf2, 0x60, 0xfe, - 0xa7, 0x7b, 0xf2, 0xdc, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, 0x20, 0x9e, 0x52, 0x10, 0x58, 0x50, - 0x28, 0x87, 0x8b, 0x03, 0x6a, 0x4e, 0xb1, 0x04, 0x93, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0xbc, 0x1e, - 0x76, 0xd7, 0xe9, 0x05, 0x43, 0xd4, 0x39, 0xe9, 0x9f, 0xb8, 0x27, 0xcf, 0xf0, 0xea, 0x9e, 0x3c, - 0x5c, 0xe3, 0xa7, 0x7b, 0xf2, 0xfc, 0x10, 0x93, 0x61, 0x22, 0x4a, 0xab, 0xee, 0xcb, 0x73, 0x40, - 0xd5, 0x17, 0x07, 0xc1, 0x15, 0x5a, 0xb1, 0x74, 0x2c, 0x90, 0x67, 0x70, 0x8a, 0xb8, 0xf1, 0x50, - 0x8e, 0xa1, 0xe1, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, - 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, - 0x99, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x83, 0x5d, 0xa3, 0x9b, - 0x97, 0x5a, 0x52, 0x9e, 0x5f, 0x94, 0x0d, 0xe5, 0x25, 0x16, 0x64, 0xea, 0xa7, 0xe7, 0x63, 0x04, - 0x49, 0x12, 0x1b, 0x38, 0x2c, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xac, 0x94, 0xcd, 0xe5, - 0x85, 0x01, 0x00, 0x00, -} - -func (m *Group) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Group) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Group) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Services) > 0 { - for iNdEx := len(m.Services) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Services[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroup(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGroup(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintGroup(dAtA []byte, offset int, v uint64) int { - offset -= sovGroup(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Group) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovGroup(uint64(l)) - } - if len(m.Services) > 0 { - for _, e := range m.Services { - l = e.Size() - n += 1 + l + sovGroup(uint64(l)) - } - } - return n -} - -func sovGroup(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGroup(x uint64) (n int) { - return sovGroup(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Group) String() string { - if this == nil { - return "nil" - } - repeatedStringForServices := "[]Service{" - for _, f := range this.Services { - repeatedStringForServices += fmt.Sprintf("%v", f) + "," - } - repeatedStringForServices += "}" - s := strings.Join([]string{`&Group{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Services:` + repeatedStringForServices + `,`, - `}`, - }, "") - return s -} -func valueToStringGroup(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := 
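For readers unfamiliar with the gogo-generated helpers being deleted in these *.pb.go files: the sov*/encodeVarint* functions and the hard-coded tag bytes (0xa, 0x12, ...) written by MarshalToSizedBuffer all come down to standard protobuf varint arithmetic. A tiny standalone sketch, not part of this patch, standard library only:

// varint_sketch.go — illustrative only; mirrors the arithmetic behind the
// generated sov*/encodeVarint* helpers and the literal tag bytes seen in the
// deleted *.pb.go files in this patch.
package main

import (
	"fmt"
	"math/bits"
)

// sov is the size-of-varint calculation used verbatim by the generated code.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// putVarint appends x as 7-bit little-endian groups with a continuation bit,
// the same grouping the generated encodeVarint* helpers write into a
// pre-sized buffer.
func putVarint(buf []byte, x uint64) []byte {
	for x >= 1<<7 {
		buf = append(buf, byte(x&0x7f|0x80))
		x >>= 7
	}
	return append(buf, byte(x))
}

// key packs a field number and wire type into the one-byte keys written as
// literals by MarshalToSizedBuffer, e.g. Group.Name is field 1, wire type 2.
func key(fieldNum, wireType uint64) uint64 { return fieldNum<<3 | wireType }

func main() {
	fmt.Printf("%#x %#x\n", key(1, 2), key(2, 2)) // 0xa 0x12 — Group.Name, Group.Services
	fmt.Println(sov(300), putVarint(nil, 300))    // 2 [172 2]
}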
reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Group) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Group: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Group: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Services = append(m.Services, Service{}) - if err := m.Services[len(m.Services)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGroup(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroup - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGroup(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroup - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroup - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroup - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGroup - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, 
ErrUnexpectedEndOfGroupGroup - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGroup - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGroup = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGroup = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGroup = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/manifest/v2beta2/httpoptions.pb.go b/go/manifest/v2beta2/httpoptions.pb.go deleted file mode 100644 index cada1f23..00000000 --- a/go/manifest/v2beta2/httpoptions.pb.go +++ /dev/null @@ -1,535 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/manifest/v2beta2/httpoptions.proto - -package v2beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// ServiceExposeHTTPOptions -type ServiceExposeHTTPOptions struct { - MaxBodySize uint32 `protobuf:"varint,1,opt,name=max_body_size,json=maxBodySize,proto3" json:"maxBodySize" yaml:"maxBodySize"` - ReadTimeout uint32 `protobuf:"varint,2,opt,name=read_timeout,json=readTimeout,proto3" json:"readTimeout" yaml:"readTimeout"` - SendTimeout uint32 `protobuf:"varint,3,opt,name=send_timeout,json=sendTimeout,proto3" json:"sendTimeout" yaml:"sendTimeout"` - NextTries uint32 `protobuf:"varint,4,opt,name=next_tries,json=nextTries,proto3" json:"nextTries" yaml:"nextTries"` - NextTimeout uint32 `protobuf:"varint,5,opt,name=next_timeout,json=nextTimeout,proto3" json:"nextTimeout" yaml:"nextTimeout"` - NextCases []string `protobuf:"bytes,6,rep,name=next_cases,json=nextCases,proto3" json:"nextCases" yaml:"nextCases"` -} - -func (m *ServiceExposeHTTPOptions) Reset() { *m = ServiceExposeHTTPOptions{} } -func (*ServiceExposeHTTPOptions) ProtoMessage() {} -func (*ServiceExposeHTTPOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_8764ded002d8de0f, []int{0} -} -func (m *ServiceExposeHTTPOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceExposeHTTPOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ServiceExposeHTTPOptions.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ServiceExposeHTTPOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceExposeHTTPOptions.Merge(m, src) -} -func (m *ServiceExposeHTTPOptions) XXX_Size() int { - return m.Size() -} -func (m *ServiceExposeHTTPOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceExposeHTTPOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceExposeHTTPOptions proto.InternalMessageInfo - -func (m *ServiceExposeHTTPOptions) GetMaxBodySize() uint32 { - if m != nil { - return m.MaxBodySize - } - return 0 -} - -func (m 
*ServiceExposeHTTPOptions) GetReadTimeout() uint32 { - if m != nil { - return m.ReadTimeout - } - return 0 -} - -func (m *ServiceExposeHTTPOptions) GetSendTimeout() uint32 { - if m != nil { - return m.SendTimeout - } - return 0 -} - -func (m *ServiceExposeHTTPOptions) GetNextTries() uint32 { - if m != nil { - return m.NextTries - } - return 0 -} - -func (m *ServiceExposeHTTPOptions) GetNextTimeout() uint32 { - if m != nil { - return m.NextTimeout - } - return 0 -} - -func (m *ServiceExposeHTTPOptions) GetNextCases() []string { - if m != nil { - return m.NextCases - } - return nil -} - -func init() { - proto.RegisterType((*ServiceExposeHTTPOptions)(nil), "akash.manifest.v2beta2.ServiceExposeHTTPOptions") -} - -func init() { - proto.RegisterFile("akash/manifest/v2beta2/httpoptions.proto", fileDescriptor_8764ded002d8de0f) -} - -var fileDescriptor_8764ded002d8de0f = []byte{ - // 377 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xb1, 0x6a, 0xdb, 0x40, - 0x1c, 0x87, 0xa5, 0xba, 0x35, 0x58, 0xad, 0xa1, 0x88, 0x52, 0x44, 0x87, 0x93, 0x2b, 0x28, 0x78, - 0xa9, 0x04, 0x0e, 0x64, 0xc8, 0x14, 0x1c, 0x02, 0xce, 0x94, 0x20, 0x6b, 0x08, 0x59, 0xc4, 0xc9, - 0xbe, 0xc8, 0x87, 0x23, 0x9d, 0xd0, 0x9d, 0x1d, 0xd9, 0x53, 0x1e, 0x21, 0x6f, 0x95, 0x8c, 0x1e, - 0x3d, 0x89, 0x44, 0xde, 0x3c, 0xfa, 0x09, 0xc2, 0x9d, 0x14, 0xeb, 0x12, 0x6f, 0xa7, 0x4f, 0x1f, - 0x1f, 0xbf, 0xe1, 0xaf, 0x75, 0xe1, 0x14, 0xd2, 0x89, 0x13, 0xc1, 0x18, 0xdf, 0x22, 0xca, 0x9c, - 0x79, 0x2f, 0x40, 0x0c, 0xf6, 0x9c, 0x09, 0x63, 0x09, 0x49, 0x18, 0x26, 0x31, 0xb5, 0x93, 0x94, - 0x30, 0xa2, 0xff, 0x16, 0xa6, 0xfd, 0x6e, 0xda, 0x95, 0xf9, 0xe7, 0x57, 0x48, 0x42, 0x22, 0x14, - 0x87, 0xbf, 0x4a, 0xdb, 0x7a, 0x6a, 0x68, 0xc6, 0x10, 0xa5, 0x73, 0x3c, 0x42, 0xe7, 0x59, 0x42, - 0x28, 0x1a, 0x78, 0xde, 0xd5, 0x65, 0x19, 0xd4, 0x2f, 0xb4, 0x76, 0x04, 0x33, 0x3f, 0x20, 0xe3, - 0x85, 0x4f, 0xf1, 0x12, 0x19, 0x6a, 0x47, 0xed, 0xb6, 0xfb, 0xff, 0xb6, 0xb9, 0xf9, 0x3d, 0x82, - 0x59, 0x9f, 0x8c, 0x17, 0x43, 0xbc, 0x44, 0xbb, 0xdc, 0xd4, 0x17, 0x30, 0xba, 0x3b, 0xb1, 0x24, - 0x68, 0xb9, 0xb2, 0xa2, 0x0f, 0xb4, 0x1f, 0x29, 0x82, 0x63, 0x9f, 0xe1, 0x08, 0x91, 0x19, 0x33, - 0xbe, 0xd4, 0x25, 0xce, 0xbd, 0x12, 0xd7, 0x25, 0x09, 0x5a, 0xae, 0xac, 0xf0, 0x12, 0x45, 0x71, - 0x5d, 0x6a, 0xd4, 0x25, 0xce, 0x0f, 0x4a, 0x12, 0xb4, 0x5c, 0x59, 0xd1, 0x4f, 0x35, 0x2d, 0x46, - 0x19, 0xf3, 0x59, 0x8a, 0x11, 0x35, 0xbe, 0x8a, 0xce, 0xdf, 0x6d, 0x6e, 0xb6, 0x38, 0xf5, 0x38, - 0xdc, 0xe5, 0xe6, 0xcf, 0xb2, 0xb2, 0x47, 0x96, 0x5b, 0xff, 0xe6, 0x5b, 0xca, 0x42, 0xb5, 0xe5, - 0x5b, 0xbd, 0x45, 0x48, 0x9f, 0xb7, 0x48, 0xd0, 0x72, 0x65, 0x65, 0xbf, 0x65, 0x04, 0x29, 0xa2, - 0x46, 0xb3, 0xd3, 0xe8, 0xb6, 0xea, 0x2d, 0x67, 0x1c, 0x7e, 0xdc, 0x22, 0x50, 0xb5, 0x45, 0xbc, - 0xfb, 0xd7, 0xeb, 0x57, 0xa0, 0x3c, 0x14, 0x40, 0x7d, 0x2e, 0x80, 0xba, 0x2a, 0x80, 0xfa, 0x52, - 0x00, 0xf5, 0x71, 0x03, 0x94, 0xd5, 0x06, 0x28, 0xeb, 0x0d, 0x50, 0x6e, 0x8e, 0x43, 0xcc, 0x26, - 0xb3, 0xc0, 0x1e, 0x91, 0xc8, 0x11, 0x47, 0xf2, 0x3f, 0x46, 0xec, 0x9e, 0xa4, 0xd3, 0xea, 0x0b, - 0x26, 0xd8, 0x09, 0xc9, 0xc1, 0x8d, 0x05, 0x4d, 0x71, 0x2a, 0x47, 0x6f, 0x01, 0x00, 0x00, 0xff, - 0xff, 0x0d, 0x59, 0x8b, 0x64, 0x84, 0x02, 0x00, 0x00, -} - -func (m *ServiceExposeHTTPOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceExposeHTTPOptions) MarshalTo(dAtA []byte) (int, error) 
{ - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceExposeHTTPOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.NextCases) > 0 { - for iNdEx := len(m.NextCases) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.NextCases[iNdEx]) - copy(dAtA[i:], m.NextCases[iNdEx]) - i = encodeVarintHttpoptions(dAtA, i, uint64(len(m.NextCases[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if m.NextTimeout != 0 { - i = encodeVarintHttpoptions(dAtA, i, uint64(m.NextTimeout)) - i-- - dAtA[i] = 0x28 - } - if m.NextTries != 0 { - i = encodeVarintHttpoptions(dAtA, i, uint64(m.NextTries)) - i-- - dAtA[i] = 0x20 - } - if m.SendTimeout != 0 { - i = encodeVarintHttpoptions(dAtA, i, uint64(m.SendTimeout)) - i-- - dAtA[i] = 0x18 - } - if m.ReadTimeout != 0 { - i = encodeVarintHttpoptions(dAtA, i, uint64(m.ReadTimeout)) - i-- - dAtA[i] = 0x10 - } - if m.MaxBodySize != 0 { - i = encodeVarintHttpoptions(dAtA, i, uint64(m.MaxBodySize)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintHttpoptions(dAtA []byte, offset int, v uint64) int { - offset -= sovHttpoptions(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ServiceExposeHTTPOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.MaxBodySize != 0 { - n += 1 + sovHttpoptions(uint64(m.MaxBodySize)) - } - if m.ReadTimeout != 0 { - n += 1 + sovHttpoptions(uint64(m.ReadTimeout)) - } - if m.SendTimeout != 0 { - n += 1 + sovHttpoptions(uint64(m.SendTimeout)) - } - if m.NextTries != 0 { - n += 1 + sovHttpoptions(uint64(m.NextTries)) - } - if m.NextTimeout != 0 { - n += 1 + sovHttpoptions(uint64(m.NextTimeout)) - } - if len(m.NextCases) > 0 { - for _, s := range m.NextCases { - l = len(s) - n += 1 + l + sovHttpoptions(uint64(l)) - } - } - return n -} - -func sovHttpoptions(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozHttpoptions(x uint64) (n int) { - return sovHttpoptions(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ServiceExposeHTTPOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceExposeHTTPOptions{`, - `MaxBodySize:` + fmt.Sprintf("%v", this.MaxBodySize) + `,`, - `ReadTimeout:` + fmt.Sprintf("%v", this.ReadTimeout) + `,`, - `SendTimeout:` + fmt.Sprintf("%v", this.SendTimeout) + `,`, - `NextTries:` + fmt.Sprintf("%v", this.NextTries) + `,`, - `NextTimeout:` + fmt.Sprintf("%v", this.NextTimeout) + `,`, - `NextCases:` + fmt.Sprintf("%v", this.NextCases) + `,`, - `}`, - }, "") - return s -} -func valueToStringHttpoptions(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ServiceExposeHTTPOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceExposeHTTPOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceExposeHTTPOptions: illegal tag %d (wire type 
%d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxBodySize", wireType) - } - m.MaxBodySize = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxBodySize |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadTimeout", wireType) - } - m.ReadTimeout = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadTimeout |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SendTimeout", wireType) - } - m.SendTimeout = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SendTimeout |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NextTries", wireType) - } - m.NextTries = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NextTries |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NextTimeout", wireType) - } - m.NextTimeout = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NextTimeout |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NextCases", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHttpoptions - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthHttpoptions - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NextCases = append(m.NextCases, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipHttpoptions(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthHttpoptions - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipHttpoptions(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return 0, ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowHttpoptions - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthHttpoptions - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupHttpoptions - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthHttpoptions - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthHttpoptions = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowHttpoptions = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupHttpoptions = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/manifest/v2beta2/manifest.go b/go/manifest/v2beta2/manifest.go deleted file mode 100644 index 69b7ce95..00000000 --- a/go/manifest/v2beta2/manifest.go +++ /dev/null @@ -1,67 +0,0 @@ -package v2beta2 - -import ( - "crypto/sha256" - "encoding/json" - "fmt" - "regexp" - - sdk "github.com/cosmos/cosmos-sdk/types" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" -) - -var ( - serviceNameValidationRegex = regexp.MustCompile(`^[a-z]([-a-z0-9]*[a-z0-9])?$`) - hostnameMaxLen = 255 -) - -// Manifest store list of groups -type Manifest Groups - -// GetGroups returns a manifest with groups list -func (m Manifest) GetGroups() Groups { - return Groups(m) -} - -// Validate does validation for manifest -func (m Manifest) Validate() error { - if len(m) == 0 { - return fmt.Errorf("%w: manifest is empty", ErrInvalidManifest) - } - - return m.GetGroups().Validate() -} - -func (m Manifest) CheckAgainstDeployment(dgroups []dtypes.Group) error { - gspecs := make([]*dtypes.GroupSpec, 0, len(dgroups)) - - for _, dgroup := range dgroups { - gspec := dgroup.GroupSpec - gspecs = append(gspecs, &gspec) - } - - return m.CheckAgainstGSpecs(gspecs) -} - -func (m Manifest) CheckAgainstGSpecs(gspecs dtypes.GroupSpecs) error { - return m.GetGroups().CheckAgainstGSpecs(gspecs) -} - -// Version calculates the identifying deterministic hash for an SDL. -// Sha256 returns 32 byte sum of the SDL. 
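The Version method being removed here derives a deterministic manifest identifier from key-sorted JSON. A rough standard-library sketch of that idea, not part of this patch; sdk.SortJSON is approximated by a JSON round-trip and the names are illustrative:

// version_sketch.go — hash a canonical (key-sorted) JSON encoding with
// SHA-256. The real implementation uses sdk.SortJSON from the cosmos-sdk;
// the interface{} round-trip below is only an approximation of that step.
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

func versionOf(v interface{}) ([]byte, error) {
	data, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	// Round-trip through interface{} so object keys come back out sorted
	// (encoding/json always emits map keys in sorted order).
	var tmp interface{}
	if err := json.Unmarshal(data, &tmp); err != nil {
		return nil, err
	}
	sorted, err := json.Marshal(tmp)
	if err != nil {
		return nil, err
	}
	sum := sha256.Sum256(sorted)
	return sum[:], nil
}

func main() {
	type a struct {
		Name  string `json:"name"`
		Count int    `json:"count"`
	}
	type b struct {
		Count int    `json:"count"`
		Name  string `json:"name"`
	}
	ha, _ := versionOf(a{Name: "web", Count: 1})
	hb, _ := versionOf(b{Name: "web", Count: 1})
	fmt.Println(bytes.Equal(ha, hb)) // true — field order does not change the version
}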
-func (m Manifest) Version() ([]byte, error) { - data, err := json.Marshal(m) - if err != nil { - return nil, err - } - - sortedBytes, err := sdk.SortJSON(data) - if err != nil { - return nil, err - } - - sum := sha256.Sum256(sortedBytes) - - return sum[:], nil -} diff --git a/go/manifest/v2beta2/manifest_cross_validation_test.go b/go/manifest/v2beta2/manifest_cross_validation_test.go deleted file mode 100644 index a8b08051..00000000 --- a/go/manifest/v2beta2/manifest_cross_validation_test.go +++ /dev/null @@ -1,195 +0,0 @@ -package v2beta2 - -import ( - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/stretchr/testify/require" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - akashtypes "github.com/akash-network/akash-api/go/node/types/v1beta3" - tutil "github.com/akash-network/akash-api/go/testutil" - "github.com/akash-network/akash-api/go/testutil/v1beta3" -) - -const ( - validationPrefix = `^manifest cross-validation error: ` - groupPrefix = validationPrefix + `group ".+": ` - resourcesIDPrefix = groupPrefix + `resources ID \(\d+\): ` - servicePrefix = groupPrefix + `service ".+": ` - resourcesMismatchRegex = servicePrefix + `CPU|GPU|Memory|Storage resources mismatch for ID \d+$` - overUtilizedGroup = servicePrefix + `over-utilized replicas \(\d+\) > group spec resources count \(\d+\)$` - overUtilizedEndpoints = servicePrefix + `resources ID \(\d+\): over-utilized HTTP|PORT|IP endpoints$` - underUtilizedGroupResources = resourcesIDPrefix + `under-utilized \(\d+\) resources` - underUtilizedGroupEndpoints = resourcesIDPrefix + `under-utilized \(\d+\) HTTP|PORT|IP endpoints` -) - -func TestManifestWithEmptyDeployment(t *testing.T) { - m := simpleManifest(1) - deployment := make([]dtypes.Group, 0) - err := m.CheckAgainstDeployment(deployment) - require.Error(t, err) -} - -func simpleDeployment(t *testing.T, expose ServiceExposes, count uint32) []dtypes.Group { - deployment := make([]dtypes.Group, 1) - gid := testutil.GroupID(t) - resources := make(dtypes.ResourceUnits, 1) - resources[0] = dtypes.ResourceUnit{ - Resources: simpleResources(expose), - Count: count, - Price: sdk.NewInt64DecCoin(tutil.CoinDenom, 1), - } - deployment[0] = dtypes.Group{ - GroupID: gid, - State: 0, - GroupSpec: dtypes.GroupSpec{ - Name: nameOfTestGroup, - Requirements: akashtypes.PlacementRequirements{}, - Resources: resources, - }, - } - - return deployment -} - -func TestManifestWithDeployment(t *testing.T) { - m := simpleManifest(1) - deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) - err := m.CheckAgainstDeployment(deployment) - require.NoError(t, err) -} - -func TestManifestWithDeploymentMultipleCount(t *testing.T) { - addl := uint32(tutil.RandRangeInt(1, 20)) - m := simpleManifest(addl) - - deployment := simpleDeployment(t, m[0].Services[0].Expose, addl) - - err := m.CheckAgainstDeployment(deployment) - require.NoError(t, err) -} - -func TestManifestWithDeploymentMultiple(t *testing.T) { - cpu := int64(tutil.RandRangeInt(1024, 2000)) - storage := int64(tutil.RandRangeInt(2000, 3000)) - memory := int64(tutil.RandRangeInt(3001, 4000)) - - m := make(Manifest, 3) - m[0] = simpleManifest(1)[0] - m[0].Services[0].Resources.CPU.Units.Val = sdk.NewInt(cpu) - m[0].Name = "testgroup-2" - - m[1] = simpleManifest(1)[0] - m[1].Services[0].Resources.Storage[0].Quantity.Val = sdk.NewInt(storage) - m[1].Name = "testgroup-1" - m[1].Services[0].Expose[0].Hosts = []string{"host1.test"} - - m[2] = simpleManifest(1)[0] - m[2].Services[0].Resources.Memory.Quantity.Val = 
sdk.NewInt(memory) - m[2].Name = "testgroup-0" - m[2].Services[0].Expose[0].Hosts = []string{"host2.test"} - - deployment := make([]dtypes.Group, 3) - deployment[0] = simpleDeployment(t, m[0].Services[0].Expose, 1)[0] - deployment[0].GroupSpec.Resources[0].Memory.Quantity.Val = sdk.NewInt(memory) - deployment[0].GroupSpec.Name = "testgroup-0" - - deployment[1] = simpleDeployment(t, m[1].Services[0].Expose, 1)[0] - deployment[1].GroupSpec.Resources[0].Storage[0].Quantity.Val = sdk.NewInt(storage) - deployment[1].GroupSpec.Name = "testgroup-1" - - deployment[2] = simpleDeployment(t, m[2].Services[0].Expose, 1)[0] - deployment[2].GroupSpec.Resources[0].CPU.Units.Val = sdk.NewInt(cpu) - deployment[2].GroupSpec.Name = "testgroup-2" - - err := m.CheckAgainstDeployment(deployment) - - require.NoError(t, err) -} - -func TestManifestWithDeploymentCPUMismatch(t *testing.T) { - m := simpleManifest(1) - deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) - deployment[0].GroupSpec.Resources[0].CPU.Units.Val = sdk.NewInt(999) - err := m.CheckAgainstDeployment(deployment) - require.Error(t, err) - require.Regexp(t, resourcesMismatchRegex, err) -} - -func TestManifestWithDeploymentGPUMismatch(t *testing.T) { - m := simpleManifest(1) - deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) - deployment[0].GroupSpec.Resources[0].GPU.Units.Val = sdk.NewInt(200) - err := m.CheckAgainstDeployment(deployment) - require.Error(t, err) - require.Regexp(t, resourcesMismatchRegex, err) -} - -func TestManifestWithDeploymentMemoryMismatch(t *testing.T) { - m := simpleManifest(1) - deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) - deployment[0].GroupSpec.Resources[0].Memory.Quantity.Val = sdk.NewInt(99999) - err := m.CheckAgainstDeployment(deployment) - require.Error(t, err) - require.Regexp(t, resourcesMismatchRegex, err) -} - -func TestManifestWithDeploymentStorageMismatch(t *testing.T) { - m := simpleManifest(1) - deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) - deployment[0].GroupSpec.Resources[0].Storage[0].Quantity.Val = sdk.NewInt(99999) - err := m.CheckAgainstDeployment(deployment) - require.Error(t, err) - require.Regexp(t, resourcesMismatchRegex, err) -} - -func TestManifestWithDeploymentCountMismatch(t *testing.T) { - m := simpleManifest(1) - deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) - deployment[0].GroupSpec.Resources[0].Count++ - err := m.CheckAgainstDeployment(deployment) - require.Error(t, err) - require.Regexp(t, underUtilizedGroupResources, err) -} - -func TestManifestWithManifestGroupMismatch(t *testing.T) { - m := simpleManifest(1) - deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) - m[0].Services[0].Count++ - err := m.CheckAgainstDeployment(deployment) - require.Error(t, err) - require.Regexp(t, overUtilizedGroup, err) -} - -func TestManifestWithEndpointMismatchA(t *testing.T) { - m := simpleManifest(1) - - // Make this require an endpoint - m[0].Services[0].Expose[0] = ServiceExpose{ - Port: 2000, - ExternalPort: 0, - Proto: TCP, - Service: "", - Global: true, - Hosts: nil, - } - deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) - - // Remove an endpoint where the manifest calls for it - deployment[0].GroupSpec.Resources[0].Endpoints = akashtypes.Endpoints{} - - err := m.CheckAgainstDeployment(deployment) - require.Error(t, err) - require.Regexp(t, overUtilizedEndpoints, err) -} - -func TestManifestWithEndpointMismatchB(t *testing.T) { - m := simpleManifest(1) - deployment := simpleDeployment(t, 
m[0].Services[0].Expose, 1) - // Add an endpoint where the manifest doesn't call for it - deployment[0].GroupSpec.Resources[0].Endpoints = append(deployment[0].GroupSpec.Resources[0].Endpoints, akashtypes.Endpoint{}) - err := m.CheckAgainstDeployment(deployment) - require.Error(t, err) - require.Regexp(t, underUtilizedGroupEndpoints, err) -} diff --git a/go/manifest/v2beta2/manifest_test.go b/go/manifest/v2beta2/manifest_test.go deleted file mode 100644 index 18caf4ee..00000000 --- a/go/manifest/v2beta2/manifest_test.go +++ /dev/null @@ -1,469 +0,0 @@ -package v2beta2 - -import ( - "bytes" - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - akashtypes "github.com/akash-network/akash-api/go/node/types/v1beta3" - "github.com/akash-network/akash-api/go/testutil/v1beta3" -) - -const ( - nameOfTestService = "test-service" - nameOfTestGroup = "testGroup" -) - -var ( - randCPU1 = uint64(testutil.RandCPUUnits()) - randCPU2 = randCPU1 + 1 - randGPU1 = uint64(testutil.RandGPUUnits()) - randMemory = testutil.RandMemoryQuantity() - randStorage = testutil.RandStorageQuantity() -) - -var randUnits1 = akashtypes.Resources{ - ID: 1, - CPU: &akashtypes.CPU{ - Units: akashtypes.NewResourceValue(randCPU1), - }, - GPU: &akashtypes.GPU{ - Units: akashtypes.NewResourceValue(randGPU1), - }, - Memory: &akashtypes.Memory{ - Quantity: akashtypes.NewResourceValue(randMemory), - }, - Storage: akashtypes.Volumes{ - akashtypes.Storage{ - Quantity: akashtypes.NewResourceValue(randStorage), - }, - }, - Endpoints: akashtypes.Endpoints{}, -} - -var randUnits3 = akashtypes.Resources{ - ID: 1, - CPU: &akashtypes.CPU{ - Units: akashtypes.NewResourceValue(randCPU2), - }, - Memory: &akashtypes.Memory{ - Quantity: akashtypes.NewResourceValue(randMemory), - }, - Storage: akashtypes.Volumes{ - akashtypes.Storage{ - Quantity: akashtypes.NewResourceValue(randStorage), - }, - }, - Endpoints: akashtypes.Endpoints{}, -} - -func simpleResources(exposes ServiceExposes) akashtypes.Resources { - return akashtypes.Resources{ - ID: 1, - CPU: &akashtypes.CPU{ - Units: akashtypes.ResourceValue{ - Val: sdk.NewIntFromUint64(randCPU1), - }, - Attributes: nil, - }, - Memory: &akashtypes.Memory{ - Quantity: akashtypes.ResourceValue{ - Val: sdk.NewIntFromUint64(randMemory), - }, - Attributes: nil, - }, - GPU: &akashtypes.GPU{ - Units: akashtypes.ResourceValue{ - Val: sdk.NewIntFromUint64(randGPU1), - }, - Attributes: nil, - }, - Storage: akashtypes.Volumes{ - akashtypes.Storage{ - Name: "default", - Quantity: akashtypes.ResourceValue{ - Val: sdk.NewIntFromUint64(randStorage), - }, - }, - }, - Endpoints: exposes.GetEndpoints(), - } -} - -func TestNilManifestIsInvalid(t *testing.T) { - m := Manifest{} - err := m.Validate() - - require.Error(t, err) - require.Regexp(t, "^.*manifest is empty.*$", err) -} - -func simpleManifest(svcCount uint32) Manifest { - expose := make([]ServiceExpose, 1) - expose[0].Global = true - expose[0].Port = 80 - expose[0].Proto = TCP - expose[0].Hosts = make([]string, 1) - expose[0].Hosts[0] = "host.test" - - services := make([]Service, 1) - services[0] = Service{ - Name: nameOfTestService, - Image: "test/image:1.0", - Command: nil, - Args: nil, - Env: nil, - Resources: simpleResources(expose), - Count: svcCount, - Expose: expose, - } - - m := make(Manifest, 1) - m[0] = Group{ - Name: nameOfTestGroup, - Services: services, - } - - return m -} - -func 
TestSimpleManifestIsValid(t *testing.T) { - m := simpleManifest(1) - err := m.Validate() - require.NoError(t, err) -} - -func TestSimpleManifestInvalidResourcesID(t *testing.T) { - m := simpleManifest(1) - m[0].Services[0].Resources.ID = 0 - err := m.Validate() - require.Error(t, err) -} - -func TestManifestWithNoGlobalServicesIsInvalid(t *testing.T) { - m := simpleManifest(1) - m[0].Services[0].Expose[0].Global = false - err := m.Validate() - require.Error(t, err) - require.Regexp(t, "^.*zero global services.*$", err) -} - -func TestManifestWithBadServiceNameIsInvalid(t *testing.T) { - m := simpleManifest(1) - m[0].Services[0].Name = "a_bad_service_name" // should not contain underscores - err := m.Validate() - require.Error(t, err) - require.Regexp(t, "^.*name is invalid.*$", err) - - m[0].Services[0].Name = "a-name-" // should not end with dash - err = m.Validate() - require.Error(t, err) - require.Regexp(t, "^.*name is invalid.*$", err) -} - -func TestManifestWithServiceNameIsValid(t *testing.T) { - m := simpleManifest(1) - - m[0].Services[0].Name = "9aaa-bar" // does not allow starting with a number - err := m.Validate() - require.ErrorIs(t, err, ErrInvalidManifest) - require.Regexp(t, "^.*name is invalid.*$", err) -} - -func TestManifestWithDuplicateHostIsInvalid(t *testing.T) { - m := simpleManifest(1) - hosts := make([]string, 2) - const hostname = "a.test" - hosts[0] = hostname - hosts[1] = hostname - m[0].Services[0].Expose[0].Hosts = hosts - err := m.Validate() - require.Error(t, err) - require.Regexp(t, "^.*hostname.+is duplicated.*$", err) -} - -func TestManifestWithDashInHostname(t *testing.T) { - m := simpleManifest(1) - hosts := make([]string, 1) - hosts[0] = "a-test.com" - m[0].Services[0].Expose[0].Hosts = hosts - err := m.Validate() - require.NoError(t, err) -} - -func TestManifestWithBadHostIsInvalid(t *testing.T) { - m := simpleManifest(1) - hosts := make([]string, 2) - hosts[0] = "bob.test" // valid - hosts[1] = "-bob" // invalid - m[0].Services[0].Expose[0].Hosts = hosts - err := m.Validate() - require.Error(t, err) - require.Regexp(t, "^.*invalid hostname.*$", err) -} - -func TestManifestWithLongHostIsInvalid(t *testing.T) { - m := simpleManifest(1) - hosts := make([]string, 1) - buf := &bytes.Buffer{} - for i := 0; i != 255; i++ { - _, err := buf.WriteRune('a') - require.NoError(t, err) - } - _, err := buf.WriteString(".com") - require.NoError(t, err) - - hosts[0] = buf.String() - m[0].Services[0].Expose[0].Hosts = hosts - err = m.Validate() - require.Error(t, err) - require.Regexp(t, "^.*invalid hostname.*$", err) -} - -func TestManifestWithDuplicateGroupIsInvalid(t *testing.T) { - mDuplicate := make(Manifest, 2) - mDuplicate[0] = simpleManifest(1)[0] - mDuplicate[1] = simpleManifest(1)[0] - mDuplicate[1].Services[0].Expose[0].Hosts[0] = "anotherhost.test" - err := mDuplicate.Validate() - require.Error(t, err) - require.Regexp(t, "^.*duplicate group.*$", err) -} - -func TestManifestWithNoServicesInvalid(t *testing.T) { - m := simpleManifest(1) - m[0].Services = nil - err := m.Validate() - require.Error(t, err) - require.Regexp(t, "^.*contains no services.*$", err) -} - -func TestManifestWithEmptyServiceNameInvalid(t *testing.T) { - m := simpleManifest(1) - m[0].Services[0].Name = "" - err := m.Validate() - require.Error(t, err) - require.Regexp(t, "^.*service name is empty.*$", err) -} - -func TestManifestWithEmptyImageNameInvalid(t *testing.T) { - m := simpleManifest(1) - m[0].Services[0].Image = "" - err := m.Validate() - require.Error(t, err) - 
require.Regexp(t, "^.*service.+has empty image name.*$", err) -} - -func TestManifestWithEmptyEnvValueIsValid(t *testing.T) { - m := simpleManifest(1) - envVars := make([]string, 1) - envVars[0] = "FOO=" // sets FOO to empty string - m[0].Services[0].Env = envVars - err := m.Validate() - require.NoError(t, err) -} - -func TestManifestWithEmptyEnvNameIsInvalid(t *testing.T) { - m := simpleManifest(1) - envVars := make([]string, 1) - envVars[0] = "=FOO" // invalid - m[0].Services[0].Env = envVars - err := m.Validate() - require.Error(t, err) - require.Regexp(t, `^.*var\. with an empty name.*$`, err) -} - -func TestManifestWithBadEnvNameIsInvalid(t *testing.T) { - m := simpleManifest(1) - envVars := make([]string, 1) - envVars[0] = "9VAR=FOO" // invalid because it starts with a digit - m[0].Services[0].Env = envVars - err := m.Validate() - require.Error(t, err) - require.Regexp(t, `^.*var\. with an invalid name.*$`, err) -} - -func TestManifestServiceUnknownProtocolIsInvalid(t *testing.T) { - m := simpleManifest(1) - m[0].Services[0].Expose[0].Proto = "ICMP" - err := m.Validate() - require.Error(t, err) - require.Regexp(t, `^.*protocol .+ unknown.*$`, err) -} - -func Test_ValidateManifest(t *testing.T) { - expose := make([]ServiceExpose, 1) - expose[0].Global = true - expose[0].Port = 80 - expose[0].Proto = TCP - expose[0].Hosts = make([]string, 1) - expose[0].Hosts[0] = "host.test" - - tests := []struct { - name string - ok bool - mani Manifest - dgroups []*dtypes.GroupSpec - }{ - { - name: "empty", - ok: false, - }, - - { - name: "single", - ok: true, - mani: []Group{ - { - Name: "foo", - Services: []Service{ - { - Name: "svc1", - Image: "test", - Resources: simpleResources(expose), - Count: 3, - Expose: expose, - }, - }, - }, - }, - dgroups: []*dtypes.GroupSpec{ - { - Name: "foo", - Resources: dtypes.ResourceUnits{ - { - Resources: simpleResources(expose), - Count: 3, - }, - }, - }, - }, - }, - - { - name: "multi-mgroup", - ok: true, - mani: []Group{ - { - Name: "foo", - Services: []Service{ - { - Name: "svc1", - Image: "test", - Resources: simpleResources(expose), - Count: 1, - Expose: expose, - }, - { - Name: "svc1", - Image: "test", - Resources: simpleResources(expose), - Count: 2, - }, - }, - }, - }, - dgroups: []*dtypes.GroupSpec{ - { - Name: "foo", - Resources: dtypes.ResourceUnits{ - { - Resources: simpleResources(expose), - Count: 3, - }, - }, - }, - }, - }, - - { - name: "mismatch-name", - ok: false, - mani: []Group{ - { - Name: "foo-bad", - Services: []Service{ - { - Name: "svc1", - Image: "test", - Resources: randUnits1, - Count: 3, - }, - }, - }, - }, - dgroups: []*dtypes.GroupSpec{ - { - Name: "foo", - Resources: dtypes.ResourceUnits{ - { - Resources: randUnits1, - Count: 3, - }, - }, - }, - }, - }, - - { - name: "mismatch-cpu", - ok: false, - mani: []Group{ - { - Name: "foo", - Services: []Service{ - { - Name: "svc1", - Image: "test", - Resources: randUnits3, - Count: 3, - }, - }, - }, - }, - dgroups: []*dtypes.GroupSpec{ - { - Name: "foo", - Resources: dtypes.ResourceUnits{ - { - Resources: randUnits1, - Count: 3, - }, - }, - }, - }, - }, - - { - name: "mismatch-group-count", - ok: false, - mani: []Group{ - { - Name: "foo", - Services: []Service{ - { - Name: "svc1", - Image: "test", - Resources: randUnits3, - Count: 3, - }, - }, - }, - }, - dgroups: []*dtypes.GroupSpec{}, - }, - } - - for _, test := range tests { - err := test.mani.CheckAgainstGSpecs(test.dgroups) - if test.ok { - assert.NoError(t, err, test.name) - } else { - assert.Error(t, err, test.name) - } - } -} diff 
--git a/go/manifest/v2beta2/parse.go b/go/manifest/v2beta2/parse.go deleted file mode 100644 index e6c46a01..00000000 --- a/go/manifest/v2beta2/parse.go +++ /dev/null @@ -1,65 +0,0 @@ -package v2beta2 - -import ( - "errors" - "fmt" - "strings" - - corev1 "k8s.io/api/core/v1" -) - -var ( - errUnknownServiceProtocol = errors.New("unknown service protocol") - ErrUnsupportedServiceProtocol = errors.New("unsupported service protocol") -) - -type ServiceProtocol string - -const ( - TCP = ServiceProtocol("TCP") - UDP = ServiceProtocol("UDP") -) - -func (sp ServiceProtocol) ToString() string { - return string(sp) -} - -func (sp ServiceProtocol) ToKube() (corev1.Protocol, error) { - switch sp { - case TCP: - return corev1.ProtocolTCP, nil - case UDP: - return corev1.ProtocolUDP, nil - } - - return corev1.Protocol(""), fmt.Errorf("%w: %v", errUnknownServiceProtocol, sp) -} - -func ServiceProtocolFromKube(proto corev1.Protocol) (ServiceProtocol, error) { - switch proto { - case corev1.ProtocolTCP: - return TCP, nil - case corev1.ProtocolUDP: - return UDP, nil - } - - return ServiceProtocol(""), fmt.Errorf("%w: %v", errUnknownServiceProtocol, proto) -} - -func ParseServiceProtocol(input string) (ServiceProtocol, error) { - var result ServiceProtocol - - // This is not a case-sensitive parse, so make all input uppercase - input = strings.ToUpper(input) - - switch input { - case "TCP", "": // The empty string (no input) implies TCP - result = TCP - case "UDP": - result = UDP - default: - return result, ErrUnsupportedServiceProtocol - } - - return result, nil -} diff --git a/go/manifest/v2beta2/service.go b/go/manifest/v2beta2/service.go deleted file mode 100644 index 454e858a..00000000 --- a/go/manifest/v2beta2/service.go +++ /dev/null @@ -1,108 +0,0 @@ -package v2beta2 - -import ( - "fmt" - "sort" - "strings" - - k8svalidation "k8s.io/apimachinery/pkg/util/validation" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" -) - -func (s *Service) validate(helper *validateManifestGroupsHelper) error { - if len(s.Name) == 0 { - return fmt.Errorf("%w: service name is empty", ErrInvalidManifest) - } - - serviceNameValid := serviceNameValidationRegex.MatchString(s.Name) - if !serviceNameValid { - return fmt.Errorf("%w: service %q name is invalid", ErrInvalidManifest, s.Name) - } - - if len(s.Image) == 0 { - return fmt.Errorf("%w: service %q has empty image name", ErrInvalidManifest, s.Name) - } - - if err := s.Resources.Validate(); err != nil { - return err - } - - for _, envVar := range s.Env { - idx := strings.Index(envVar, "=") - if idx == 0 { - return fmt.Errorf("%w: service %q defines an env. var. with an empty name", ErrInvalidManifest, s.Name) - } - - var envVarName string - if idx > 0 { - envVarName = envVar[0:idx] - } else { - envVarName = envVar - } - - if 0 != len(k8svalidation.IsEnvVarName(envVarName)) { - return fmt.Errorf("%w: service %q defines an env. var. 
with an invalid name %q", ErrInvalidManifest, s.Name, envVarName) - } - } - - if !sort.IsSorted(s.Expose) { - return fmt.Errorf("%w: service %q: expose is not sorted", ErrInvalidManifest, s.Name) - } - - for _, serviceExpose := range s.Expose { - if err := serviceExpose.validate(helper); err != nil { - return fmt.Errorf("%w: service %q: %w", ErrInvalidManifest, s.Name, err) - } - } - - return nil -} - -func (s *Service) checkAgainstGSpec(gspec *groupSpec) error { - // find resource units by id - var gRes *dtypes.ResourceUnit - - for idx := range gspec.gs.Resources { - if s.Resources.ID == gspec.gs.Resources[idx].ID { - gRes = &gspec.gs.Resources[idx] - break - } - } - - if gRes == nil { - return fmt.Errorf("service %q: not found deployment group resources with ID = %d", s.Name, s.Resources.ID) - } - - if s.Count > gRes.Count { - return fmt.Errorf("service %q: over-utilized replicas (%d) > group spec resources count (%d)", - s.Name, s.Count, gRes.Count) - } - - // do not compare resources directly - if !s.Resources.CPU.Equal(gRes.CPU) { - return fmt.Errorf("service %q: CPU resources mismatch for ID %d", s.Name, s.Resources.ID) - } - - if !s.Resources.GPU.Equal(gRes.GPU) { - return fmt.Errorf("service %q: GPU resources mismatch for ID %d", s.Name, s.Resources.ID) - } - - if !s.Resources.Memory.Equal(gRes.Memory) { - return fmt.Errorf("service %q: Memory resources mismatch for ID %d", s.Name, s.Resources.ID) - } - - if !s.Resources.Storage.Equal(gRes.Storage) { - return fmt.Errorf("service %q: Storage resources mismatch for ID %d", s.Name, s.Resources.ID) - } - - for _, expose := range s.Expose { - if err := expose.checkAgainstResources(gRes, gspec.endpoints); err != nil { - return fmt.Errorf("service %q: resource ID %d: %w", s.Name, gRes.ID, err) - } - } - - gRes.Count -= s.Count - - return nil -} diff --git a/go/manifest/v2beta2/service.pb.go b/go/manifest/v2beta2/service.pb.go deleted file mode 100644 index 165d7f9d..00000000 --- a/go/manifest/v2beta2/service.pb.go +++ /dev/null @@ -1,1679 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/manifest/v2beta2/service.proto - -package v2beta2 - -import ( - fmt "fmt" - v1beta3 "github.com/akash-network/akash-api/go/node/types/v1beta3" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
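The deleted Service.checkAgainstGSpec above is the core of manifest/deployment cross-validation: resources are matched by ID, replica counts are budgeted, and the caller's group spec is mutated as services consume it. A trimmed standalone sketch with simplified stand-in types, not the real akash-api types:

// gspec_check_sketch.go — replica accounting in the style of the deleted
// Service.checkAgainstGSpec: a service must reference a group-spec resource
// by ID, may not ask for more replicas than remain, and uses up what it claims.
package main

import (
	"errors"
	"fmt"
)

type resourceUnit struct {
	ID    uint32
	CPU   uint64 // stand-in for the full CPU/GPU/Memory/Storage comparison
	Count uint32
}

type service struct {
	Name       string
	ResourceID uint32
	CPU        uint64
	Count      uint32
}

// checkAgainstSpec mirrors the deleted logic: it mutates spec so that later
// services draw from whatever replica count is left.
func checkAgainstSpec(s service, spec []resourceUnit) error {
	var res *resourceUnit
	for i := range spec {
		if spec[i].ID == s.ResourceID {
			res = &spec[i]
			break
		}
	}
	if res == nil {
		return fmt.Errorf("service %q: no group resources with ID = %d", s.Name, s.ResourceID)
	}
	if s.Count > res.Count {
		return fmt.Errorf("service %q: over-utilized replicas (%d) > group spec resources count (%d)",
			s.Name, s.Count, res.Count)
	}
	if s.CPU != res.CPU {
		return errors.New("CPU resources mismatch") // the real code compares CPU/GPU/Memory/Storage separately
	}
	res.Count -= s.Count
	return nil
}

func main() {
	spec := []resourceUnit{{ID: 1, CPU: 1000, Count: 3}}
	fmt.Println(checkAgainstSpec(service{"web", 1, 1000, 2}, spec)) // <nil>
	fmt.Println(checkAgainstSpec(service{"db", 1, 1000, 2}, spec))  // error: only 1 replica left
}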
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// StorageParams -type StorageParams struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` - Mount string `protobuf:"bytes,2,opt,name=mount,proto3" json:"mount" yaml:"mount"` - ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"readOnly" yaml:"readOnly"` -} - -func (m *StorageParams) Reset() { *m = StorageParams{} } -func (*StorageParams) ProtoMessage() {} -func (*StorageParams) Descriptor() ([]byte, []int) { - return fileDescriptor_de124a4cb11edaa1, []int{0} -} -func (m *StorageParams) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StorageParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_StorageParams.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *StorageParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_StorageParams.Merge(m, src) -} -func (m *StorageParams) XXX_Size() int { - return m.Size() -} -func (m *StorageParams) XXX_DiscardUnknown() { - xxx_messageInfo_StorageParams.DiscardUnknown(m) -} - -var xxx_messageInfo_StorageParams proto.InternalMessageInfo - -func (m *StorageParams) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *StorageParams) GetMount() string { - if m != nil { - return m.Mount - } - return "" -} - -func (m *StorageParams) GetReadOnly() bool { - if m != nil { - return m.ReadOnly - } - return false -} - -// ServiceParams -type ServiceParams struct { - Storage []StorageParams `protobuf:"bytes,1,rep,name=storage,proto3" json:"storage" yaml:"storage"` -} - -func (m *ServiceParams) Reset() { *m = ServiceParams{} } -func (*ServiceParams) ProtoMessage() {} -func (*ServiceParams) Descriptor() ([]byte, []int) { - return fileDescriptor_de124a4cb11edaa1, []int{1} -} -func (m *ServiceParams) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ServiceParams.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ServiceParams) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceParams.Merge(m, src) -} -func (m *ServiceParams) XXX_Size() int { - return m.Size() -} -func (m *ServiceParams) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceParams.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceParams proto.InternalMessageInfo - -func (m *ServiceParams) GetStorage() []StorageParams { - if m != nil { - return m.Storage - } - return nil -} - -// Credentials to fetch image from registry -type ServiceImageCredentials struct { - Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host" yaml:"host"` - Email string `protobuf:"bytes,2,opt,name=email,proto3" json:"email" yaml:"email"` - Username string `protobuf:"bytes,3,opt,name=username,proto3" json:"username" yaml:"username"` - Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password" yaml:"password"` -} - -func (m *ServiceImageCredentials) Reset() { *m = ServiceImageCredentials{} } -func (*ServiceImageCredentials) ProtoMessage() {} -func (*ServiceImageCredentials) Descriptor() ([]byte, []int) { - return fileDescriptor_de124a4cb11edaa1, []int{2} -} -func (m 
*ServiceImageCredentials) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceImageCredentials) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ServiceImageCredentials.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ServiceImageCredentials) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceImageCredentials.Merge(m, src) -} -func (m *ServiceImageCredentials) XXX_Size() int { - return m.Size() -} -func (m *ServiceImageCredentials) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceImageCredentials.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceImageCredentials proto.InternalMessageInfo - -func (m *ServiceImageCredentials) GetHost() string { - if m != nil { - return m.Host - } - return "" -} - -func (m *ServiceImageCredentials) GetEmail() string { - if m != nil { - return m.Email - } - return "" -} - -func (m *ServiceImageCredentials) GetUsername() string { - if m != nil { - return m.Username - } - return "" -} - -func (m *ServiceImageCredentials) GetPassword() string { - if m != nil { - return m.Password - } - return "" -} - -// Service stores name, image, args, env, unit, count and expose list of service -type Service struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` - Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image" yaml:"image"` - Command []string `protobuf:"bytes,3,rep,name=command,proto3" json:"command" yaml:"command"` - Args []string `protobuf:"bytes,4,rep,name=args,proto3" json:"args" yaml:"args"` - Env []string `protobuf:"bytes,5,rep,name=env,proto3" json:"env" yaml:"env"` - Resources v1beta3.Resources `protobuf:"bytes,6,opt,name=resources,proto3" json:"resources" yaml:"resources"` - Count uint32 `protobuf:"varint,7,opt,name=count,proto3" json:"count" yaml:"count"` - Expose ServiceExposes `protobuf:"bytes,8,rep,name=expose,proto3,castrepeated=ServiceExposes" json:"expose" yaml:"expose"` - Params *ServiceParams `protobuf:"bytes,9,opt,name=params,proto3" json:"params" yaml:"params"` - Credentials *ServiceImageCredentials `protobuf:"bytes,10,opt,name=credentials,proto3" json:"credentials" yaml:"credentials"` -} - -func (m *Service) Reset() { *m = Service{} } -func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_de124a4cb11edaa1, []int{3} -} -func (m *Service) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Service.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Service) XXX_Merge(src proto.Message) { - xxx_messageInfo_Service.Merge(m, src) -} -func (m *Service) XXX_Size() int { - return m.Size() -} -func (m *Service) XXX_DiscardUnknown() { - xxx_messageInfo_Service.DiscardUnknown(m) -} - -var xxx_messageInfo_Service proto.InternalMessageInfo - -func (m *Service) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Service) GetImage() string { - if m != nil { - return m.Image - } - return "" -} - -func (m *Service) GetCommand() []string { - if m != nil { - return m.Command - } - return nil -} - -func (m *Service) GetArgs() []string { - if m != nil { - return m.Args - } - return nil -} - 
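All of the generated Get* accessors in this file share the same nil-receiver guard; a minimal standalone illustration of why the guard is there (simplified stand-in type, not the real message):

// getter_sketch.go — the nil-receiver guard used by every generated Get*
// method above: calling a getter on a nil pointer returns the zero value
// instead of panicking, so optional nested messages can be read safely.
package main

import "fmt"

type ServiceParams struct {
	Storage []string // simplified stand-in for the real []StorageParams
}

// GetStorage follows the generated pattern: guard the nil receiver.
func (m *ServiceParams) GetStorage() []string {
	if m != nil {
		return m.Storage
	}
	return nil
}

func main() {
	var p *ServiceParams               // e.g. Service.Params left unset
	fmt.Println(p.GetStorage() == nil) // true — no panic, chained access stays safe
}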
-func (m *Service) GetEnv() []string { - if m != nil { - return m.Env - } - return nil -} - -func (m *Service) GetResources() v1beta3.Resources { - if m != nil { - return m.Resources - } - return v1beta3.Resources{} -} - -func (m *Service) GetCount() uint32 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *Service) GetExpose() ServiceExposes { - if m != nil { - return m.Expose - } - return nil -} - -func (m *Service) GetParams() *ServiceParams { - if m != nil { - return m.Params - } - return nil -} - -func (m *Service) GetCredentials() *ServiceImageCredentials { - if m != nil { - return m.Credentials - } - return nil -} - -func init() { - proto.RegisterType((*StorageParams)(nil), "akash.manifest.v2beta2.StorageParams") - proto.RegisterType((*ServiceParams)(nil), "akash.manifest.v2beta2.ServiceParams") - proto.RegisterType((*ServiceImageCredentials)(nil), "akash.manifest.v2beta2.ServiceImageCredentials") - proto.RegisterType((*Service)(nil), "akash.manifest.v2beta2.Service") -} - -func init() { - proto.RegisterFile("akash/manifest/v2beta2/service.proto", fileDescriptor_de124a4cb11edaa1) -} - -var fileDescriptor_de124a4cb11edaa1 = []byte{ - // 713 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0x8d, 0xbf, 0xb4, 0xf9, 0x99, 0x7c, 0x29, 0xc8, 0x42, 0xd4, 0xad, 0x54, 0x4f, 0x18, 0x51, - 0x29, 0x50, 0x61, 0xab, 0xa9, 0x04, 0x12, 0xb0, 0x32, 0x62, 0xc1, 0x0a, 0x34, 0xdd, 0x20, 0x16, - 0xa0, 0x89, 0x33, 0xb8, 0x56, 0x63, 0x4f, 0xe4, 0x71, 0x52, 0x2a, 0xb1, 0xe0, 0x11, 0x78, 0x0e, - 0x78, 0x91, 0x2e, 0xbb, 0xec, 0x6a, 0x0a, 0xe9, 0x2e, 0x3b, 0xfc, 0x04, 0x68, 0x7e, 0x5c, 0xa7, - 0x80, 0xa8, 0x58, 0x25, 0xe7, 0xdc, 0x73, 0xe7, 0xde, 0x39, 0x73, 0xaf, 0xc1, 0x5d, 0x72, 0x48, - 0xf8, 0x81, 0x9f, 0x90, 0x34, 0x7e, 0x4f, 0x79, 0xee, 0xcf, 0x06, 0x43, 0x9a, 0x93, 0x81, 0xcf, - 0x69, 0x36, 0x8b, 0x43, 0xea, 0x4d, 0x32, 0x96, 0x33, 0xfb, 0xb6, 0x52, 0x79, 0xa5, 0xca, 0x33, - 0xaa, 0xcd, 0x5b, 0x11, 0x8b, 0x98, 0x92, 0xf8, 0xf2, 0x9f, 0x56, 0x6f, 0xde, 0xff, 0xfb, 0x99, - 0xf4, 0xc3, 0x84, 0x71, 0x73, 0xf2, 0x26, 0xd2, 0xda, 0x21, 0xe1, 0xd4, 0x9f, 0xed, 0x4a, 0xdd, - 0x9e, 0x9f, 0x51, 0xce, 0xa6, 0x59, 0x48, 0xb9, 0xd6, 0xa0, 0xaf, 0x16, 0xe8, 0xee, 0xe7, 0x2c, - 0x23, 0x11, 0x7d, 0x45, 0x32, 0x92, 0x70, 0x7b, 0x07, 0xac, 0xa4, 0x24, 0xa1, 0x8e, 0xd5, 0xb3, - 0xfa, 0xed, 0x60, 0x7d, 0x21, 0xa0, 0xc2, 0x85, 0x80, 0x9d, 0x63, 0x92, 0x8c, 0x1f, 0x23, 0x89, - 0x10, 0x56, 0xa4, 0xed, 0x83, 0xd5, 0x84, 0x4d, 0xd3, 0xdc, 0xf9, 0x4f, 0xa9, 0x37, 0x16, 0x02, - 0x6a, 0xa2, 0x10, 0xf0, 0x7f, 0x2d, 0x57, 0x10, 0x61, 0x4d, 0xdb, 0x4f, 0x41, 0x3b, 0xa3, 0x64, - 0xf4, 0x8e, 0xa5, 0xe3, 0x63, 0xa7, 0xde, 0xb3, 0xfa, 0xad, 0x00, 0x2e, 0x04, 0x6c, 0x49, 0xf2, - 0x65, 0x3a, 0x3e, 0x2e, 0x04, 0xbc, 0xa1, 0xf3, 0x4a, 0x06, 0xe1, 0xcb, 0x20, 0xe2, 0xa0, 0xbb, - 0xaf, 0x2f, 0x6a, 0x9a, 0x1d, 0x82, 0x26, 0xd7, 0xdd, 0x3b, 0x56, 0xaf, 0xde, 0xef, 0x0c, 0xb6, - 0xbd, 0x3f, 0xdb, 0xe9, 0x5d, 0xb9, 0x64, 0x70, 0xe7, 0x44, 0xc0, 0xda, 0x42, 0xc0, 0x32, 0xbb, - 0x10, 0x70, 0x4d, 0x97, 0x35, 0x04, 0xc2, 0x65, 0x08, 0xfd, 0xb0, 0xc0, 0xba, 0xa9, 0xfa, 0x22, - 0x21, 0x11, 0x7d, 0x96, 0xd1, 0x11, 0x4d, 0xf3, 0x98, 0x8c, 0x95, 0x59, 0x07, 0x8c, 0xe7, 0xcb, - 0x66, 0x49, 0x5c, 0x99, 0x25, 0x11, 0xc2, 0x8a, 0x94, 0x66, 0xd1, 0x84, 0xc4, 0xe3, 0x65, 0xb3, - 0x14, 0x51, 0x99, 0xa5, 0x20, 0xc2, 0x9a, 0xb6, 0x9f, 0x80, 0xd6, 0x94, 0xd3, 0x4c, 0x3d, 0x47, - 0x5d, 0xe5, 0x28, 0xaf, 0x4a, 0xae, 0xf2, 0xaa, 0x64, 0x10, 0xbe, 0x0c, 0xca, 
0xe4, 0x09, 0xe1, - 0xfc, 0x88, 0x65, 0x23, 0x67, 0xa5, 0x4a, 0x2e, 0xb9, 0x2a, 0xb9, 0x64, 0x10, 0xbe, 0x0c, 0xa2, - 0xf3, 0x55, 0xd0, 0x34, 0x77, 0xfe, 0xe7, 0x81, 0x88, 0xa5, 0x49, 0xcb, 0x77, 0x54, 0x44, 0x75, - 0x47, 0x05, 0x11, 0xd6, 0xb4, 0xfd, 0x08, 0x34, 0x43, 0x96, 0x24, 0x24, 0x1d, 0x39, 0xf5, 0x5e, - 0xbd, 0xdf, 0x0e, 0xb6, 0xe4, 0xb3, 0x18, 0xaa, 0x7a, 0x16, 0x43, 0x20, 0x5c, 0x86, 0x64, 0x5b, - 0x24, 0x8b, 0xb8, 0xb3, 0xa2, 0xb2, 0x54, 0x5b, 0x12, 0x57, 0x6d, 0x49, 0x84, 0xb0, 0x22, 0xed, - 0x1d, 0x50, 0xa7, 0xe9, 0xcc, 0x59, 0x55, 0xda, 0x8d, 0x13, 0x01, 0xad, 0x85, 0x80, 0x92, 0x2a, - 0x04, 0x04, 0xc6, 0xfa, 0x74, 0x86, 0xb0, 0xa4, 0xec, 0xa1, 0x9c, 0x51, 0xb3, 0x26, 0x4e, 0xa3, - 0x67, 0xf5, 0x3b, 0x83, 0x2d, 0x33, 0x56, 0x72, 0x97, 0x3c, 0xb3, 0x4b, 0x1e, 0x2e, 0x45, 0xc1, - 0xb6, 0x19, 0xa7, 0x2a, 0xaf, 0x10, 0xf0, 0x66, 0x39, 0xc7, 0x86, 0x42, 0xb8, 0x0a, 0x4b, 0x9f, - 0x42, 0xb5, 0x38, 0xcd, 0x9e, 0xd5, 0xef, 0x6a, 0x9f, 0xc2, 0xab, 0x8b, 0x13, 0x9a, 0xc5, 0x51, - 0xbf, 0xf6, 0x04, 0x34, 0xf4, 0x72, 0x3b, 0xad, 0x6b, 0x06, 0x5d, 0x3f, 0xdb, 0x73, 0x25, 0x0e, - 0x76, 0x4d, 0x67, 0x26, 0xb9, 0x10, 0xb0, 0x6b, 0xae, 0xab, 0x30, 0xfa, 0x72, 0x0e, 0xd7, 0xae, - 0x64, 0x70, 0x6c, 0xa4, 0xf6, 0x5b, 0xd0, 0x98, 0xa8, 0x6d, 0x71, 0xda, 0xca, 0x83, 0xeb, 0x2a, - 0x9a, 0xd5, 0x82, 0xc6, 0x5d, 0x93, 0x5c, 0x55, 0xd4, 0x18, 0x61, 0x13, 0xb0, 0x3f, 0x82, 0x4e, - 0x58, 0xad, 0x92, 0x03, 0x54, 0x11, 0xff, 0x9a, 0x22, 0xbf, 0x6e, 0x60, 0x70, 0xcf, 0x94, 0x5b, - 0x3e, 0xab, 0x10, 0xd0, 0x36, 0x1e, 0x56, 0x24, 0xc2, 0xcb, 0x92, 0xe0, 0xf5, 0xd9, 0x77, 0xb7, - 0xf6, 0x69, 0xee, 0x5a, 0x27, 0x73, 0xd7, 0x3a, 0x9d, 0xbb, 0xd6, 0xb7, 0xb9, 0x6b, 0x7d, 0xbe, - 0x70, 0x6b, 0xa7, 0x17, 0x6e, 0xed, 0xec, 0xc2, 0xad, 0xbd, 0x79, 0x18, 0xc5, 0xf9, 0xc1, 0x74, - 0xe8, 0x85, 0x2c, 0xf1, 0x55, 0x53, 0x0f, 0x52, 0x9a, 0x1f, 0xb1, 0xec, 0xd0, 0x20, 0x32, 0x89, - 0xfd, 0x88, 0xfd, 0xf6, 0x29, 0x1e, 0x36, 0xd4, 0x97, 0x75, 0xef, 0x67, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x11, 0xd3, 0x4b, 0xa0, 0xff, 0x05, 0x00, 0x00, -} - -func (m *StorageParams) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StorageParams) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StorageParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ReadOnly { - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if len(m.Mount) > 0 { - i -= len(m.Mount) - copy(dAtA[i:], m.Mount) - i = encodeVarintService(dAtA, i, uint64(len(m.Mount))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintService(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ServiceParams) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceParams) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Storage) > 0 { - for iNdEx := len(m.Storage) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.Storage[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *ServiceImageCredentials) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceImageCredentials) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceImageCredentials) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Password) > 0 { - i -= len(m.Password) - copy(dAtA[i:], m.Password) - i = encodeVarintService(dAtA, i, uint64(len(m.Password))) - i-- - dAtA[i] = 0x22 - } - if len(m.Username) > 0 { - i -= len(m.Username) - copy(dAtA[i:], m.Username) - i = encodeVarintService(dAtA, i, uint64(len(m.Username))) - i-- - dAtA[i] = 0x1a - } - if len(m.Email) > 0 { - i -= len(m.Email) - copy(dAtA[i:], m.Email) - i = encodeVarintService(dAtA, i, uint64(len(m.Email))) - i-- - dAtA[i] = 0x12 - } - if len(m.Host) > 0 { - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarintService(dAtA, i, uint64(len(m.Host))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Service) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Service) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Service) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Credentials != nil { - { - size, err := m.Credentials.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - if m.Params != nil { - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if len(m.Expose) > 0 { - for iNdEx := len(m.Expose) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Expose[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if m.Count != 0 { - i = encodeVarintService(dAtA, i, uint64(m.Count)) - i-- - dAtA[i] = 0x38 - } - { - size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintService(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - if len(m.Env) > 0 { - for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Env[iNdEx]) - copy(dAtA[i:], m.Env[iNdEx]) - i = encodeVarintService(dAtA, i, uint64(len(m.Env[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if len(m.Args) > 0 { - for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Args[iNdEx]) - copy(dAtA[i:], m.Args[iNdEx]) - i = encodeVarintService(dAtA, i, uint64(len(m.Args[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Command) > 0 { - for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Command[iNdEx]) - copy(dAtA[i:], m.Command[iNdEx]) - i = encodeVarintService(dAtA, i, uint64(len(m.Command[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if 
len(m.Image) > 0 { - i -= len(m.Image) - copy(dAtA[i:], m.Image) - i = encodeVarintService(dAtA, i, uint64(len(m.Image))) - i-- - dAtA[i] = 0x12 - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintService(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintService(dAtA []byte, offset int, v uint64) int { - offset -= sovService(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *StorageParams) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.Mount) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - if m.ReadOnly { - n += 2 - } - return n -} - -func (m *ServiceParams) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Storage) > 0 { - for _, e := range m.Storage { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - return n -} - -func (m *ServiceImageCredentials) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Host) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.Email) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.Username) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.Password) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - return n -} - -func (m *Service) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - l = len(m.Image) - if l > 0 { - n += 1 + l + sovService(uint64(l)) - } - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovService(uint64(l)) - } - } - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sovService(uint64(l)) - } - } - if len(m.Env) > 0 { - for _, s := range m.Env { - l = len(s) - n += 1 + l + sovService(uint64(l)) - } - } - l = m.Resources.Size() - n += 1 + l + sovService(uint64(l)) - if m.Count != 0 { - n += 1 + sovService(uint64(m.Count)) - } - if len(m.Expose) > 0 { - for _, e := range m.Expose { - l = e.Size() - n += 1 + l + sovService(uint64(l)) - } - } - if m.Params != nil { - l = m.Params.Size() - n += 1 + l + sovService(uint64(l)) - } - if m.Credentials != nil { - l = m.Credentials.Size() - n += 1 + l + sovService(uint64(l)) - } - return n -} - -func sovService(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozService(x uint64) (n int) { - return sovService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *StorageParams) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StorageParams{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Mount:` + fmt.Sprintf("%v", this.Mount) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceParams) String() string { - if this == nil { - return "nil" - } - repeatedStringForStorage := "[]StorageParams{" - for _, f := range this.Storage { - repeatedStringForStorage += strings.Replace(strings.Replace(f.String(), "StorageParams", "StorageParams", 1), `&`, ``, 1) + "," - } - repeatedStringForStorage += "}" - s := strings.Join([]string{`&ServiceParams{`, - `Storage:` + repeatedStringForStorage + `,`, - `}`, - }, "") - return s -} -func (this *ServiceImageCredentials) String() string { - if this == nil { - return "nil" - } 
- s := strings.Join([]string{`&ServiceImageCredentials{`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `Email:` + fmt.Sprintf("%v", this.Email) + `,`, - `Username:` + fmt.Sprintf("%v", this.Username) + `,`, - `Password:` + fmt.Sprintf("%v", this.Password) + `,`, - `}`, - }, "") - return s -} -func (this *Service) String() string { - if this == nil { - return "nil" - } - repeatedStringForExpose := "[]ServiceExpose{" - for _, f := range this.Expose { - repeatedStringForExpose += fmt.Sprintf("%v", f) + "," - } - repeatedStringForExpose += "}" - s := strings.Join([]string{`&Service{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `Args:` + fmt.Sprintf("%v", this.Args) + `,`, - `Env:` + fmt.Sprintf("%v", this.Env) + `,`, - `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resources", "v1beta3.Resources", 1), `&`, ``, 1) + `,`, - `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `Expose:` + repeatedStringForExpose + `,`, - `Params:` + strings.Replace(this.Params.String(), "ServiceParams", "ServiceParams", 1) + `,`, - `Credentials:` + strings.Replace(this.Credentials.String(), "ServiceImageCredentials", "ServiceImageCredentials", 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringService(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *StorageParams) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageParams: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StorageParams: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Mount", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Mount = string(dAtA[iNdEx:postIndex]) - iNdEx = 
postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceParams) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceParams: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceParams: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Storage = append(m.Storage, StorageParams{}) - if err := m.Storage[len(m.Storage)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceImageCredentials) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceImageCredentials: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceImageCredentials: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Email", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Email = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Username = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Password = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Service) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Service: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Image = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Expose", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Expose = append(m.Expose, ServiceExpose{}) - if err := m.Expose[len(m.Expose)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Params == nil { - m.Params = &ServiceParams{} - } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Credentials", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowService - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthService - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthService - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Credentials == nil { - m.Credentials = &ServiceImageCredentials{} - } - if err := m.Credentials.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipService(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthService - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipService(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
0, ErrIntOverflowService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowService - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthService - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupService - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthService - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthService = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowService = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupService = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/manifest/v2beta2/serviceexpose.go b/go/manifest/v2beta2/serviceexpose.go deleted file mode 100644 index c6b65ab9..00000000 --- a/go/manifest/v2beta2/serviceexpose.go +++ /dev/null @@ -1,108 +0,0 @@ -package v2beta2 - -import ( - "fmt" - "math" - "sort" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - types "github.com/akash-network/akash-api/go/node/types/v1beta3" -) - -func (s *ServiceExpose) GetEndpoints() types.Endpoints { - if !s.Global { - return types.Endpoints{} - } - - endpoints := make(types.Endpoints, 0, 1) - - if len(s.IP) != 0 { - endpoints = make(types.Endpoints, 0, 2) - - endpoints = append( - endpoints, - types.Endpoint{ - Kind: types.Endpoint_LEASED_IP, - SequenceNumber: s.EndpointSequenceNumber, - }, - ) - } - - kind := types.Endpoint_RANDOM_PORT - if s.IsIngress() { - kind = types.Endpoint_SHARED_HTTP - } - - endpoints = append(endpoints, types.Endpoint{Kind: kind}) - - sort.Sort(endpoints) - - return endpoints -} - -func (s *ServiceExpose) validate(helper *validateManifestGroupsHelper) error { - if s.Port == 0 || s.Port > math.MaxUint16 { - return fmt.Errorf("port value must be 0 < value <= 65535 ") - } - - switch s.Proto { - case TCP, UDP: - break - default: - return fmt.Errorf("protocol %q unknown", s.Proto) - } - - if s.Global { - helper.globalServiceCount++ - } - - for _, host := range s.Hosts { - if !isValidHostname(host) { - return fmt.Errorf("has invalid hostname %q", host) - } - - _, exists := helper.hostnames[host] - if exists { - return fmt.Errorf("hostname %q is duplicated, this is not allowed", host) - } - helper.hostnames[host] = 0 // Value stored does not matter - } - - return nil -} - -func (s *ServiceExpose) checkAgainstResources(res *dtypes.ResourceUnit, eps validateEndpointsHelper) error { - if s.Global { - eph := eps[res.ID] - - if s.IsIngress() { - if !eph.tryDecHTTP() { - return fmt.Errorf("over-utilized HTTP endpoints") - } - } else { - if !eph.tryDecPort() { - return fmt.Errorf("over-utilized PORT endpoints") - } - } - - if len(s.IP) > 0 { - if !eph.tryDecIP() { - return fmt.Errorf("over-utilized IP endpoints") - } - } - 
} - - return nil -} - -func (s *ServiceExpose) IsIngress() bool { - return s.Proto == TCP && s.Global && 80 == s.GetExternalPort() -} - -func (s *ServiceExpose) GetExternalPort() int32 { - if s.ExternalPort == 0 { - return int32(s.Port) - } - - return int32(s.ExternalPort) -} diff --git a/go/manifest/v2beta2/serviceexpose.pb.go b/go/manifest/v2beta2/serviceexpose.pb.go deleted file mode 100644 index 4323ffe6..00000000 --- a/go/manifest/v2beta2/serviceexpose.pb.go +++ /dev/null @@ -1,666 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/manifest/v2beta2/serviceexpose.proto - -package v2beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// ServiceExpose stores exposed ports and hosts details -type ServiceExpose struct { - // port on the container - Port uint32 `protobuf:"varint,1,opt,name=port,proto3" json:"port" yaml:"port"` - // port on the service definition - ExternalPort uint32 `protobuf:"varint,2,opt,name=external_port,json=externalPort,proto3" json:"externalPort" yaml:"externalPort"` - Proto ServiceProtocol `protobuf:"bytes,3,opt,name=proto,proto3,casttype=ServiceProtocol" json:"proto" yaml:"proto"` - Service string `protobuf:"bytes,4,opt,name=service,proto3" json:"service" yaml:"service"` - Global bool `protobuf:"varint,5,opt,name=global,proto3" json:"global" yaml:"global"` - Hosts []string `protobuf:"bytes,6,rep,name=hosts,proto3" json:"hosts" yaml:"hosts"` - HTTPOptions ServiceExposeHTTPOptions `protobuf:"bytes,7,opt,name=http_options,json=httpOptions,proto3" json:"httpOptions" yaml:"httpOptions"` - // The name of the IP address associated with this, if any - IP string `protobuf:"bytes,8,opt,name=ip,proto3" json:"ip" yaml:"ip"` - // The sequence number of the associated endpoint in the on-chain data - EndpointSequenceNumber uint32 `protobuf:"varint,9,opt,name=endpoint_sequence_number,json=endpointSequenceNumber,proto3" json:"endpointSequenceNumber" yaml:"endpointSequenceNumber"` -} - -func (m *ServiceExpose) Reset() { *m = ServiceExpose{} } -func (*ServiceExpose) ProtoMessage() {} -func (*ServiceExpose) Descriptor() ([]byte, []int) { - return fileDescriptor_e8dad39b3d78f39d, []int{0} -} -func (m *ServiceExpose) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceExpose) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ServiceExpose.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ServiceExpose) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceExpose.Merge(m, src) -} -func (m *ServiceExpose) XXX_Size() int { - return m.Size() -} -func (m *ServiceExpose) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceExpose.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceExpose proto.InternalMessageInfo - -func init() { - 
proto.RegisterType((*ServiceExpose)(nil), "akash.manifest.v2beta2.ServiceExpose") -} - -func init() { - proto.RegisterFile("akash/manifest/v2beta2/serviceexpose.proto", fileDescriptor_e8dad39b3d78f39d) -} - -var fileDescriptor_e8dad39b3d78f39d = []byte{ - // 540 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0x3f, 0x6f, 0xd3, 0x40, - 0x18, 0xc6, 0xed, 0x34, 0x49, 0x9b, 0x4b, 0x02, 0xd2, 0x81, 0x8a, 0x5b, 0x54, 0x5f, 0xe4, 0x05, - 0xf3, 0xcf, 0x46, 0xa9, 0x04, 0xa8, 0x6c, 0x96, 0x90, 0x40, 0x42, 0x10, 0xb9, 0x1d, 0x10, 0x4b, - 0xe4, 0x84, 0x23, 0x39, 0x35, 0xf1, 0x1d, 0xf6, 0xa5, 0x94, 0x8d, 0x91, 0x05, 0x89, 0x8f, 0xc0, - 0xc2, 0x77, 0xe9, 0xd8, 0xb1, 0xd3, 0x09, 0x9c, 0xcd, 0xa3, 0x47, 0x26, 0xe4, 0xbb, 0xb3, 0x9a, - 0xaa, 0xed, 0xe6, 0xf7, 0x79, 0x7f, 0xcf, 0x7b, 0x8f, 0xee, 0x3d, 0x83, 0x07, 0xd1, 0x61, 0x94, - 0x4e, 0xfd, 0x79, 0x14, 0x93, 0x4f, 0x38, 0xe5, 0xfe, 0x51, 0x7f, 0x84, 0x79, 0xd4, 0xf7, 0x53, - 0x9c, 0x1c, 0x91, 0x31, 0xc6, 0xc7, 0x8c, 0xa6, 0xd8, 0x63, 0x09, 0xe5, 0x14, 0x6e, 0x4a, 0xd6, - 0xab, 0x58, 0x4f, 0xb3, 0xdb, 0xb7, 0x27, 0x74, 0x42, 0x25, 0xe2, 0x97, 0x5f, 0x8a, 0xde, 0x76, - 0xaf, 0x99, 0x3c, 0xe5, 0x9c, 0x51, 0xc6, 0x09, 0x8d, 0x53, 0x45, 0x3a, 0xbf, 0x1b, 0xa0, 0xbb, - 0xaf, 0xce, 0x7b, 0x29, 0xcf, 0x83, 0x0f, 0x41, 0x9d, 0xd1, 0x84, 0x5b, 0x66, 0xcf, 0x74, 0xbb, - 0xc1, 0x9d, 0x5c, 0x20, 0x59, 0x17, 0x02, 0xb5, 0xbf, 0x46, 0xf3, 0xd9, 0x9e, 0x53, 0x56, 0x4e, - 0x28, 0x45, 0xf8, 0x06, 0x74, 0xf1, 0x31, 0xc7, 0x49, 0x1c, 0xcd, 0x86, 0xd2, 0x55, 0x93, 0xae, - 0x7b, 0xb9, 0x40, 0x9d, 0xaa, 0x31, 0x50, 0xee, 0x5b, 0xca, 0xbd, 0xaa, 0x3a, 0xe1, 0x05, 0x08, - 0x06, 0xa0, 0x21, 0x53, 0x59, 0x6b, 0x3d, 0xd3, 0x6d, 0x05, 0x8f, 0x72, 0x81, 0x94, 0x50, 0x08, - 0xd4, 0xd1, 0x87, 0xcb, 0xd4, 0xff, 0x04, 0xba, 0xa9, 0x53, 0x0f, 0x4a, 0x61, 0x4c, 0x67, 0xa1, - 0x22, 0xe1, 0x33, 0xb0, 0xae, 0xef, 0xcf, 0xaa, 0xcb, 0x29, 0x3b, 0xb9, 0x40, 0x95, 0x54, 0x08, - 0x74, 0x43, 0xcd, 0xd1, 0x82, 0x13, 0x56, 0x2d, 0xb8, 0x0b, 0x9a, 0x93, 0x19, 0x1d, 0x45, 0x33, - 0xab, 0xd1, 0x33, 0xdd, 0x8d, 0xe0, 0x6e, 0x2e, 0x90, 0x56, 0x0a, 0x81, 0xba, 0xca, 0xa6, 0x6a, - 0x27, 0xd4, 0x0d, 0xe8, 0x83, 0xc6, 0x94, 0xa6, 0x3c, 0xb5, 0x9a, 0xbd, 0x35, 0xb7, 0x15, 0x6c, - 0x95, 0x89, 0xa5, 0x70, 0x9e, 0x58, 0x96, 0x4e, 0xa8, 0x64, 0xf8, 0xc3, 0x04, 0x9d, 0x72, 0x0b, - 0x43, 0xbd, 0x06, 0x6b, 0xbd, 0x67, 0xba, 0xed, 0xfe, 0x13, 0xef, 0xea, 0xfd, 0x7a, 0x17, 0x76, - 0xf3, 0xea, 0xe0, 0x60, 0xf0, 0x4e, 0xf9, 0x82, 0xe7, 0x27, 0x02, 0x19, 0x99, 0x40, 0xed, 0x15, - 0x31, 0x17, 0xa8, 0x5d, 0x0e, 0xd7, 0x65, 0x21, 0x10, 0xd4, 0x19, 0xce, 0x45, 0x27, 0x5c, 0x45, - 0xe0, 0x7d, 0x50, 0x23, 0xcc, 0xda, 0x90, 0x37, 0xb5, 0x95, 0x09, 0x54, 0x7b, 0x3d, 0xc8, 0x05, - 0xaa, 0x11, 0x56, 0x08, 0xd4, 0x52, 0x66, 0xc2, 0x9c, 0xb0, 0x46, 0x18, 0x5c, 0x00, 0x0b, 0xc7, - 0x1f, 0x19, 0x25, 0x31, 0x1f, 0xa6, 0xf8, 0xf3, 0x02, 0xc7, 0x63, 0x3c, 0x8c, 0x17, 0xf3, 0x11, - 0x4e, 0xac, 0x96, 0x5c, 0xfb, 0x8b, 0x5c, 0xa0, 0xcd, 0x8a, 0xd9, 0xd7, 0xc8, 0x5b, 0x49, 0x14, - 0x02, 0xed, 0xe8, 0x07, 0x70, 0x65, 0xdf, 0x09, 0xaf, 0x31, 0xee, 0xd5, 0xbf, 0xff, 0x42, 0x46, - 0xf0, 0xfe, 0xec, 0xaf, 0x6d, 0x7c, 0xcb, 0x6c, 0xf3, 0x24, 0xb3, 0xcd, 0xd3, 0xcc, 0x36, 0xff, - 0x64, 0xb6, 0xf9, 0x73, 0x69, 0x1b, 0xa7, 0x4b, 0xdb, 0x38, 0x5b, 0xda, 0xc6, 0x87, 0xa7, 0x13, - 0xc2, 0xa7, 0x8b, 0x91, 0x37, 0xa6, 0x73, 0x5f, 0x5e, 0xe6, 0xe3, 0x18, 0xf3, 0x2f, 0x34, 0x39, - 0xd4, 0x55, 0xc4, 0x88, 0x3f, 0xa1, 0x97, 0xfe, 0x89, 0x51, 0x53, 0xbe, 0x9b, 0xdd, 0xff, 0x01, - 0x00, 0x00, 
0xff, 0xff, 0xb3, 0x23, 0xc2, 0x68, 0x8e, 0x03, 0x00, 0x00, -} - -func (m *ServiceExpose) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceExpose) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceExpose) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.EndpointSequenceNumber != 0 { - i = encodeVarintServiceexpose(dAtA, i, uint64(m.EndpointSequenceNumber)) - i-- - dAtA[i] = 0x48 - } - if len(m.IP) > 0 { - i -= len(m.IP) - copy(dAtA[i:], m.IP) - i = encodeVarintServiceexpose(dAtA, i, uint64(len(m.IP))) - i-- - dAtA[i] = 0x42 - } - { - size, err := m.HTTPOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintServiceexpose(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - if len(m.Hosts) > 0 { - for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Hosts[iNdEx]) - copy(dAtA[i:], m.Hosts[iNdEx]) - i = encodeVarintServiceexpose(dAtA, i, uint64(len(m.Hosts[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - if m.Global { - i-- - if m.Global { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if len(m.Service) > 0 { - i -= len(m.Service) - copy(dAtA[i:], m.Service) - i = encodeVarintServiceexpose(dAtA, i, uint64(len(m.Service))) - i-- - dAtA[i] = 0x22 - } - if len(m.Proto) > 0 { - i -= len(m.Proto) - copy(dAtA[i:], m.Proto) - i = encodeVarintServiceexpose(dAtA, i, uint64(len(m.Proto))) - i-- - dAtA[i] = 0x1a - } - if m.ExternalPort != 0 { - i = encodeVarintServiceexpose(dAtA, i, uint64(m.ExternalPort)) - i-- - dAtA[i] = 0x10 - } - if m.Port != 0 { - i = encodeVarintServiceexpose(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintServiceexpose(dAtA []byte, offset int, v uint64) int { - offset -= sovServiceexpose(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ServiceExpose) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Port != 0 { - n += 1 + sovServiceexpose(uint64(m.Port)) - } - if m.ExternalPort != 0 { - n += 1 + sovServiceexpose(uint64(m.ExternalPort)) - } - l = len(m.Proto) - if l > 0 { - n += 1 + l + sovServiceexpose(uint64(l)) - } - l = len(m.Service) - if l > 0 { - n += 1 + l + sovServiceexpose(uint64(l)) - } - if m.Global { - n += 2 - } - if len(m.Hosts) > 0 { - for _, s := range m.Hosts { - l = len(s) - n += 1 + l + sovServiceexpose(uint64(l)) - } - } - l = m.HTTPOptions.Size() - n += 1 + l + sovServiceexpose(uint64(l)) - l = len(m.IP) - if l > 0 { - n += 1 + l + sovServiceexpose(uint64(l)) - } - if m.EndpointSequenceNumber != 0 { - n += 1 + sovServiceexpose(uint64(m.EndpointSequenceNumber)) - } - return n -} - -func sovServiceexpose(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozServiceexpose(x uint64) (n int) { - return sovServiceexpose(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ServiceExpose) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceExpose{`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `ExternalPort:` + fmt.Sprintf("%v", this.ExternalPort) + `,`, - `Proto:` + fmt.Sprintf("%v", this.Proto) + `,`, - `Service:` + fmt.Sprintf("%v", 
this.Service) + `,`, - `Global:` + fmt.Sprintf("%v", this.Global) + `,`, - `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, - `HTTPOptions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HTTPOptions), "ServiceExposeHTTPOptions", "ServiceExposeHTTPOptions", 1), `&`, ``, 1) + `,`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `EndpointSequenceNumber:` + fmt.Sprintf("%v", this.EndpointSequenceNumber) + `,`, - `}`, - }, "") - return s -} -func valueToStringServiceexpose(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ServiceExpose) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceExpose: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceExpose: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalPort", wireType) - } - m.ExternalPort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExternalPort |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Proto", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthServiceexpose - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthServiceexpose - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Proto = ServiceProtocol(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthServiceexpose - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthServiceexpose - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Service = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Global", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Global = bool(v != 0) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthServiceexpose - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthServiceexpose - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTPOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthServiceexpose - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthServiceexpose - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.HTTPOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthServiceexpose - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthServiceexpose - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EndpointSequenceNumber", wireType) - } - m.EndpointSequenceNumber = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.EndpointSequenceNumber |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipServiceexpose(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthServiceexpose - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipServiceexpose(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowServiceexpose - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++
-			wire |= (uint64(b) & 0x7F) << shift
-			if b < 0x80 {
-				break
-			}
-		}
-		wireType := int(wire & 0x7)
-		switch wireType {
-		case 0:
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowServiceexpose
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				iNdEx++
-				if dAtA[iNdEx-1] < 0x80 {
-					break
-				}
-			}
-		case 1:
-			iNdEx += 8
-		case 2:
-			var length int
-			for shift := uint(0); ; shift += 7 {
-				if shift >= 64 {
-					return 0, ErrIntOverflowServiceexpose
-				}
-				if iNdEx >= l {
-					return 0, io.ErrUnexpectedEOF
-				}
-				b := dAtA[iNdEx]
-				iNdEx++
-				length |= (int(b) & 0x7F) << shift
-				if b < 0x80 {
-					break
-				}
-			}
-			if length < 0 {
-				return 0, ErrInvalidLengthServiceexpose
-			}
-			iNdEx += length
-		case 3:
-			depth++
-		case 4:
-			if depth == 0 {
-				return 0, ErrUnexpectedEndOfGroupServiceexpose
-			}
-			depth--
-		case 5:
-			iNdEx += 4
-		default:
-			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
-		}
-		if iNdEx < 0 {
-			return 0, ErrInvalidLengthServiceexpose
-		}
-		if depth == 0 {
-			return iNdEx, nil
-		}
-	}
-	return 0, io.ErrUnexpectedEOF
-}
-
-var (
-	ErrInvalidLengthServiceexpose        = fmt.Errorf("proto: negative length found during unmarshaling")
-	ErrIntOverflowServiceexpose          = fmt.Errorf("proto: integer overflow")
-	ErrUnexpectedEndOfGroupServiceexpose = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/go/manifest/v2beta3/errors.go b/go/manifest/v2beta3/errors.go
new file mode 100644
index 00000000..1ac41fc9
--- /dev/null
+++ b/go/manifest/v2beta3/errors.go
@@ -0,0 +1,10 @@
+package v2beta3
+
+import (
+	"errors"
+)
+
+var (
+	ErrInvalidManifest         = errors.New("invalid manifest")
+	ErrManifestCrossValidation = errors.New("manifest cross-validation error")
+)
diff --git a/go/manifest/v2beta3/group.go b/go/manifest/v2beta3/group.go
new file mode 100644
index 00000000..e3d7a88d
--- /dev/null
+++ b/go/manifest/v2beta3/group.go
@@ -0,0 +1,79 @@
+package v2beta3
+
+import (
+	"fmt"
+	"sort"
+
+	dtypes "pkg.akt.dev/go/node/deployment/v1beta4"
+)
+
+var _ dtypes.ResourceGroup = (*Group)(nil)
+
+// GetName returns the name of group
+func (g Group) GetName() string {
+	return g.Name
+}
+
+func (g Group) GetResourceUnits() dtypes.ResourceUnits {
+	groups := make(map[uint32]*dtypes.ResourceUnit)
+
+	for _, svc := range g.Services {
+		if _, exists := groups[svc.Resources.ID]; !exists {
+			groups[svc.Resources.ID] = &dtypes.ResourceUnit{
+				Resources: svc.Resources,
+				Count:     svc.Count,
+			}
+		} else {
+			groups[svc.Resources.ID].Count += svc.Count
+		}
+	}
+
+	units := make(dtypes.ResourceUnits, 0, len(groups))
+
+	for i := range groups {
+		units = append(units, *groups[i])
+	}
+
+	return units
+}
+
+func (g Group) AllHostnames() []string {
+	allHostnames := make([]string, 0)
+	for _, service := range g.Services {
+		for _, expose := range service.Expose {
+			allHostnames = append(allHostnames, expose.Hosts...)
+		}
+	}
+
+	return allHostnames
+}
+
+func (g *Group) Validate(helper *validateManifestGroupsHelper) error {
+	if 0 == len(g.Services) {
+		return fmt.Errorf("%w: group %q contains no services", ErrInvalidManifest, g.GetName())
+	}
+
+	if !sort.IsSorted(g.Services) {
+		return fmt.Errorf("%w: group %q services is not sorted", ErrInvalidManifest, g.GetName())
+	}
+
+	for _, s := range g.Services {
+		if err := s.validate(helper); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// checkAgainstGSpec check if manifest group is within GroupSpec resources
+// NOTE: it modifies caller's gspec
+func (g *Group) checkAgainstGSpec(gspec *groupSpec) error {
+	for _, svc := range g.Services {
+		if err := svc.checkAgainstGSpec(gspec); err != nil {
+			return fmt.Errorf("%w: group %q: %w", ErrManifestCrossValidation, g.Name, err)
+		}
+	}
+
+	return nil
+}
diff --git a/go/manifest/v2beta3/group.pb.go b/go/manifest/v2beta3/group.pb.go
new file mode 100644
index 00000000..0d50e73c
--- /dev/null
+++ b/go/manifest/v2beta3/group.pb.go
@@ -0,0 +1,398 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: akash/manifest/v2beta3/group.proto
+
+package v2beta3
+
+import (
+	fmt "fmt"
+	_ "github.com/cosmos/gogoproto/gogoproto"
+	proto "github.com/cosmos/gogoproto/proto"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+	reflect "reflect"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+// Group store name and list of services
+type Group struct {
+	Name     string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"`
+	Services Services `protobuf:"bytes,2,rep,name=services,proto3,castrepeated=Services" json:"services" yaml:"services"`
+}
+
+func (m *Group) Reset()      { *m = Group{} }
+func (*Group) ProtoMessage() {}
+func (*Group) Descriptor() ([]byte, []int) {
+	return fileDescriptor_d7cd4686cd5336b6, []int{0}
+}
+func (m *Group) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Group.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Group) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Group.Merge(m, src)
+}
+func (m *Group) XXX_Size() int {
+	return m.Size()
+}
+func (m *Group) XXX_DiscardUnknown() {
+	xxx_messageInfo_Group.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Group proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*Group)(nil), "akash.manifest.v2beta3.Group")
+}
+
+func init() {
+	proto.RegisterFile("akash/manifest/v2beta3/group.proto", fileDescriptor_d7cd4686cd5336b6)
+}
+
+var fileDescriptor_d7cd4686cd5336b6 = []byte{
+	// 261 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4a, 0xcc, 0x4e, 0x2c,
+	0xce, 0xd0, 0xcf, 0x4d, 0xcc, 0xcb, 0x4c, 0x4b, 0x2d, 0x2e, 0xd1, 0x2f, 0x33, 0x4a, 0x4a, 0x2d,
+	0x49, 0x34, 0xd6, 0x4f, 0x2f, 0xca, 0x2f, 0x2d, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12,
+	0x03, 0xab, 0xd1, 0x83,
0xa9, 0xd1, 0x83, 0xaa, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, + 0xd1, 0x07, 0xb1, 0x20, 0xaa, 0xa5, 0x54, 0x70, 0x98, 0x58, 0x9c, 0x5a, 0x54, 0x96, 0x99, 0x9c, + 0x0a, 0x51, 0xa5, 0xb4, 0x82, 0x91, 0x8b, 0xd5, 0x1d, 0x64, 0x87, 0x90, 0x36, 0x17, 0x4b, 0x5e, + 0x62, 0x6e, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x93, 0xf8, 0xab, 0x7b, 0xf2, 0x60, 0xfe, + 0xa7, 0x7b, 0xf2, 0xdc, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, 0x20, 0x9e, 0x52, 0x10, 0x58, 0x50, + 0x28, 0x87, 0x8b, 0x03, 0x6a, 0x4e, 0xb1, 0x04, 0x93, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0xbc, 0x1e, + 0x76, 0xd7, 0xe9, 0x05, 0x43, 0xd4, 0x39, 0xe9, 0x9f, 0xb8, 0x27, 0xcf, 0xf0, 0xea, 0x9e, 0x3c, + 0x5c, 0xe3, 0xa7, 0x7b, 0xf2, 0xfc, 0x10, 0x93, 0x61, 0x22, 0x4a, 0xab, 0xee, 0xcb, 0x73, 0x40, + 0xd5, 0x17, 0x07, 0xc1, 0x15, 0x5a, 0xb1, 0x74, 0x2c, 0x90, 0x67, 0x70, 0x72, 0xbc, 0xf1, 0x50, + 0x8e, 0xa1, 0xe1, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, + 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, + 0xc9, 0x17, 0x64, 0xa7, 0xeb, 0x25, 0x66, 0x97, 0xe8, 0xa5, 0xa4, 0x96, 0xe9, 0xa7, 0xe7, 0x63, + 0xf8, 0x3d, 0x89, 0x0d, 0xec, 0x69, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5a, 0x62, 0xc1, + 0x30, 0x6e, 0x01, 0x00, 0x00, +} + +func (m *Group) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Group) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Group) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Services) > 0 { + for iNdEx := len(m.Services) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Services[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGroup(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGroup(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGroup(dAtA []byte, offset int, v uint64) int { + offset -= sovGroup(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Group) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovGroup(uint64(l)) + } + if len(m.Services) > 0 { + for _, e := range m.Services { + l = e.Size() + n += 1 + l + sovGroup(uint64(l)) + } + } + return n +} + +func sovGroup(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGroup(x uint64) (n int) { + return sovGroup(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Group) String() string { + if this == nil { + return "nil" + } + repeatedStringForServices := "[]Service{" + for _, f := range this.Services { + repeatedStringForServices += fmt.Sprintf("%v", f) + "," + } + repeatedStringForServices += "}" + s := strings.Join([]string{`&Group{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Services:` + repeatedStringForServices + `,`, + `}`, + }, "") + return s +} +func valueToStringGroup(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Group) Unmarshal(dAtA []byte) error 
{ + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Group: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Group: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGroup + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGroup + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Services = append(m.Services, Service{}) + if err := m.Services[len(m.Services)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGroup(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroup + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGroup(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGroup + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", 
wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGroup + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGroup = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGroup = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/manifest/v2beta2/groups.go b/go/manifest/v2beta3/groups.go similarity index 96% rename from go/manifest/v2beta2/groups.go rename to go/manifest/v2beta3/groups.go index ad5187a5..b34f33a3 100644 --- a/go/manifest/v2beta2/groups.go +++ b/go/manifest/v2beta3/groups.go @@ -1,9 +1,9 @@ -package v2beta2 +package v2beta3 import ( "fmt" - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" ) type Groups []Group diff --git a/go/manifest/v2beta2/helpers.go b/go/manifest/v2beta3/helpers.go similarity index 83% rename from go/manifest/v2beta2/helpers.go rename to go/manifest/v2beta3/helpers.go index a080b6b7..03ab77e0 100644 --- a/go/manifest/v2beta2/helpers.go +++ b/go/manifest/v2beta3/helpers.go @@ -1,10 +1,10 @@ -package v2beta2 +package v2beta3 import ( k8svalidation "k8s.io/apimachinery/pkg/util/validation" - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - types "github.com/akash-network/akash-api/go/node/types/v1beta3" + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + resources "pkg.akt.dev/go/node/types/resources/v1beta4" ) type validateManifestGroupsHelper struct { @@ -31,7 +31,7 @@ func newGroupSpecsHelper(gspecs dtypes.GroupSpecs) groupSpecHelper { res := make(groupSpecHelper) for _, gspec := range gspecs { - res[gspec.GetName()] = newGroupSpecHelper(*gspec) + res[gspec.GetName()] = newGroupSpecHelper(gspec) } return res @@ -48,11 +48,11 @@ func newGroupSpecHelper(gs dtypes.GroupSpec) *groupSpec { for _, ep := range gRes.Endpoints { switch ep.Kind { - case types.Endpoint_SHARED_HTTP: + case resources.Endpoint_SHARED_HTTP: vep.httpEndpoints++ - case types.Endpoint_RANDOM_PORT: + case resources.Endpoint_RANDOM_PORT: vep.portEndpoints++ - case types.Endpoint_LEASED_IP: + case resources.Endpoint_LEASED_IP: vep.ipEndpoints++ } } diff --git a/go/manifest/v2beta3/httpoptions.pb.go b/go/manifest/v2beta3/httpoptions.pb.go new file mode 100644 index 00000000..3632c720 --- /dev/null +++ b/go/manifest/v2beta3/httpoptions.pb.go @@ -0,0 +1,535 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/manifest/v2beta3/httpoptions.proto + +package v2beta3 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ServiceExposeHTTPOptions +type ServiceExposeHTTPOptions struct { + MaxBodySize uint32 `protobuf:"varint,1,opt,name=max_body_size,json=maxBodySize,proto3" json:"maxBodySize" yaml:"maxBodySize"` + ReadTimeout uint32 `protobuf:"varint,2,opt,name=read_timeout,json=readTimeout,proto3" json:"readTimeout" yaml:"readTimeout"` + SendTimeout uint32 `protobuf:"varint,3,opt,name=send_timeout,json=sendTimeout,proto3" json:"sendTimeout" yaml:"sendTimeout"` + NextTries uint32 `protobuf:"varint,4,opt,name=next_tries,json=nextTries,proto3" json:"nextTries" yaml:"nextTries"` + NextTimeout uint32 `protobuf:"varint,5,opt,name=next_timeout,json=nextTimeout,proto3" json:"nextTimeout" yaml:"nextTimeout"` + NextCases []string `protobuf:"bytes,6,rep,name=next_cases,json=nextCases,proto3" json:"nextCases,omitempty" yaml:"nextCases,omitempty"` +} + +func (m *ServiceExposeHTTPOptions) Reset() { *m = ServiceExposeHTTPOptions{} } +func (*ServiceExposeHTTPOptions) ProtoMessage() {} +func (*ServiceExposeHTTPOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_ee317fccaba20357, []int{0} +} +func (m *ServiceExposeHTTPOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceExposeHTTPOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ServiceExposeHTTPOptions.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ServiceExposeHTTPOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceExposeHTTPOptions.Merge(m, src) +} +func (m *ServiceExposeHTTPOptions) XXX_Size() int { + return m.Size() +} +func (m *ServiceExposeHTTPOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceExposeHTTPOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceExposeHTTPOptions proto.InternalMessageInfo + +func (m *ServiceExposeHTTPOptions) GetMaxBodySize() uint32 { + if m != nil { + return m.MaxBodySize + } + return 0 +} + +func (m *ServiceExposeHTTPOptions) GetReadTimeout() uint32 { + if m != nil { + return m.ReadTimeout + } + return 0 +} + +func (m *ServiceExposeHTTPOptions) GetSendTimeout() uint32 { + if m != nil { + return m.SendTimeout + } + return 0 +} + +func (m *ServiceExposeHTTPOptions) GetNextTries() uint32 { + if m != nil { + return m.NextTries + } + return 0 +} + +func (m *ServiceExposeHTTPOptions) GetNextTimeout() uint32 { + if m != nil { + return m.NextTimeout + } + return 0 +} + +func (m *ServiceExposeHTTPOptions) GetNextCases() []string { + if m != nil { + return m.NextCases + } + return nil +} + +func init() { + proto.RegisterType((*ServiceExposeHTTPOptions)(nil), "akash.manifest.v2beta3.ServiceExposeHTTPOptions") +} + +func init() { + proto.RegisterFile("akash/manifest/v2beta3/httpoptions.proto", fileDescriptor_ee317fccaba20357) +} + +var fileDescriptor_ee317fccaba20357 = []byte{ + // 384 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0xb1, 0x6a, 0xdb, 0x40, + 0x1c, 0x87, 0xa5, 0xba, 0x35, 0x58, 0xad, 0xa1, 0xa8, 0xa5, 0x08, 0x0f, 0x77, 0xae, 0xa0, 0xe0, + 0xa1, 0x48, 0x50, 0x4f, 0xed, 0xd4, 0xaa, 0x14, 0xdc, 0xa9, 0x45, 0xd6, 0x10, 0xb2, 0x88, 0xb3, + 0x75, 0x91, 0x0f, 0x47, 0x3a, 0xa1, 0xbb, 0x18, 0xc9, 0x53, 0x1e, 0x21, 0x8f, 0xe5, 0xd1, 0xa3, + 0x27, 0x25, 0x91, 0x37, 0x8f, 0x7e, 0x82, 0x70, 0x92, 0x12, 0x1d, 0xf6, 0x76, 0x7c, 0xff, 0x8f, + 0x8f, 
0xdf, 0x70, 0xda, 0x08, 0x2d, 0x11, 0x5b, 0xd8, 0x11, 0x8a, 0xc9, 0x15, 0x66, 0xdc, 0x5e, + 0x7d, 0x9b, 0x61, 0x8e, 0xc6, 0xf6, 0x82, 0xf3, 0x84, 0x26, 0x9c, 0xd0, 0x98, 0x59, 0x49, 0x4a, + 0x39, 0xd5, 0x3f, 0x55, 0xa6, 0xf5, 0x6c, 0x5a, 0x8d, 0x39, 0xf8, 0x18, 0xd2, 0x90, 0x56, 0x8a, + 0x2d, 0x5e, 0xb5, 0x6d, 0xde, 0x77, 0x34, 0x63, 0x8a, 0xd3, 0x15, 0x99, 0xe3, 0x3f, 0x59, 0x42, + 0x19, 0x9e, 0x78, 0xde, 0xff, 0x7f, 0x75, 0x50, 0xff, 0xab, 0xf5, 0x23, 0x94, 0xf9, 0x33, 0x1a, + 0xe4, 0x3e, 0x23, 0x6b, 0x6c, 0xa8, 0x43, 0x75, 0xd4, 0x77, 0xbe, 0x1c, 0x0a, 0xf8, 0x36, 0x42, + 0x99, 0x43, 0x83, 0x7c, 0x4a, 0xd6, 0xf8, 0x58, 0x40, 0x3d, 0x47, 0xd1, 0xf5, 0x0f, 0x53, 0x82, + 0xa6, 0x2b, 0x2b, 0xfa, 0x44, 0x7b, 0x97, 0x62, 0x14, 0xf8, 0x9c, 0x44, 0x98, 0xde, 0x70, 0xe3, + 0x55, 0x5b, 0x12, 0xdc, 0xab, 0x71, 0x5b, 0x92, 0xa0, 0xe9, 0xca, 0x8a, 0x28, 0x31, 0x1c, 0xb7, + 0xa5, 0x4e, 0x5b, 0x12, 0xfc, 0xac, 0x24, 0x41, 0xd3, 0x95, 0x15, 0xfd, 0xa7, 0xa6, 0xc5, 0x38, + 0xe3, 0x3e, 0x4f, 0x09, 0x66, 0xc6, 0xeb, 0xaa, 0xf3, 0xf9, 0x50, 0xc0, 0x9e, 0xa0, 0x9e, 0x80, + 0xc7, 0x02, 0xbe, 0xaf, 0x2b, 0x2f, 0xc8, 0x74, 0xdb, 0xb3, 0xd8, 0x52, 0x17, 0x9a, 0x2d, 0x6f, + 0xda, 0x2d, 0x95, 0x74, 0xba, 0x45, 0x82, 0xa6, 0x2b, 0x2b, 0xfa, 0x45, 0xb3, 0x65, 0x8e, 0x18, + 0x66, 0x46, 0x77, 0xd8, 0x19, 0xf5, 0x9c, 0xef, 0x9b, 0x02, 0xaa, 0x87, 0x02, 0x7e, 0x10, 0x97, + 0xdf, 0xe2, 0xf0, 0x95, 0x46, 0x84, 0xe3, 0x28, 0xe1, 0xf9, 0xb1, 0x80, 0x83, 0xb6, 0x79, 0x72, + 0x6c, 0x36, 0x56, 0xd4, 0xf9, 0xb5, 0x7b, 0x04, 0xca, 0x6d, 0x09, 0xd4, 0x4d, 0x09, 0xd4, 0x6d, + 0x09, 0xd4, 0x87, 0x12, 0xa8, 0x77, 0x7b, 0xa0, 0x6c, 0xf7, 0x40, 0xd9, 0xed, 0x81, 0x72, 0x09, + 0x93, 0x65, 0x68, 0xa1, 0x25, 0xb7, 0x02, 0xbc, 0xb2, 0x43, 0x7a, 0xf6, 0xc9, 0x66, 0xdd, 0xea, + 0xaf, 0x8c, 0x9f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x9d, 0xa4, 0xac, 0x31, 0x85, 0x02, 0x00, 0x00, +} + +func (m *ServiceExposeHTTPOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceExposeHTTPOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceExposeHTTPOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NextCases) > 0 { + for iNdEx := len(m.NextCases) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NextCases[iNdEx]) + copy(dAtA[i:], m.NextCases[iNdEx]) + i = encodeVarintHttpoptions(dAtA, i, uint64(len(m.NextCases[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if m.NextTimeout != 0 { + i = encodeVarintHttpoptions(dAtA, i, uint64(m.NextTimeout)) + i-- + dAtA[i] = 0x28 + } + if m.NextTries != 0 { + i = encodeVarintHttpoptions(dAtA, i, uint64(m.NextTries)) + i-- + dAtA[i] = 0x20 + } + if m.SendTimeout != 0 { + i = encodeVarintHttpoptions(dAtA, i, uint64(m.SendTimeout)) + i-- + dAtA[i] = 0x18 + } + if m.ReadTimeout != 0 { + i = encodeVarintHttpoptions(dAtA, i, uint64(m.ReadTimeout)) + i-- + dAtA[i] = 0x10 + } + if m.MaxBodySize != 0 { + i = encodeVarintHttpoptions(dAtA, i, uint64(m.MaxBodySize)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintHttpoptions(dAtA []byte, offset int, v uint64) int { + offset -= sovHttpoptions(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ServiceExposeHTTPOptions) Size() (n int) { + if m == nil { + return 0 + } 
+ var l int + _ = l + if m.MaxBodySize != 0 { + n += 1 + sovHttpoptions(uint64(m.MaxBodySize)) + } + if m.ReadTimeout != 0 { + n += 1 + sovHttpoptions(uint64(m.ReadTimeout)) + } + if m.SendTimeout != 0 { + n += 1 + sovHttpoptions(uint64(m.SendTimeout)) + } + if m.NextTries != 0 { + n += 1 + sovHttpoptions(uint64(m.NextTries)) + } + if m.NextTimeout != 0 { + n += 1 + sovHttpoptions(uint64(m.NextTimeout)) + } + if len(m.NextCases) > 0 { + for _, s := range m.NextCases { + l = len(s) + n += 1 + l + sovHttpoptions(uint64(l)) + } + } + return n +} + +func sovHttpoptions(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozHttpoptions(x uint64) (n int) { + return sovHttpoptions(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ServiceExposeHTTPOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceExposeHTTPOptions{`, + `MaxBodySize:` + fmt.Sprintf("%v", this.MaxBodySize) + `,`, + `ReadTimeout:` + fmt.Sprintf("%v", this.ReadTimeout) + `,`, + `SendTimeout:` + fmt.Sprintf("%v", this.SendTimeout) + `,`, + `NextTries:` + fmt.Sprintf("%v", this.NextTries) + `,`, + `NextTimeout:` + fmt.Sprintf("%v", this.NextTimeout) + `,`, + `NextCases:` + fmt.Sprintf("%v", this.NextCases) + `,`, + `}`, + }, "") + return s +} +func valueToStringHttpoptions(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ServiceExposeHTTPOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceExposeHTTPOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceExposeHTTPOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxBodySize", wireType) + } + m.MaxBodySize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxBodySize |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadTimeout", wireType) + } + m.ReadTimeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadTimeout |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SendTimeout", wireType) + } + m.SendTimeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SendTimeout |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NextTries", wireType) + } + m.NextTries = 0 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NextTries |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NextTimeout", wireType) + } + m.NextTimeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NextTimeout |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextCases", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHttpoptions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHttpoptions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextCases = append(m.NextCases, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHttpoptions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthHttpoptions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipHttpoptions(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthHttpoptions + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupHttpoptions + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthHttpoptions + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthHttpoptions = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowHttpoptions = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupHttpoptions = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/manifest/v2beta3/manifest.go b/go/manifest/v2beta3/manifest.go new file mode 100644 index 00000000..46cd7d35 --- /dev/null +++ b/go/manifest/v2beta3/manifest.go @@ -0,0 +1,67 @@ +package v2beta3 + +import ( + 
"crypto/sha256" + "encoding/json" + "fmt" + "regexp" + + sdk "github.com/cosmos/cosmos-sdk/types" + + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" +) + +var ( + serviceNameValidationRegex = regexp.MustCompile(`^[a-z]([-a-z0-9]*[a-z0-9])?$`) + hostnameMaxLen = 255 +) + +// Manifest store list of groups +type Manifest Groups + +// GetGroups returns a manifest with groups list +func (m Manifest) GetGroups() Groups { + return Groups(m) +} + +// Validate does validation for manifest +func (m Manifest) Validate() error { + if len(m) == 0 { + return fmt.Errorf("%w: manifest is empty", ErrInvalidManifest) + } + + return m.GetGroups().Validate() +} + +func (m Manifest) CheckAgainstDeployment(dgroups []dtypes.Group) error { + gspecs := make([]dtypes.GroupSpec, 0, len(dgroups)) + + for _, dgroup := range dgroups { + gspec := dgroup.GroupSpec + gspecs = append(gspecs, gspec) + } + + return m.CheckAgainstGSpecs(gspecs) +} + +func (m Manifest) CheckAgainstGSpecs(gspecs dtypes.GroupSpecs) error { + return m.GetGroups().CheckAgainstGSpecs(gspecs) +} + +// Version calculates the identifying deterministic hash for an SDL. +// Sha256 returns 32 byte sum of the SDL. +func (m Manifest) Version() ([]byte, error) { + data, err := json.Marshal(m) + if err != nil { + return nil, err + } + + sortedBytes, err := sdk.SortJSON(data) + if err != nil { + return nil, err + } + + sum := sha256.Sum256(sortedBytes) + + return sum[:], nil +} diff --git a/go/manifest/v2beta3/manifest_cross_validation_test.go b/go/manifest/v2beta3/manifest_cross_validation_test.go new file mode 100644 index 00000000..07238272 --- /dev/null +++ b/go/manifest/v2beta3/manifest_cross_validation_test.go @@ -0,0 +1,197 @@ +package v2beta3 + +import ( + "testing" + + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" + + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + attr "pkg.akt.dev/go/node/types/attributes/v1" + akashtypes "pkg.akt.dev/go/node/types/resources/v1beta4" + tutil "pkg.akt.dev/go/testutil" + testutil "pkg.akt.dev/go/testutil/v1beta3" +) + +const ( + validationPrefix = `^manifest cross-validation error: ` + groupPrefix = validationPrefix + `group ".+": ` + resourcesIDPrefix = groupPrefix + `resources ID \(\d+\): ` + servicePrefix = groupPrefix + `service ".+": ` + resourcesMismatchRegex = servicePrefix + `CPU|GPU|Memory|Storage resources mismatch for ID \d+$` + overUtilizedGroup = servicePrefix + `over-utilized replicas \(\d+\) > group spec resources count \(\d+\)$` + overUtilizedEndpoints = servicePrefix + `resources ID \(\d+\): over-utilized HTTP|PORT|IP endpoints$` + underUtilizedGroupResources = resourcesIDPrefix + `under-utilized \(\d+\) resources` + underUtilizedGroupEndpoints = resourcesIDPrefix + `under-utilized \(\d+\) HTTP|PORT|IP endpoints` +) + +func TestManifestWithEmptyDeployment(t *testing.T) { + m := simpleManifest(1) + deployment := make([]dtypes.Group, 0) + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) +} + +func simpleDeployment(t *testing.T, expose ServiceExposes, count uint32) []dtypes.Group { + deployment := make([]dtypes.Group, 1) + gid := testutil.GroupID(t) + resources := make(dtypes.ResourceUnits, 1) + resources[0] = dtypes.ResourceUnit{ + Resources: simpleResources(expose), + Count: count, + Price: sdk.NewInt64DecCoin(tutil.CoinDenom, 1), + } + deployment[0] = dtypes.Group{ + ID: gid, + State: 0, + GroupSpec: dtypes.GroupSpec{ + Name: nameOfTestGroup, + Requirements: attr.PlacementRequirements{}, + Resources: resources, + }, + } + + return deployment 
+} + +func TestManifestWithDeployment(t *testing.T) { + m := simpleManifest(1) + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + err := m.CheckAgainstDeployment(deployment) + require.NoError(t, err) +} + +func TestManifestWithDeploymentMultipleCount(t *testing.T) { + addl := uint32(tutil.RandRangeInt(1, 20)) // nolint: gosec + m := simpleManifest(addl) + + deployment := simpleDeployment(t, m[0].Services[0].Expose, addl) + + err := m.CheckAgainstDeployment(deployment) + require.NoError(t, err) +} + +func TestManifestWithDeploymentMultiple(t *testing.T) { + cpu := int64(tutil.RandRangeInt(1024, 2000)) + storage := int64(tutil.RandRangeInt(2000, 3000)) + memory := int64(tutil.RandRangeInt(3001, 4000)) + + m := make(Manifest, 3) + m[0] = simpleManifest(1)[0] + m[0].Services[0].Resources.CPU.Units.Val = sdk.NewInt(cpu) + m[0].Name = "testgroup-2" + + m[1] = simpleManifest(1)[0] + m[1].Services[0].Resources.Storage[0].Quantity.Val = sdk.NewInt(storage) + m[1].Name = "testgroup-1" + m[1].Services[0].Expose[0].Hosts = []string{"host1.test"} + + m[2] = simpleManifest(1)[0] + m[2].Services[0].Resources.Memory.Quantity.Val = sdk.NewInt(memory) + m[2].Name = "testgroup-0" + m[2].Services[0].Expose[0].Hosts = []string{"host2.test"} + + deployment := make([]dtypes.Group, 3) + deployment[0] = simpleDeployment(t, m[0].Services[0].Expose, 1)[0] + deployment[0].GroupSpec.Resources[0].Memory.Quantity.Val = sdk.NewInt(memory) + deployment[0].GroupSpec.Name = "testgroup-0" + + deployment[1] = simpleDeployment(t, m[1].Services[0].Expose, 1)[0] + deployment[1].GroupSpec.Resources[0].Storage[0].Quantity.Val = sdk.NewInt(storage) + deployment[1].GroupSpec.Name = "testgroup-1" + + deployment[2] = simpleDeployment(t, m[2].Services[0].Expose, 1)[0] + deployment[2].GroupSpec.Resources[0].CPU.Units.Val = sdk.NewInt(cpu) + deployment[2].GroupSpec.Name = "testgroup-2" + + err := m.CheckAgainstDeployment(deployment) + + require.NoError(t, err) +} + +func TestManifestWithDeploymentCPUMismatch(t *testing.T) { + m := simpleManifest(1) + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + deployment[0].GroupSpec.Resources[0].CPU.Units.Val = sdk.NewInt(999) + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) + require.Regexp(t, resourcesMismatchRegex, err) +} + +func TestManifestWithDeploymentGPUMismatch(t *testing.T) { + m := simpleManifest(1) + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + deployment[0].GroupSpec.Resources[0].GPU.Units.Val = sdk.NewInt(200) + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) + require.Regexp(t, resourcesMismatchRegex, err) +} + +func TestManifestWithDeploymentMemoryMismatch(t *testing.T) { + m := simpleManifest(1) + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + deployment[0].GroupSpec.Resources[0].Memory.Quantity.Val = sdk.NewInt(99999) + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) + require.Regexp(t, resourcesMismatchRegex, err) +} + +func TestManifestWithDeploymentStorageMismatch(t *testing.T) { + m := simpleManifest(1) + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + deployment[0].GroupSpec.Resources[0].Storage[0].Quantity.Val = sdk.NewInt(99999) + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) + require.Regexp(t, resourcesMismatchRegex, err) +} + +func TestManifestWithDeploymentCountMismatch(t *testing.T) { + m := simpleManifest(1) + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + 
deployment[0].GroupSpec.Resources[0].Count++ + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) + require.Regexp(t, underUtilizedGroupResources, err) +} + +func TestManifestWithManifestGroupMismatch(t *testing.T) { + m := simpleManifest(1) + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + m[0].Services[0].Count++ + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) + require.Regexp(t, overUtilizedGroup, err) +} + +func TestManifestWithEndpointMismatchA(t *testing.T) { + m := simpleManifest(1) + + // Make this require an endpoint + m[0].Services[0].Expose[0] = ServiceExpose{ + Port: 2000, + ExternalPort: 0, + Proto: TCP, + Service: "", + Global: true, + Hosts: nil, + } + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + + // Remove an endpoint where the manifest calls for it + deployment[0].GroupSpec.Resources[0].Endpoints = akashtypes.Endpoints{} + + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) + require.Regexp(t, overUtilizedEndpoints, err) +} + +func TestManifestWithEndpointMismatchB(t *testing.T) { + m := simpleManifest(1) + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + // Add an endpoint where the manifest doesn't call for it + deployment[0].GroupSpec.Resources[0].Endpoints = append(deployment[0].GroupSpec.Resources[0].Endpoints, akashtypes.Endpoint{}) + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) + require.Regexp(t, underUtilizedGroupEndpoints, err) +} diff --git a/go/manifest/v2beta3/manifest_test.go b/go/manifest/v2beta3/manifest_test.go new file mode 100644 index 00000000..940a2f9a --- /dev/null +++ b/go/manifest/v2beta3/manifest_test.go @@ -0,0 +1,470 @@ +package v2beta3 + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" + + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + resources "pkg.akt.dev/go/node/types/resources/v1beta4" + "pkg.akt.dev/go/testutil" +) + +const ( + nameOfTestService = "test-service" + nameOfTestGroup = "testGroup" +) + +var ( + randCPU1 = uint64(testutil.RandCPUUnits()) + randCPU2 = randCPU1 + 1 + randGPU1 = uint64(testutil.RandGPUUnits()) + randMemory = testutil.RandMemoryQuantity() + randStorage = testutil.RandStorageQuantity() +) + +var randUnits1 = resources.Resources{ + ID: 1, + CPU: &resources.CPU{ + Units: resources.NewResourceValue(randCPU1), + }, + GPU: &resources.GPU{ + Units: resources.NewResourceValue(randGPU1), + }, + Memory: &resources.Memory{ + Quantity: resources.NewResourceValue(randMemory), + }, + Storage: resources.Volumes{ + resources.Storage{ + Quantity: resources.NewResourceValue(randStorage), + }, + }, + Endpoints: resources.Endpoints{}, +} + +var randUnits3 = resources.Resources{ + ID: 1, + CPU: &resources.CPU{ + Units: resources.NewResourceValue(randCPU2), + }, + Memory: &resources.Memory{ + Quantity: resources.NewResourceValue(randMemory), + }, + Storage: resources.Volumes{ + resources.Storage{ + Quantity: resources.NewResourceValue(randStorage), + }, + }, + Endpoints: resources.Endpoints{}, +} + +func simpleResources(exposes ServiceExposes) resources.Resources { + return resources.Resources{ + ID: 1, + CPU: &resources.CPU{ + Units: resources.ResourceValue{ + Val: sdk.NewIntFromUint64(randCPU1), + }, + Attributes: nil, + }, + Memory: &resources.Memory{ + Quantity: resources.ResourceValue{ + Val: sdk.NewIntFromUint64(randMemory), + }, + Attributes: nil, + }, + GPU: &resources.GPU{ + Units: 
resources.ResourceValue{ + Val: sdk.NewIntFromUint64(randGPU1), + }, + Attributes: nil, + }, + Storage: resources.Volumes{ + resources.Storage{ + Name: "default", + Quantity: resources.ResourceValue{ + Val: sdk.NewIntFromUint64(randStorage), + }, + }, + }, + Endpoints: exposes.GetEndpoints(), + } +} + +func TestNilManifestIsInvalid(t *testing.T) { + m := Manifest{} + err := m.Validate() + + require.Error(t, err) + require.Regexp(t, "^.*manifest is empty.*$", err) +} + +func simpleManifest(svcCount uint32) Manifest { + expose := make([]ServiceExpose, 1) + expose[0].Global = true + expose[0].Port = 80 + expose[0].Proto = TCP + expose[0].Hosts = make([]string, 1) + expose[0].Hosts[0] = "host.test" + + services := make([]Service, 1) + services[0] = Service{ + Name: nameOfTestService, + Image: "test/image:1.0", + Command: nil, + Args: nil, + Env: nil, + Resources: simpleResources(expose), + Count: svcCount, + Expose: expose, + } + + m := make(Manifest, 1) + m[0] = Group{ + Name: nameOfTestGroup, + Services: services, + } + + return m +} + +func TestSimpleManifestIsValid(t *testing.T) { + m := simpleManifest(1) + err := m.Validate() + require.NoError(t, err) +} + +func TestSimpleManifestInvalidResourcesID(t *testing.T) { + m := simpleManifest(1) + m[0].Services[0].Resources.ID = 0 + err := m.Validate() + require.Error(t, err) +} + +func TestManifestWithNoGlobalServicesIsInvalid(t *testing.T) { + m := simpleManifest(1) + m[0].Services[0].Expose[0].Global = false + err := m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*zero global services.*$", err) +} + +func TestManifestWithBadServiceNameIsInvalid(t *testing.T) { + m := simpleManifest(1) + m[0].Services[0].Name = "a_bad_service_name" // should not contain underscores + err := m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*name is invalid.*$", err) + + m[0].Services[0].Name = "a-name-" // should not end with dash + err = m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*name is invalid.*$", err) +} + +func TestManifestWithServiceNameIsValid(t *testing.T) { + m := simpleManifest(1) + + m[0].Services[0].Name = "9aaa-bar" // does not allow starting with a number + err := m.Validate() + require.ErrorIs(t, err, ErrInvalidManifest) + require.Regexp(t, "^.*name is invalid.*$", err) +} + +func TestManifestWithDuplicateHostIsInvalid(t *testing.T) { + m := simpleManifest(1) + hosts := make([]string, 2) + const hostname = "a.test" + hosts[0] = hostname + hosts[1] = hostname + m[0].Services[0].Expose[0].Hosts = hosts + err := m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*hostname.+is duplicated.*$", err) +} + +func TestManifestWithDashInHostname(t *testing.T) { + m := simpleManifest(1) + hosts := make([]string, 1) + hosts[0] = "a-test.com" + m[0].Services[0].Expose[0].Hosts = hosts + err := m.Validate() + require.NoError(t, err) +} + +func TestManifestWithBadHostIsInvalid(t *testing.T) { + m := simpleManifest(1) + hosts := make([]string, 2) + hosts[0] = "bob.test" // valid + hosts[1] = "-bob" // invalid + m[0].Services[0].Expose[0].Hosts = hosts + err := m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*invalid hostname.*$", err) +} + +func TestManifestWithLongHostIsInvalid(t *testing.T) { + m := simpleManifest(1) + hosts := make([]string, 1) + buf := &bytes.Buffer{} + for i := 0; i != 255; i++ { + _, err := buf.WriteRune('a') + require.NoError(t, err) + } + _, err := buf.WriteString(".com") + require.NoError(t, err) + + hosts[0] = buf.String() + m[0].Services[0].Expose[0].Hosts = hosts + err = 
m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*invalid hostname.*$", err) +} + +func TestManifestWithDuplicateGroupIsInvalid(t *testing.T) { + mDuplicate := make(Manifest, 2) + mDuplicate[0] = simpleManifest(1)[0] + mDuplicate[1] = simpleManifest(1)[0] + mDuplicate[1].Services[0].Expose[0].Hosts[0] = "anotherhost.test" + err := mDuplicate.Validate() + require.Error(t, err) + require.Regexp(t, "^.*duplicate group.*$", err) +} + +func TestManifestWithNoServicesInvalid(t *testing.T) { + m := simpleManifest(1) + m[0].Services = nil + err := m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*contains no services.*$", err) +} + +func TestManifestWithEmptyServiceNameInvalid(t *testing.T) { + m := simpleManifest(1) + m[0].Services[0].Name = "" + err := m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*service name is empty.*$", err) +} + +func TestManifestWithEmptyImageNameInvalid(t *testing.T) { + m := simpleManifest(1) + m[0].Services[0].Image = "" + err := m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*service.+has empty image name.*$", err) +} + +func TestManifestWithEmptyEnvValueIsValid(t *testing.T) { + m := simpleManifest(1) + envVars := make([]string, 1) + envVars[0] = "FOO=" // sets FOO to empty string + m[0].Services[0].Env = envVars + err := m.Validate() + require.NoError(t, err) +} + +func TestManifestWithEmptyEnvNameIsInvalid(t *testing.T) { + m := simpleManifest(1) + envVars := make([]string, 1) + envVars[0] = "=FOO" // invalid + m[0].Services[0].Env = envVars + err := m.Validate() + require.Error(t, err) + require.Regexp(t, `^.*var\. with an empty name.*$`, err) +} + +func TestManifestWithBadEnvNameIsInvalid(t *testing.T) { + m := simpleManifest(1) + envVars := make([]string, 1) + envVars[0] = "9VAR=FOO" // invalid because it starts with a digit + m[0].Services[0].Env = envVars + err := m.Validate() + require.Error(t, err) + require.Regexp(t, `^.*var\. 
with an invalid name.*$`, err) +} + +func TestManifestServiceUnknownProtocolIsInvalid(t *testing.T) { + m := simpleManifest(1) + m[0].Services[0].Expose[0].Proto = "ICMP" + err := m.Validate() + require.Error(t, err) + require.Regexp(t, `^.*protocol .+ unknown.*$`, err) +} + +func Test_ValidateManifest(t *testing.T) { + expose := make([]ServiceExpose, 1) + expose[0].Global = true + expose[0].Port = 80 + expose[0].Proto = TCP + expose[0].Hosts = make([]string, 1) + expose[0].Hosts[0] = "host.test" + + tests := []struct { + name string + ok bool + mani Manifest + dgroups []dtypes.GroupSpec + }{ + { + name: "empty", + ok: false, + }, + + { + name: "single", + ok: true, + mani: []Group{ + { + Name: "foo", + Services: []Service{ + { + Name: "svc1", + Image: "test", + Resources: simpleResources(expose), + Count: 3, + Expose: expose, + }, + }, + }, + }, + dgroups: []dtypes.GroupSpec{ + { + Name: "foo", + Resources: dtypes.ResourceUnits{ + { + Resources: simpleResources(expose), + Count: 3, + }, + }, + }, + }, + }, + + { + name: "multi-mgroup", + ok: true, + mani: []Group{ + { + Name: "foo", + Services: []Service{ + { + Name: "svc1", + Image: "test", + Resources: simpleResources(expose), + Count: 1, + Expose: expose, + }, + { + Name: "svc1", + Image: "test", + Resources: simpleResources(expose), + Count: 2, + }, + }, + }, + }, + dgroups: []dtypes.GroupSpec{ + { + Name: "foo", + Resources: dtypes.ResourceUnits{ + { + Resources: simpleResources(expose), + Count: 3, + }, + }, + }, + }, + }, + + { + name: "mismatch-name", + ok: false, + mani: []Group{ + { + Name: "foo-bad", + Services: []Service{ + { + Name: "svc1", + Image: "test", + Resources: randUnits1, + Count: 3, + }, + }, + }, + }, + dgroups: []dtypes.GroupSpec{ + { + Name: "foo", + Resources: dtypes.ResourceUnits{ + { + Resources: randUnits1, + Count: 3, + }, + }, + }, + }, + }, + + { + name: "mismatch-cpu", + ok: false, + mani: []Group{ + { + Name: "foo", + Services: []Service{ + { + Name: "svc1", + Image: "test", + Resources: randUnits3, + Count: 3, + }, + }, + }, + }, + dgroups: []dtypes.GroupSpec{ + { + Name: "foo", + Resources: dtypes.ResourceUnits{ + { + Resources: randUnits1, + Count: 3, + }, + }, + }, + }, + }, + + { + name: "mismatch-group-count", + ok: false, + mani: []Group{ + { + Name: "foo", + Services: []Service{ + { + Name: "svc1", + Image: "test", + Resources: randUnits3, + Count: 3, + }, + }, + }, + }, + dgroups: []dtypes.GroupSpec{}, + }, + } + + for _, test := range tests { + err := test.mani.CheckAgainstGSpecs(test.dgroups) + if test.ok { + assert.NoError(t, err, test.name) + } else { + assert.Error(t, err, test.name) + } + } +} diff --git a/go/manifest/v2beta3/parse.go b/go/manifest/v2beta3/parse.go new file mode 100644 index 00000000..6647e04c --- /dev/null +++ b/go/manifest/v2beta3/parse.go @@ -0,0 +1,65 @@ +package v2beta3 + +import ( + "errors" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" +) + +var ( + errUnknownServiceProtocol = errors.New("unknown service protocol") + ErrUnsupportedServiceProtocol = errors.New("unsupported service protocol") +) + +type ServiceProtocol string + +const ( + TCP = ServiceProtocol("TCP") + UDP = ServiceProtocol("UDP") +) + +func (sp ServiceProtocol) ToString() string { + return string(sp) +} + +func (sp ServiceProtocol) ToKube() (corev1.Protocol, error) { + switch sp { + case TCP: + return corev1.ProtocolTCP, nil + case UDP: + return corev1.ProtocolUDP, nil + } + + return corev1.Protocol(""), fmt.Errorf("%w: %v", errUnknownServiceProtocol, sp) +} + +func 
ServiceProtocolFromKube(proto corev1.Protocol) (ServiceProtocol, error) { + switch proto { + case corev1.ProtocolTCP: + return TCP, nil + case corev1.ProtocolUDP: + return UDP, nil + } + + return ServiceProtocol(""), fmt.Errorf("%w: %v", errUnknownServiceProtocol, proto) +} + +func ParseServiceProtocol(input string) (ServiceProtocol, error) { + var result ServiceProtocol + + // This is not a case-sensitive parse, so make all input uppercase + input = strings.ToUpper(input) + + switch input { + case "TCP", "": // The empty string (no input) implies TCP + result = TCP + case "UDP": + result = UDP + default: + return result, ErrUnsupportedServiceProtocol + } + + return result, nil +} diff --git a/go/manifest/v2beta3/service.go b/go/manifest/v2beta3/service.go new file mode 100644 index 00000000..cfd43a78 --- /dev/null +++ b/go/manifest/v2beta3/service.go @@ -0,0 +1,108 @@ +package v2beta3 + +import ( + "fmt" + "sort" + "strings" + + k8svalidation "k8s.io/apimachinery/pkg/util/validation" + + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" +) + +func (s *Service) validate(helper *validateManifestGroupsHelper) error { + if len(s.Name) == 0 { + return fmt.Errorf("%w: service name is empty", ErrInvalidManifest) + } + + serviceNameValid := serviceNameValidationRegex.MatchString(s.Name) + if !serviceNameValid { + return fmt.Errorf("%w: service %q name is invalid", ErrInvalidManifest, s.Name) + } + + if len(s.Image) == 0 { + return fmt.Errorf("%w: service %q has empty image name", ErrInvalidManifest, s.Name) + } + + if err := s.Resources.Validate(); err != nil { + return err + } + + for _, envVar := range s.Env { + idx := strings.Index(envVar, "=") + if idx == 0 { + return fmt.Errorf("%w: service %q defines an env. var. with an empty name", ErrInvalidManifest, s.Name) + } + + var envVarName string + if idx > 0 { + envVarName = envVar[0:idx] + } else { + envVarName = envVar + } + + if 0 != len(k8svalidation.IsEnvVarName(envVarName)) { + return fmt.Errorf("%w: service %q defines an env. var. 
with an invalid name %q", ErrInvalidManifest, s.Name, envVarName) + } + } + + if !sort.IsSorted(s.Expose) { + return fmt.Errorf("%w: service %q: expose is not sorted", ErrInvalidManifest, s.Name) + } + + for _, serviceExpose := range s.Expose { + if err := serviceExpose.validate(helper); err != nil { + return fmt.Errorf("%w: service %q: %w", ErrInvalidManifest, s.Name, err) + } + } + + return nil +} + +func (s *Service) checkAgainstGSpec(gspec *groupSpec) error { + // find resource units by id + var gRes *dtypes.ResourceUnit + + for idx := range gspec.gs.Resources { + if s.Resources.ID == gspec.gs.Resources[idx].ID { + gRes = &gspec.gs.Resources[idx] + break + } + } + + if gRes == nil { + return fmt.Errorf("service %q: not found deployment group resources with ID = %d", s.Name, s.Resources.ID) + } + + if s.Count > gRes.Count { + return fmt.Errorf("service %q: over-utilized replicas (%d) > group spec resources count (%d)", + s.Name, s.Count, gRes.Count) + } + + // do not compare resources directly + if !s.Resources.CPU.Equal(gRes.CPU) { + return fmt.Errorf("service %q: CPU resources mismatch for ID %d", s.Name, s.Resources.ID) + } + + if !s.Resources.GPU.Equal(gRes.GPU) { + return fmt.Errorf("service %q: GPU resources mismatch for ID %d", s.Name, s.Resources.ID) + } + + if !s.Resources.Memory.Equal(gRes.Memory) { + return fmt.Errorf("service %q: Memory resources mismatch for ID %d", s.Name, s.Resources.ID) + } + + if !s.Resources.Storage.Equal(gRes.Storage) { + return fmt.Errorf("service %q: Storage resources mismatch for ID %d", s.Name, s.Resources.ID) + } + + for _, expose := range s.Expose { + if err := expose.checkAgainstResources(gRes, gspec.endpoints); err != nil { + return fmt.Errorf("service %q: resource ID %d: %w", s.Name, gRes.ID, err) + } + } + + gRes.Count -= s.Count + + return nil +} diff --git a/go/manifest/v2beta3/service.pb.go b/go/manifest/v2beta3/service.pb.go new file mode 100644 index 00000000..0b901fa9 --- /dev/null +++ b/go/manifest/v2beta3/service.pb.go @@ -0,0 +1,1742 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/manifest/v2beta3/service.proto + +package v2beta3 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + v1beta4 "pkg.akt.dev/go/node/types/resources/v1beta4" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// StorageParams +type StorageParams struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` + Mount string `protobuf:"bytes,2,opt,name=mount,proto3" json:"mount" yaml:"mount"` + ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"readOnly" yaml:"readOnly"` +} + +func (m *StorageParams) Reset() { *m = StorageParams{} } +func (*StorageParams) ProtoMessage() {} +func (*StorageParams) Descriptor() ([]byte, []int) { + return fileDescriptor_6d5964c4976d68e5, []int{0} +} +func (m *StorageParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StorageParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StorageParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageParams.Merge(m, src) +} +func (m *StorageParams) XXX_Size() int { + return m.Size() +} +func (m *StorageParams) XXX_DiscardUnknown() { + xxx_messageInfo_StorageParams.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageParams proto.InternalMessageInfo + +func (m *StorageParams) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *StorageParams) GetMount() string { + if m != nil { + return m.Mount + } + return "" +} + +func (m *StorageParams) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + +// ServiceParams +type ServiceParams struct { + Storage []StorageParams `protobuf:"bytes,1,rep,name=storage,proto3" json:"storage" yaml:"storage"` + Credentials *ImageCredentials `protobuf:"bytes,10,opt,name=credentials,proto3" json:"credentials,omitempty" yaml:"credentials,omitempty"` +} + +func (m *ServiceParams) Reset() { *m = ServiceParams{} } +func (*ServiceParams) ProtoMessage() {} +func (*ServiceParams) Descriptor() ([]byte, []int) { + return fileDescriptor_6d5964c4976d68e5, []int{1} +} +func (m *ServiceParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ServiceParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ServiceParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceParams.Merge(m, src) +} +func (m *ServiceParams) XXX_Size() int { + return m.Size() +} +func (m *ServiceParams) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceParams.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceParams proto.InternalMessageInfo + +func (m *ServiceParams) GetStorage() []StorageParams { + if m != nil { + return m.Storage + } + return nil +} + +func (m *ServiceParams) GetCredentials() *ImageCredentials { + if m != nil { + return m.Credentials + } + return nil +} + +// Credentials to fetch image from registry +type ImageCredentials struct { + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host" yaml:"host"` + Email string `protobuf:"bytes,2,opt,name=email,proto3" json:"email" yaml:"email"` + Username string `protobuf:"bytes,3,opt,name=username,proto3" json:"username" yaml:"username"` + Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password" yaml:"password"` +} + +func (m 
*ImageCredentials) Reset() { *m = ImageCredentials{} } +func (*ImageCredentials) ProtoMessage() {} +func (*ImageCredentials) Descriptor() ([]byte, []int) { + return fileDescriptor_6d5964c4976d68e5, []int{2} +} +func (m *ImageCredentials) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageCredentials) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ImageCredentials.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ImageCredentials) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageCredentials.Merge(m, src) +} +func (m *ImageCredentials) XXX_Size() int { + return m.Size() +} +func (m *ImageCredentials) XXX_DiscardUnknown() { + xxx_messageInfo_ImageCredentials.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageCredentials proto.InternalMessageInfo + +func (m *ImageCredentials) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *ImageCredentials) GetEmail() string { + if m != nil { + return m.Email + } + return "" +} + +func (m *ImageCredentials) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *ImageCredentials) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +// Service stores name, image, args, env, unit, count and expose list of service +type Service struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` + Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image" yaml:"image"` + Command []string `protobuf:"bytes,3,rep,name=command,proto3" json:"command" yaml:"command"` + Args []string `protobuf:"bytes,4,rep,name=args,proto3" json:"args" yaml:"args"` + Env []string `protobuf:"bytes,5,rep,name=env,proto3" json:"env" yaml:"env"` + Resources v1beta4.Resources `protobuf:"bytes,6,opt,name=resources,proto3" json:"resources" yaml:"resources"` + Count uint32 `protobuf:"varint,7,opt,name=count,proto3" json:"count" yaml:"count"` + Expose ServiceExposes `protobuf:"bytes,8,rep,name=expose,proto3,castrepeated=ServiceExposes" json:"expose" yaml:"expose"` + Params *ServiceParams `protobuf:"bytes,9,opt,name=params,proto3" json:"params,omitempty" yaml:"params,omitempty"` + Credentials *ImageCredentials `protobuf:"bytes,10,opt,name=credentials,proto3" json:"credentials" yaml:"credentials"` +} + +func (m *Service) Reset() { *m = Service{} } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { + return fileDescriptor_6d5964c4976d68e5, []int{3} +} +func (m *Service) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Service.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service.Merge(m, src) +} +func (m *Service) XXX_Size() int { + return m.Size() +} +func (m *Service) XXX_DiscardUnknown() { + xxx_messageInfo_Service.DiscardUnknown(m) +} + +var xxx_messageInfo_Service proto.InternalMessageInfo + +func (m *Service) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Service) GetImage() string { + if m != nil { + return m.Image + } + return "" +} + +func (m *Service) GetCommand() []string { + if m != 
nil { + return m.Command + } + return nil +} + +func (m *Service) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *Service) GetEnv() []string { + if m != nil { + return m.Env + } + return nil +} + +func (m *Service) GetResources() v1beta4.Resources { + if m != nil { + return m.Resources + } + return v1beta4.Resources{} +} + +func (m *Service) GetCount() uint32 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *Service) GetExpose() ServiceExposes { + if m != nil { + return m.Expose + } + return nil +} + +func (m *Service) GetParams() *ServiceParams { + if m != nil { + return m.Params + } + return nil +} + +func (m *Service) GetCredentials() *ImageCredentials { + if m != nil { + return m.Credentials + } + return nil +} + +func init() { + proto.RegisterType((*StorageParams)(nil), "akash.manifest.v2beta3.StorageParams") + proto.RegisterType((*ServiceParams)(nil), "akash.manifest.v2beta3.ServiceParams") + proto.RegisterType((*ImageCredentials)(nil), "akash.manifest.v2beta3.ImageCredentials") + proto.RegisterType((*Service)(nil), "akash.manifest.v2beta3.Service") +} + +func init() { + proto.RegisterFile("akash/manifest/v2beta3/service.proto", fileDescriptor_6d5964c4976d68e5) +} + +var fileDescriptor_6d5964c4976d68e5 = []byte{ + // 737 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x3b, 0x6f, 0x13, 0x4d, + 0x14, 0xf5, 0x7c, 0x4e, 0xfc, 0x18, 0x7f, 0x09, 0xd1, 0x0a, 0xc8, 0x26, 0x82, 0x1d, 0x33, 0x22, + 0xc2, 0x90, 0x68, 0x57, 0x49, 0x90, 0x90, 0x78, 0x14, 0x2c, 0xa2, 0xa0, 0x02, 0x0d, 0x1d, 0x0d, + 0x1a, 0xdb, 0x83, 0x63, 0xc5, 0xbb, 0x63, 0xed, 0xac, 0x0d, 0xe9, 0x68, 0xe9, 0xf8, 0x1d, 0xf0, + 0x47, 0x52, 0xa6, 0x4c, 0x35, 0x80, 0xd3, 0xb9, 0xdc, 0x82, 0x16, 0x34, 0x8f, 0xcd, 0xda, 0x24, + 0x10, 0x21, 0xaa, 0xe4, 0x9e, 0x7b, 0xee, 0xdc, 0xe7, 0x59, 0xc3, 0x9b, 0x74, 0x9f, 0x8a, 0xbd, + 0x20, 0xa2, 0x71, 0xff, 0x0d, 0x13, 0x69, 0x30, 0xde, 0x69, 0xb3, 0x94, 0xee, 0x06, 0x82, 0x25, + 0xe3, 0x7e, 0x87, 0xf9, 0xc3, 0x84, 0xa7, 0xdc, 0xb9, 0xaa, 0x59, 0x7e, 0xce, 0xf2, 0x2d, 0x6b, + 0xfd, 0x72, 0x8f, 0xf7, 0xb8, 0xa6, 0x04, 0xea, 0x3f, 0xc3, 0x5e, 0xbf, 0xf3, 0xe7, 0x37, 0xd9, + 0xbb, 0x21, 0x17, 0xf6, 0xe5, 0xf5, 0x2d, 0xc3, 0x6d, 0x53, 0xc1, 0x82, 0x84, 0x09, 0x3e, 0x4a, + 0x3a, 0x4c, 0x04, 0xe3, 0x6d, 0x15, 0x71, 0xb7, 0x40, 0x0c, 0x1b, 0x7f, 0x06, 0x70, 0xe9, 0x65, + 0xca, 0x13, 0xda, 0x63, 0x2f, 0x68, 0x42, 0x23, 0xe1, 0x6c, 0xc2, 0x85, 0x98, 0x46, 0xcc, 0x05, + 0x4d, 0xd0, 0xaa, 0x87, 0xab, 0x53, 0x89, 0xb4, 0x9d, 0x49, 0xd4, 0x38, 0xa0, 0xd1, 0xe0, 0x3e, + 0x56, 0x16, 0x26, 0x1a, 0x74, 0x02, 0xb8, 0x18, 0xf1, 0x51, 0x9c, 0xba, 0xff, 0x69, 0xf6, 0xda, + 0x54, 0x22, 0x03, 0x64, 0x12, 0xfd, 0x6f, 0xe8, 0xda, 0xc4, 0xc4, 0xc0, 0xce, 0x43, 0x58, 0x4f, + 0x18, 0xed, 0xbe, 0xe6, 0xf1, 0xe0, 0xc0, 0x2d, 0x37, 0x41, 0xab, 0x16, 0xa2, 0xa9, 0x44, 0x35, + 0x05, 0x3e, 0x8f, 0x07, 0x07, 0x99, 0x44, 0x97, 0x4c, 0x5c, 0x8e, 0x60, 0x72, 0xea, 0xc4, 0x3f, + 0x54, 0xb5, 0xa6, 0x67, 0x5b, 0x6d, 0x1b, 0x56, 0x85, 0x29, 0xdf, 0x05, 0xcd, 0x72, 0xab, 0xb1, + 0xb3, 0xe1, 0x9f, 0x3f, 0x59, 0x7f, 0xae, 0xcb, 0xf0, 0xc6, 0xa1, 0x44, 0xa5, 0xa9, 0x44, 0x79, + 0x74, 0x26, 0xd1, 0xb2, 0xc9, 0x6b, 0x01, 0x4c, 0x72, 0x97, 0xf3, 0x01, 0xc0, 0x46, 0x27, 0x61, + 0x5d, 0x16, 0xa7, 0x7d, 0x3a, 0x10, 0x2e, 0x6c, 0x82, 0x56, 0x63, 0xa7, 0xf5, 0xbb, 0x44, 0xcf, + 0x22, 0xda, 0x63, 0x4f, 0x0a, 0x7e, 0xf8, 0xe8, 0x50, 0x22, 0x30, 0x95, 0xe8, 0xca, 0xcc, 0x23, + 0x5b, 0x3c, 0xea, 0xa7, 0x2c, 0x1a, 0xa6, 0xaa, 0xe3, 
0x6b, 0x26, 0xf3, 0xb9, 0x6e, 0x4c, 0x66, + 0x73, 0xe3, 0x29, 0x80, 0x2b, 0xbf, 0x26, 0x50, 0x2b, 0xdb, 0xe3, 0x22, 0x9d, 0x5d, 0x99, 0xb2, + 0x8b, 0x95, 0x29, 0x0b, 0x13, 0x0d, 0xaa, 0x95, 0xb1, 0x88, 0xf6, 0x07, 0xb3, 0x2b, 0xd3, 0x40, + 0xb1, 0x32, 0x6d, 0x62, 0x62, 0x60, 0xe7, 0x01, 0xac, 0x8d, 0x04, 0x4b, 0xf4, 0x51, 0x94, 0x75, + 0x8c, 0xde, 0x58, 0x8e, 0x15, 0x1b, 0xcb, 0x11, 0x4c, 0x4e, 0x9d, 0x2a, 0x78, 0x48, 0x85, 0x78, + 0xcb, 0x93, 0xae, 0xbb, 0x50, 0x04, 0xe7, 0x58, 0x11, 0x9c, 0x23, 0x98, 0x9c, 0x3a, 0xf1, 0xf7, + 0x45, 0x58, 0xb5, 0xeb, 0xfe, 0xeb, 0xb3, 0xec, 0xab, 0x21, 0xcd, 0xf6, 0xa8, 0x81, 0xa2, 0x47, + 0x6d, 0x62, 0x62, 0x60, 0xe7, 0x1e, 0xac, 0x76, 0x78, 0x14, 0xd1, 0xb8, 0xeb, 0x96, 0x9b, 0xe5, + 0x56, 0x3d, 0xbc, 0xae, 0x6e, 0xc3, 0x42, 0xc5, 0x6d, 0x58, 0x00, 0x93, 0xdc, 0xa5, 0xca, 0xa2, + 0x49, 0x4f, 0xb8, 0x0b, 0x3a, 0x4a, 0x97, 0xa5, 0xec, 0xa2, 0x2c, 0x65, 0x61, 0xa2, 0x41, 0x67, + 0x13, 0x96, 0x59, 0x3c, 0x76, 0x17, 0x35, 0x77, 0xcd, 0x5e, 0x85, 0x82, 0x32, 0x89, 0xa0, 0x1d, + 0x7d, 0x3c, 0xc6, 0x44, 0x41, 0xce, 0x40, 0x29, 0xc5, 0x8a, 0xd5, 0xad, 0xe8, 0x93, 0xbb, 0x65, + 0x4f, 0x4e, 0x69, 0xdb, 0x2f, 0x94, 0x6c, 0xb5, 0xed, 0x93, 0x1c, 0x09, 0x37, 0xec, 0x75, 0x17, + 0x2f, 0x64, 0x12, 0xad, 0xe4, 0xba, 0xb2, 0x10, 0x26, 0x85, 0x5b, 0x4d, 0xac, 0xa3, 0x85, 0x5c, + 0x6d, 0x82, 0xd6, 0x92, 0x99, 0x58, 0x67, 0x5e, 0xc8, 0x1d, 0x2b, 0x64, 0xfd, 0xd7, 0x19, 0xc2, + 0x8a, 0xf9, 0xec, 0xb8, 0xb5, 0x0b, 0x74, 0x67, 0x16, 0xf8, 0x54, 0x93, 0xc3, 0x6d, 0x5b, 0x99, + 0x0d, 0xce, 0x24, 0x5a, 0xb2, 0x8d, 0x6b, 0x1b, 0x7f, 0xfa, 0x82, 0x96, 0xe7, 0x22, 0x04, 0xb1, + 0x54, 0x27, 0x81, 0x95, 0xa1, 0x16, 0xaf, 0x5b, 0xd7, 0xd3, 0xb8, 0x28, 0xa3, 0x55, 0xfa, 0xae, + 0x9d, 0xf3, 0x8a, 0x09, 0x9e, 0x13, 0xde, 0x6a, 0x7e, 0x7b, 0xf3, 0x1e, 0x4c, 0x6c, 0x26, 0x67, + 0xf4, 0x6f, 0xca, 0xbf, 0x6d, 0x73, 0xcf, 0x3e, 0x92, 0x49, 0xe4, 0x9c, 0xd1, 0xfb, 0xbc, 0xca, + 0xc3, 0xc7, 0xc7, 0xdf, 0xbc, 0xd2, 0xfb, 0x89, 0x07, 0x0e, 0x27, 0x1e, 0x38, 0x9a, 0x78, 0xe0, + 0xeb, 0xc4, 0x03, 0x1f, 0x4f, 0xbc, 0xd2, 0xd1, 0x89, 0x57, 0x3a, 0x3e, 0xf1, 0x4a, 0xaf, 0xd0, + 0x70, 0xbf, 0xe7, 0xd3, 0xfd, 0xd4, 0xef, 0xb2, 0x71, 0xd0, 0xe3, 0x67, 0x7e, 0x1a, 0xda, 0x15, + 0xfd, 0x7d, 0xdf, 0xfd, 0x19, 0x00, 0x00, 0xff, 0xff, 0xd1, 0xd9, 0x34, 0x28, 0x8f, 0x06, 0x00, + 0x00, +} + +func (m *StorageParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ReadOnly { + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Mount) > 0 { + i -= len(m.Mount) + copy(dAtA[i:], m.Mount) + i = encodeVarintService(dAtA, i, uint64(len(m.Mount))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintService(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ServiceParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Credentials != nil { + { + size, err := m.Credentials.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if len(m.Storage) > 0 { + for iNdEx := len(m.Storage) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Storage[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ImageCredentials) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageCredentials) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageCredentials) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Password) > 0 { + i -= len(m.Password) + copy(dAtA[i:], m.Password) + i = encodeVarintService(dAtA, i, uint64(len(m.Password))) + i-- + dAtA[i] = 0x22 + } + if len(m.Username) > 0 { + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintService(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0x1a + } + if len(m.Email) > 0 { + i -= len(m.Email) + copy(dAtA[i:], m.Email) + i = encodeVarintService(dAtA, i, uint64(len(m.Email))) + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintService(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Service) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Service) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Service) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Credentials != nil { + { + size, err := m.Credentials.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.Params != nil { + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if len(m.Expose) > 0 { + for iNdEx := len(m.Expose) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Expose[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.Count != 0 { + i = encodeVarintService(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x38 + } + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Env[iNdEx]) + copy(dAtA[i:], m.Env[iNdEx]) + i = encodeVarintService(dAtA, i, uint64(len(m.Env[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Args) > 
0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintService(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintService(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Image) > 0 { + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintService(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintService(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintService(dAtA []byte, offset int, v uint64) int { + offset -= sovService(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *StorageParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + l = len(m.Mount) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + if m.ReadOnly { + n += 2 + } + return n +} + +func (m *ServiceParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Storage) > 0 { + for _, e := range m.Storage { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } + } + if m.Credentials != nil { + l = m.Credentials.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *ImageCredentials) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + l = len(m.Email) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + l = len(m.Username) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *Service) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + l = len(m.Image) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovService(uint64(l)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovService(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sovService(uint64(l)) + } + } + l = m.Resources.Size() + n += 1 + l + sovService(uint64(l)) + if m.Count != 0 { + n += 1 + sovService(uint64(m.Count)) + } + if len(m.Expose) > 0 { + for _, e := range m.Expose { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } + } + if m.Params != nil { + l = m.Params.Size() + n += 1 + l + sovService(uint64(l)) + } + if m.Credentials != nil { + l = m.Credentials.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func sovService(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozService(x uint64) (n int) { + return sovService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *StorageParams) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageParams{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Mount:` + fmt.Sprintf("%v", this.Mount) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func 
(this *ServiceParams) String() string { + if this == nil { + return "nil" + } + repeatedStringForStorage := "[]StorageParams{" + for _, f := range this.Storage { + repeatedStringForStorage += strings.Replace(strings.Replace(f.String(), "StorageParams", "StorageParams", 1), `&`, ``, 1) + "," + } + repeatedStringForStorage += "}" + s := strings.Join([]string{`&ServiceParams{`, + `Storage:` + repeatedStringForStorage + `,`, + `Credentials:` + strings.Replace(this.Credentials.String(), "ImageCredentials", "ImageCredentials", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageCredentials) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageCredentials{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Email:` + fmt.Sprintf("%v", this.Email) + `,`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `Password:` + fmt.Sprintf("%v", this.Password) + `,`, + `}`, + }, "") + return s +} +func (this *Service) String() string { + if this == nil { + return "nil" + } + repeatedStringForExpose := "[]ServiceExpose{" + for _, f := range this.Expose { + repeatedStringForExpose += fmt.Sprintf("%v", f) + "," + } + repeatedStringForExpose += "}" + s := strings.Join([]string{`&Service{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `Env:` + fmt.Sprintf("%v", this.Env) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resources", "v1beta4.Resources", 1), `&`, ``, 1) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `Expose:` + repeatedStringForExpose + `,`, + `Params:` + strings.Replace(this.Params.String(), "ServiceParams", "ServiceParams", 1) + `,`, + `Credentials:` + strings.Replace(this.Credentials.String(), "ImageCredentials", "ImageCredentials", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringService(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *StorageParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Mount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Storage = append(m.Storage, StorageParams{}) + if err := m.Storage[len(m.Storage)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Credentials", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Credentials == nil { + m.Credentials = &ImageCredentials{} + } + if err := m.Credentials.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + 
iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageCredentials) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageCredentials: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageCredentials: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Email", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Email = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Service) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Service: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expose", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expose = append(m.Expose, ServiceExpose{}) + if err := m.Expose[len(m.Expose)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Params == nil { + m.Params = &ServiceParams{} + } + if err := 
m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Credentials", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Credentials == nil { + m.Credentials = &ImageCredentials{} + } + if err := m.Credentials.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipService(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthService + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupService + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthService + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthService = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowService = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupService = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/manifest/v2beta2/service_expose_test.go b/go/manifest/v2beta3/service_expose_test.go similarity index 97% rename from go/manifest/v2beta2/service_expose_test.go rename to go/manifest/v2beta3/service_expose_test.go index 0a5a533c..07af159e 100644 --- a/go/manifest/v2beta2/service_expose_test.go +++ b/go/manifest/v2beta3/service_expose_test.go @@ -1,4 +1,4 @@ -package v2beta2 +package v2beta3 import ( "testing" diff --git a/go/manifest/v2beta3/serviceexpose.go b/go/manifest/v2beta3/serviceexpose.go new file mode 100644 index 00000000..2d80d875 --- /dev/null +++ b/go/manifest/v2beta3/serviceexpose.go @@ -0,0 +1,108 @@ +package v2beta3 + +import ( + "fmt" + "math" + "sort" + + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + resources 
"pkg.akt.dev/go/node/types/resources/v1beta4" +) + +func (s *ServiceExpose) GetEndpoints() resources.Endpoints { + if !s.Global { + return resources.Endpoints{} + } + + endpoints := make(resources.Endpoints, 0, 1) + + if len(s.IP) != 0 { + endpoints = make(resources.Endpoints, 0, 2) + + endpoints = append( + endpoints, + resources.Endpoint{ + Kind: resources.Endpoint_LEASED_IP, + SequenceNumber: s.EndpointSequenceNumber, + }, + ) + } + + kind := resources.Endpoint_RANDOM_PORT + if s.IsIngress() { + kind = resources.Endpoint_SHARED_HTTP + } + + endpoints = append(endpoints, resources.Endpoint{Kind: kind}) + + sort.Sort(endpoints) + + return endpoints +} + +func (s *ServiceExpose) validate(helper *validateManifestGroupsHelper) error { + if s.Port == 0 || s.Port > math.MaxUint16 { + return fmt.Errorf("port value must be 0 < value <= 65535 ") + } + + switch s.Proto { + case TCP, UDP: + break + default: + return fmt.Errorf("protocol %q unknown", s.Proto) + } + + if s.Global { + helper.globalServiceCount++ + } + + for _, host := range s.Hosts { + if !isValidHostname(host) { + return fmt.Errorf("has invalid hostname %q", host) + } + + _, exists := helper.hostnames[host] + if exists { + return fmt.Errorf("hostname %q is duplicated, this is not allowed", host) + } + helper.hostnames[host] = 0 // Value stored does not matter + } + + return nil +} + +func (s *ServiceExpose) checkAgainstResources(res *dtypes.ResourceUnit, eps validateEndpointsHelper) error { + if s.Global { + eph := eps[res.ID] + + if s.IsIngress() { + if !eph.tryDecHTTP() { + return fmt.Errorf("over-utilized HTTP endpoints") + } + } else { + if !eph.tryDecPort() { + return fmt.Errorf("over-utilized PORT endpoints") + } + } + + if len(s.IP) > 0 { + if !eph.tryDecIP() { + return fmt.Errorf("over-utilized IP endpoints") + } + } + } + + return nil +} + +func (s *ServiceExpose) IsIngress() bool { + return s.Proto == TCP && s.Global && uint32(80) == s.GetExternalPort() +} + +func (s *ServiceExpose) GetExternalPort() uint32 { + if s.ExternalPort == 0 { + return s.Port + } + + return s.ExternalPort +} diff --git a/go/manifest/v2beta3/serviceexpose.pb.go b/go/manifest/v2beta3/serviceexpose.pb.go new file mode 100644 index 00000000..d79bae18 --- /dev/null +++ b/go/manifest/v2beta3/serviceexpose.pb.go @@ -0,0 +1,665 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/manifest/v2beta3/serviceexpose.proto + +package v2beta3 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ServiceExpose stores exposed ports and hosts details +type ServiceExpose struct { + // port on the container + Port uint32 `protobuf:"varint,1,opt,name=port,proto3" json:"port" yaml:"port"` + // port on the service definition + ExternalPort uint32 `protobuf:"varint,2,opt,name=external_port,json=externalPort,proto3" json:"externalPort" yaml:"externalPort"` + Proto ServiceProtocol `protobuf:"bytes,3,opt,name=proto,proto3,casttype=ServiceProtocol" json:"proto" yaml:"proto"` + Service string `protobuf:"bytes,4,opt,name=service,proto3" json:"service" yaml:"service"` + Global bool `protobuf:"varint,5,opt,name=global,proto3" json:"global" yaml:"global"` + Hosts []string `protobuf:"bytes,6,rep,name=hosts,proto3" json:"hosts" yaml:"hosts"` + HTTPOptions ServiceExposeHTTPOptions `protobuf:"bytes,7,opt,name=http_options,json=httpOptions,proto3" json:"httpOptions" yaml:"httpOptions"` + // The name of the IP address associated with this, if any + IP string `protobuf:"bytes,8,opt,name=ip,proto3" json:"ip" yaml:"ip"` + // The sequence number of the associated endpoint in the on-chain data + EndpointSequenceNumber uint32 `protobuf:"varint,9,opt,name=endpoint_sequence_number,json=endpointSequenceNumber,proto3" json:"endpointSequenceNumber" yaml:"endpointSequenceNumber"` +} + +func (m *ServiceExpose) Reset() { *m = ServiceExpose{} } +func (*ServiceExpose) ProtoMessage() {} +func (*ServiceExpose) Descriptor() ([]byte, []int) { + return fileDescriptor_0cbeaeb8a333db8d, []int{0} +} +func (m *ServiceExpose) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceExpose) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ServiceExpose.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ServiceExpose) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceExpose.Merge(m, src) +} +func (m *ServiceExpose) XXX_Size() int { + return m.Size() +} +func (m *ServiceExpose) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceExpose.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceExpose proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ServiceExpose)(nil), "akash.manifest.v2beta3.ServiceExpose") +} + +func init() { + proto.RegisterFile("akash/manifest/v2beta3/serviceexpose.proto", fileDescriptor_0cbeaeb8a333db8d) +} + +var fileDescriptor_0cbeaeb8a333db8d = []byte{ + // 528 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xbf, 0x6f, 0xd3, 0x4e, + 0x18, 0xc6, 0xed, 0x34, 0x49, 0x9b, 0x4b, 0xf2, 0xfd, 0x4a, 0x07, 0x2a, 0x6e, 0x51, 0xfd, 0x46, + 0x5e, 0x30, 0x3f, 0x64, 0xa3, 0x66, 0x00, 0x95, 0x09, 0x4b, 0x48, 0x20, 0x21, 0x88, 0xdc, 0x4e, + 0x2c, 0x91, 0x93, 0x1e, 0x89, 0x95, 0xc4, 0x77, 0xc4, 0x97, 0xa8, 0x6c, 0x8c, 0x2c, 0x48, 0xfc, + 0x09, 0x2c, 0xfc, 0x2f, 0x1d, 0x3b, 0x76, 0x3a, 0x81, 0xb3, 0x79, 0xf4, 0xc8, 0x84, 0x7c, 0x77, + 0x51, 0x53, 0xd1, 0x6e, 0x7e, 0x9f, 0xf7, 0xf3, 0xbc, 0xf7, 0xc8, 0xef, 0x8b, 0x1e, 0x45, 0x93, + 0x28, 0x1d, 0xfb, 0xb3, 0x28, 0x89, 0x3f, 0x92, 0x94, 0xfb, 0xcb, 0xc3, 0x01, 0xe1, 0x51, 0xd7, + 0x4f, 0xc9, 0x7c, 0x19, 0x0f, 0x09, 0x39, 0x63, 0x34, 0x25, 0x1e, 0x9b, 0x53, 0x4e, 0xf1, 0xae, + 0x64, 0xbd, 0x35, 0xeb, 0x69, 0x76, 0xff, 0xee, 0x88, 0x8e, 0xa8, 0x44, 0xfc, 0xf2, 0x4b, 0xd1, + 0xfb, 0xee, 0x2d, 0x93, 0xc7, 0x9c, 0x33, 0xca, 0x78, 
0x4c, 0x93, 0x54, 0x91, 0xce, 0xcf, 0x1a, + 0x6a, 0x1f, 0xab, 0xf7, 0x5e, 0xc9, 0xf7, 0xf0, 0x63, 0x54, 0x65, 0x74, 0xce, 0x2d, 0xb3, 0x63, + 0xba, 0xed, 0xe0, 0x5e, 0x2e, 0x40, 0xd6, 0x85, 0x80, 0xe6, 0xe7, 0x68, 0x36, 0x3d, 0x72, 0xca, + 0xca, 0x09, 0xa5, 0x88, 0xdf, 0xa2, 0x36, 0x39, 0xe3, 0x64, 0x9e, 0x44, 0xd3, 0xbe, 0x74, 0x55, + 0xa4, 0xeb, 0x41, 0x2e, 0xa0, 0xb5, 0x6e, 0xf4, 0x94, 0xfb, 0x8e, 0x72, 0x6f, 0xaa, 0x4e, 0x78, + 0x0d, 0xc2, 0x01, 0xaa, 0xc9, 0x54, 0xd6, 0x56, 0xc7, 0x74, 0x1b, 0xc1, 0x93, 0x5c, 0x80, 0x12, + 0x0a, 0x01, 0x2d, 0xfd, 0xb8, 0x4c, 0xfd, 0x47, 0xc0, 0xff, 0x3a, 0x75, 0xaf, 0x14, 0x86, 0x74, + 0x1a, 0x2a, 0x12, 0x3f, 0x43, 0xdb, 0xfa, 0xff, 0x59, 0x55, 0x39, 0xe5, 0x20, 0x17, 0xb0, 0x96, + 0x0a, 0x01, 0xff, 0xa9, 0x39, 0x5a, 0x70, 0xc2, 0x75, 0x0b, 0x77, 0x51, 0x7d, 0x34, 0xa5, 0x83, + 0x68, 0x6a, 0xd5, 0x3a, 0xa6, 0xbb, 0x13, 0xdc, 0xcf, 0x05, 0x68, 0xa5, 0x10, 0xd0, 0x56, 0x36, + 0x55, 0x3b, 0xa1, 0x6e, 0x60, 0x1f, 0xd5, 0xc6, 0x34, 0xe5, 0xa9, 0x55, 0xef, 0x6c, 0xb9, 0x8d, + 0x60, 0xaf, 0x4c, 0x2c, 0x85, 0xab, 0xc4, 0xb2, 0x74, 0x42, 0x25, 0xe3, 0x6f, 0x26, 0x6a, 0x95, + 0x5b, 0xe8, 0xeb, 0x35, 0x58, 0xdb, 0x1d, 0xd3, 0x6d, 0x1e, 0x3e, 0xf5, 0x6e, 0xde, 0xaf, 0x77, + 0x6d, 0x37, 0xaf, 0x4f, 0x4e, 0x7a, 0xef, 0x95, 0x2f, 0x78, 0x7e, 0x2e, 0xc0, 0xc8, 0x04, 0x34, + 0x37, 0xc4, 0x5c, 0x40, 0xb3, 0x1c, 0xae, 0xcb, 0x42, 0x00, 0xd6, 0x19, 0xae, 0x44, 0x27, 0xdc, + 0x44, 0xf0, 0x43, 0x54, 0x89, 0x99, 0xb5, 0x23, 0xff, 0xd4, 0x5e, 0x26, 0xa0, 0xf2, 0xa6, 0x97, + 0x0b, 0xa8, 0xc4, 0xac, 0x10, 0xd0, 0x50, 0xe6, 0x98, 0x39, 0x61, 0x25, 0x66, 0x78, 0x81, 0x2c, + 0x92, 0x9c, 0x32, 0x1a, 0x27, 0xbc, 0x9f, 0x92, 0x4f, 0x0b, 0x92, 0x0c, 0x49, 0x3f, 0x59, 0xcc, + 0x06, 0x64, 0x6e, 0x35, 0xe4, 0xda, 0x5f, 0xe4, 0x02, 0x76, 0xd7, 0xcc, 0xb1, 0x46, 0xde, 0x49, + 0xa2, 0x10, 0x70, 0xa0, 0x0f, 0xe0, 0xc6, 0xbe, 0x13, 0xde, 0x62, 0x3c, 0xaa, 0x7e, 0xfd, 0x01, + 0x46, 0xf0, 0xf2, 0xf2, 0xb7, 0x6d, 0x7c, 0xc9, 0x6c, 0xf3, 0x3c, 0xb3, 0xcd, 0x8b, 0xcc, 0x36, + 0x7f, 0x65, 0xb6, 0xf9, 0x7d, 0x65, 0x1b, 0x17, 0x2b, 0xdb, 0xb8, 0x5c, 0xd9, 0xc6, 0x07, 0x60, + 0x93, 0x91, 0x17, 0x4d, 0xb8, 0x77, 0x4a, 0x96, 0xfe, 0x88, 0xfe, 0x73, 0xfc, 0x83, 0xba, 0x3c, + 0x90, 0xee, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe9, 0x84, 0x26, 0xfe, 0x77, 0x03, 0x00, 0x00, +} + +func (m *ServiceExpose) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceExpose) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceExpose) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.EndpointSequenceNumber != 0 { + i = encodeVarintServiceexpose(dAtA, i, uint64(m.EndpointSequenceNumber)) + i-- + dAtA[i] = 0x48 + } + if len(m.IP) > 0 { + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintServiceexpose(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0x42 + } + { + size, err := m.HTTPOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintServiceexpose(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + if len(m.Hosts) > 0 { + for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hosts[iNdEx]) + copy(dAtA[i:], m.Hosts[iNdEx]) + i = encodeVarintServiceexpose(dAtA, i, uint64(len(m.Hosts[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if m.Global { + i-- + if m.Global { + dAtA[i] = 1 + 
} else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.Service) > 0 { + i -= len(m.Service) + copy(dAtA[i:], m.Service) + i = encodeVarintServiceexpose(dAtA, i, uint64(len(m.Service))) + i-- + dAtA[i] = 0x22 + } + if len(m.Proto) > 0 { + i -= len(m.Proto) + copy(dAtA[i:], m.Proto) + i = encodeVarintServiceexpose(dAtA, i, uint64(len(m.Proto))) + i-- + dAtA[i] = 0x1a + } + if m.ExternalPort != 0 { + i = encodeVarintServiceexpose(dAtA, i, uint64(m.ExternalPort)) + i-- + dAtA[i] = 0x10 + } + if m.Port != 0 { + i = encodeVarintServiceexpose(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintServiceexpose(dAtA []byte, offset int, v uint64) int { + offset -= sovServiceexpose(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ServiceExpose) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Port != 0 { + n += 1 + sovServiceexpose(uint64(m.Port)) + } + if m.ExternalPort != 0 { + n += 1 + sovServiceexpose(uint64(m.ExternalPort)) + } + l = len(m.Proto) + if l > 0 { + n += 1 + l + sovServiceexpose(uint64(l)) + } + l = len(m.Service) + if l > 0 { + n += 1 + l + sovServiceexpose(uint64(l)) + } + if m.Global { + n += 2 + } + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovServiceexpose(uint64(l)) + } + } + l = m.HTTPOptions.Size() + n += 1 + l + sovServiceexpose(uint64(l)) + l = len(m.IP) + if l > 0 { + n += 1 + l + sovServiceexpose(uint64(l)) + } + if m.EndpointSequenceNumber != 0 { + n += 1 + sovServiceexpose(uint64(m.EndpointSequenceNumber)) + } + return n +} + +func sovServiceexpose(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozServiceexpose(x uint64) (n int) { + return sovServiceexpose(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ServiceExpose) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceExpose{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `ExternalPort:` + fmt.Sprintf("%v", this.ExternalPort) + `,`, + `Proto:` + fmt.Sprintf("%v", this.Proto) + `,`, + `Service:` + fmt.Sprintf("%v", this.Service) + `,`, + `Global:` + fmt.Sprintf("%v", this.Global) + `,`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `HTTPOptions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HTTPOptions), "ServiceExposeHTTPOptions", "ServiceExposeHTTPOptions", 1), `&`, ``, 1) + `,`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `EndpointSequenceNumber:` + fmt.Sprintf("%v", this.EndpointSequenceNumber) + `,`, + `}`, + }, "") + return s +} +func valueToStringServiceexpose(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ServiceExpose) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceExpose: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceExpose: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalPort", wireType) + } + m.ExternalPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExternalPort |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proto", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthServiceexpose + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthServiceexpose + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proto = ServiceProtocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthServiceexpose + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthServiceexpose + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Global", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Global = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthServiceexpose + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthServiceexpose + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthServiceexpose + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthServiceexpose + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.HTTPOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthServiceexpose + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthServiceexpose + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointSequenceNumber", wireType) + } + m.EndpointSequenceNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndpointSequenceNumber |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipServiceexpose(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthServiceexpose + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipServiceexpose(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthServiceexpose + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupServiceexpose + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthServiceexpose + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthServiceexpose = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowServiceexpose = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupServiceexpose = fmt.Errorf("proto: 
unexpected end of group") +) diff --git a/go/manifest/v2beta2/serviceexposes.go b/go/manifest/v2beta3/serviceexposes.go similarity index 78% rename from go/manifest/v2beta2/serviceexposes.go rename to go/manifest/v2beta3/serviceexposes.go index 6dbc5de4..1a3403d9 100644 --- a/go/manifest/v2beta2/serviceexposes.go +++ b/go/manifest/v2beta3/serviceexposes.go @@ -1,9 +1,9 @@ -package v2beta2 +package v2beta3 import ( "sort" - types "github.com/akash-network/akash-api/go/node/types/v1beta3" + resources "pkg.akt.dev/go/node/types/resources/v1beta4" ) type ServiceExposes []ServiceExpose @@ -40,8 +40,8 @@ func (s ServiceExposes) Less(i, j int) bool { return false } -func (s ServiceExposes) GetEndpoints() types.Endpoints { - endpoints := make(types.Endpoints, 0) +func (s ServiceExposes) GetEndpoints() resources.Endpoints { + endpoints := make(resources.Endpoints, 0) for _, expose := range s { endpoints = append(endpoints, expose.GetEndpoints()...) diff --git a/go/manifest/v2beta2/services.go b/go/manifest/v2beta3/services.go similarity index 94% rename from go/manifest/v2beta2/services.go rename to go/manifest/v2beta3/services.go index 8bdfc0b6..abf228b5 100644 --- a/go/manifest/v2beta2/services.go +++ b/go/manifest/v2beta3/services.go @@ -1,4 +1,4 @@ -package v2beta2 +package v2beta3 import ( "sort" diff --git a/go/node/audit/v1/audit.pb.go b/go/node/audit/v1/audit.pb.go new file mode 100644 index 00000000..1a7eb270 --- /dev/null +++ b/go/node/audit/v1/audit.pb.go @@ -0,0 +1,859 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/audit/v1/audit.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + pkg_akt_dev_go_node_types_attributes_v1 "pkg.akt.dev/go/node/types/attributes/v1" + v1 "pkg.akt.dev/go/node/types/attributes/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Provider stores owner auditor and attributes details +type AuditedProvider struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` + Attributes pkg_akt_dev_go_node_types_attributes_v1.Attributes `protobuf:"bytes,4,rep,name=attributes,proto3,castrepeated=pkg.akt.dev/go/node/types/attributes/v1.Attributes" json:"attributes" yaml:"attributes"` +} + +func (m *AuditedProvider) Reset() { *m = AuditedProvider{} } +func (m *AuditedProvider) String() string { return proto.CompactTextString(m) } +func (*AuditedProvider) ProtoMessage() {} +func (*AuditedProvider) Descriptor() ([]byte, []int) { + return fileDescriptor_88024e67b7102602, []int{0} +} +func (m *AuditedProvider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditedProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuditedProvider.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuditedProvider) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditedProvider.Merge(m, src) +} +func (m *AuditedProvider) XXX_Size() int { + return m.Size() +} +func (m *AuditedProvider) XXX_DiscardUnknown() { + xxx_messageInfo_AuditedProvider.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditedProvider proto.InternalMessageInfo + +func (m *AuditedProvider) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *AuditedProvider) GetAuditor() string { + if m != nil { + return m.Auditor + } + return "" +} + +func (m *AuditedProvider) GetAttributes() pkg_akt_dev_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// Attributes +type AuditedAttributesStore struct { + Attributes pkg_akt_dev_go_node_types_attributes_v1.Attributes `protobuf:"bytes,1,rep,name=attributes,proto3,castrepeated=pkg.akt.dev/go/node/types/attributes/v1.Attributes" json:"attributes" yaml:"attributes"` +} + +func (m *AuditedAttributesStore) Reset() { *m = AuditedAttributesStore{} } +func (m *AuditedAttributesStore) String() string { return proto.CompactTextString(m) } +func (*AuditedAttributesStore) ProtoMessage() {} +func (*AuditedAttributesStore) Descriptor() ([]byte, []int) { + return fileDescriptor_88024e67b7102602, []int{1} +} +func (m *AuditedAttributesStore) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditedAttributesStore) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuditedAttributesStore.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuditedAttributesStore) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditedAttributesStore.Merge(m, src) +} +func (m *AuditedAttributesStore) XXX_Size() int { + return m.Size() +} +func (m *AuditedAttributesStore) XXX_DiscardUnknown() { + xxx_messageInfo_AuditedAttributesStore.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditedAttributesStore proto.InternalMessageInfo + +func (m *AuditedAttributesStore) GetAttributes() pkg_akt_dev_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// 
AttributesFilters defines filters used to filter deployments +type AttributesFilters struct { + Auditors []string `protobuf:"bytes,1,rep,name=auditors,proto3" json:"auditors" yaml:"auditors"` + Owners []string `protobuf:"bytes,2,rep,name=owners,proto3" json:"owners" yaml:"owners"` +} + +func (m *AttributesFilters) Reset() { *m = AttributesFilters{} } +func (m *AttributesFilters) String() string { return proto.CompactTextString(m) } +func (*AttributesFilters) ProtoMessage() {} +func (*AttributesFilters) Descriptor() ([]byte, []int) { + return fileDescriptor_88024e67b7102602, []int{2} +} +func (m *AttributesFilters) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AttributesFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AttributesFilters.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AttributesFilters) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributesFilters.Merge(m, src) +} +func (m *AttributesFilters) XXX_Size() int { + return m.Size() +} +func (m *AttributesFilters) XXX_DiscardUnknown() { + xxx_messageInfo_AttributesFilters.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributesFilters proto.InternalMessageInfo + +func (m *AttributesFilters) GetAuditors() []string { + if m != nil { + return m.Auditors + } + return nil +} + +func (m *AttributesFilters) GetOwners() []string { + if m != nil { + return m.Owners + } + return nil +} + +func init() { + proto.RegisterType((*AuditedProvider)(nil), "akash.audit.v1.AuditedProvider") + proto.RegisterType((*AuditedAttributesStore)(nil), "akash.audit.v1.AuditedAttributesStore") + proto.RegisterType((*AttributesFilters)(nil), "akash.audit.v1.AttributesFilters") +} + +func init() { proto.RegisterFile("akash/audit/v1/audit.proto", fileDescriptor_88024e67b7102602) } + +var fileDescriptor_88024e67b7102602 = []byte{ + // 404 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x53, 0x31, 0xce, 0xd3, 0x30, + 0x18, 0x8d, 0xcb, 0x4f, 0xa1, 0x06, 0x5a, 0x35, 0x42, 0x28, 0x14, 0x88, 0x2b, 0xb3, 0x74, 0xb2, + 0xd5, 0x56, 0x02, 0xa9, 0x4c, 0xcd, 0xc0, 0x8c, 0xc2, 0x86, 0xc4, 0x90, 0x2a, 0x56, 0x88, 0x5a, + 0xea, 0xca, 0x76, 0x83, 0x7a, 0x06, 0x16, 0x8e, 0x50, 0x31, 0x72, 0x0d, 0x96, 0x8e, 0x1d, 0x99, + 0x0c, 0x6a, 0x17, 0x94, 0x31, 0x27, 0x40, 0xb5, 0x93, 0xa6, 0x45, 0x1c, 0x80, 0xcd, 0xdf, 0x7b, + 0xdf, 0x7b, 0x79, 0xf6, 0xf7, 0x05, 0xf6, 0xa2, 0x79, 0x24, 0x3f, 0xd0, 0x68, 0x1d, 0xa7, 0x8a, + 0x66, 0x43, 0x7b, 0x20, 0x2b, 0xc1, 0x15, 0x77, 0xdb, 0x86, 0x23, 0x16, 0xca, 0x86, 0xbd, 0x87, + 0x09, 0x4f, 0xb8, 0xa1, 0xe8, 0xe9, 0x64, 0xbb, 0x7a, 0x03, 0xeb, 0x30, 0x8b, 0x24, 0xa3, 0x91, + 0x52, 0x22, 0x9d, 0xad, 0x15, 0x93, 0xc6, 0xab, 0xaa, 0x6c, 0x27, 0xde, 0x36, 0x60, 0x67, 0x7a, + 0x32, 0x63, 0xf1, 0x1b, 0xc1, 0xb3, 0x34, 0x66, 0xc2, 0xa5, 0xf0, 0x36, 0xff, 0xb4, 0x64, 0xc2, + 0x03, 0x7d, 0x30, 0x68, 0x05, 0x8f, 0x73, 0x8d, 0x2c, 0x50, 0x68, 0x74, 0x7f, 0x13, 0x7d, 0x5c, + 0x4c, 0xb0, 0x29, 0x71, 0x68, 0x61, 0xf7, 0x25, 0xbc, 0x63, 0x02, 0x71, 0xe1, 0x35, 0x8c, 0xe4, + 0x59, 0xae, 0x51, 0x05, 0x15, 0x1a, 0xb5, 0xad, 0xa8, 0x04, 0x70, 0x58, 0x51, 0xee, 0x57, 0x00, + 0x61, 0x9d, 0xcf, 0xbb, 0xe9, 0xdf, 0x1a, 0xdc, 0x1b, 0x3d, 0x27, 0xf6, 0x8e, 0xa7, 0xf4, 0xa4, + 0x66, 0x49, 0x36, 0x24, 0xd3, 0xaa, 0x0a, 0xde, 0xef, 0x34, 0x72, 0x72, 0x8d, 0x2e, 0xe4, 0x85, + 0x46, 0xdd, 0xf2, 0x43, 0x67, 
0x0c, 0x7f, 0xfb, 0x89, 0x46, 0xab, 0x79, 0x42, 0xa2, 0xb9, 0x22, + 0x31, 0xcb, 0x68, 0xc2, 0xe9, 0x92, 0xc7, 0x8c, 0xaa, 0xcd, 0x8a, 0xc9, 0xeb, 0xb7, 0xa9, 0xdd, + 0x65, 0x78, 0x61, 0x8b, 0xbf, 0x03, 0xf8, 0xa8, 0x7c, 0xa2, 0xba, 0xe3, 0xad, 0xe2, 0x82, 0xfd, + 0x9d, 0x1f, 0xfc, 0x8f, 0xf9, 0x27, 0x37, 0xbf, 0xb7, 0xc8, 0xc1, 0x9f, 0x01, 0xec, 0xd6, 0x0d, + 0xaf, 0xd3, 0x85, 0x62, 0x42, 0xba, 0xaf, 0xe0, 0xdd, 0x72, 0x16, 0x36, 0x7d, 0x2b, 0x40, 0xb9, + 0x46, 0x67, 0xac, 0xd0, 0xa8, 0x73, 0x35, 0x3b, 0x89, 0xc3, 0x33, 0xe9, 0x8e, 0x61, 0xd3, 0xcc, + 0x5f, 0x7a, 0x0d, 0x23, 0x7d, 0x92, 0x6b, 0x54, 0x22, 0x85, 0x46, 0x0f, 0x2e, 0x36, 0x45, 0xe2, + 0xb0, 0x24, 0x6c, 0x9a, 0xe0, 0xc5, 0xee, 0xe0, 0x83, 0xfd, 0xc1, 0x07, 0xbf, 0x0e, 0x3e, 0xf8, + 0x72, 0xf4, 0x9d, 0xfd, 0xd1, 0x77, 0x7e, 0x1c, 0x7d, 0xe7, 0xdd, 0xd3, 0x7f, 0xdd, 0xb6, 0xfa, + 0x15, 0x66, 0x4d, 0xb3, 0xb5, 0xe3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x25, 0xf9, 0xd5, 0x84, + 0x23, 0x03, 0x00, 0x00, +} + +func (m *AuditedProvider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditedProvider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditedProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAudit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Auditor) > 0 { + i -= len(m.Auditor) + copy(dAtA[i:], m.Auditor) + i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuditedAttributesStore) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditedAttributesStore) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditedAttributesStore) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAudit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AttributesFilters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttributesFilters) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AttributesFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Owners) > 0 { + for iNdEx := len(m.Owners) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Owners[iNdEx]) + copy(dAtA[i:], m.Owners[iNdEx]) + 
i = encodeVarintAudit(dAtA, i, uint64(len(m.Owners[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Auditors) > 0 { + for iNdEx := len(m.Auditors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Auditors[iNdEx]) + copy(dAtA[i:], m.Auditors[iNdEx]) + i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditors[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintAudit(dAtA []byte, offset int, v uint64) int { + offset -= sovAudit(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AuditedProvider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovAudit(uint64(l)) + } + l = len(m.Auditor) + if l > 0 { + n += 1 + l + sovAudit(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovAudit(uint64(l)) + } + } + return n +} + +func (m *AuditedAttributesStore) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovAudit(uint64(l)) + } + } + return n +} + +func (m *AttributesFilters) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Auditors) > 0 { + for _, s := range m.Auditors { + l = len(s) + n += 1 + l + sovAudit(uint64(l)) + } + } + if len(m.Owners) > 0 { + for _, s := range m.Owners { + l = len(s) + n += 1 + l + sovAudit(uint64(l)) + } + } + return n +} + +func sovAudit(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAudit(x uint64) (n int) { + return sovAudit(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *AuditedProvider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditedProvider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditedProvider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthAudit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAudit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAudit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuditedAttributesStore) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditedAttributesStore: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditedAttributesStore: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAudit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAudit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttributesFilters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) 
+ if wireType == 4 { + return fmt.Errorf("proto: AttributesFilters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttributesFilters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auditors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditors = append(m.Auditors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owners", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owners = append(m.Owners, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAudit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAudit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAudit(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAudit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAudit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAudit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAudit + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupAudit + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthAudit + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthAudit = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAudit = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAudit = fmt.Errorf("proto: unexpected 
end of group") +) diff --git a/go/node/audit/v1/codec.go b/go/node/audit/v1/codec.go new file mode 100644 index 00000000..a79dce48 --- /dev/null +++ b/go/node/audit/v1/codec.go @@ -0,0 +1,38 @@ +package v1 + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +var ( + // ModuleCdc references the global x/audit module codec. Note, the codec should + // ONLY be used in certain instances of tests and for JSON encoding as Amino is + // still used for that purpose. + // + // The actual codec used for serialization should be provided to x/provider and + // defined at the application level. + // + // Deprecated: ModuleCdc use is deprecated + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) + +// RegisterLegacyAminoCodec register concrete types on codec +// +// Deprecated: RegisterLegacyAminoCodec is deprecated +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + cdc.RegisterConcrete(&MsgSignProviderAttributes{}, "akash-sdk/x/"+ModuleName+"/"+MsgTypeSignProviderAttributes, nil) + cdc.RegisterConcrete(&MsgDeleteProviderAttributes{}, "akash-sdk/x/"+ModuleName+"/"+MsgTypeDeleteProviderAttributes, nil) +} + +// RegisterInterfaces registers the x/provider interfaces types with the interface registry +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), + &MsgSignProviderAttributes{}, + &MsgDeleteProviderAttributes{}, + ) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} diff --git a/go/node/audit/v1/errors.go b/go/node/audit/v1/errors.go new file mode 100644 index 00000000..e49ba3e2 --- /dev/null +++ b/go/node/audit/v1/errors.go @@ -0,0 +1,22 @@ +package v1 + +import ( + sdkerrors "cosmossdk.io/errors" +) + +const ( + errProviderNotFound uint32 = iota + 1 + errInvalidAddress + errAttributeNotFound +) + +var ( + // ErrProviderNotFound provider not found + ErrProviderNotFound = sdkerrors.Register(ModuleName, errProviderNotFound, "invalid provider: address not found") + + // ErrInvalidAddress invalid trusted auditor address + ErrInvalidAddress = sdkerrors.Register(ModuleName, errInvalidAddress, "invalid address") + + // ErrAttributeNotFound invalid trusted auditor address + ErrAttributeNotFound = sdkerrors.Register(ModuleName, errAttributeNotFound, "attribute not found") +) diff --git a/go/node/audit/v1/event.pb.go b/go/node/audit/v1/event.pb.go new file mode 100644 index 00000000..908338ba --- /dev/null +++ b/go/node/audit/v1/event.pb.go @@ -0,0 +1,597 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/audit/v1/event.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// EventTrustedAuditorCreated defines an SDK message for signing a provider attributes +type EventTrustedAuditorCreated struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` +} + +func (m *EventTrustedAuditorCreated) Reset() { *m = EventTrustedAuditorCreated{} } +func (m *EventTrustedAuditorCreated) String() string { return proto.CompactTextString(m) } +func (*EventTrustedAuditorCreated) ProtoMessage() {} +func (*EventTrustedAuditorCreated) Descriptor() ([]byte, []int) { + return fileDescriptor_18857e57236f769d, []int{0} +} +func (m *EventTrustedAuditorCreated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventTrustedAuditorCreated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventTrustedAuditorCreated.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventTrustedAuditorCreated) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventTrustedAuditorCreated.Merge(m, src) +} +func (m *EventTrustedAuditorCreated) XXX_Size() int { + return m.Size() +} +func (m *EventTrustedAuditorCreated) XXX_DiscardUnknown() { + xxx_messageInfo_EventTrustedAuditorCreated.DiscardUnknown(m) +} + +var xxx_messageInfo_EventTrustedAuditorCreated proto.InternalMessageInfo + +func (m *EventTrustedAuditorCreated) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *EventTrustedAuditorCreated) GetAuditor() string { + if m != nil { + return m.Auditor + } + return "" +} + +// EventTrustedAuditorCreated defines an SDK message for signing a provider attributes +type EventTrustedAuditorDeleted struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` +} + +func (m *EventTrustedAuditorDeleted) Reset() { *m = EventTrustedAuditorDeleted{} } +func (m *EventTrustedAuditorDeleted) String() string { return proto.CompactTextString(m) } +func (*EventTrustedAuditorDeleted) ProtoMessage() {} +func (*EventTrustedAuditorDeleted) Descriptor() ([]byte, []int) { + return fileDescriptor_18857e57236f769d, []int{1} +} +func (m *EventTrustedAuditorDeleted) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventTrustedAuditorDeleted) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventTrustedAuditorDeleted.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventTrustedAuditorDeleted) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventTrustedAuditorDeleted.Merge(m, src) +} +func (m *EventTrustedAuditorDeleted) XXX_Size() int { + return m.Size() +} +func (m *EventTrustedAuditorDeleted) XXX_DiscardUnknown() { + xxx_messageInfo_EventTrustedAuditorDeleted.DiscardUnknown(m) +} + +var xxx_messageInfo_EventTrustedAuditorDeleted proto.InternalMessageInfo + +func (m *EventTrustedAuditorDeleted) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *EventTrustedAuditorDeleted) GetAuditor() string { + if m != nil { + return m.Auditor + } + return "" +} + +func 
init() { + proto.RegisterType((*EventTrustedAuditorCreated)(nil), "akash.audit.v1.EventTrustedAuditorCreated") + proto.RegisterType((*EventTrustedAuditorDeleted)(nil), "akash.audit.v1.EventTrustedAuditorDeleted") +} + +func init() { proto.RegisterFile("akash/audit/v1/event.proto", fileDescriptor_18857e57236f769d) } + +var fileDescriptor_18857e57236f769d = []byte{ + // 270 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4a, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x2c, 0x4d, 0xc9, 0x2c, 0xd1, 0x2f, 0x33, 0xd4, 0x4f, 0x2d, 0x4b, 0xcd, 0x2b, + 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x03, 0xcb, 0xe9, 0x81, 0xe5, 0xf4, 0xca, 0x0c, + 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x52, 0xfa, 0x20, 0x16, 0x44, 0x95, 0x94, 0x64, 0x72, + 0x7e, 0x71, 0x6e, 0x7e, 0x71, 0x3c, 0x44, 0x02, 0xc2, 0x81, 0x48, 0x29, 0x6d, 0x63, 0xe4, 0x92, + 0x72, 0x05, 0x19, 0x18, 0x52, 0x54, 0x5a, 0x5c, 0x92, 0x9a, 0xe2, 0x08, 0x32, 0x29, 0xbf, 0xc8, + 0xb9, 0x28, 0x35, 0xb1, 0x24, 0x35, 0x45, 0xc8, 0x9d, 0x8b, 0x35, 0xbf, 0x3c, 0x2f, 0xb5, 0x48, + 0x82, 0x51, 0x81, 0x51, 0x83, 0xd3, 0xc9, 0xf0, 0xd5, 0x3d, 0x79, 0x88, 0xc0, 0xa7, 0x7b, 0xf2, + 0x3c, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, 0x60, 0xae, 0xd2, 0xa5, 0x2d, 0xba, 0x22, 0x50, 0x83, + 0x1d, 0x53, 0x52, 0x8a, 0x52, 0x8b, 0x8b, 0x83, 0x4b, 0x8a, 0x32, 0xf3, 0xd2, 0x83, 0x20, 0xca, + 0x85, 0xfc, 0xb9, 0xd8, 0x13, 0x21, 0x46, 0x4b, 0x30, 0x81, 0x8d, 0x32, 0x7d, 0x75, 0x4f, 0x1e, + 0x26, 0xf4, 0xe9, 0x9e, 0x3c, 0x1f, 0xc4, 0x30, 0xa8, 0x00, 0x6e, 0xe3, 0x60, 0x5a, 0x70, 0x39, + 0xdc, 0x25, 0x35, 0x27, 0x75, 0x50, 0x3b, 0xdc, 0xc9, 0xec, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, + 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, + 0x8f, 0xe5, 0x18, 0xa2, 0x64, 0x0a, 0xb2, 0xd3, 0xf5, 0x12, 0xb3, 0x4b, 0xf4, 0x52, 0x52, 0xcb, + 0xf4, 0xd3, 0xf3, 0xf5, 0xf3, 0xf2, 0x53, 0x52, 0xe1, 0xd1, 0x9e, 0xc4, 0x06, 0x8e, 0x30, 0x63, + 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x18, 0xf8, 0x23, 0x6d, 0x0f, 0x02, 0x00, 0x00, +} + +func (m *EventTrustedAuditorCreated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventTrustedAuditorCreated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventTrustedAuditorCreated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Auditor) > 0 { + i -= len(m.Auditor) + copy(dAtA[i:], m.Auditor) + i = encodeVarintEvent(dAtA, i, uint64(len(m.Auditor))) + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintEvent(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EventTrustedAuditorDeleted) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventTrustedAuditorDeleted) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventTrustedAuditorDeleted) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Auditor) > 0 { + i -= len(m.Auditor) + copy(dAtA[i:], m.Auditor) + i = encodeVarintEvent(dAtA, 
i, uint64(len(m.Auditor))) + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintEvent(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintEvent(dAtA []byte, offset int, v uint64) int { + offset -= sovEvent(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EventTrustedAuditorCreated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovEvent(uint64(l)) + } + l = len(m.Auditor) + if l > 0 { + n += 1 + l + sovEvent(uint64(l)) + } + return n +} + +func (m *EventTrustedAuditorDeleted) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovEvent(uint64(l)) + } + l = len(m.Auditor) + if l > 0 { + n += 1 + l + sovEvent(uint64(l)) + } + return n +} + +func sovEvent(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvent(x uint64) (n int) { + return sovEvent(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *EventTrustedAuditorCreated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventTrustedAuditorCreated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventTrustedAuditorCreated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *EventTrustedAuditorDeleted) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventTrustedAuditorDeleted: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventTrustedAuditorDeleted: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvent(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvent + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvent + } + 
depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvent + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvent = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvent = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvent = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/audit/v1/genesis.pb.go b/go/node/audit/v1/genesis.pb.go new file mode 100644 index 00000000..b45b6e44 --- /dev/null +++ b/go/node/audit/v1/genesis.pb.go @@ -0,0 +1,331 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/audit/v1/genesis.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the basic genesis state used by audit module +type GenesisState struct { + Providers []AuditedProvider `protobuf:"bytes,1,rep,name=providers,proto3" json:"providers" yaml:"providers"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_94ddb7af951b07fa, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetProviders() []AuditedProvider { + if m != nil { + return m.Providers + } + return nil +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "akash.audit.v1.GenesisState") +} + +func init() { proto.RegisterFile("akash/audit/v1/genesis.proto", fileDescriptor_94ddb7af951b07fa) } + +var fileDescriptor_94ddb7af951b07fa = []byte{ + // 213 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x2c, 0x4d, 0xc9, 0x2c, 0xd1, 0x2f, 0x33, 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, + 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x03, 0xcb, 0xea, 0x81, 0x65, 0xf5, + 0xca, 0x0c, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x52, 0xfa, 0x20, 0x16, 0x44, 0x95, 0x94, + 0x14, 0x9a, 0x19, 0x10, 0xe5, 0x60, 0x39, 0xa5, 0x12, 0x2e, 0x1e, 0x77, 0x88, 0x91, 0xc1, 0x25, + 0x89, 0x25, 0xa9, 0x42, 
0x29, 0x5c, 0x9c, 0x05, 0x45, 0xf9, 0x65, 0x99, 0x29, 0xa9, 0x45, 0xc5, + 0x12, 0x8c, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, 0xf2, 0x7a, 0xa8, 0xb6, 0xe8, 0x39, 0x82, 0x18, 0xa9, + 0x29, 0x01, 0x50, 0x75, 0x4e, 0xaa, 0x27, 0xee, 0xc9, 0x33, 0xbc, 0xba, 0x27, 0x8f, 0xd0, 0xf9, + 0xe9, 0x9e, 0xbc, 0x40, 0x65, 0x62, 0x6e, 0x8e, 0x95, 0x12, 0x5c, 0x48, 0x29, 0x08, 0x21, 0xed, + 0x64, 0x76, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, + 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0x32, 0x05, 0xd9, 0xe9, + 0x7a, 0x89, 0xd9, 0x25, 0x7a, 0x29, 0xa9, 0x65, 0xfa, 0xe9, 0xf9, 0xfa, 0x79, 0xf9, 0x29, 0xa9, + 0x70, 0x97, 0x27, 0xb1, 0x81, 0x1d, 0x6d, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x9c, 0x21, 0xcb, + 0xe0, 0x16, 0x01, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Providers) > 0 { + for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Providers) > 0 { + for _, e := range m.Providers { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Providers = append(m.Providers, AuditedProvider{}) + if err := 
m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/audit/v1/key.go b/go/node/audit/v1/key.go new file mode 100644 index 00000000..ca085f86 --- /dev/null +++ b/go/node/audit/v1/key.go @@ -0,0 +1,19 @@ +package v1 + +const ( + // ModuleName is the module name constant used in many places + ModuleName = "audit" + + // StoreKey is the store key string for provider + StoreKey = ModuleName + + // RouterKey is the message route for provider + RouterKey = ModuleName + + // QuerierRoute defines the module's query routing key. + QuerierRoute = ModuleName +) + +func PrefixProviderID() []byte { + return []byte{0x01} +} diff --git a/go/node/audit/v1/msg.pb.go b/go/node/audit/v1/msg.pb.go new file mode 100644 index 00000000..e7a42a58 --- /dev/null +++ b/go/node/audit/v1/msg.pb.go @@ -0,0 +1,968 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/audit/v1/msg.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + pkg_akt_dev_go_node_types_attributes_v1 "pkg.akt.dev/go/node/types/attributes/v1" + v1 "pkg.akt.dev/go/node/types/attributes/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgSignProviderAttributes defines an SDK message for signing a provider attributes +type MsgSignProviderAttributes struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` + Attributes pkg_akt_dev_go_node_types_attributes_v1.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=pkg.akt.dev/go/node/types/attributes/v1.Attributes" json:"attributes" yaml:"attributes"` +} + +func (m *MsgSignProviderAttributes) Reset() { *m = MsgSignProviderAttributes{} } +func (m *MsgSignProviderAttributes) String() string { return proto.CompactTextString(m) } +func (*MsgSignProviderAttributes) ProtoMessage() {} +func (*MsgSignProviderAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_9d1c9151241de91a, []int{0} +} +func (m *MsgSignProviderAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgSignProviderAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgSignProviderAttributes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgSignProviderAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgSignProviderAttributes.Merge(m, src) +} +func (m *MsgSignProviderAttributes) XXX_Size() int { + return m.Size() +} +func (m *MsgSignProviderAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_MsgSignProviderAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgSignProviderAttributes proto.InternalMessageInfo + +func (m *MsgSignProviderAttributes) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *MsgSignProviderAttributes) GetAuditor() string { + if m != nil { + return m.Auditor + } + return "" +} + +func (m *MsgSignProviderAttributes) GetAttributes() pkg_akt_dev_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. 
+type MsgSignProviderAttributesResponse struct { +} + +func (m *MsgSignProviderAttributesResponse) Reset() { *m = MsgSignProviderAttributesResponse{} } +func (m *MsgSignProviderAttributesResponse) String() string { return proto.CompactTextString(m) } +func (*MsgSignProviderAttributesResponse) ProtoMessage() {} +func (*MsgSignProviderAttributesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9d1c9151241de91a, []int{1} +} +func (m *MsgSignProviderAttributesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgSignProviderAttributesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgSignProviderAttributesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgSignProviderAttributesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgSignProviderAttributesResponse.Merge(m, src) +} +func (m *MsgSignProviderAttributesResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgSignProviderAttributesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgSignProviderAttributesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgSignProviderAttributesResponse proto.InternalMessageInfo + +// MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes +type MsgDeleteProviderAttributes struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` + Keys []string `protobuf:"bytes,3,rep,name=keys,proto3" json:"keys" yaml:"keys"` +} + +func (m *MsgDeleteProviderAttributes) Reset() { *m = MsgDeleteProviderAttributes{} } +func (m *MsgDeleteProviderAttributes) String() string { return proto.CompactTextString(m) } +func (*MsgDeleteProviderAttributes) ProtoMessage() {} +func (*MsgDeleteProviderAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_9d1c9151241de91a, []int{2} +} +func (m *MsgDeleteProviderAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDeleteProviderAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDeleteProviderAttributes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDeleteProviderAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDeleteProviderAttributes.Merge(m, src) +} +func (m *MsgDeleteProviderAttributes) XXX_Size() int { + return m.Size() +} +func (m *MsgDeleteProviderAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDeleteProviderAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDeleteProviderAttributes proto.InternalMessageInfo + +func (m *MsgDeleteProviderAttributes) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *MsgDeleteProviderAttributes) GetAuditor() string { + if m != nil { + return m.Auditor + } + return "" +} + +func (m *MsgDeleteProviderAttributes) GetKeys() []string { + if m != nil { + return m.Keys + } + return nil +} + +// MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. 
+type MsgDeleteProviderAttributesResponse struct { +} + +func (m *MsgDeleteProviderAttributesResponse) Reset() { *m = MsgDeleteProviderAttributesResponse{} } +func (m *MsgDeleteProviderAttributesResponse) String() string { return proto.CompactTextString(m) } +func (*MsgDeleteProviderAttributesResponse) ProtoMessage() {} +func (*MsgDeleteProviderAttributesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9d1c9151241de91a, []int{3} +} +func (m *MsgDeleteProviderAttributesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDeleteProviderAttributesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDeleteProviderAttributesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDeleteProviderAttributesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDeleteProviderAttributesResponse.Merge(m, src) +} +func (m *MsgDeleteProviderAttributesResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgDeleteProviderAttributesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDeleteProviderAttributesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDeleteProviderAttributesResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgSignProviderAttributes)(nil), "akash.audit.v1.MsgSignProviderAttributes") + proto.RegisterType((*MsgSignProviderAttributesResponse)(nil), "akash.audit.v1.MsgSignProviderAttributesResponse") + proto.RegisterType((*MsgDeleteProviderAttributes)(nil), "akash.audit.v1.MsgDeleteProviderAttributes") + proto.RegisterType((*MsgDeleteProviderAttributesResponse)(nil), "akash.audit.v1.MsgDeleteProviderAttributesResponse") +} + +func init() { proto.RegisterFile("akash/audit/v1/msg.proto", fileDescriptor_9d1c9151241de91a) } + +var fileDescriptor_9d1c9151241de91a = []byte{ + // 444 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x48, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x2c, 0x4d, 0xc9, 0x2c, 0xd1, 0x2f, 0x33, 0xd4, 0xcf, 0x2d, 0x4e, 0xd7, 0x2b, + 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x03, 0xcb, 0xe8, 0x81, 0x65, 0xf4, 0xca, 0x0c, 0xa5, 0x44, + 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x52, 0xfa, 0x20, 0x16, 0x44, 0x95, 0x94, 0x64, 0x72, 0x7e, 0x71, + 0x6e, 0x7e, 0x71, 0x3c, 0x44, 0x02, 0xc2, 0x81, 0x4a, 0x89, 0x43, 0x78, 0x20, 0x23, 0x51, 0x4c, + 0x96, 0xd2, 0x80, 0xd8, 0x99, 0x94, 0x58, 0x9c, 0xaa, 0x9f, 0x58, 0x52, 0x52, 0x94, 0x99, 0x54, + 0x5a, 0x92, 0x5a, 0x0c, 0x52, 0x03, 0xe7, 0x41, 0x54, 0x2a, 0xbd, 0x61, 0xe2, 0x92, 0xf4, 0x2d, + 0x4e, 0x0f, 0xce, 0x4c, 0xcf, 0x0b, 0x28, 0xca, 0x2f, 0xcb, 0x4c, 0x49, 0x2d, 0x72, 0x84, 0x6b, + 0x10, 0x72, 0xe7, 0x62, 0xcd, 0x2f, 0xcf, 0x4b, 0x2d, 0x92, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x74, + 0x32, 0x7c, 0x75, 0x4f, 0x1e, 0x22, 0xf0, 0xe9, 0x9e, 0x3c, 0x4f, 0x65, 0x62, 0x6e, 0x8e, 0x95, + 0x12, 0x98, 0xab, 0x74, 0x69, 0x8b, 0xae, 0x08, 0xd4, 0x69, 0x8e, 0x29, 0x29, 0x45, 0xa9, 0xc5, + 0xc5, 0xc1, 0x25, 0x45, 0x99, 0x79, 0xe9, 0x41, 0x10, 0xe5, 0x42, 0xfe, 0x5c, 0xec, 0x60, 0x6f, + 0xe6, 0x17, 0x49, 0x30, 0x81, 0x8d, 0x32, 0x7d, 0x75, 0x4f, 0x1e, 0x26, 0xf4, 0xe9, 0x9e, 0x3c, + 0x1f, 0xc4, 0x30, 0xa8, 0x00, 0x6e, 0xe3, 0x60, 0x5a, 0x84, 0x16, 0x31, 0x72, 0x71, 0x21, 0x7c, + 0x26, 0xc1, 0xac, 0xc0, 0xac, 0xc1, 0x6d, 0xa4, 0xac, 0x07, 0x09, 0x51, 0x90, 0xbf, 0xf5, 0x10, + 0xb2, 0x7a, 0x65, 0x86, 0x7a, 0x70, 0x4f, 0x39, 0xc5, 
0x9e, 0xb8, 0x27, 0xcf, 0xf0, 0xea, 0x9e, + 0x3c, 0x92, 0xf6, 0x4f, 0xf7, 0xe4, 0x05, 0xa1, 0x0e, 0x80, 0x8b, 0x29, 0xad, 0xba, 0x2f, 0x6f, + 0x54, 0x90, 0x9d, 0xae, 0x97, 0x98, 0x5d, 0xa2, 0x97, 0x92, 0x5a, 0xa6, 0x9f, 0x9e, 0xaf, 0x9f, + 0x97, 0x9f, 0x92, 0xaa, 0x5f, 0x52, 0x59, 0x90, 0x5a, 0x8c, 0x1a, 0xaa, 0x08, 0xd3, 0x8b, 0x83, + 0x90, 0x8c, 0xb5, 0x12, 0x78, 0xb1, 0x40, 0x9e, 0xa1, 0xe9, 0xf9, 0x06, 0x2d, 0x98, 0xb3, 0x95, + 0x94, 0xb9, 0x14, 0x71, 0x86, 0x76, 0x50, 0x6a, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0xd2, 0x37, + 0x46, 0x2e, 0x69, 0xdf, 0xe2, 0x74, 0x97, 0xd4, 0x9c, 0xd4, 0x92, 0xd4, 0x21, 0x15, 0x2b, 0xda, + 0x5c, 0x2c, 0xd9, 0xa9, 0x95, 0x90, 0xe8, 0xe0, 0x74, 0x12, 0x7f, 0x75, 0x4f, 0x1e, 0xcc, 0xff, + 0x74, 0x4f, 0x9e, 0x1b, 0x62, 0x14, 0x88, 0xa7, 0x14, 0x04, 0x16, 0xc4, 0x12, 0x3a, 0xaa, 0x5c, + 0xca, 0x78, 0xfc, 0x0d, 0x0b, 0x1f, 0x27, 0xb3, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, + 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, + 0x63, 0x88, 0x92, 0xc1, 0x16, 0x61, 0xb0, 0x9c, 0x97, 0xc4, 0x06, 0x4e, 0xf2, 0xc6, 0x80, 0x00, + 0x00, 0x00, 0xff, 0xff, 0x1d, 0x36, 0x14, 0x03, 0x92, 0x03, 0x00, 0x00, +} + +func (m *MsgSignProviderAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgSignProviderAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgSignProviderAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Auditor) > 0 { + i -= len(m.Auditor) + copy(dAtA[i:], m.Auditor) + i = encodeVarintMsg(dAtA, i, uint64(len(m.Auditor))) + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintMsg(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgSignProviderAttributesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgSignProviderAttributesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgSignProviderAttributesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgDeleteProviderAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDeleteProviderAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDeleteProviderAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Keys) > 0 { + for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Keys[iNdEx]) + copy(dAtA[i:], 
m.Keys[iNdEx]) + i = encodeVarintMsg(dAtA, i, uint64(len(m.Keys[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Auditor) > 0 { + i -= len(m.Auditor) + copy(dAtA[i:], m.Auditor) + i = encodeVarintMsg(dAtA, i, uint64(len(m.Auditor))) + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintMsg(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgDeleteProviderAttributesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDeleteProviderAttributesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDeleteProviderAttributesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintMsg(dAtA []byte, offset int, v uint64) int { + offset -= sovMsg(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgSignProviderAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovMsg(uint64(l)) + } + l = len(m.Auditor) + if l > 0 { + n += 1 + l + sovMsg(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovMsg(uint64(l)) + } + } + return n +} + +func (m *MsgSignProviderAttributesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgDeleteProviderAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovMsg(uint64(l)) + } + l = len(m.Auditor) + if l > 0 { + n += 1 + l + sovMsg(uint64(l)) + } + if len(m.Keys) > 0 { + for _, s := range m.Keys { + l = len(s) + n += 1 + l + sovMsg(uint64(l)) + } + } + return n +} + +func (m *MsgDeleteProviderAttributesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovMsg(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMsg(x uint64) (n int) { + return sovMsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgSignProviderAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgSignProviderAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgSignProviderAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgSignProviderAttributesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgSignProviderAttributesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgSignProviderAttributesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDeleteProviderAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: MsgDeleteProviderAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDeleteProviderAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keys = append(m.Keys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDeleteProviderAttributesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDeleteProviderAttributesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDeleteProviderAttributesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMsg + } + if (iNdEx + skippy) > l 
{ + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMsg(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMsg + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMsg + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMsg + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMsg = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMsg = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMsg = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/audit/v1/msgs.go b/go/node/audit/v1/msgs.go new file mode 100644 index 00000000..76744c9d --- /dev/null +++ b/go/node/audit/v1/msgs.go @@ -0,0 +1,121 @@ +package v1 + +import ( + "reflect" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +var ( + _ sdk.Msg = &MsgSignProviderAttributes{} + _ sdk.Msg = &MsgDeleteProviderAttributes{} +) + +var ( + MsgTypeSignProviderAttributes = "" + MsgTypeDeleteProviderAttributes = "" +) + +func init() { + MsgTypeSignProviderAttributes = reflect.TypeOf(&MsgSignProviderAttributes{}).Elem().Name() + MsgTypeDeleteProviderAttributes = reflect.TypeOf(&MsgDeleteProviderAttributes{}).Elem().Name() +} + +// ====MsgSignProviderAttributes==== + +// Type implements the sdk.Msg interface +func (m *MsgSignProviderAttributes) Type() string { + return MsgTypeSignProviderAttributes +} + +// ValidateBasic does basic validation +func (m *MsgSignProviderAttributes) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(m.Owner); err != nil { + return sdkerrors.ErrInvalidAddress.Wrap("MsgCreate: Invalid Owner Address") + } + + if _, err := sdk.AccAddressFromBech32(m.Auditor); err != nil { + return sdkerrors.ErrInvalidAddress.Wrap("MsgCreate: Invalid Auditor Address") + } + + return nil +} + +// GetSigners defines whose signature is required +func (m *MsgSignProviderAttributes) GetSigners() []sdk.AccAddress { + auditor, err := sdk.AccAddressFromBech32(m.Auditor) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{auditor} +} + +// ====MsgDeleteProviderAttributes==== + +// Type implements the sdk.Msg interface +func (m *MsgDeleteProviderAttributes) Type() string { + return MsgTypeDeleteProviderAttributes +} + +// ValidateBasic does basic validation +func (m *MsgDeleteProviderAttributes) ValidateBasic() error { + if _,
err := sdk.AccAddressFromBech32(m.Owner); err != nil { + return sdkerrors.ErrInvalidAddress.Wrap("MsgCreate: Invalid Owner Address") + } + + if _, err := sdk.AccAddressFromBech32(m.Auditor); err != nil { + return sdkerrors.ErrInvalidAddress.Wrap("MsgCreate: Invalid Auditor Address") + } + + return nil +} + +// GetSignBytes encodes the message for signing + +// GetSigners defines whose signature is required +func (m *MsgDeleteProviderAttributes) GetSigners() []sdk.AccAddress { + auditor, err := sdk.AccAddressFromBech32(m.Auditor) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{auditor} +} + +// ============= GetSignBytes ============= +// ModuleCdc is defined in codec.go +// TODO @troian to check if we need them at all + +// GetSignBytes encodes the message for signing +// +// Deprecated: GetSignBytes is deprecated +func (m *MsgSignProviderAttributes) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(m)) +} + +// GetSignBytes encodes the message for signing +// +// Deprecated: GetSignBytes is deprecated +func (m *MsgDeleteProviderAttributes) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(m)) +} + +// ============= Route ============= +// ModuleCdc is defined in codec.go +// TODO @troian to check if we need them at all since sdk.Msg does not have Route defined anymore + +// Route implements the sdk.Msg interface +// +// Deprecated: Route is deprecated +func (m *MsgSignProviderAttributes) Route() string { + return RouterKey +} + +// Route implements the sdk.Msg interface +// +// Deprecated: Route is deprecated +func (m *MsgDeleteProviderAttributes) Route() string { + return RouterKey +} diff --git a/go/node/audit/v1/query.pb.go b/go/node/audit/v1/query.pb.go new file mode 100644 index 00000000..12313d8e --- /dev/null +++ b/go/node/audit/v1/query.pb.go @@ -0,0 +1,1717 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/audit/v1/query.proto + +package v1 + +import ( + context "context" + fmt "fmt" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryProvidersResponse is response type for the Query/Providers RPC method +type QueryProvidersResponse struct { + Providers AuditedProviders `protobuf:"bytes,1,rep,name=providers,proto3,castrepeated=AuditedProviders" json:"providers"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryProvidersResponse) Reset() { *m = QueryProvidersResponse{} } +func (m *QueryProvidersResponse) String() string { return proto.CompactTextString(m) } +func (*QueryProvidersResponse) ProtoMessage() {} +func (*QueryProvidersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_966c8e0ace08c293, []int{0} +} +func (m *QueryProvidersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryProvidersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryProvidersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryProvidersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProvidersResponse.Merge(m, src) +} +func (m *QueryProvidersResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryProvidersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProvidersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProvidersResponse proto.InternalMessageInfo + +func (m *QueryProvidersResponse) GetProviders() AuditedProviders { + if m != nil { + return m.Providers + } + return nil +} + +func (m *QueryProvidersResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryProviderRequest is request type for the Query/Provider RPC method +type QueryProviderRequest struct { + Auditor string `protobuf:"bytes,1,opt,name=auditor,proto3" json:"auditor,omitempty"` + Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` +} + +func (m *QueryProviderRequest) Reset() { *m = QueryProviderRequest{} } +func (m *QueryProviderRequest) String() string { return proto.CompactTextString(m) } +func (*QueryProviderRequest) ProtoMessage() {} +func (*QueryProviderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_966c8e0ace08c293, []int{1} +} +func (m *QueryProviderRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryProviderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryProviderRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryProviderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProviderRequest.Merge(m, src) +} +func (m *QueryProviderRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryProviderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProviderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProviderRequest proto.InternalMessageInfo + +func (m *QueryProviderRequest) GetAuditor() string { + if m != nil { + return m.Auditor + } + return "" +} + +func (m *QueryProviderRequest) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +// QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method +type 
QueryAllProvidersAttributesRequest struct { + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAllProvidersAttributesRequest) Reset() { *m = QueryAllProvidersAttributesRequest{} } +func (m *QueryAllProvidersAttributesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllProvidersAttributesRequest) ProtoMessage() {} +func (*QueryAllProvidersAttributesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_966c8e0ace08c293, []int{2} +} +func (m *QueryAllProvidersAttributesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllProvidersAttributesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllProvidersAttributesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllProvidersAttributesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllProvidersAttributesRequest.Merge(m, src) +} +func (m *QueryAllProvidersAttributesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllProvidersAttributesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllProvidersAttributesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllProvidersAttributesRequest proto.InternalMessageInfo + +func (m *QueryAllProvidersAttributesRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryProviderAttributesRequest is request type for the Query/Provider RPC method +type QueryProviderAttributesRequest struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner,omitempty"` + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryProviderAttributesRequest) Reset() { *m = QueryProviderAttributesRequest{} } +func (m *QueryProviderAttributesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryProviderAttributesRequest) ProtoMessage() {} +func (*QueryProviderAttributesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_966c8e0ace08c293, []int{3} +} +func (m *QueryProviderAttributesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryProviderAttributesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryProviderAttributesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryProviderAttributesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProviderAttributesRequest.Merge(m, src) +} +func (m *QueryProviderAttributesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryProviderAttributesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProviderAttributesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProviderAttributesRequest proto.InternalMessageInfo + +func (m *QueryProviderAttributesRequest) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *QueryProviderAttributesRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryProviderAuditorRequest is request type for the Query/Providers RPC method +type QueryProviderAuditorRequest 
struct { + Auditor string `protobuf:"bytes,1,opt,name=auditor,proto3" json:"auditor,omitempty"` + Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` +} + +func (m *QueryProviderAuditorRequest) Reset() { *m = QueryProviderAuditorRequest{} } +func (m *QueryProviderAuditorRequest) String() string { return proto.CompactTextString(m) } +func (*QueryProviderAuditorRequest) ProtoMessage() {} +func (*QueryProviderAuditorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_966c8e0ace08c293, []int{4} +} +func (m *QueryProviderAuditorRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryProviderAuditorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryProviderAuditorRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryProviderAuditorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProviderAuditorRequest.Merge(m, src) +} +func (m *QueryProviderAuditorRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryProviderAuditorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProviderAuditorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProviderAuditorRequest proto.InternalMessageInfo + +func (m *QueryProviderAuditorRequest) GetAuditor() string { + if m != nil { + return m.Auditor + } + return "" +} + +func (m *QueryProviderAuditorRequest) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +// QueryAuditorAttributesRequest is request type for the Query/Providers RPC method +type QueryAuditorAttributesRequest struct { + Auditor string `protobuf:"bytes,1,opt,name=auditor,proto3" json:"auditor,omitempty"` + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAuditorAttributesRequest) Reset() { *m = QueryAuditorAttributesRequest{} } +func (m *QueryAuditorAttributesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAuditorAttributesRequest) ProtoMessage() {} +func (*QueryAuditorAttributesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_966c8e0ace08c293, []int{5} +} +func (m *QueryAuditorAttributesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAuditorAttributesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAuditorAttributesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAuditorAttributesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAuditorAttributesRequest.Merge(m, src) +} +func (m *QueryAuditorAttributesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAuditorAttributesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAuditorAttributesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAuditorAttributesRequest proto.InternalMessageInfo + +func (m *QueryAuditorAttributesRequest) GetAuditor() string { + if m != nil { + return m.Auditor + } + return "" +} + +func (m *QueryAuditorAttributesRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +func init() { + proto.RegisterType((*QueryProvidersResponse)(nil), "akash.audit.v1.QueryProvidersResponse") + 
proto.RegisterType((*QueryProviderRequest)(nil), "akash.audit.v1.QueryProviderRequest") + proto.RegisterType((*QueryAllProvidersAttributesRequest)(nil), "akash.audit.v1.QueryAllProvidersAttributesRequest") + proto.RegisterType((*QueryProviderAttributesRequest)(nil), "akash.audit.v1.QueryProviderAttributesRequest") + proto.RegisterType((*QueryProviderAuditorRequest)(nil), "akash.audit.v1.QueryProviderAuditorRequest") + proto.RegisterType((*QueryAuditorAttributesRequest)(nil), "akash.audit.v1.QueryAuditorAttributesRequest") +} + +func init() { proto.RegisterFile("akash/audit/v1/query.proto", fileDescriptor_966c8e0ace08c293) } + +var fileDescriptor_966c8e0ace08c293 = []byte{ + // 559 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0x4d, 0x8b, 0xd3, 0x4e, + 0x1c, 0xc7, 0x3b, 0xfd, 0xd3, 0xbf, 0x74, 0x16, 0x44, 0x87, 0xb2, 0xd4, 0xba, 0xa6, 0xa5, 0xe0, + 0x6e, 0x75, 0xe9, 0x0c, 0x89, 0x0f, 0x07, 0x6f, 0xdd, 0xc3, 0x7a, 0x12, 0xd6, 0x5c, 0x04, 0x6f, + 0x53, 0x33, 0xc4, 0xd0, 0x98, 0xc9, 0x66, 0xa6, 0x11, 0x91, 0x15, 0xf4, 0x15, 0x08, 0x9e, 0xbd, + 0xe8, 0x4d, 0x7c, 0x05, 0xbe, 0x82, 0x3d, 0x2e, 0x78, 0xf1, 0xa4, 0xd2, 0xfa, 0x42, 0x24, 0x33, + 0x49, 0xb6, 0x49, 0xba, 0xdb, 0xa5, 0x78, 0xcb, 0xe4, 0xf7, 0xf0, 0xfd, 0xfc, 0x1e, 0x66, 0x60, + 0x87, 0x4e, 0xa8, 0x78, 0x4e, 0xe8, 0xd4, 0xf1, 0x24, 0x89, 0x4d, 0x72, 0x38, 0x65, 0xd1, 0x2b, + 0x1c, 0x46, 0x5c, 0x72, 0x74, 0x59, 0xd9, 0xb0, 0xb2, 0xe1, 0xd8, 0xec, 0xb4, 0x5c, 0xee, 0x72, + 0x65, 0x22, 0xc9, 0x97, 0xf6, 0xea, 0x6c, 0xb9, 0x9c, 0xbb, 0x3e, 0x23, 0x34, 0xf4, 0x08, 0x0d, + 0x02, 0x2e, 0xa9, 0xf4, 0x78, 0x20, 0x52, 0xeb, 0xed, 0x67, 0x5c, 0xbc, 0xe0, 0x82, 0x8c, 0xa9, + 0x60, 0x3a, 0x39, 0x89, 0xcd, 0x31, 0x93, 0xd4, 0x24, 0x21, 0x75, 0xbd, 0x40, 0x39, 0xa7, 0xbe, + 0x65, 0x16, 0x2d, 0xac, 0x6c, 0xfd, 0x6f, 0x00, 0x6e, 0x3e, 0x4e, 0xc2, 0x0f, 0x22, 0x1e, 0x7b, + 0x0e, 0x8b, 0x84, 0xcd, 0x44, 0xc8, 0x03, 0xc1, 0xd0, 0x13, 0xd8, 0x0c, 0xb3, 0x9f, 0x6d, 0xd0, + 0xfb, 0x6f, 0xb0, 0x61, 0x75, 0x71, 0x11, 0x1d, 0x8f, 0x92, 0x0f, 0xe6, 0x64, 0xc1, 0x7b, 0xed, + 0xe3, 0x9f, 0xdd, 0xda, 0x97, 0x5f, 0xdd, 0x2b, 0x25, 0x83, 0xb0, 0x4f, 0x73, 0xa1, 0x87, 0x10, + 0x9e, 0x32, 0xb6, 0xeb, 0x3d, 0x30, 0xd8, 0xb0, 0x76, 0xb0, 0x2e, 0x08, 0x27, 0x05, 0x61, 0xdd, + 0xad, 0xb4, 0x20, 0x7c, 0x40, 0x5d, 0x96, 0x51, 0xd9, 0x0b, 0xa1, 0xfd, 0x7d, 0xd8, 0x2a, 0xb0, + 0xdb, 0xec, 0x70, 0xca, 0x84, 0x44, 0x6d, 0x78, 0x49, 0x11, 0xf2, 0xa8, 0x0d, 0x7a, 0x60, 0xd0, + 0xb4, 0xb3, 0x23, 0x6a, 0xc1, 0x06, 0x7f, 0x19, 0xb0, 0x48, 0xa9, 0x36, 0x6d, 0x7d, 0xe8, 0xfb, + 0xb0, 0xaf, 0xf2, 0x8c, 0x7c, 0x3f, 0x07, 0x1e, 0x49, 0x19, 0x79, 0xe3, 0xa9, 0x64, 0x22, 0xcb, + 0xba, 0x5f, 0xc0, 0x06, 0x0a, 0x7b, 0x7b, 0x25, 0xb6, 0x8a, 0x2d, 0x50, 0xbf, 0x81, 0x46, 0x81, + 0xba, 0xaa, 0x94, 0x53, 0x82, 0x05, 0xca, 0x92, 0x7e, 0x7d, 0x6d, 0xfd, 0x47, 0xf0, 0x7a, 0x51, + 0x5f, 0xf7, 0x66, 0xdd, 0xe6, 0xbd, 0x05, 0xf0, 0x86, 0xee, 0x9e, 0x76, 0xab, 0x96, 0x73, 0x76, + 0xc6, 0x7f, 0x54, 0x92, 0xf5, 0xa9, 0x01, 0x1b, 0x8a, 0x01, 0x7d, 0x06, 0x70, 0x73, 0xf9, 0x18, + 0x91, 0x55, 0x5e, 0xde, 0xd5, 0x33, 0xef, 0x6c, 0x2f, 0x8d, 0xa9, 0xdc, 0x95, 0xfe, 0xf0, 0xdd, + 0xf7, 0x3f, 0x1f, 0xea, 0x3b, 0xe8, 0x26, 0x59, 0x76, 0xd7, 0x08, 0xcd, 0x13, 0x13, 0xdf, 0x13, + 0x32, 0xa1, 0x44, 0xd5, 0xf1, 0x23, 0x7c, 0xae, 0xda, 0xfa, 0x74, 0xf7, 0x14, 0x1d, 0x41, 0xc3, + 0x95, 0x74, 0xaf, 0xd5, 0x4c, 0x8f, 0x34, 0xe5, 0x57, 0x00, 0xaf, 0x95, 0x96, 0x64, 0x01, 0x76, + 0xf7, 0x7c, 0xd8, 0xc2, 0x52, 0x5d, 0x98, 0xf4, 0x81, 0x22, 0xbd, 0x8b, 0xac, 
0xd5, 0xa4, 0xe9, + 0x0e, 0x1d, 0x65, 0xcc, 0xe8, 0x23, 0x80, 0x57, 0xab, 0x98, 0xc3, 0xe5, 0x53, 0x3f, 0x63, 0x57, + 0x2f, 0x0c, 0x6a, 0x2a, 0xd0, 0x5d, 0x74, 0x2b, 0x05, 0xcd, 0x5e, 0xb7, 0x9c, 0x95, 0x47, 0x0b, + 0x88, 0x49, 0x3b, 0xf7, 0xee, 0x1f, 0xcf, 0x0c, 0x70, 0x32, 0x33, 0xc0, 0xef, 0x99, 0x01, 0xde, + 0xcf, 0x8d, 0xda, 0xc9, 0xdc, 0xa8, 0xfd, 0x98, 0x1b, 0xb5, 0xa7, 0x5b, 0xe1, 0xc4, 0xc5, 0x74, + 0x22, 0xb1, 0xc3, 0x62, 0xe2, 0x72, 0x12, 0x70, 0x87, 0xe5, 0xa5, 0x8f, 0xff, 0x57, 0x2f, 0xf5, + 0x9d, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0x41, 0x7d, 0xb9, 0x53, 0x06, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // AllProvidersAttributes queries all providers + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + AllProvidersAttributes(ctx context.Context, in *QueryAllProvidersAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) + // ProviderAttributes queries all provider signed attributes + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + ProviderAttributes(ctx context.Context, in *QueryProviderAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) + // ProviderAuditorAttributes queries provider signed attributes by specific auditor + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + ProviderAuditorAttributes(ctx context.Context, in *QueryProviderAuditorRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) + // AuditorAttributes queries all providers signed by this auditor + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + AuditorAttributes(ctx context.Context, in *QueryAuditorAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) AllProvidersAttributes(ctx context.Context, in *QueryAllProvidersAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { + out := new(QueryProvidersResponse) + err := c.cc.Invoke(ctx, "/akash.audit.v1.Query/AllProvidersAttributes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ProviderAttributes(ctx context.Context, in *QueryProviderAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { + out := new(QueryProvidersResponse) + err := c.cc.Invoke(ctx, "/akash.audit.v1.Query/ProviderAttributes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ProviderAuditorAttributes(ctx context.Context, in *QueryProviderAuditorRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { + out := new(QueryProvidersResponse) + err := c.cc.Invoke(ctx, "/akash.audit.v1.Query/ProviderAuditorAttributes", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) AuditorAttributes(ctx context.Context, in *QueryAuditorAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { + out := new(QueryProvidersResponse) + err := c.cc.Invoke(ctx, "/akash.audit.v1.Query/AuditorAttributes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // AllProvidersAttributes queries all providers + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + AllProvidersAttributes(context.Context, *QueryAllProvidersAttributesRequest) (*QueryProvidersResponse, error) + // ProviderAttributes queries all provider signed attributes + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + ProviderAttributes(context.Context, *QueryProviderAttributesRequest) (*QueryProvidersResponse, error) + // ProviderAuditorAttributes queries provider signed attributes by specific auditor + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + ProviderAuditorAttributes(context.Context, *QueryProviderAuditorRequest) (*QueryProvidersResponse, error) + // AuditorAttributes queries all providers signed by this auditor + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + AuditorAttributes(context.Context, *QueryAuditorAttributesRequest) (*QueryProvidersResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) AllProvidersAttributes(ctx context.Context, req *QueryAllProvidersAttributesRequest) (*QueryProvidersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllProvidersAttributes not implemented") +} +func (*UnimplementedQueryServer) ProviderAttributes(ctx context.Context, req *QueryProviderAttributesRequest) (*QueryProvidersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ProviderAttributes not implemented") +} +func (*UnimplementedQueryServer) ProviderAuditorAttributes(ctx context.Context, req *QueryProviderAuditorRequest) (*QueryProvidersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ProviderAuditorAttributes not implemented") +} +func (*UnimplementedQueryServer) AuditorAttributes(ctx context.Context, req *QueryAuditorAttributesRequest) (*QueryProvidersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AuditorAttributes not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_AllProvidersAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllProvidersAttributesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).AllProvidersAttributes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.audit.v1.Query/AllProvidersAttributes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).AllProvidersAttributes(ctx, req.(*QueryAllProvidersAttributesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Query_ProviderAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryProviderAttributesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ProviderAttributes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.audit.v1.Query/ProviderAttributes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ProviderAttributes(ctx, req.(*QueryProviderAttributesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ProviderAuditorAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryProviderAuditorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ProviderAuditorAttributes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.audit.v1.Query/ProviderAuditorAttributes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ProviderAuditorAttributes(ctx, req.(*QueryProviderAuditorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_AuditorAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAuditorAttributesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).AuditorAttributes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.audit.v1.Query/AuditorAttributes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).AuditorAttributes(ctx, req.(*QueryAuditorAttributesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.audit.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AllProvidersAttributes", + Handler: _Query_AllProvidersAttributes_Handler, + }, + { + MethodName: "ProviderAttributes", + Handler: _Query_ProviderAttributes_Handler, + }, + { + MethodName: "ProviderAuditorAttributes", + Handler: _Query_ProviderAuditorAttributes_Handler, + }, + { + MethodName: "AuditorAttributes", + Handler: _Query_AuditorAttributes_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/audit/v1/query.proto", +} + +func (m *QueryProvidersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryProvidersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryProvidersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Providers) > 0 { + for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryProviderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryProviderRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryProviderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0x12 + } + if len(m.Auditor) > 0 { + i -= len(m.Auditor) + copy(dAtA[i:], m.Auditor) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Auditor))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAllProvidersAttributesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllProvidersAttributesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllProvidersAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryProviderAttributesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryProviderAttributesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryProviderAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryProviderAuditorRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryProviderAuditorRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryProviderAuditorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0x12 + } + if len(m.Auditor) > 0 { + i -= len(m.Auditor) + copy(dAtA[i:], m.Auditor) + i = encodeVarintQuery(dAtA, i, 
uint64(len(m.Auditor))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAuditorAttributesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAuditorAttributesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAuditorAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Auditor) > 0 { + i -= len(m.Auditor) + copy(dAtA[i:], m.Auditor) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Auditor))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryProvidersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Providers) > 0 { + for _, e := range m.Providers { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryProviderRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Auditor) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryAllProvidersAttributesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryProviderAttributesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryProviderAuditorRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Auditor) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryAuditorAttributesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Auditor) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryProvidersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { 
+ return fmt.Errorf("proto: QueryProvidersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryProvidersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Providers = append(m.Providers, AuditedProvider{}) + if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryProviderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryProviderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryProviderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + 
var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllProvidersAttributesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllProvidersAttributesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllProvidersAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryProviderAttributesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryProviderAttributesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryProviderAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryProviderAuditorRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryProviderAuditorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryProviderAuditorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAuditorAttributesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAuditorAttributesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAuditorAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l 
{ + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/audit/v1/query.pb.gw.go b/go/node/audit/v1/query.pb.gw.go new file mode 100644 index 00000000..1965702c --- /dev/null +++ b/go/node/audit/v1/query.pb.gw.go @@ -0,0 +1,532 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: akash/audit/v1/query.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +var ( + filter_Query_AllProvidersAttributes_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_AllProvidersAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllProvidersAttributesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AllProvidersAttributes_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.AllProvidersAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_AllProvidersAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllProvidersAttributesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AllProvidersAttributes_0); err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.AllProvidersAttributes(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_ProviderAttributes_0 = &utilities.DoubleArray{Encoding: map[string]int{"owner": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_Query_ProviderAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryProviderAttributesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["owner"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") + } + + protoReq.Owner, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ProviderAttributes_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ProviderAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ProviderAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryProviderAttributesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["owner"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") + } + + protoReq.Owner, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ProviderAttributes_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ProviderAttributes(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_ProviderAuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryProviderAuditorRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["auditor"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") + } + + protoReq.Auditor, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) + } + + val, ok = pathParams["owner"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") + } + + protoReq.Owner, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: 
%v", "owner", err) + } + + msg, err := client.ProviderAuditorAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ProviderAuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryProviderAuditorRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["auditor"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") + } + + protoReq.Auditor, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) + } + + val, ok = pathParams["owner"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") + } + + protoReq.Owner, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) + } + + msg, err := server.ProviderAuditorAttributes(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_AuditorAttributes_0 = &utilities.DoubleArray{Encoding: map[string]int{"auditor": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_Query_AuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAuditorAttributesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["auditor"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") + } + + protoReq.Auditor, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AuditorAttributes_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.AuditorAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_AuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAuditorAttributesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["auditor"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") + } + + protoReq.Auditor, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AuditorAttributes_0); 
err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.AuditorAttributes(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_AllProvidersAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_AllProvidersAttributes_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllProvidersAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ProviderAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ProviderAttributes_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ProviderAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_ProviderAuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ProviderAuditorAttributes_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ProviderAuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_AuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_AuditorAttributes_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. 
If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_AllProvidersAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_AllProvidersAttributes_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllProvidersAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ProviderAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ProviderAttributes_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ProviderAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ProviderAuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ProviderAuditorAttributes_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ProviderAuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_AuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_AuditorAttributes_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Query_AllProvidersAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 1, 2, 3, 2, 4}, []string{"akash", "audit", "v1", "attributes", "list"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_ProviderAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 1, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"akash", "audit", "v1", "attributes", "owner", "list"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_ProviderAuditorAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 1, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5}, []string{"akash", "audit", "v1", "attributes", "auditor", "owner"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_AuditorAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"akash", "provider", "v1", "auditor", "list"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_AllProvidersAttributes_0 = runtime.ForwardResponseMessage + + forward_Query_ProviderAttributes_0 = runtime.ForwardResponseMessage + + forward_Query_ProviderAuditorAttributes_0 = runtime.ForwardResponseMessage + + forward_Query_AuditorAttributes_0 = runtime.ForwardResponseMessage +) diff --git a/go/node/audit/v1/service.pb.go b/go/node/audit/v1/service.pb.go new file mode 100644 index 00000000..ae2ca479 --- /dev/null +++ b/go/node/audit/v1/service.pb.go @@ -0,0 +1,168 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/audit/v1/service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
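For context on the generated gateway code above, the following is a minimal, illustrative sketch of how a consumer might wire the new audit v1 REST gateway into an HTTP server; it is not part of this diff. The import path `github.com/akash-network/akash-api/go/node/audit/v1`, the gRPC endpoint address, and the listen port are assumptions made purely for illustration.

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"

	// Assumed import path for the generated package introduced in this diff.
	auditv1 "github.com/akash-network/akash-api/go/node/audit/v1"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The ServeMux translates the REST routes registered above
	// (e.g. GET /akash/audit/v1/attributes/list) into Query service calls.
	mux := runtime.NewServeMux()

	// RegisterQueryHandlerFromEndpoint dials the node's gRPC endpoint and
	// closes the connection when ctx is done, as described in the generated
	// comments. Endpoint and dial options are placeholders.
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := auditv1.RegisterQueryHandlerFromEndpoint(ctx, mux, "localhost:9090", opts); err != nil {
		log.Fatalln(err)
	}

	log.Fatalln(http.ListenAndServe(":8080", mux))
}

With this wiring, a request such as GET /akash/audit/v1/attributes/{owner}/list would be proxied to Query/ProviderAttributes over the dialed gRPC connection, matching the pattern definitions shown above.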
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("akash/audit/v1/service.proto", fileDescriptor_767ce8698e34fe92) } + +var fileDescriptor_767ce8698e34fe92 = []byte{ + // 235 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x2c, 0x4d, 0xc9, 0x2c, 0xd1, 0x2f, 0x33, 0xd4, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, + 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x03, 0xcb, 0xea, 0x81, 0x65, 0xf5, + 0xca, 0x0c, 0xa5, 0xc4, 0x93, 0xf3, 0x8b, 0x73, 0xf3, 0x8b, 0xf5, 0x73, 0x8b, 0xd3, 0x41, 0x8a, + 0x73, 0x8b, 0xd3, 0x21, 0x0a, 0xa5, 0x24, 0xd0, 0x8c, 0x81, 0xcb, 0x18, 0x35, 0x31, 0x71, 0x31, + 0xfb, 0x16, 0xa7, 0x0b, 0x95, 0x71, 0x89, 0x05, 0x67, 0xa6, 0xe7, 0x05, 0x14, 0xe5, 0x97, 0x65, + 0xa6, 0xa4, 0x16, 0x39, 0x96, 0x94, 0x14, 0x65, 0x26, 0x95, 0x96, 0xa4, 0x16, 0x0b, 0x69, 0xea, + 0xa1, 0xda, 0xa2, 0xe7, 0x5b, 0x9c, 0x8e, 0x5d, 0xa9, 0x94, 0x21, 0xd1, 0x4a, 0x83, 0x52, 0x8b, + 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x85, 0x6a, 0xb8, 0x24, 0x5c, 0x52, 0x73, 0x52, 0x4b, 0x52, 0xb1, + 0xd8, 0xac, 0x8d, 0xc5, 0x38, 0x5c, 0x8a, 0xa5, 0x8c, 0x49, 0x50, 0x0c, 0xb3, 0x5d, 0x8a, 0xb5, + 0xe1, 0xf9, 0x06, 0x2d, 0x46, 0x27, 0xb3, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, + 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, + 0x88, 0x92, 0x29, 0xc8, 0x4e, 0xd7, 0x4b, 0xcc, 0x2e, 0xd1, 0x4b, 0x49, 0x2d, 0xd3, 0x4f, 0xcf, + 0xd7, 0xcf, 0xcb, 0x4f, 0x49, 0x85, 0x07, 0x63, 0x12, 0x1b, 0x38, 0x0c, 0x8d, 0x01, 0x01, 0x00, + 0x00, 0xff, 0xff, 0x0b, 0xb0, 0x08, 0x31, 0xa6, 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // SignProviderAttributes defines a method that signs provider attributes + SignProviderAttributes(ctx context.Context, in *MsgSignProviderAttributes, opts ...grpc.CallOption) (*MsgSignProviderAttributesResponse, error) + // DeleteProviderAttributes defines a method that deletes provider attributes + DeleteProviderAttributes(ctx context.Context, in *MsgDeleteProviderAttributes, opts ...grpc.CallOption) (*MsgDeleteProviderAttributesResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) SignProviderAttributes(ctx context.Context, in *MsgSignProviderAttributes, opts ...grpc.CallOption) (*MsgSignProviderAttributesResponse, error) { + out := new(MsgSignProviderAttributesResponse) + err := c.cc.Invoke(ctx, "/akash.audit.v1.Msg/SignProviderAttributes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) DeleteProviderAttributes(ctx context.Context, in *MsgDeleteProviderAttributes, opts ...grpc.CallOption) (*MsgDeleteProviderAttributesResponse, error) { + out := new(MsgDeleteProviderAttributesResponse) + err := c.cc.Invoke(ctx, "/akash.audit.v1.Msg/DeleteProviderAttributes", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // SignProviderAttributes defines a method that signs provider attributes + SignProviderAttributes(context.Context, *MsgSignProviderAttributes) (*MsgSignProviderAttributesResponse, error) + // DeleteProviderAttributes defines a method that deletes provider attributes + DeleteProviderAttributes(context.Context, *MsgDeleteProviderAttributes) (*MsgDeleteProviderAttributesResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) SignProviderAttributes(ctx context.Context, req *MsgSignProviderAttributes) (*MsgSignProviderAttributesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SignProviderAttributes not implemented") +} +func (*UnimplementedMsgServer) DeleteProviderAttributes(ctx context.Context, req *MsgDeleteProviderAttributes) (*MsgDeleteProviderAttributesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteProviderAttributes not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_SignProviderAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgSignProviderAttributes) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).SignProviderAttributes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.audit.v1.Msg/SignProviderAttributes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).SignProviderAttributes(ctx, req.(*MsgSignProviderAttributes)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_DeleteProviderAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgDeleteProviderAttributes) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).DeleteProviderAttributes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.audit.v1.Msg/DeleteProviderAttributes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).DeleteProviderAttributes(ctx, req.(*MsgDeleteProviderAttributes)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.audit.v1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SignProviderAttributes", + Handler: _Msg_SignProviderAttributes_Handler, + }, + { + MethodName: "DeleteProviderAttributes", + Handler: _Msg_DeleteProviderAttributes_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/audit/v1/service.proto", +} diff --git a/go/node/audit/v1/types.go b/go/node/audit/v1/types.go new file mode 100644 index 00000000..4f28a752 --- /dev/null +++ b/go/node/audit/v1/types.go @@ -0,0 +1,33 @@ +package v1 + +import ( + "bytes" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type ProviderID struct { + Owner sdk.Address + Auditor sdk.Address +} + +// AuditedProviders is the collection of AuditedProvider +type AuditedProviders []AuditedProvider + +// String 
implements the Stringer interface for a Providers object. +func (obj AuditedProviders) String() string { + var buf bytes.Buffer + + const sep = "\n\n" + + for _, p := range obj { + buf.WriteString(p.String()) + buf.WriteString(sep) + } + + if len(obj) > 0 { + buf.Truncate(buf.Len() - len(sep)) + } + + return buf.String() +} diff --git a/go/node/audit/v1beta1/audit.pb.go b/go/node/audit/v1beta1/audit.pb.go deleted file mode 100644 index f296481c..00000000 --- a/go/node/audit/v1beta1/audit.pb.go +++ /dev/null @@ -1,2079 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/audit/v1beta1/audit.proto - -package v1beta1 - -import ( - context "context" - fmt "fmt" - github_com_akash_network_akash_api_go_node_types_v1beta1 "github.com/akash-network/akash-api/go/node/types/v1beta1" - v1beta1 "github.com/akash-network/akash-api/go/node/types/v1beta1" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Provider stores owner auditor and attributes details -type Provider struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta1.Attributes `protobuf:"bytes,4,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta1.Attributes" json:"attributes" yaml:"attributes"` -} - -func (m *Provider) Reset() { *m = Provider{} } -func (m *Provider) String() string { return proto.CompactTextString(m) } -func (*Provider) ProtoMessage() {} -func (*Provider) Descriptor() ([]byte, []int) { - return fileDescriptor_c24b9e4462ded131, []int{0} -} -func (m *Provider) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Provider.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Provider) XXX_Merge(src proto.Message) { - xxx_messageInfo_Provider.Merge(m, src) -} -func (m *Provider) XXX_Size() int { - return m.Size() -} -func (m *Provider) XXX_DiscardUnknown() { - xxx_messageInfo_Provider.DiscardUnknown(m) -} - -var xxx_messageInfo_Provider proto.InternalMessageInfo - -func (m *Provider) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *Provider) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *Provider) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta1.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// Attributes -type AuditedAttributes struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" 
json:"owner" yaml:"owner"` - Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta1.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta1.Attributes" json:"attributes" yaml:"attributes"` -} - -func (m *AuditedAttributes) Reset() { *m = AuditedAttributes{} } -func (m *AuditedAttributes) String() string { return proto.CompactTextString(m) } -func (*AuditedAttributes) ProtoMessage() {} -func (*AuditedAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_c24b9e4462ded131, []int{1} -} -func (m *AuditedAttributes) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuditedAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuditedAttributes.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuditedAttributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuditedAttributes.Merge(m, src) -} -func (m *AuditedAttributes) XXX_Size() int { - return m.Size() -} -func (m *AuditedAttributes) XXX_DiscardUnknown() { - xxx_messageInfo_AuditedAttributes.DiscardUnknown(m) -} - -var xxx_messageInfo_AuditedAttributes proto.InternalMessageInfo - -func (m *AuditedAttributes) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *AuditedAttributes) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *AuditedAttributes) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta1.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// AttributesResponse represents details of deployment along with group details -type AttributesResponse struct { - Attributes []AuditedAttributes `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes" yaml:"attributes"` -} - -func (m *AttributesResponse) Reset() { *m = AttributesResponse{} } -func (m *AttributesResponse) String() string { return proto.CompactTextString(m) } -func (*AttributesResponse) ProtoMessage() {} -func (*AttributesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c24b9e4462ded131, []int{2} -} -func (m *AttributesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AttributesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AttributesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AttributesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttributesResponse.Merge(m, src) -} -func (m *AttributesResponse) XXX_Size() int { - return m.Size() -} -func (m *AttributesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AttributesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AttributesResponse proto.InternalMessageInfo - -func (m *AttributesResponse) GetAttributes() []AuditedAttributes { - if m != nil { - return m.Attributes - } - return nil -} - -// AttributesFilters defines filters used to filter deployments -type AttributesFilters struct { - Auditors []string `protobuf:"bytes,1,rep,name=auditors,proto3" json:"auditors" yaml:"auditors"` - Owners []string `protobuf:"bytes,2,rep,name=owners,proto3" json:"owners" yaml:"owners"` 
-} - -func (m *AttributesFilters) Reset() { *m = AttributesFilters{} } -func (m *AttributesFilters) String() string { return proto.CompactTextString(m) } -func (*AttributesFilters) ProtoMessage() {} -func (*AttributesFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_c24b9e4462ded131, []int{3} -} -func (m *AttributesFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AttributesFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AttributesFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AttributesFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttributesFilters.Merge(m, src) -} -func (m *AttributesFilters) XXX_Size() int { - return m.Size() -} -func (m *AttributesFilters) XXX_DiscardUnknown() { - xxx_messageInfo_AttributesFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_AttributesFilters proto.InternalMessageInfo - -func (m *AttributesFilters) GetAuditors() []string { - if m != nil { - return m.Auditors - } - return nil -} - -func (m *AttributesFilters) GetOwners() []string { - if m != nil { - return m.Owners - } - return nil -} - -// MsgSignProviderAttributes defines an SDK message for signing a provider attributes -type MsgSignProviderAttributes struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta1.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta1.Attributes" json:"attributes" yaml:"attributes"` -} - -func (m *MsgSignProviderAttributes) Reset() { *m = MsgSignProviderAttributes{} } -func (m *MsgSignProviderAttributes) String() string { return proto.CompactTextString(m) } -func (*MsgSignProviderAttributes) ProtoMessage() {} -func (*MsgSignProviderAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_c24b9e4462ded131, []int{4} -} -func (m *MsgSignProviderAttributes) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgSignProviderAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgSignProviderAttributes.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgSignProviderAttributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgSignProviderAttributes.Merge(m, src) -} -func (m *MsgSignProviderAttributes) XXX_Size() int { - return m.Size() -} -func (m *MsgSignProviderAttributes) XXX_DiscardUnknown() { - xxx_messageInfo_MsgSignProviderAttributes.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgSignProviderAttributes proto.InternalMessageInfo - -func (m *MsgSignProviderAttributes) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *MsgSignProviderAttributes) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *MsgSignProviderAttributes) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta1.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. 
-type MsgSignProviderAttributesResponse struct { -} - -func (m *MsgSignProviderAttributesResponse) Reset() { *m = MsgSignProviderAttributesResponse{} } -func (m *MsgSignProviderAttributesResponse) String() string { return proto.CompactTextString(m) } -func (*MsgSignProviderAttributesResponse) ProtoMessage() {} -func (*MsgSignProviderAttributesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c24b9e4462ded131, []int{5} -} -func (m *MsgSignProviderAttributesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgSignProviderAttributesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgSignProviderAttributesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgSignProviderAttributesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgSignProviderAttributesResponse.Merge(m, src) -} -func (m *MsgSignProviderAttributesResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgSignProviderAttributesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgSignProviderAttributesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgSignProviderAttributesResponse proto.InternalMessageInfo - -// MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes -type MsgDeleteProviderAttributes struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` - Keys []string `protobuf:"bytes,3,rep,name=keys,proto3" json:"keys" yaml:"keys"` -} - -func (m *MsgDeleteProviderAttributes) Reset() { *m = MsgDeleteProviderAttributes{} } -func (m *MsgDeleteProviderAttributes) String() string { return proto.CompactTextString(m) } -func (*MsgDeleteProviderAttributes) ProtoMessage() {} -func (*MsgDeleteProviderAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_c24b9e4462ded131, []int{6} -} -func (m *MsgDeleteProviderAttributes) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDeleteProviderAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDeleteProviderAttributes.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDeleteProviderAttributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDeleteProviderAttributes.Merge(m, src) -} -func (m *MsgDeleteProviderAttributes) XXX_Size() int { - return m.Size() -} -func (m *MsgDeleteProviderAttributes) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDeleteProviderAttributes.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDeleteProviderAttributes proto.InternalMessageInfo - -func (m *MsgDeleteProviderAttributes) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *MsgDeleteProviderAttributes) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *MsgDeleteProviderAttributes) GetKeys() []string { - if m != nil { - return m.Keys - } - return nil -} - -// MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. 
-type MsgDeleteProviderAttributesResponse struct { -} - -func (m *MsgDeleteProviderAttributesResponse) Reset() { *m = MsgDeleteProviderAttributesResponse{} } -func (m *MsgDeleteProviderAttributesResponse) String() string { return proto.CompactTextString(m) } -func (*MsgDeleteProviderAttributesResponse) ProtoMessage() {} -func (*MsgDeleteProviderAttributesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c24b9e4462ded131, []int{7} -} -func (m *MsgDeleteProviderAttributesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDeleteProviderAttributesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDeleteProviderAttributesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDeleteProviderAttributesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDeleteProviderAttributesResponse.Merge(m, src) -} -func (m *MsgDeleteProviderAttributesResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgDeleteProviderAttributesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDeleteProviderAttributesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDeleteProviderAttributesResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Provider)(nil), "akash.audit.v1beta1.Provider") - proto.RegisterType((*AuditedAttributes)(nil), "akash.audit.v1beta1.AuditedAttributes") - proto.RegisterType((*AttributesResponse)(nil), "akash.audit.v1beta1.AttributesResponse") - proto.RegisterType((*AttributesFilters)(nil), "akash.audit.v1beta1.AttributesFilters") - proto.RegisterType((*MsgSignProviderAttributes)(nil), "akash.audit.v1beta1.MsgSignProviderAttributes") - proto.RegisterType((*MsgSignProviderAttributesResponse)(nil), "akash.audit.v1beta1.MsgSignProviderAttributesResponse") - proto.RegisterType((*MsgDeleteProviderAttributes)(nil), "akash.audit.v1beta1.MsgDeleteProviderAttributes") - proto.RegisterType((*MsgDeleteProviderAttributesResponse)(nil), "akash.audit.v1beta1.MsgDeleteProviderAttributesResponse") -} - -func init() { proto.RegisterFile("akash/audit/v1beta1/audit.proto", fileDescriptor_c24b9e4462ded131) } - -var fileDescriptor_c24b9e4462ded131 = []byte{ - // 575 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x6f, 0xd3, 0x4e, - 0x14, 0xcf, 0x39, 0xf9, 0xf6, 0x9b, 0x5c, 0xf9, 0xa1, 0x18, 0x04, 0x69, 0xaa, 0xfa, 0xca, 0x55, - 0x40, 0x25, 0x84, 0x4d, 0x5b, 0x09, 0xaa, 0x32, 0xd5, 0x20, 0xb6, 0x48, 0xc8, 0x6c, 0x6c, 0x0e, - 0x39, 0xb9, 0x56, 0x52, 0x5f, 0xe4, 0xbb, 0xb4, 0xca, 0xc6, 0xc4, 0x0a, 0x6c, 0x8c, 0x1d, 0x98, - 0x90, 0x98, 0x91, 0xe0, 0x1f, 0xe8, 0xd8, 0x91, 0xe9, 0x40, 0xc9, 0x82, 0x3c, 0xfa, 0x2f, 0x40, - 0xb9, 0xb3, 0x1d, 0x47, 0xad, 0x51, 0x33, 0x95, 0x81, 0xcd, 0xef, 0xf3, 0x3e, 0xef, 0xdd, 0xe7, - 0x7d, 0xfc, 0x4e, 0x07, 0x91, 0xdb, 0x75, 0xd9, 0x9e, 0xe5, 0x0e, 0x3a, 0x3e, 0xb7, 0x0e, 0x36, - 0xda, 0x84, 0xbb, 0x1b, 0x2a, 0x32, 0xfb, 0x21, 0xe5, 0x54, 0xbf, 0x26, 0x09, 0xa6, 0x82, 0x12, - 0x42, 0xf3, 0xba, 0x47, 0x3d, 0x2a, 0xf3, 0xd6, 0xe4, 0x4b, 0x51, 0x9b, 0x58, 0xf5, 0x6a, 0xbb, - 0x8c, 0x4c, 0x5b, 0x71, 0x1e, 0xfa, 0xed, 0x01, 0x27, 0x8a, 0x83, 0x3f, 0x6a, 0xb0, 0xfa, 0x3c, - 0xa4, 0x07, 0x7e, 0x87, 0x84, 0xba, 0x05, 0xff, 0xa3, 0x87, 0x01, 0x09, 0x1b, 0x60, 0x15, 0xac, - 0xd7, 0xec, 0xa5, 0x48, 0x20, 0x05, 0xc4, 0x02, 0x5d, 0x1a, 0xba, 0xfb, 0xbd, 
0x1d, 0x2c, 0x43, - 0xec, 0x28, 0x58, 0x7f, 0x04, 0xff, 0x97, 0x42, 0x68, 0xd8, 0xd0, 0x64, 0xc9, 0x4a, 0x24, 0x50, - 0x0a, 0xc5, 0x02, 0x5d, 0x51, 0x45, 0x09, 0x80, 0x9d, 0x34, 0xa5, 0x7f, 0x06, 0x10, 0x66, 0x52, - 0x58, 0xa3, 0xb2, 0x5a, 0x5e, 0x5f, 0xdc, 0x5c, 0x31, 0xd5, 0x6c, 0x13, 0xc1, 0xe9, 0x68, 0xe6, - 0x6e, 0xca, 0xb2, 0x83, 0x63, 0x81, 0x4a, 0x91, 0x40, 0xb9, 0xc2, 0x58, 0xa0, 0x7a, 0x72, 0x44, - 0x86, 0xe1, 0x4f, 0x3f, 0xd0, 0x13, 0xcf, 0xe7, 0x7b, 0x83, 0xb6, 0xf9, 0x8a, 0xee, 0x5b, 0xb2, - 0xe7, 0xfd, 0x80, 0xf0, 0x43, 0x1a, 0x76, 0x93, 0xc8, 0xed, 0xfb, 0x96, 0x47, 0xad, 0x80, 0x76, - 0x88, 0xc5, 0x87, 0x7d, 0xc2, 0xac, 0x53, 0xc7, 0x31, 0x27, 0x77, 0x0e, 0xfe, 0xa2, 0xc1, 0xfa, - 0xee, 0x44, 0x3b, 0xe9, 0x4c, 0x19, 0x17, 0xe7, 0x57, 0xf9, 0x2f, 0xf7, 0x6b, 0xa7, 0xfa, 0xe1, - 0x08, 0x81, 0x5f, 0x47, 0xa8, 0x84, 0xdf, 0x02, 0xa8, 0xe7, 0x48, 0x84, 0xf5, 0x69, 0xc0, 0x88, - 0x4e, 0x67, 0xe6, 0x01, 0x72, 0x9e, 0x3b, 0xe6, 0x19, 0xbb, 0x6d, 0x9e, 0xb2, 0xdd, 0xbe, 0x7b, - 0xce, 0xc1, 0x8a, 0x15, 0xd5, 0xa7, 0xdd, 0x9e, 0xf9, 0x3d, 0x4e, 0x42, 0xa6, 0x3f, 0x86, 0xd5, - 0xc4, 0x6c, 0x25, 0xa7, 0x66, 0xa3, 0x48, 0xa0, 0x0c, 0x8b, 0x05, 0xba, 0x3a, 0xf3, 0x73, 0x18, - 0x76, 0xb2, 0xa4, 0xbe, 0x05, 0x17, 0xe4, 0x0f, 0x66, 0x0d, 0x4d, 0x96, 0x2e, 0x47, 0x02, 0x25, - 0x48, 0x2c, 0xd0, 0xe5, 0xdc, 0x2a, 0x30, 0xec, 0x24, 0x89, 0x9c, 0xa2, 0x6f, 0x1a, 0x5c, 0x6a, - 0x31, 0xef, 0x85, 0xef, 0x05, 0xe9, 0x5d, 0xfc, 0xb7, 0x65, 0xe7, 0xd8, 0xb2, 0x8a, 0x74, 0x6f, - 0x0d, 0xde, 0x2a, 0x34, 0x2f, 0xdd, 0x37, 0xfc, 0x15, 0xc0, 0xe5, 0x16, 0xf3, 0x9e, 0x92, 0x1e, - 0xe1, 0xe4, 0x42, 0x4d, 0xbe, 0x07, 0x2b, 0x5d, 0x32, 0x54, 0xee, 0xd6, 0xec, 0x9b, 0x91, 0x40, - 0x32, 0x8e, 0x05, 0x5a, 0x54, 0x25, 0x93, 0x08, 0x3b, 0x12, 0x4c, 0x26, 0xbc, 0x0d, 0xd7, 0xfe, - 0xa0, 0x3d, 0x9d, 0x71, 0xf3, 0xbd, 0x06, 0xcb, 0x2d, 0xe6, 0xe9, 0xaf, 0x01, 0xbc, 0x51, 0xb0, - 0x4b, 0xe6, 0x99, 0x57, 0xac, 0xd0, 0xbe, 0xe6, 0xc3, 0xf9, 0xf8, 0xd9, 0xf5, 0x7e, 0x03, 0x60, - 0xa3, 0xd0, 0xeb, 0x07, 0x45, 0x4d, 0x8b, 0x2a, 0x9a, 0xdb, 0xf3, 0x56, 0xa4, 0x42, 0x6c, 0xe7, - 0x78, 0x64, 0x80, 0x93, 0x91, 0x01, 0x7e, 0x8e, 0x0c, 0xf0, 0x6e, 0x6c, 0x94, 0x4e, 0xc6, 0x46, - 0xe9, 0xfb, 0xd8, 0x28, 0xbd, 0xdc, 0x9e, 0x63, 0x1b, 0x67, 0x9e, 0xe3, 0xf6, 0x82, 0x7c, 0x3a, - 0xb7, 0x7e, 0x07, 0x00, 0x00, 0xff, 0xff, 0xbb, 0xd0, 0xe7, 0x3b, 0xac, 0x07, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type MsgClient interface { - // SignProviderAttributes defines a method that signs provider attributes - SignProviderAttributes(ctx context.Context, in *MsgSignProviderAttributes, opts ...grpc.CallOption) (*MsgSignProviderAttributesResponse, error) - // DeleteProviderAttributes defines a method that deletes provider attributes - DeleteProviderAttributes(ctx context.Context, in *MsgDeleteProviderAttributes, opts ...grpc.CallOption) (*MsgDeleteProviderAttributesResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) SignProviderAttributes(ctx context.Context, in *MsgSignProviderAttributes, opts ...grpc.CallOption) (*MsgSignProviderAttributesResponse, error) { - out := new(MsgSignProviderAttributesResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta1.Msg/SignProviderAttributes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) DeleteProviderAttributes(ctx context.Context, in *MsgDeleteProviderAttributes, opts ...grpc.CallOption) (*MsgDeleteProviderAttributesResponse, error) { - out := new(MsgDeleteProviderAttributesResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta1.Msg/DeleteProviderAttributes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. -type MsgServer interface { - // SignProviderAttributes defines a method that signs provider attributes - SignProviderAttributes(context.Context, *MsgSignProviderAttributes) (*MsgSignProviderAttributesResponse, error) - // DeleteProviderAttributes defines a method that deletes provider attributes - DeleteProviderAttributes(context.Context, *MsgDeleteProviderAttributes) (*MsgDeleteProviderAttributesResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
-type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) SignProviderAttributes(ctx context.Context, req *MsgSignProviderAttributes) (*MsgSignProviderAttributesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SignProviderAttributes not implemented") -} -func (*UnimplementedMsgServer) DeleteProviderAttributes(ctx context.Context, req *MsgDeleteProviderAttributes) (*MsgDeleteProviderAttributesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteProviderAttributes not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_SignProviderAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgSignProviderAttributes) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).SignProviderAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta1.Msg/SignProviderAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).SignProviderAttributes(ctx, req.(*MsgSignProviderAttributes)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_DeleteProviderAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgDeleteProviderAttributes) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).DeleteProviderAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta1.Msg/DeleteProviderAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).DeleteProviderAttributes(ctx, req.(*MsgDeleteProviderAttributes)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.audit.v1beta1.Msg", - HandlerType: (*MsgServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SignProviderAttributes", - Handler: _Msg_SignProviderAttributes_Handler, - }, - { - MethodName: "DeleteProviderAttributes", - Handler: _Msg_DeleteProviderAttributes_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/audit/v1beta1/audit.proto", -} - -func (m *Provider) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Provider) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Provider) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAudit(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 
0xa - } - return len(dAtA) - i, nil -} - -func (m *AuditedAttributes) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuditedAttributes) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuditedAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAudit(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AttributesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AttributesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AttributesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAudit(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *AttributesFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AttributesFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AttributesFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Owners) > 0 { - for iNdEx := len(m.Owners) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Owners[iNdEx]) - copy(dAtA[i:], m.Owners[iNdEx]) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Owners[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Auditors) > 0 { - for iNdEx := len(m.Auditors) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Auditors[iNdEx]) - copy(dAtA[i:], m.Auditors[iNdEx]) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditors[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *MsgSignProviderAttributes) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgSignProviderAttributes) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgSignProviderAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := 
len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAudit(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgSignProviderAttributesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgSignProviderAttributesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgSignProviderAttributesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgDeleteProviderAttributes) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDeleteProviderAttributes) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDeleteProviderAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Keys) > 0 { - for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Keys[iNdEx]) - copy(dAtA[i:], m.Keys[iNdEx]) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Keys[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgDeleteProviderAttributesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDeleteProviderAttributesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDeleteProviderAttributesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintAudit(dAtA []byte, offset int, v uint64) int { - offset -= sovAudit(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Provider) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *AuditedAttributes) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - 
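// Size accounting for a populated string field at this step: 1 byte for the
// field tag, sovAudit(l) bytes for the length prefix, and l bytes of payload —
// e.g. a 44-character bech32 owner address contributes 1 + 1 + 44 = 46 bytes.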
n += 1 + l + sovAudit(uint64(l)) - } - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *AttributesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *AttributesFilters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Auditors) > 0 { - for _, s := range m.Auditors { - l = len(s) - n += 1 + l + sovAudit(uint64(l)) - } - } - if len(m.Owners) > 0 { - for _, s := range m.Owners { - l = len(s) - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *MsgSignProviderAttributes) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *MsgSignProviderAttributesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgDeleteProviderAttributes) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - if len(m.Keys) > 0 { - for _, s := range m.Keys { - l = len(s) - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *MsgDeleteProviderAttributesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovAudit(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozAudit(x uint64) (n int) { - return sovAudit(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Provider) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Provider: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Provider: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta1.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuditedAttributes) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuditedAttributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuditedAttributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta1.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AttributesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AttributesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AttributesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, AuditedAttributes{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AttributesFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AttributesFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: AttributesFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditors", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditors = append(m.Auditors, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owners", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owners = append(m.Owners, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgSignProviderAttributes) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgSignProviderAttributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgSignProviderAttributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta1.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgSignProviderAttributesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgSignProviderAttributesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgSignProviderAttributesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDeleteProviderAttributes) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDeleteProviderAttributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDeleteProviderAttributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keys = append(m.Keys, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDeleteProviderAttributesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDeleteProviderAttributesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDeleteProviderAttributesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAudit(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAudit - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
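// The low three bits of the tag just decoded select the protobuf wire type
// that skipAudit must step over: 0 = varint, 1 = fixed64, 2 = length-delimited,
// 3/4 = group start/end, 5 = fixed32.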
wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAudit - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAudit - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthAudit - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupAudit - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthAudit - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthAudit = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAudit = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupAudit = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/audit/v1beta1/codec.go b/go/node/audit/v1beta1/codec.go deleted file mode 100644 index ac6eb2c1..00000000 --- a/go/node/audit/v1beta1/codec.go +++ /dev/null @@ -1,43 +0,0 @@ -package v1beta1 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/msgservice" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/provider module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/provider and - // defined at the application level. 
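// Within this package the codec's main consumers were the GetSignBytes
// helpers in msgs.go (also removed in this change), which produced sorted
// amino-JSON sign bytes via sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&m)).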
- ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterLegacyAminoCodec register concrete types on codec -func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { - cdc.RegisterConcrete(&MsgSignProviderAttributes{}, ModuleName+"/"+MsgTypeSignProviderAttributes, nil) - cdc.RegisterConcrete(&MsgDeleteProviderAttributes{}, ModuleName+"/"+MsgTypeDeleteProviderAttributes, nil) -} - -// RegisterInterfaces registers the x/provider interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil), - &MsgSignProviderAttributes{}, - &MsgDeleteProviderAttributes{}, - ) - - msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) -} diff --git a/go/node/audit/v1beta1/errors.go b/go/node/audit/v1beta1/errors.go deleted file mode 100644 index d540e894..00000000 --- a/go/node/audit/v1beta1/errors.go +++ /dev/null @@ -1 +0,0 @@ -package v1beta1 diff --git a/go/node/audit/v1beta1/event.go b/go/node/audit/v1beta1/event.go deleted file mode 100644 index ecbdfc7e..00000000 --- a/go/node/audit/v1beta1/event.go +++ /dev/null @@ -1,118 +0,0 @@ -package v1beta1 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/akash-network/akash-api/go/sdkutil" -) - -const ( - evActionTrustedAuditorCreated = "audit-trusted-auditor-created" - evActionTrustedAuditorDeleted = "audit-trusted-auditor-deleted" - evOwnerKey = "owner" - evAuditorKey = "auditor" -) - -// EventTrustedAuditorCreated struct -type EventTrustedAuditorCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - Owner sdk.Address `json:"owner"` - Auditor sdk.Address `json:"auditor"` -} - -func NewEventTrustedAuditorCreated(owner sdk.Address, auditor sdk.Address) EventTrustedAuditorCreated { - return EventTrustedAuditorCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionTrustedAuditorCreated, - }, - Owner: owner, - Auditor: auditor, - } -} - -// ToSDKEvent method creates new sdk event for EventProviderCreated struct -func (ev EventTrustedAuditorCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionTrustedAuditorCreated), - }, TrustedAuditorEVAttributes(ev.Owner, ev.Auditor)...)..., - ) -} - -// TrustedAuditorEVAttributes returns event attributes for given Provider -func TrustedAuditorEVAttributes(owner sdk.Address, auditor sdk.Address) []sdk.Attribute { - return []sdk.Attribute{ - sdk.NewAttribute(evOwnerKey, owner.String()), - sdk.NewAttribute(evAuditorKey, auditor.String()), - } -} - -// ParseEVTTrustedAuditor returns provider details for given event attributes -func ParseEVTTrustedAuditor(attrs []sdk.Attribute) (sdk.Address, sdk.Address, error) { - owner, err := sdkutil.GetAccAddress(attrs, evOwnerKey) - if err != nil { - return nil, nil, err - } - - auditor, err := sdkutil.GetAccAddress(attrs, evAuditorKey) - if err != nil { - return nil, nil, err - } - - return owner, auditor, nil -} - -type EventTrustedAuditorDeleted struct { - Context sdkutil.BaseModuleEvent `json:"context"` - Owner sdk.Address `json:"owner"` - Auditor sdk.Address `json:"auditor"` -} - -func NewEventTrustedAuditorDeleted(owner sdk.Address, auditor sdk.Address) EventTrustedAuditorDeleted { - return EventTrustedAuditorDeleted{ - Context: 
sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionTrustedAuditorDeleted, - }, - Owner: owner, - Auditor: auditor, - } -} - -// ToSDKEvent method creates new sdk event for EventProviderCreated struct -func (ev EventTrustedAuditorDeleted) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionTrustedAuditorDeleted), - }, TrustedAuditorEVAttributes(ev.Owner, ev.Auditor)...)..., - ) -} - -// ParseEvent parses event and returns details of event and error if occurred -func ParseEvent(ev sdkutil.Event) (sdkutil.ModuleEvent, error) { - if ev.Type != sdkutil.EventTypeMessage { - return nil, sdkutil.ErrUnknownType - } - if ev.Module != ModuleName { - return nil, sdkutil.ErrUnknownModule - } - switch ev.Action { - case evActionTrustedAuditorCreated: - owner, auditor, err := ParseEVTTrustedAuditor(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventTrustedAuditorCreated(owner, auditor), nil - case evActionTrustedAuditorDeleted: - owner, auditor, err := ParseEVTTrustedAuditor(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventTrustedAuditorDeleted(owner, auditor), nil - default: - return nil, sdkutil.ErrUnknownAction - } -} diff --git a/go/node/audit/v1beta1/genesis.pb.go b/go/node/audit/v1beta1/genesis.pb.go deleted file mode 100644 index d6b05d39..00000000 --- a/go/node/audit/v1beta1/genesis.pb.go +++ /dev/null @@ -1,332 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/audit/v1beta1/genesis.proto - -package v1beta1 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
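// A minimal, hypothetical sketch of how the ParseEvent helper removed above was
// typically consumed — mapping module-scoped tx events back to typed audit
// events. handleAuditEvent and the log text are illustrative only.
//
//	func handleAuditEvent(ev sdkutil.Event) error {
//		mev, err := v1beta1.ParseEvent(ev)
//		if err != nil {
//			return err // sdkutil.ErrUnknownType / ErrUnknownModule / ErrUnknownAction
//		}
//		switch e := mev.(type) {
//		case v1beta1.EventTrustedAuditorCreated:
//			log.Println("auditor signed provider attributes:", e.Owner.String(), e.Auditor.String())
//		case v1beta1.EventTrustedAuditorDeleted:
//			log.Println("auditor revoked provider attributes:", e.Owner.String(), e.Auditor.String())
//		}
//		return nil
//	}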
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisState defines the basic genesis state used by audit module -type GenesisState struct { - Attributes []AuditedAttributes `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes" yaml:"attributes"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_c832388e2ecc1d8d, []int{0} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetAttributes() []AuditedAttributes { - if m != nil { - return m.Attributes - } - return nil -} - -func init() { - proto.RegisterType((*GenesisState)(nil), "akash.audit.v1beta1.GenesisState") -} - -func init() { proto.RegisterFile("akash/audit/v1beta1/genesis.proto", fileDescriptor_c832388e2ecc1d8d) } - -var fileDescriptor_c832388e2ecc1d8d = []byte{ - // 230 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x2c, 0x4d, 0xc9, 0x2c, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, - 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x06, - 0x2b, 0xd1, 0x03, 0x2b, 0xd1, 0x83, 0x2a, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xcb, 0xeb, - 0x83, 0x58, 0x10, 0xa5, 0x52, 0xf2, 0xd8, 0x4c, 0x83, 0x68, 0x04, 0x2b, 0x50, 0xaa, 0xe7, 0xe2, - 0x71, 0x87, 0x18, 0x1e, 0x5c, 0x92, 0x58, 0x92, 0x2a, 0x94, 0xcf, 0xc5, 0x95, 0x58, 0x52, 0x52, - 0x94, 0x99, 0x54, 0x5a, 0x92, 0x5a, 0x2c, 0xc1, 0xa8, 0xc0, 0xac, 0xc1, 0x6d, 0xa4, 0xa6, 0x87, - 0xc5, 0x42, 0x3d, 0x47, 0x10, 0x2f, 0x35, 0xc5, 0x11, 0xae, 0xda, 0x49, 0xfd, 0xc4, 0x3d, 0x79, - 0x86, 0x57, 0xf7, 0xe4, 0x91, 0x4c, 0xf8, 0x74, 0x4f, 0x5e, 0xb0, 0x32, 0x31, 0x37, 0xc7, 0x4a, - 0x09, 0x21, 0xa6, 0x14, 0x84, 0xa4, 0xc0, 0xc9, 0xfd, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, - 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, - 0xe5, 0x18, 0xa2, 0x74, 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xf3, - 0xcb, 0x8a, 0x92, 0x73, 0xb2, 0xf5, 0x21, 0xbe, 0xa9, 0x80, 0xfa, 0xa7, 0xa4, 0xb2, 0x20, 0xb5, - 0x18, 0xe6, 0xab, 0x24, 0x36, 0xb0, 0x87, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x3f, - 0x6b, 0x0a, 0x41, 0x01, 0x00, 0x00, -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, AuditedAttributes{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/audit/v1beta1/key.go b/go/node/audit/v1beta1/key.go deleted file mode 100644 index a51b4703..00000000 --- a/go/node/audit/v1beta1/key.go +++ /dev/null @@ -1,16 +0,0 @@ -package v1beta1 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "audit" - - // StoreKey is the store key string for provider - StoreKey = ModuleName - - // RouterKey is the message route for provider - RouterKey = ModuleName -) - -func PrefixProviderID() []byte { - return []byte{0x01} -} diff --git a/go/node/audit/v1beta1/msgs.go b/go/node/audit/v1beta1/msgs.go deleted file mode 100644 index 90800b74..00000000 --- a/go/node/audit/v1beta1/msgs.go +++ /dev/null @@ -1,94 +0,0 @@ -package v1beta1 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - MsgTypeSignProviderAttributes = "audit-sign-provider-attributes" - MsgTypeDeleteProviderAttributes = "audit-delete-provider-attributes" -) - -var ( - _ sdk.Msg = &MsgSignProviderAttributes{} - _ sdk.Msg = &MsgDeleteProviderAttributes{} -) - -// ====MsgSignProviderAttributes==== -// Route implements the sdk.Msg interface -func (m MsgSignProviderAttributes) Route() string { - return RouterKey -} - -// Type implements the sdk.Msg interface -func (m MsgSignProviderAttributes) Type() string { - return MsgTypeSignProviderAttributes -} - -// ValidateBasic does basic validation -func (m MsgSignProviderAttributes) ValidateBasic() error { - if _, err := sdk.AccAddressFromBech32(m.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Owner Address") - } - - if _, err := sdk.AccAddressFromBech32(m.Auditor); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Auditor Address") - } - - return nil -} - -// GetSignBytes encodes the message for signing -func (m MsgSignProviderAttributes) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&m)) -} - -// GetSigners defines whose signature is required -func (m MsgSignProviderAttributes) GetSigners() []sdk.AccAddress { - auditor, err := sdk.AccAddressFromBech32(m.Auditor) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{auditor} -} - -// ====MsgRevokeProviderAttributes==== -// Route implements the sdk.Msg interface -func (m MsgDeleteProviderAttributes) Route() string { - return RouterKey -} - -// Type implements the sdk.Msg interface -func (m MsgDeleteProviderAttributes) Type() string { - return MsgTypeDeleteProviderAttributes -} - -// ValidateBasic does basic validation -func (m MsgDeleteProviderAttributes) ValidateBasic() error { - if _, err := 
sdk.AccAddressFromBech32(m.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Owner Address") - } - - if _, err := sdk.AccAddressFromBech32(m.Auditor); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Auditor Address") - } - - return nil -} - -// GetSignBytes encodes the message for signing -func (m MsgDeleteProviderAttributes) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&m)) -} - -// GetSigners defines whose signature is required -func (m MsgDeleteProviderAttributes) GetSigners() []sdk.AccAddress { - auditor, err := sdk.AccAddressFromBech32(m.Auditor) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{auditor} -} diff --git a/go/node/audit/v1beta1/query.pb.go b/go/node/audit/v1beta1/query.pb.go deleted file mode 100644 index eaf4661c..00000000 --- a/go/node/audit/v1beta1/query.pb.go +++ /dev/null @@ -1,1717 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/audit/v1beta1/query.proto - -package v1beta1 - -import ( - context "context" - fmt "fmt" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
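// A hedged sketch of how the MsgSignProviderAttributes type removed in msgs.go
// above was typically built and checked before broadcast; auditv1 and attrv1
// are illustrative package aliases, and the Key/Value fields are assumed from
// the attributes package.
//
//	msg := &auditv1.MsgSignProviderAttributes{
//		Owner:      ownerAddr.String(),
//		Auditor:    auditorAddr.String(),
//		Attributes: attrv1.Attributes{{Key: "region", Value: "us-west"}},
//	}
//	if err := msg.ValidateBasic(); err != nil { // rejects non-bech32 Owner/Auditor addresses
//		return err
//	}
//	signers := msg.GetSigners() // exactly one signer: the auditor account
//	_ = signers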
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryProvidersResponse is response type for the Query/Providers RPC method -type QueryProvidersResponse struct { - Providers Providers `protobuf:"bytes,1,rep,name=providers,proto3,castrepeated=Providers" json:"providers"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryProvidersResponse) Reset() { *m = QueryProvidersResponse{} } -func (m *QueryProvidersResponse) String() string { return proto.CompactTextString(m) } -func (*QueryProvidersResponse) ProtoMessage() {} -func (*QueryProvidersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_57c276f283e450c2, []int{0} -} -func (m *QueryProvidersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProvidersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProvidersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProvidersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProvidersResponse.Merge(m, src) -} -func (m *QueryProvidersResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryProvidersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProvidersResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProvidersResponse proto.InternalMessageInfo - -func (m *QueryProvidersResponse) GetProviders() Providers { - if m != nil { - return m.Providers - } - return nil -} - -func (m *QueryProvidersResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProviderRequest is request type for the Query/Provider RPC method -type QueryProviderRequest struct { - Auditor string `protobuf:"bytes,1,opt,name=auditor,proto3" json:"auditor,omitempty"` - Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` -} - -func (m *QueryProviderRequest) Reset() { *m = QueryProviderRequest{} } -func (m *QueryProviderRequest) String() string { return proto.CompactTextString(m) } -func (*QueryProviderRequest) ProtoMessage() {} -func (*QueryProviderRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_57c276f283e450c2, []int{1} -} -func (m *QueryProviderRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProviderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProviderRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProviderRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProviderRequest.Merge(m, src) -} -func (m *QueryProviderRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryProviderRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProviderRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProviderRequest proto.InternalMessageInfo - -func (m *QueryProviderRequest) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *QueryProviderRequest) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -// QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method -type QueryAllProvidersAttributesRequest 
struct { - Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryAllProvidersAttributesRequest) Reset() { *m = QueryAllProvidersAttributesRequest{} } -func (m *QueryAllProvidersAttributesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryAllProvidersAttributesRequest) ProtoMessage() {} -func (*QueryAllProvidersAttributesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_57c276f283e450c2, []int{2} -} -func (m *QueryAllProvidersAttributesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryAllProvidersAttributesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryAllProvidersAttributesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryAllProvidersAttributesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryAllProvidersAttributesRequest.Merge(m, src) -} -func (m *QueryAllProvidersAttributesRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryAllProvidersAttributesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryAllProvidersAttributesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryAllProvidersAttributesRequest proto.InternalMessageInfo - -func (m *QueryAllProvidersAttributesRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProviderAttributesRequest is request type for the Query/Provider RPC method -type QueryProviderAttributesRequest struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner,omitempty"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryProviderAttributesRequest) Reset() { *m = QueryProviderAttributesRequest{} } -func (m *QueryProviderAttributesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryProviderAttributesRequest) ProtoMessage() {} -func (*QueryProviderAttributesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_57c276f283e450c2, []int{3} -} -func (m *QueryProviderAttributesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProviderAttributesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProviderAttributesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProviderAttributesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProviderAttributesRequest.Merge(m, src) -} -func (m *QueryProviderAttributesRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryProviderAttributesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProviderAttributesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProviderAttributesRequest proto.InternalMessageInfo - -func (m *QueryProviderAttributesRequest) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *QueryProviderAttributesRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProviderAuditorRequest is request type for the Query/Providers RPC method -type QueryProviderAuditorRequest struct { - Auditor string 
`protobuf:"bytes,1,opt,name=auditor,proto3" json:"auditor,omitempty"` - Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` -} - -func (m *QueryProviderAuditorRequest) Reset() { *m = QueryProviderAuditorRequest{} } -func (m *QueryProviderAuditorRequest) String() string { return proto.CompactTextString(m) } -func (*QueryProviderAuditorRequest) ProtoMessage() {} -func (*QueryProviderAuditorRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_57c276f283e450c2, []int{4} -} -func (m *QueryProviderAuditorRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProviderAuditorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProviderAuditorRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProviderAuditorRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProviderAuditorRequest.Merge(m, src) -} -func (m *QueryProviderAuditorRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryProviderAuditorRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProviderAuditorRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProviderAuditorRequest proto.InternalMessageInfo - -func (m *QueryProviderAuditorRequest) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *QueryProviderAuditorRequest) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -// QueryAuditorAttributesRequest is request type for the Query/Providers RPC method -type QueryAuditorAttributesRequest struct { - Auditor string `protobuf:"bytes,1,opt,name=auditor,proto3" json:"auditor,omitempty"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryAuditorAttributesRequest) Reset() { *m = QueryAuditorAttributesRequest{} } -func (m *QueryAuditorAttributesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryAuditorAttributesRequest) ProtoMessage() {} -func (*QueryAuditorAttributesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_57c276f283e450c2, []int{5} -} -func (m *QueryAuditorAttributesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryAuditorAttributesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryAuditorAttributesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryAuditorAttributesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryAuditorAttributesRequest.Merge(m, src) -} -func (m *QueryAuditorAttributesRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryAuditorAttributesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryAuditorAttributesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryAuditorAttributesRequest proto.InternalMessageInfo - -func (m *QueryAuditorAttributesRequest) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *QueryAuditorAttributesRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -func init() { - proto.RegisterType((*QueryProvidersResponse)(nil), "akash.audit.v1beta1.QueryProvidersResponse") - 
proto.RegisterType((*QueryProviderRequest)(nil), "akash.audit.v1beta1.QueryProviderRequest") - proto.RegisterType((*QueryAllProvidersAttributesRequest)(nil), "akash.audit.v1beta1.QueryAllProvidersAttributesRequest") - proto.RegisterType((*QueryProviderAttributesRequest)(nil), "akash.audit.v1beta1.QueryProviderAttributesRequest") - proto.RegisterType((*QueryProviderAuditorRequest)(nil), "akash.audit.v1beta1.QueryProviderAuditorRequest") - proto.RegisterType((*QueryAuditorAttributesRequest)(nil), "akash.audit.v1beta1.QueryAuditorAttributesRequest") -} - -func init() { proto.RegisterFile("akash/audit/v1beta1/query.proto", fileDescriptor_57c276f283e450c2) } - -var fileDescriptor_57c276f283e450c2 = []byte{ - // 559 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xc1, 0x6e, 0xd3, 0x30, - 0x18, 0xc7, 0xeb, 0xa2, 0x82, 0xea, 0x9d, 0x66, 0xaa, 0xa9, 0x14, 0x96, 0x56, 0x39, 0x40, 0x35, - 0xc0, 0xde, 0x32, 0xc4, 0x04, 0x17, 0xb4, 0x1d, 0xb6, 0x13, 0x08, 0x72, 0xe4, 0xe6, 0x74, 0x56, - 0x16, 0x2d, 0x8b, 0xb3, 0xd8, 0x29, 0x4c, 0x68, 0x48, 0xf0, 0x04, 0x48, 0xbc, 0x05, 0xe2, 0x02, - 0x57, 0x5e, 0x60, 0xc7, 0x49, 0x5c, 0x38, 0x0d, 0xd4, 0xf2, 0x20, 0x28, 0x76, 0x92, 0x36, 0x4b, - 0x4b, 0xbb, 0xc1, 0xad, 0x69, 0xfe, 0xdf, 0xff, 0xff, 0xfb, 0x3e, 0x7f, 0x31, 0x6c, 0xd3, 0x7d, - 0x2a, 0xf6, 0x08, 0x8d, 0x77, 0x3d, 0x49, 0xfa, 0x6b, 0x0e, 0x93, 0x74, 0x8d, 0x1c, 0xc6, 0x2c, - 0x3a, 0xc2, 0x61, 0xc4, 0x25, 0x47, 0xd7, 0x95, 0x00, 0x2b, 0x01, 0x4e, 0x05, 0xad, 0x86, 0xcb, - 0x5d, 0xae, 0xde, 0x93, 0xe4, 0x97, 0x96, 0xb6, 0x6e, 0xb9, 0x9c, 0xbb, 0x3e, 0x23, 0x34, 0xf4, - 0x08, 0x0d, 0x02, 0x2e, 0xa9, 0xf4, 0x78, 0x20, 0xd2, 0xb7, 0x2b, 0x3d, 0x2e, 0x0e, 0xb8, 0x20, - 0x0e, 0x15, 0x4c, 0x27, 0xe4, 0x79, 0x21, 0x75, 0xbd, 0x40, 0x89, 0x53, 0xed, 0x44, 0x2a, 0x8d, - 0xa0, 0x04, 0xe6, 0x17, 0x00, 0x97, 0x5e, 0x24, 0x1e, 0xcf, 0x23, 0xde, 0xf7, 0x76, 0x59, 0x24, - 0x6c, 0x26, 0x42, 0x1e, 0x08, 0x86, 0x9e, 0xc1, 0x7a, 0x98, 0xfd, 0xd9, 0x04, 0x9d, 0x2b, 0xdd, - 0x05, 0x6b, 0x19, 0x4f, 0x68, 0x02, 0x67, 0xa5, 0x5b, 0x8b, 0x27, 0x67, 0xed, 0xca, 0xa7, 0x9f, - 0xed, 0xfa, 0xc8, 0x6c, 0x64, 0x81, 0x76, 0x20, 0x1c, 0xf1, 0x35, 0xab, 0x1d, 0xd0, 0x5d, 0xb0, - 0xee, 0x60, 0xdd, 0x0c, 0x4e, 0x9a, 0xc1, 0x7a, 0x5c, 0xb9, 0x2d, 0x75, 0x59, 0x06, 0x63, 0x8f, - 0x95, 0x9a, 0xdb, 0xb0, 0x51, 0x40, 0xb6, 0xd9, 0x61, 0xcc, 0x84, 0x44, 0x4d, 0x78, 0x4d, 0x81, - 0xf1, 0xa8, 0x09, 0x3a, 0xa0, 0x5b, 0xb7, 0xb3, 0x47, 0xd4, 0x80, 0x35, 0xfe, 0x2a, 0x60, 0x91, - 0x4a, 0xad, 0xdb, 0xfa, 0xc1, 0xf4, 0xa1, 0xa9, 0x7c, 0x36, 0x7d, 0x3f, 0x07, 0xde, 0x94, 0x32, - 0xf2, 0x9c, 0x58, 0x32, 0x91, 0xb9, 0x6e, 0x17, 0xb0, 0x81, 0xc2, 0xbe, 0x3d, 0x13, 0x5b, 0xd5, - 0x16, 0xa8, 0xdf, 0x42, 0xa3, 0x40, 0x5d, 0x4e, 0xca, 0x29, 0xc1, 0x18, 0xe5, 0xb9, 0xfc, 0xea, - 0xa5, 0xf3, 0x9f, 0xc2, 0x9b, 0xc5, 0x7c, 0x3d, 0x9b, 0xcb, 0x0e, 0xef, 0x1d, 0x80, 0xcb, 0x7a, - 0x7a, 0x5a, 0x56, 0x6e, 0x67, 0xba, 0xe3, 0x7f, 0x6a, 0xc9, 0x3a, 0xab, 0xc1, 0x9a, 0x62, 0x40, - 0x5f, 0x01, 0x5c, 0x9a, 0x7c, 0x8c, 0x68, 0x63, 0xe2, 0xce, 0xce, 0x3e, 0xf8, 0xd6, 0xdd, 0xe9, - 0x85, 0xa5, 0x8f, 0xc5, 0xb4, 0xde, 0x7f, 0xff, 0xfd, 0xb1, 0x7a, 0x0f, 0xad, 0x90, 0xa9, 0x5f, - 0x1c, 0xa1, 0x79, 0x04, 0xf1, 0x3d, 0x21, 0x13, 0x68, 0x54, 0xde, 0x06, 0xb4, 0x3e, 0x3b, 0xf7, - 0x1f, 0x61, 0x1f, 0x2b, 0xd8, 0x07, 0xc8, 0x9a, 0x0f, 0xf6, 0x8d, 0x3a, 0xf1, 0x63, 0x0d, 0xfd, - 0x0d, 0xc0, 0x1b, 0xe7, 0x56, 0x68, 0x8c, 0x7d, 0x75, 0x0e, 0xf6, 0xc2, 0xde, 0x5d, 0x0c, 0xfc, - 0x89, 0x02, 0x7f, 0x84, 0x36, 0xe6, 0x04, 0x4f, 
0x17, 0xee, 0x38, 0x6b, 0x01, 0x7d, 0x06, 0x70, - 0xb1, 0x4c, 0x6d, 0xfd, 0x65, 0x45, 0xa6, 0x6c, 0xf7, 0xc5, 0xb8, 0x1f, 0x2a, 0xee, 0x55, 0x84, - 0x53, 0xee, 0xec, 0x52, 0x2c, 0xa2, 0xf3, 0x68, 0x8c, 0x38, 0x19, 0xf6, 0xd6, 0xce, 0xc9, 0xc0, - 0x00, 0xa7, 0x03, 0x03, 0xfc, 0x1a, 0x18, 0xe0, 0xc3, 0xd0, 0xa8, 0x9c, 0x0e, 0x8d, 0xca, 0x8f, - 0xa1, 0x51, 0x79, 0x79, 0xdf, 0xf5, 0xe4, 0x5e, 0xec, 0xe0, 0x1e, 0x3f, 0x20, 0xbc, 0x1f, 0xf5, - 0xfc, 0xfd, 0xd4, 0xfa, 0x75, 0x3a, 0x06, 0x79, 0x14, 0x32, 0x91, 0xf9, 0x3b, 0x57, 0xd5, 0x6d, - 0xbf, 0xfe, 0x27, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xa4, 0x67, 0xcf, 0xa6, 0x06, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // AllProvidersAttributes queries all providers - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - AllProvidersAttributes(ctx context.Context, in *QueryAllProvidersAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) - // ProviderAttributes queries all provider signed attributes - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - ProviderAttributes(ctx context.Context, in *QueryProviderAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) - // ProviderAuditorAttributes queries provider signed attributes by specific auditor - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - ProviderAuditorAttributes(ctx context.Context, in *QueryProviderAuditorRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) - // AuditorAttributes queries all providers signed by this auditor - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - AuditorAttributes(ctx context.Context, in *QueryAuditorAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) AllProvidersAttributes(ctx context.Context, in *QueryAllProvidersAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { - out := new(QueryProvidersResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta1.Query/AllProvidersAttributes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) ProviderAttributes(ctx context.Context, in *QueryProviderAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { - out := new(QueryProvidersResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta1.Query/ProviderAttributes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) ProviderAuditorAttributes(ctx context.Context, in *QueryProviderAuditorRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { - out := new(QueryProvidersResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta1.Query/ProviderAuditorAttributes", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) AuditorAttributes(ctx context.Context, in *QueryAuditorAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { - out := new(QueryProvidersResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta1.Query/AuditorAttributes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // AllProvidersAttributes queries all providers - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - AllProvidersAttributes(context.Context, *QueryAllProvidersAttributesRequest) (*QueryProvidersResponse, error) - // ProviderAttributes queries all provider signed attributes - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - ProviderAttributes(context.Context, *QueryProviderAttributesRequest) (*QueryProvidersResponse, error) - // ProviderAuditorAttributes queries provider signed attributes by specific auditor - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - ProviderAuditorAttributes(context.Context, *QueryProviderAuditorRequest) (*QueryProvidersResponse, error) - // AuditorAttributes queries all providers signed by this auditor - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - AuditorAttributes(context.Context, *QueryAuditorAttributesRequest) (*QueryProvidersResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. -type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) AllProvidersAttributes(ctx context.Context, req *QueryAllProvidersAttributesRequest) (*QueryProvidersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AllProvidersAttributes not implemented") -} -func (*UnimplementedQueryServer) ProviderAttributes(ctx context.Context, req *QueryProviderAttributesRequest) (*QueryProvidersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ProviderAttributes not implemented") -} -func (*UnimplementedQueryServer) ProviderAuditorAttributes(ctx context.Context, req *QueryProviderAuditorRequest) (*QueryProvidersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ProviderAuditorAttributes not implemented") -} -func (*UnimplementedQueryServer) AuditorAttributes(ctx context.Context, req *QueryAuditorAttributesRequest) (*QueryProvidersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AuditorAttributes not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_AllProvidersAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryAllProvidersAttributesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).AllProvidersAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta1.Query/AllProvidersAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).AllProvidersAttributes(ctx, req.(*QueryAllProvidersAttributesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func 
_Query_ProviderAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryProviderAttributesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).ProviderAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta1.Query/ProviderAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).ProviderAttributes(ctx, req.(*QueryProviderAttributesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_ProviderAuditorAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryProviderAuditorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).ProviderAuditorAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta1.Query/ProviderAuditorAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).ProviderAuditorAttributes(ctx, req.(*QueryProviderAuditorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_AuditorAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryAuditorAttributesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).AuditorAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta1.Query/AuditorAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).AuditorAttributes(ctx, req.(*QueryAuditorAttributesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.audit.v1beta1.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "AllProvidersAttributes", - Handler: _Query_AllProvidersAttributes_Handler, - }, - { - MethodName: "ProviderAttributes", - Handler: _Query_ProviderAttributes_Handler, - }, - { - MethodName: "ProviderAuditorAttributes", - Handler: _Query_ProviderAuditorAttributes_Handler, - }, - { - MethodName: "AuditorAttributes", - Handler: _Query_AuditorAttributes_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/audit/v1beta1/query.proto", -} - -func (m *QueryProvidersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProvidersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProvidersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Providers) > 0 { - for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryProviderRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProviderRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProviderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x12 - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryAllProvidersAttributesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryAllProvidersAttributesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryAllProvidersAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryProviderAttributesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProviderAttributesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProviderAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryProviderAuditorRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProviderAuditorRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProviderAuditorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x12 - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintQuery(dAtA, i, 
uint64(len(m.Auditor))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryAuditorAttributesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryAuditorAttributesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryAuditorAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryProvidersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Providers) > 0 { - for _, e := range m.Providers { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryProviderRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryAllProvidersAttributesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryProviderAttributesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryProviderAuditorRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryAuditorAttributesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryProvidersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { 
- return fmt.Errorf("proto: QueryProvidersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProvidersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Providers = append(m.Providers, Provider{}) - if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryProviderRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProviderRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProviderRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var 
stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryAllProvidersAttributesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryAllProvidersAttributesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryAllProvidersAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryProviderAttributesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProviderAttributesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProviderAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) 
- } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryProviderAuditorRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProviderAuditorRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProviderAuditorRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - 
if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryAuditorAttributesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryAuditorAttributesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryAuditorAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, 
io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/audit/v1beta1/query.pb.gw.go b/go/node/audit/v1beta1/query.pb.gw.go deleted file mode 100644 index 2dc65845..00000000 --- a/go/node/audit/v1beta1/query.pb.gw.go +++ /dev/null @@ -1,532 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/audit/v1beta2/query.proto - -/* -Package v1beta1 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package v1beta1 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_AllProvidersAttributes_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_AllProvidersAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAllProvidersAttributesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AllProvidersAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.AllProvidersAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_AllProvidersAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAllProvidersAttributesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AllProvidersAttributes_0); err != nil { - return nil, 
metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.AllProvidersAttributes(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_ProviderAttributes_0 = &utilities.DoubleArray{Encoding: map[string]int{"owner": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} -) - -func request_Query_ProviderAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProviderAttributesRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["owner"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") - } - - protoReq.Owner, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ProviderAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ProviderAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_ProviderAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProviderAttributesRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["owner"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") - } - - protoReq.Owner, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ProviderAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ProviderAttributes(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Query_ProviderAuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProviderAuditorRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["auditor"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") - } - - protoReq.Auditor, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) - } - - val, ok = pathParams["owner"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") - } - - protoReq.Owner, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: 
%s, error: %v", "owner", err) - } - - msg, err := client.ProviderAuditorAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_ProviderAuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProviderAuditorRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["auditor"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") - } - - protoReq.Auditor, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) - } - - val, ok = pathParams["owner"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") - } - - protoReq.Owner, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) - } - - msg, err := server.ProviderAuditorAttributes(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_AuditorAttributes_0 = &utilities.DoubleArray{Encoding: map[string]int{"auditor": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} -) - -func request_Query_AuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAuditorAttributesRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["auditor"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") - } - - protoReq.Auditor, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AuditorAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.AuditorAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_AuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAuditorAttributesRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["auditor"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") - } - - protoReq.Auditor, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_Query_AuditorAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.AuditorAttributes(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. -func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_AllProvidersAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_AllProvidersAttributes_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_AllProvidersAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ProviderAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_ProviderAttributes_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ProviderAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_ProviderAuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_ProviderAuditorAttributes_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ProviderAuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_AuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_AuditorAttributes_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_AuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. 
If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_AllProvidersAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_AllProvidersAttributes_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_AllProvidersAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ProviderAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_ProviderAttributes_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ProviderAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ProviderAuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_ProviderAuditorAttributes_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ProviderAuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_AuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_AuditorAttributes_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_AuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Query_AllProvidersAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 1, 2, 3, 2, 4}, []string{"akash", "audit", "v1beta2", "attributes", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_ProviderAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 1, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"akash", "audit", "v1beta2", "attributes", "owner", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_ProviderAuditorAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 1, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5}, []string{"akash", "audit", "v1beta2", "attributes", "auditor", "owner"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_AuditorAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"akash", "provider", "v1beta2", "auditor", "list"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_AllProvidersAttributes_0 = runtime.ForwardResponseMessage - - forward_Query_ProviderAttributes_0 = runtime.ForwardResponseMessage - - forward_Query_ProviderAuditorAttributes_0 = runtime.ForwardResponseMessage - - forward_Query_AuditorAttributes_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/audit/v1beta1/types.go b/go/node/audit/v1beta1/types.go deleted file mode 100644 index 46164a43..00000000 --- a/go/node/audit/v1beta1/types.go +++ /dev/null @@ -1,33 +0,0 @@ -package v1beta1 - -import ( - "bytes" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -type ProviderID struct { - Owner sdk.Address - Auditor sdk.Address -} - -// Providers is the collection of Provider -type Providers []Provider - -// String implements the Stringer interface for a Providers object. -func (obj Providers) String() string { - var buf bytes.Buffer - - const sep = "\n\n" - - for _, p := range obj { - buf.WriteString(p.String()) - buf.WriteString(sep) - } - - if len(obj) > 0 { - buf.Truncate(buf.Len() - len(sep)) - } - - return buf.String() -} diff --git a/go/node/audit/v1beta2/audit.pb.go b/go/node/audit/v1beta2/audit.pb.go deleted file mode 100644 index 72d1e653..00000000 --- a/go/node/audit/v1beta2/audit.pb.go +++ /dev/null @@ -1,2079 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/audit/v1beta2/audit.proto - -package v1beta2 - -import ( - context "context" - fmt "fmt" - github_com_akash_network_akash_api_go_node_types_v1beta2 "github.com/akash-network/akash-api/go/node/types/v1beta2" - v1beta2 "github.com/akash-network/akash-api/go/node/types/v1beta2" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Provider stores owner auditor and attributes details -type Provider struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta2.Attributes `protobuf:"bytes,4,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta2.Attributes" json:"attributes" yaml:"attributes"` -} - -func (m *Provider) Reset() { *m = Provider{} } -func (m *Provider) String() string { return proto.CompactTextString(m) } -func (*Provider) ProtoMessage() {} -func (*Provider) Descriptor() ([]byte, []int) { - return fileDescriptor_919e209b8bf6f93c, []int{0} -} -func (m *Provider) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Provider.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Provider) XXX_Merge(src proto.Message) { - xxx_messageInfo_Provider.Merge(m, src) -} -func (m *Provider) XXX_Size() int { - return m.Size() -} -func (m *Provider) XXX_DiscardUnknown() { - xxx_messageInfo_Provider.DiscardUnknown(m) -} - -var xxx_messageInfo_Provider proto.InternalMessageInfo - -func (m *Provider) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *Provider) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *Provider) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta2.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// Attributes -type AuditedAttributes struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta2.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta2.Attributes" json:"attributes" yaml:"attributes"` -} - -func (m *AuditedAttributes) Reset() { *m = AuditedAttributes{} } -func (m *AuditedAttributes) String() string { return proto.CompactTextString(m) } -func (*AuditedAttributes) ProtoMessage() {} -func (*AuditedAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_919e209b8bf6f93c, []int{1} -} -func (m *AuditedAttributes) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuditedAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuditedAttributes.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuditedAttributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuditedAttributes.Merge(m, src) -} -func (m *AuditedAttributes) XXX_Size() int { - return m.Size() -} -func (m *AuditedAttributes) XXX_DiscardUnknown() { - xxx_messageInfo_AuditedAttributes.DiscardUnknown(m) -} - -var xxx_messageInfo_AuditedAttributes proto.InternalMessageInfo - -func (m *AuditedAttributes) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m 
*AuditedAttributes) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *AuditedAttributes) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta2.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// AttributesResponse represents details of deployment along with group details -type AttributesResponse struct { - Attributes []AuditedAttributes `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes" yaml:"attributes"` -} - -func (m *AttributesResponse) Reset() { *m = AttributesResponse{} } -func (m *AttributesResponse) String() string { return proto.CompactTextString(m) } -func (*AttributesResponse) ProtoMessage() {} -func (*AttributesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_919e209b8bf6f93c, []int{2} -} -func (m *AttributesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AttributesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AttributesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AttributesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttributesResponse.Merge(m, src) -} -func (m *AttributesResponse) XXX_Size() int { - return m.Size() -} -func (m *AttributesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AttributesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AttributesResponse proto.InternalMessageInfo - -func (m *AttributesResponse) GetAttributes() []AuditedAttributes { - if m != nil { - return m.Attributes - } - return nil -} - -// AttributesFilters defines filters used to filter deployments -type AttributesFilters struct { - Auditors []string `protobuf:"bytes,1,rep,name=auditors,proto3" json:"auditors" yaml:"auditors"` - Owners []string `protobuf:"bytes,2,rep,name=owners,proto3" json:"owners" yaml:"owners"` -} - -func (m *AttributesFilters) Reset() { *m = AttributesFilters{} } -func (m *AttributesFilters) String() string { return proto.CompactTextString(m) } -func (*AttributesFilters) ProtoMessage() {} -func (*AttributesFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_919e209b8bf6f93c, []int{3} -} -func (m *AttributesFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AttributesFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AttributesFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AttributesFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttributesFilters.Merge(m, src) -} -func (m *AttributesFilters) XXX_Size() int { - return m.Size() -} -func (m *AttributesFilters) XXX_DiscardUnknown() { - xxx_messageInfo_AttributesFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_AttributesFilters proto.InternalMessageInfo - -func (m *AttributesFilters) GetAuditors() []string { - if m != nil { - return m.Auditors - } - return nil -} - -func (m *AttributesFilters) GetOwners() []string { - if m != nil { - return m.Owners - } - return nil -} - -// MsgSignProviderAttributes defines an SDK message for signing a provider attributes -type MsgSignProviderAttributes struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Auditor string 
`protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta2.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta2.Attributes" json:"attributes" yaml:"attributes"` -} - -func (m *MsgSignProviderAttributes) Reset() { *m = MsgSignProviderAttributes{} } -func (m *MsgSignProviderAttributes) String() string { return proto.CompactTextString(m) } -func (*MsgSignProviderAttributes) ProtoMessage() {} -func (*MsgSignProviderAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_919e209b8bf6f93c, []int{4} -} -func (m *MsgSignProviderAttributes) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgSignProviderAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgSignProviderAttributes.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgSignProviderAttributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgSignProviderAttributes.Merge(m, src) -} -func (m *MsgSignProviderAttributes) XXX_Size() int { - return m.Size() -} -func (m *MsgSignProviderAttributes) XXX_DiscardUnknown() { - xxx_messageInfo_MsgSignProviderAttributes.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgSignProviderAttributes proto.InternalMessageInfo - -func (m *MsgSignProviderAttributes) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *MsgSignProviderAttributes) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *MsgSignProviderAttributes) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta2.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. 
-type MsgSignProviderAttributesResponse struct { -} - -func (m *MsgSignProviderAttributesResponse) Reset() { *m = MsgSignProviderAttributesResponse{} } -func (m *MsgSignProviderAttributesResponse) String() string { return proto.CompactTextString(m) } -func (*MsgSignProviderAttributesResponse) ProtoMessage() {} -func (*MsgSignProviderAttributesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_919e209b8bf6f93c, []int{5} -} -func (m *MsgSignProviderAttributesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgSignProviderAttributesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgSignProviderAttributesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgSignProviderAttributesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgSignProviderAttributesResponse.Merge(m, src) -} -func (m *MsgSignProviderAttributesResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgSignProviderAttributesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgSignProviderAttributesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgSignProviderAttributesResponse proto.InternalMessageInfo - -// MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes -type MsgDeleteProviderAttributes struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` - Keys []string `protobuf:"bytes,3,rep,name=keys,proto3" json:"keys" yaml:"keys"` -} - -func (m *MsgDeleteProviderAttributes) Reset() { *m = MsgDeleteProviderAttributes{} } -func (m *MsgDeleteProviderAttributes) String() string { return proto.CompactTextString(m) } -func (*MsgDeleteProviderAttributes) ProtoMessage() {} -func (*MsgDeleteProviderAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_919e209b8bf6f93c, []int{6} -} -func (m *MsgDeleteProviderAttributes) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDeleteProviderAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDeleteProviderAttributes.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDeleteProviderAttributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDeleteProviderAttributes.Merge(m, src) -} -func (m *MsgDeleteProviderAttributes) XXX_Size() int { - return m.Size() -} -func (m *MsgDeleteProviderAttributes) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDeleteProviderAttributes.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDeleteProviderAttributes proto.InternalMessageInfo - -func (m *MsgDeleteProviderAttributes) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *MsgDeleteProviderAttributes) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *MsgDeleteProviderAttributes) GetKeys() []string { - if m != nil { - return m.Keys - } - return nil -} - -// MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. 
-type MsgDeleteProviderAttributesResponse struct { -} - -func (m *MsgDeleteProviderAttributesResponse) Reset() { *m = MsgDeleteProviderAttributesResponse{} } -func (m *MsgDeleteProviderAttributesResponse) String() string { return proto.CompactTextString(m) } -func (*MsgDeleteProviderAttributesResponse) ProtoMessage() {} -func (*MsgDeleteProviderAttributesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_919e209b8bf6f93c, []int{7} -} -func (m *MsgDeleteProviderAttributesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDeleteProviderAttributesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDeleteProviderAttributesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDeleteProviderAttributesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDeleteProviderAttributesResponse.Merge(m, src) -} -func (m *MsgDeleteProviderAttributesResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgDeleteProviderAttributesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDeleteProviderAttributesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDeleteProviderAttributesResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Provider)(nil), "akash.audit.v1beta2.Provider") - proto.RegisterType((*AuditedAttributes)(nil), "akash.audit.v1beta2.AuditedAttributes") - proto.RegisterType((*AttributesResponse)(nil), "akash.audit.v1beta2.AttributesResponse") - proto.RegisterType((*AttributesFilters)(nil), "akash.audit.v1beta2.AttributesFilters") - proto.RegisterType((*MsgSignProviderAttributes)(nil), "akash.audit.v1beta2.MsgSignProviderAttributes") - proto.RegisterType((*MsgSignProviderAttributesResponse)(nil), "akash.audit.v1beta2.MsgSignProviderAttributesResponse") - proto.RegisterType((*MsgDeleteProviderAttributes)(nil), "akash.audit.v1beta2.MsgDeleteProviderAttributes") - proto.RegisterType((*MsgDeleteProviderAttributesResponse)(nil), "akash.audit.v1beta2.MsgDeleteProviderAttributesResponse") -} - -func init() { proto.RegisterFile("akash/audit/v1beta2/audit.proto", fileDescriptor_919e209b8bf6f93c) } - -var fileDescriptor_919e209b8bf6f93c = []byte{ - // 573 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x6f, 0xd3, 0x40, - 0x14, 0xce, 0xb9, 0xa1, 0x24, 0x57, 0x7e, 0x28, 0x06, 0x41, 0x9a, 0xaa, 0xbe, 0x72, 0x15, 0x50, - 0x09, 0x61, 0x43, 0x2a, 0x41, 0x55, 0xa6, 0x1a, 0xc4, 0x16, 0x09, 0x99, 0x8d, 0xcd, 0x21, 0x27, - 0xd7, 0x4a, 0xea, 0x8b, 0x7c, 0x97, 0x56, 0xd9, 0x98, 0x58, 0x81, 0x8d, 0xb1, 0x03, 0x13, 0x12, - 0x33, 0x12, 0xfc, 0x03, 0x1d, 0x3b, 0x32, 0x1d, 0x28, 0x59, 0x90, 0x47, 0xff, 0x05, 0x28, 0x77, - 0xb6, 0xe3, 0xa8, 0x35, 0x6a, 0xa6, 0x32, 0xb0, 0xf9, 0x7d, 0xef, 0x7b, 0xef, 0xbe, 0xf7, 0xf9, - 0x9d, 0x0e, 0x22, 0xb7, 0xeb, 0xb2, 0x5d, 0xcb, 0x1d, 0x74, 0x7c, 0x6e, 0xed, 0x3f, 0x6c, 0x13, - 0xee, 0x36, 0x55, 0x64, 0xf6, 0x43, 0xca, 0xa9, 0x7e, 0x4d, 0x12, 0x4c, 0x05, 0x25, 0x84, 0xc6, - 0x75, 0x8f, 0x7a, 0x54, 0xe6, 0xad, 0xc9, 0x97, 0xa2, 0x36, 0xb0, 0xea, 0xd5, 0x76, 0x19, 0x99, - 0xb6, 0xe2, 0x3c, 0xf4, 0xdb, 0x03, 0x4e, 0x14, 0x07, 0x7f, 0xd2, 0x60, 0xe5, 0x45, 0x48, 0xf7, - 0xfd, 0x0e, 0x09, 0x75, 0x0b, 0x5e, 0xa0, 0x07, 0x01, 0x09, 0xeb, 0x60, 0x0d, 0x6c, 0x54, 0xed, - 0xe5, 0x48, 0x20, 0x05, 0xc4, 0x02, 0x5d, 0x1a, 0xba, 0x7b, 0xbd, 0x6d, 0x2c, 
0x43, 0xec, 0x28, - 0x58, 0x7f, 0x0c, 0x2f, 0x4a, 0x21, 0x34, 0xac, 0x6b, 0xb2, 0x64, 0x35, 0x12, 0x28, 0x85, 0x62, - 0x81, 0xae, 0xa8, 0xa2, 0x04, 0xc0, 0x4e, 0x9a, 0xd2, 0xbf, 0x00, 0x08, 0x33, 0x29, 0xac, 0x5e, - 0x5e, 0x5b, 0xd8, 0x58, 0x6a, 0xae, 0x9a, 0x6a, 0xb6, 0x89, 0xe0, 0x74, 0x34, 0x73, 0x27, 0x65, - 0xd9, 0xc1, 0x91, 0x40, 0xa5, 0x48, 0xa0, 0x5c, 0x61, 0x2c, 0x50, 0x2d, 0x39, 0x22, 0xc3, 0xf0, - 0xe7, 0x9f, 0xe8, 0xa9, 0xe7, 0xf3, 0xdd, 0x41, 0xdb, 0x7c, 0x4d, 0xf7, 0x2c, 0xd9, 0xf3, 0x7e, - 0x40, 0xf8, 0x01, 0x0d, 0xbb, 0x49, 0xe4, 0xf6, 0x7d, 0xcb, 0xa3, 0x56, 0x40, 0x3b, 0xc4, 0xe2, - 0xc3, 0x3e, 0x61, 0xd6, 0x89, 0xe3, 0x98, 0x93, 0x3b, 0x07, 0x7f, 0xd5, 0x60, 0x6d, 0x67, 0xa2, - 0x9d, 0x74, 0xa6, 0x8c, 0xf3, 0xf3, 0x6b, 0xe1, 0x1f, 0xf7, 0x6b, 0xbb, 0xf2, 0xf1, 0x10, 0x81, - 0xdf, 0x87, 0xa8, 0x84, 0xdf, 0x01, 0xa8, 0xe7, 0x48, 0x84, 0xf5, 0x69, 0xc0, 0x88, 0x4e, 0x67, - 0xe6, 0x01, 0x72, 0x9e, 0x3b, 0xe6, 0x29, 0xbb, 0x6d, 0x9e, 0xb0, 0xdd, 0xbe, 0x7b, 0xc6, 0xc1, - 0x8a, 0x15, 0xd5, 0xa6, 0xdd, 0x9e, 0xfb, 0x3d, 0x4e, 0x42, 0xa6, 0x3f, 0x81, 0x95, 0xc4, 0x6c, - 0x25, 0xa7, 0x6a, 0xa3, 0x48, 0xa0, 0x0c, 0x8b, 0x05, 0xba, 0x3a, 0xf3, 0x73, 0x18, 0x76, 0xb2, - 0xa4, 0xbe, 0x09, 0x17, 0xe5, 0x0f, 0x66, 0x75, 0x4d, 0x96, 0xae, 0x44, 0x02, 0x25, 0x48, 0x2c, - 0xd0, 0xe5, 0xdc, 0x2a, 0x30, 0xec, 0x24, 0x89, 0x9c, 0xa2, 0xef, 0x1a, 0x5c, 0x6e, 0x31, 0xef, - 0xa5, 0xef, 0x05, 0xe9, 0x5d, 0xfc, 0xbf, 0x65, 0x67, 0xd8, 0xb2, 0xb2, 0x74, 0x6f, 0x1d, 0xde, - 0x2a, 0x34, 0x2f, 0xdd, 0x37, 0xfc, 0x0d, 0xc0, 0x95, 0x16, 0xf3, 0x9e, 0x91, 0x1e, 0xe1, 0xe4, - 0x5c, 0x4d, 0xbe, 0x07, 0xcb, 0x5d, 0x32, 0x54, 0xee, 0x56, 0xed, 0x9b, 0x91, 0x40, 0x32, 0x8e, - 0x05, 0x5a, 0x52, 0x25, 0x93, 0x08, 0x3b, 0x12, 0x4c, 0x26, 0xbc, 0x0d, 0xd7, 0xff, 0xa2, 0x3d, - 0x9d, 0xb1, 0xf9, 0x41, 0x83, 0x0b, 0x2d, 0xe6, 0xe9, 0x6f, 0x00, 0xbc, 0x51, 0xb0, 0x4b, 0xe6, - 0xa9, 0x57, 0xac, 0xd0, 0xbe, 0xc6, 0xa3, 0xf9, 0xf8, 0xd9, 0xf5, 0x7e, 0x0b, 0x60, 0xbd, 0xd0, - 0xeb, 0x07, 0x45, 0x4d, 0x8b, 0x2a, 0x1a, 0x5b, 0xf3, 0x56, 0xa4, 0x42, 0x6c, 0xe7, 0x68, 0x64, - 0x80, 0xe3, 0x91, 0x01, 0x7e, 0x8d, 0x0c, 0xf0, 0x7e, 0x6c, 0x94, 0x8e, 0xc7, 0x46, 0xe9, 0xc7, - 0xd8, 0x28, 0xbd, 0xda, 0x9a, 0x63, 0x1b, 0x67, 0x9e, 0xe3, 0xf6, 0xa2, 0x7c, 0x3a, 0x37, 0xff, - 0x04, 0x00, 0x00, 0xff, 0xff, 0xe5, 0x1b, 0x0f, 0x69, 0xac, 0x07, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type MsgClient interface { - // SignProviderAttributes defines a method that signs provider attributes - SignProviderAttributes(ctx context.Context, in *MsgSignProviderAttributes, opts ...grpc.CallOption) (*MsgSignProviderAttributesResponse, error) - // DeleteProviderAttributes defines a method that deletes provider attributes - DeleteProviderAttributes(ctx context.Context, in *MsgDeleteProviderAttributes, opts ...grpc.CallOption) (*MsgDeleteProviderAttributesResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) SignProviderAttributes(ctx context.Context, in *MsgSignProviderAttributes, opts ...grpc.CallOption) (*MsgSignProviderAttributesResponse, error) { - out := new(MsgSignProviderAttributesResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta2.Msg/SignProviderAttributes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) DeleteProviderAttributes(ctx context.Context, in *MsgDeleteProviderAttributes, opts ...grpc.CallOption) (*MsgDeleteProviderAttributesResponse, error) { - out := new(MsgDeleteProviderAttributesResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta2.Msg/DeleteProviderAttributes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. -type MsgServer interface { - // SignProviderAttributes defines a method that signs provider attributes - SignProviderAttributes(context.Context, *MsgSignProviderAttributes) (*MsgSignProviderAttributesResponse, error) - // DeleteProviderAttributes defines a method that deletes provider attributes - DeleteProviderAttributes(context.Context, *MsgDeleteProviderAttributes) (*MsgDeleteProviderAttributesResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
-type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) SignProviderAttributes(ctx context.Context, req *MsgSignProviderAttributes) (*MsgSignProviderAttributesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SignProviderAttributes not implemented") -} -func (*UnimplementedMsgServer) DeleteProviderAttributes(ctx context.Context, req *MsgDeleteProviderAttributes) (*MsgDeleteProviderAttributesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteProviderAttributes not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_SignProviderAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgSignProviderAttributes) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).SignProviderAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta2.Msg/SignProviderAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).SignProviderAttributes(ctx, req.(*MsgSignProviderAttributes)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_DeleteProviderAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgDeleteProviderAttributes) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).DeleteProviderAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta2.Msg/DeleteProviderAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).DeleteProviderAttributes(ctx, req.(*MsgDeleteProviderAttributes)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.audit.v1beta2.Msg", - HandlerType: (*MsgServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SignProviderAttributes", - Handler: _Msg_SignProviderAttributes_Handler, - }, - { - MethodName: "DeleteProviderAttributes", - Handler: _Msg_DeleteProviderAttributes_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/audit/v1beta2/audit.proto", -} - -func (m *Provider) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Provider) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Provider) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAudit(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 
0xa - } - return len(dAtA) - i, nil -} - -func (m *AuditedAttributes) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuditedAttributes) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuditedAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAudit(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AttributesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AttributesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AttributesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAudit(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *AttributesFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AttributesFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AttributesFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Owners) > 0 { - for iNdEx := len(m.Owners) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Owners[iNdEx]) - copy(dAtA[i:], m.Owners[iNdEx]) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Owners[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Auditors) > 0 { - for iNdEx := len(m.Auditors) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Auditors[iNdEx]) - copy(dAtA[i:], m.Auditors[iNdEx]) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditors[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *MsgSignProviderAttributes) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgSignProviderAttributes) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgSignProviderAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := 
len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAudit(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgSignProviderAttributesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgSignProviderAttributesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgSignProviderAttributesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgDeleteProviderAttributes) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDeleteProviderAttributes) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDeleteProviderAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Keys) > 0 { - for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Keys[iNdEx]) - copy(dAtA[i:], m.Keys[iNdEx]) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Keys[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgDeleteProviderAttributesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDeleteProviderAttributesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDeleteProviderAttributesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintAudit(dAtA []byte, offset int, v uint64) int { - offset -= sovAudit(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Provider) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *AuditedAttributes) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - 
n += 1 + l + sovAudit(uint64(l)) - } - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *AttributesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *AttributesFilters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Auditors) > 0 { - for _, s := range m.Auditors { - l = len(s) - n += 1 + l + sovAudit(uint64(l)) - } - } - if len(m.Owners) > 0 { - for _, s := range m.Owners { - l = len(s) - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *MsgSignProviderAttributes) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *MsgSignProviderAttributesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgDeleteProviderAttributes) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - if len(m.Keys) > 0 { - for _, s := range m.Keys { - l = len(s) - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *MsgDeleteProviderAttributesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovAudit(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozAudit(x uint64) (n int) { - return sovAudit(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Provider) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Provider: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Provider: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta2.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuditedAttributes) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuditedAttributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuditedAttributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta2.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AttributesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AttributesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AttributesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, AuditedAttributes{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AttributesFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AttributesFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: AttributesFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditors", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditors = append(m.Auditors, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owners", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owners = append(m.Owners, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgSignProviderAttributes) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgSignProviderAttributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgSignProviderAttributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta2.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgSignProviderAttributesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgSignProviderAttributesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgSignProviderAttributesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDeleteProviderAttributes) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDeleteProviderAttributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDeleteProviderAttributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keys = append(m.Keys, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDeleteProviderAttributesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDeleteProviderAttributesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDeleteProviderAttributesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAudit(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAudit - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAudit - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAudit - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthAudit - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupAudit - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthAudit - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthAudit = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAudit = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupAudit = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/audit/v1beta2/codec.go b/go/node/audit/v1beta2/codec.go deleted file mode 100644 index 2969ac8c..00000000 --- a/go/node/audit/v1beta2/codec.go +++ /dev/null @@ -1,43 +0,0 @@ -package v1beta2 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/msgservice" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/provider module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/provider and - // defined at the application level. 
- ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterCodec register concrete types on codec -func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { - cdc.RegisterConcrete(&MsgSignProviderAttributes{}, ModuleName+"/"+MsgTypeSignProviderAttributes, nil) - cdc.RegisterConcrete(&MsgDeleteProviderAttributes{}, ModuleName+"/"+MsgTypeDeleteProviderAttributes, nil) -} - -// RegisterInterfaces registers the x/provider interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil), - &MsgSignProviderAttributes{}, - &MsgDeleteProviderAttributes{}, - ) - - msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) -} diff --git a/go/node/audit/v1beta2/errors.go b/go/node/audit/v1beta2/errors.go deleted file mode 100644 index 16357253..00000000 --- a/go/node/audit/v1beta2/errors.go +++ /dev/null @@ -1,16 +0,0 @@ -package v1beta2 - -import ( - "errors" -) - -var ( - // ErrProviderNotFound provider not found - ErrProviderNotFound = errors.New("invalid provider: address not found") - - // ErrInvalidAddress invalid trusted auditor address - ErrInvalidAddress = errors.New("invalid address") - - // ErrAttributeNotFound invalid trusted auditor address - ErrAttributeNotFound = errors.New("attribute not found") -) diff --git a/go/node/audit/v1beta2/event.go b/go/node/audit/v1beta2/event.go deleted file mode 100644 index b24795c7..00000000 --- a/go/node/audit/v1beta2/event.go +++ /dev/null @@ -1,118 +0,0 @@ -package v1beta2 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/akash-network/akash-api/go/sdkutil" -) - -const ( - evActionTrustedAuditorCreated = "audit-trusted-auditor-created" - evActionTrustedAuditorDeleted = "audit-trusted-auditor-deleted" - evOwnerKey = "owner" - evAuditorKey = "auditor" -) - -// EventTrustedAuditorCreated struct -type EventTrustedAuditorCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - Owner sdk.Address `json:"owner"` - Auditor sdk.Address `json:"auditor"` -} - -func NewEventTrustedAuditorCreated(owner sdk.Address, auditor sdk.Address) EventTrustedAuditorCreated { - return EventTrustedAuditorCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionTrustedAuditorCreated, - }, - Owner: owner, - Auditor: auditor, - } -} - -// ToSDKEvent method creates new sdk event for EventProviderCreated struct -func (ev EventTrustedAuditorCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionTrustedAuditorCreated), - }, TrustedAuditorEVAttributes(ev.Owner, ev.Auditor)...)..., - ) -} - -// TrustedAuditorEVAttributes returns event attributes for given Provider -func TrustedAuditorEVAttributes(owner sdk.Address, auditor sdk.Address) []sdk.Attribute { - return []sdk.Attribute{ - sdk.NewAttribute(evOwnerKey, owner.String()), - sdk.NewAttribute(evAuditorKey, auditor.String()), - } -} - -// ParseEVTTrustedAuditor returns provider details for given event attributes -func ParseEVTTrustedAuditor(attrs []sdk.Attribute) (sdk.Address, sdk.Address, error) { - owner, err := sdkutil.GetAccAddress(attrs, evOwnerKey) - if err != nil { - return nil, nil, err - } - - auditor, err := sdkutil.GetAccAddress(attrs, evAuditorKey) - if err != nil { - return nil, 
nil, err - } - - return owner, auditor, nil -} - -type EventTrustedAuditorDeleted struct { - Context sdkutil.BaseModuleEvent `json:"context"` - Owner sdk.Address `json:"owner"` - Auditor sdk.Address `json:"auditor"` -} - -func NewEventTrustedAuditorDeleted(owner sdk.Address, auditor sdk.Address) EventTrustedAuditorDeleted { - return EventTrustedAuditorDeleted{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionTrustedAuditorDeleted, - }, - Owner: owner, - Auditor: auditor, - } -} - -// ToSDKEvent method creates new sdk event for EventProviderCreated struct -func (ev EventTrustedAuditorDeleted) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionTrustedAuditorDeleted), - }, TrustedAuditorEVAttributes(ev.Owner, ev.Auditor)...)..., - ) -} - -// ParseEvent parses event and returns details of event and error if occurred -func ParseEvent(ev sdkutil.Event) (sdkutil.ModuleEvent, error) { - if ev.Type != sdkutil.EventTypeMessage { - return nil, sdkutil.ErrUnknownType - } - if ev.Module != ModuleName { - return nil, sdkutil.ErrUnknownModule - } - switch ev.Action { - case evActionTrustedAuditorCreated: - owner, auditor, err := ParseEVTTrustedAuditor(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventTrustedAuditorCreated(owner, auditor), nil - case evActionTrustedAuditorDeleted: - owner, auditor, err := ParseEVTTrustedAuditor(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventTrustedAuditorDeleted(owner, auditor), nil - default: - return nil, sdkutil.ErrUnknownAction - } -} diff --git a/go/node/audit/v1beta2/genesis.pb.go b/go/node/audit/v1beta2/genesis.pb.go deleted file mode 100644 index a5af778f..00000000 --- a/go/node/audit/v1beta2/genesis.pb.go +++ /dev/null @@ -1,332 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/audit/v1beta2/genesis.proto - -package v1beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisState defines the basic genesis state used by audit module -type GenesisState struct { - Attributes []AuditedAttributes `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes" yaml:"attributes"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_5e38933397b76ee4, []int{0} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetAttributes() []AuditedAttributes { - if m != nil { - return m.Attributes - } - return nil -} - -func init() { - proto.RegisterType((*GenesisState)(nil), "akash.audit.v1beta2.GenesisState") -} - -func init() { proto.RegisterFile("akash/audit/v1beta2/genesis.proto", fileDescriptor_5e38933397b76ee4) } - -var fileDescriptor_5e38933397b76ee4 = []byte{ - // 235 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x2c, 0x4d, 0xc9, 0x2c, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd2, - 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x06, - 0x2b, 0xd1, 0x03, 0x2b, 0xd1, 0x83, 0x2a, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xcb, 0xeb, - 0x83, 0x58, 0x10, 0xa5, 0x52, 0xf2, 0xd8, 0x4c, 0x83, 0x68, 0x04, 0x2b, 0x50, 0xaa, 0xe7, 0xe2, - 0x71, 0x87, 0x18, 0x1e, 0x5c, 0x92, 0x58, 0x92, 0x2a, 0x94, 0xcf, 0xc5, 0x95, 0x58, 0x52, 0x52, - 0x94, 0x99, 0x54, 0x5a, 0x92, 0x5a, 0x2c, 0xc1, 0xa8, 0xc0, 0xac, 0xc1, 0x6d, 0xa4, 0xa6, 0x87, - 0xc5, 0x42, 0x3d, 0x47, 0x10, 0x2f, 0x35, 0xc5, 0x11, 0xae, 0xda, 0x49, 0xfd, 0xc4, 0x3d, 0x79, - 0x86, 0x57, 0xf7, 0xe4, 0x91, 0x4c, 0xf8, 0x74, 0x4f, 0x5e, 0xb0, 0x32, 0x31, 0x37, 0xc7, 0x4a, - 0x09, 0x21, 0xa6, 0x14, 0x84, 0xa4, 0xc0, 0x29, 0xe8, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, - 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, - 0xe5, 0x18, 0xa2, 0x2c, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xc1, - 0x0e, 0xd0, 0xcd, 0x4b, 0x2d, 0x29, 0xcf, 0x2f, 0xca, 0x86, 0xf2, 0x12, 0x0b, 0x32, 0xf5, 0xd3, - 0xf3, 0xf5, 0xf3, 0xf2, 0x53, 0x52, 0x51, 0x3d, 0x98, 0xc4, 0x06, 0xf6, 0x9b, 0x31, 0x20, 0x00, - 0x00, 0xff, 0xff, 0x00, 0xd9, 0x21, 0x07, 0x4c, 0x01, 0x00, 0x00, -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, AuditedAttributes{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift 
:= uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/audit/v1beta2/key.go b/go/node/audit/v1beta2/key.go deleted file mode 100644 index 43cbb76e..00000000 --- a/go/node/audit/v1beta2/key.go +++ /dev/null @@ -1,16 +0,0 @@ -package v1beta2 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "audit" - - // StoreKey is the store key string for provider - StoreKey = ModuleName - - // RouterKey is the message route for provider - RouterKey = ModuleName -) - -func PrefixProviderID() []byte { - return []byte{0x01} -} diff --git a/go/node/audit/v1beta2/msgs.go b/go/node/audit/v1beta2/msgs.go deleted file mode 100644 index 2940f9e3..00000000 --- a/go/node/audit/v1beta2/msgs.go +++ /dev/null @@ -1,94 +0,0 @@ -package v1beta2 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - MsgTypeSignProviderAttributes = "audit-sign-provider-attributes" - MsgTypeDeleteProviderAttributes = "audit-delete-provider-attributes" -) - -var ( - _ sdk.Msg = &MsgSignProviderAttributes{} - _ sdk.Msg = &MsgDeleteProviderAttributes{} -) - -// ====MsgSignProviderAttributes==== -// Route implements the sdk.Msg interface -func (m MsgSignProviderAttributes) Route() string { - return RouterKey -} - -// Type implements the sdk.Msg interface -func (m MsgSignProviderAttributes) Type() string { - return MsgTypeSignProviderAttributes -} - -// ValidateBasic does basic validation -func (m MsgSignProviderAttributes) ValidateBasic() error { - if _, err := sdk.AccAddressFromBech32(m.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Owner Address") - } - - if _, err := sdk.AccAddressFromBech32(m.Auditor); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Auditor Address") - } - - return nil -} - -// GetSignBytes encodes the message for signing -func (m MsgSignProviderAttributes) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&m)) -} - -// GetSigners defines whose signature is required -func (m MsgSignProviderAttributes) GetSigners() []sdk.AccAddress { - auditor, err := sdk.AccAddressFromBech32(m.Auditor) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{auditor} -} - -// ====MsgRevokeProviderAttributes==== -// Route implements the sdk.Msg interface -func (m MsgDeleteProviderAttributes) Route() string { - return RouterKey -} - -// Type implements the sdk.Msg interface -func (m MsgDeleteProviderAttributes) Type() string { - return MsgTypeDeleteProviderAttributes -} - -// ValidateBasic does basic validation -func (m MsgDeleteProviderAttributes) ValidateBasic() 
error { - if _, err := sdk.AccAddressFromBech32(m.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Owner Address") - } - - if _, err := sdk.AccAddressFromBech32(m.Auditor); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Auditor Address") - } - - return nil -} - -// GetSignBytes encodes the message for signing -func (m MsgDeleteProviderAttributes) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&m)) -} - -// GetSigners defines whose signature is required -func (m MsgDeleteProviderAttributes) GetSigners() []sdk.AccAddress { - auditor, err := sdk.AccAddressFromBech32(m.Auditor) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{auditor} -} diff --git a/go/node/audit/v1beta2/query.pb.go b/go/node/audit/v1beta2/query.pb.go deleted file mode 100644 index c9352b7c..00000000 --- a/go/node/audit/v1beta2/query.pb.go +++ /dev/null @@ -1,1718 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/audit/v1beta2/query.proto - -package v1beta2 - -import ( - context "context" - fmt "fmt" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryProvidersResponse is response type for the Query/Providers RPC method -type QueryProvidersResponse struct { - Providers Providers `protobuf:"bytes,1,rep,name=providers,proto3,castrepeated=Providers" json:"providers"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryProvidersResponse) Reset() { *m = QueryProvidersResponse{} } -func (m *QueryProvidersResponse) String() string { return proto.CompactTextString(m) } -func (*QueryProvidersResponse) ProtoMessage() {} -func (*QueryProvidersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_27fc582ee70c7c99, []int{0} -} -func (m *QueryProvidersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProvidersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProvidersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProvidersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProvidersResponse.Merge(m, src) -} -func (m *QueryProvidersResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryProvidersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProvidersResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProvidersResponse proto.InternalMessageInfo - -func (m *QueryProvidersResponse) GetProviders() Providers { - if m != nil { - return m.Providers - } - return nil -} - -func (m *QueryProvidersResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProviderRequest is request type for the Query/Provider RPC method -type QueryProviderRequest struct { - Auditor string `protobuf:"bytes,1,opt,name=auditor,proto3" json:"auditor,omitempty"` - Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` -} - -func (m *QueryProviderRequest) Reset() { *m = QueryProviderRequest{} } -func (m *QueryProviderRequest) String() string { return proto.CompactTextString(m) } -func (*QueryProviderRequest) ProtoMessage() {} -func (*QueryProviderRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_27fc582ee70c7c99, []int{1} -} -func (m *QueryProviderRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProviderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProviderRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProviderRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProviderRequest.Merge(m, src) -} -func (m *QueryProviderRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryProviderRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProviderRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProviderRequest proto.InternalMessageInfo - -func (m *QueryProviderRequest) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *QueryProviderRequest) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -// QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method -type QueryAllProvidersAttributesRequest 
struct { - Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryAllProvidersAttributesRequest) Reset() { *m = QueryAllProvidersAttributesRequest{} } -func (m *QueryAllProvidersAttributesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryAllProvidersAttributesRequest) ProtoMessage() {} -func (*QueryAllProvidersAttributesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_27fc582ee70c7c99, []int{2} -} -func (m *QueryAllProvidersAttributesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryAllProvidersAttributesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryAllProvidersAttributesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryAllProvidersAttributesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryAllProvidersAttributesRequest.Merge(m, src) -} -func (m *QueryAllProvidersAttributesRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryAllProvidersAttributesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryAllProvidersAttributesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryAllProvidersAttributesRequest proto.InternalMessageInfo - -func (m *QueryAllProvidersAttributesRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProviderAttributesRequest is request type for the Query/Provider RPC method -type QueryProviderAttributesRequest struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner,omitempty"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryProviderAttributesRequest) Reset() { *m = QueryProviderAttributesRequest{} } -func (m *QueryProviderAttributesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryProviderAttributesRequest) ProtoMessage() {} -func (*QueryProviderAttributesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_27fc582ee70c7c99, []int{3} -} -func (m *QueryProviderAttributesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProviderAttributesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProviderAttributesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProviderAttributesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProviderAttributesRequest.Merge(m, src) -} -func (m *QueryProviderAttributesRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryProviderAttributesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProviderAttributesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProviderAttributesRequest proto.InternalMessageInfo - -func (m *QueryProviderAttributesRequest) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *QueryProviderAttributesRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProviderAuditorRequest is request type for the Query/Providers RPC method -type QueryProviderAuditorRequest struct { - Auditor string 
`protobuf:"bytes,1,opt,name=auditor,proto3" json:"auditor,omitempty"` - Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` -} - -func (m *QueryProviderAuditorRequest) Reset() { *m = QueryProviderAuditorRequest{} } -func (m *QueryProviderAuditorRequest) String() string { return proto.CompactTextString(m) } -func (*QueryProviderAuditorRequest) ProtoMessage() {} -func (*QueryProviderAuditorRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_27fc582ee70c7c99, []int{4} -} -func (m *QueryProviderAuditorRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProviderAuditorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProviderAuditorRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProviderAuditorRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProviderAuditorRequest.Merge(m, src) -} -func (m *QueryProviderAuditorRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryProviderAuditorRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProviderAuditorRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProviderAuditorRequest proto.InternalMessageInfo - -func (m *QueryProviderAuditorRequest) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *QueryProviderAuditorRequest) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -// QueryAuditorAttributesRequest is request type for the Query/Providers RPC method -type QueryAuditorAttributesRequest struct { - Auditor string `protobuf:"bytes,1,opt,name=auditor,proto3" json:"auditor,omitempty"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryAuditorAttributesRequest) Reset() { *m = QueryAuditorAttributesRequest{} } -func (m *QueryAuditorAttributesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryAuditorAttributesRequest) ProtoMessage() {} -func (*QueryAuditorAttributesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_27fc582ee70c7c99, []int{5} -} -func (m *QueryAuditorAttributesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryAuditorAttributesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryAuditorAttributesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryAuditorAttributesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryAuditorAttributesRequest.Merge(m, src) -} -func (m *QueryAuditorAttributesRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryAuditorAttributesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryAuditorAttributesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryAuditorAttributesRequest proto.InternalMessageInfo - -func (m *QueryAuditorAttributesRequest) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *QueryAuditorAttributesRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -func init() { - proto.RegisterType((*QueryProvidersResponse)(nil), "akash.audit.v1beta2.QueryProvidersResponse") - 
proto.RegisterType((*QueryProviderRequest)(nil), "akash.audit.v1beta2.QueryProviderRequest") - proto.RegisterType((*QueryAllProvidersAttributesRequest)(nil), "akash.audit.v1beta2.QueryAllProvidersAttributesRequest") - proto.RegisterType((*QueryProviderAttributesRequest)(nil), "akash.audit.v1beta2.QueryProviderAttributesRequest") - proto.RegisterType((*QueryProviderAuditorRequest)(nil), "akash.audit.v1beta2.QueryProviderAuditorRequest") - proto.RegisterType((*QueryAuditorAttributesRequest)(nil), "akash.audit.v1beta2.QueryAuditorAttributesRequest") -} - -func init() { proto.RegisterFile("akash/audit/v1beta2/query.proto", fileDescriptor_27fc582ee70c7c99) } - -var fileDescriptor_27fc582ee70c7c99 = []byte{ - // 566 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x95, 0x41, 0x6f, 0xd3, 0x30, - 0x14, 0xc7, 0xeb, 0xa1, 0x82, 0xea, 0x9d, 0x66, 0xaa, 0xa9, 0x14, 0x96, 0x56, 0x3d, 0x40, 0x35, - 0xc0, 0xde, 0x32, 0xc4, 0x80, 0x0b, 0xda, 0x0e, 0xe3, 0x04, 0x1a, 0x39, 0x72, 0x73, 0x57, 0x2b, - 0x8b, 0x96, 0xe5, 0x65, 0xb1, 0xb3, 0x09, 0xa1, 0x21, 0xc1, 0x27, 0x40, 0xe2, 0x5b, 0x20, 0x2e, - 0x70, 0xe5, 0x0b, 0xec, 0x38, 0x89, 0x0b, 0xa7, 0x81, 0x5a, 0x3e, 0x08, 0x8a, 0x9d, 0xb4, 0xcd, - 0xd2, 0xd2, 0x6e, 0x70, 0xab, 0xeb, 0xff, 0x7b, 0xff, 0xdf, 0x7b, 0x7e, 0x76, 0x70, 0x83, 0xef, - 0x71, 0xb9, 0xcb, 0x78, 0xdc, 0xf5, 0x14, 0x3b, 0x5c, 0xed, 0x08, 0xc5, 0x6d, 0x76, 0x10, 0x8b, - 0xe8, 0x35, 0x0d, 0x23, 0x50, 0x40, 0xae, 0x6b, 0x01, 0xd5, 0x02, 0x9a, 0x0a, 0xea, 0x55, 0x17, - 0x5c, 0xd0, 0xfb, 0x2c, 0xf9, 0x65, 0xa4, 0xf5, 0x5b, 0x2e, 0x80, 0xeb, 0x0b, 0xc6, 0x43, 0x8f, - 0xf1, 0x20, 0x00, 0xc5, 0x95, 0x07, 0x81, 0x4c, 0x77, 0x97, 0x77, 0x40, 0xee, 0x83, 0x64, 0x1d, - 0x2e, 0x85, 0x71, 0x48, 0xfd, 0x56, 0x59, 0xc8, 0x5d, 0x2f, 0xd0, 0xe2, 0x54, 0x3b, 0x96, 0xca, - 0x20, 0x68, 0x41, 0xeb, 0x0b, 0xc2, 0x8b, 0x2f, 0x93, 0x1c, 0xdb, 0x11, 0x1c, 0x7a, 0x5d, 0x11, - 0x49, 0x47, 0xc8, 0x10, 0x02, 0x29, 0xc8, 0x0b, 0x5c, 0x09, 0xb3, 0x3f, 0x6b, 0xa8, 0x79, 0xa5, - 0x3d, 0x6f, 0x2f, 0xd1, 0x31, 0x45, 0xd0, 0x2c, 0x74, 0x73, 0xe1, 0xe4, 0xac, 0x51, 0xfa, 0xf4, - 0xb3, 0x51, 0x19, 0x26, 0x1b, 0xa6, 0x20, 0xcf, 0x30, 0x1e, 0xf2, 0xd5, 0xe6, 0x9a, 0xa8, 0x3d, - 0x6f, 0xdf, 0xa1, 0xa6, 0x18, 0x9a, 0x14, 0x43, 0x4d, 0xbb, 0xd2, 0x62, 0xe8, 0x36, 0x77, 0x45, - 0x06, 0xe3, 0x8c, 0x84, 0xb6, 0xb6, 0x70, 0x35, 0x87, 0xec, 0x88, 0x83, 0x58, 0x48, 0x45, 0x6a, - 0xf8, 0x9a, 0x06, 0x83, 0xa8, 0x86, 0x9a, 0xa8, 0x5d, 0x71, 0xb2, 0x25, 0xa9, 0xe2, 0x32, 0x1c, - 0x05, 0x22, 0xd2, 0xae, 0x15, 0xc7, 0x2c, 0x5a, 0x3e, 0x6e, 0xe9, 0x3c, 0x1b, 0xbe, 0x3f, 0x00, - 0xde, 0x50, 0x2a, 0xf2, 0x3a, 0xb1, 0x12, 0x32, 0xcb, 0xba, 0x95, 0xc3, 0x46, 0x1a, 0xfb, 0xf6, - 0x54, 0x6c, 0x1d, 0x9b, 0xa3, 0x7e, 0x8b, 0xad, 0x1c, 0x75, 0xd1, 0x69, 0x40, 0x89, 0x46, 0x28, - 0xcf, 0xf9, 0xcf, 0x5d, 0xda, 0xff, 0x39, 0xbe, 0x99, 0xf7, 0x37, 0xbd, 0xb9, 0x6c, 0xf3, 0xde, - 0x21, 0xbc, 0x64, 0xba, 0x67, 0x64, 0xc5, 0x72, 0x26, 0x67, 0xfc, 0x4f, 0x25, 0xd9, 0x67, 0x65, - 0x5c, 0xd6, 0x0c, 0xe4, 0x2b, 0xc2, 0x8b, 0xe3, 0x8f, 0x91, 0xac, 0x8f, 0x9d, 0xd9, 0xe9, 0x07, - 0x5f, 0xbf, 0x3b, 0x39, 0xb0, 0x70, 0x59, 0x5a, 0xf6, 0xfb, 0xef, 0xbf, 0x3f, 0xce, 0xdd, 0x23, - 0xcb, 0x6c, 0xe2, 0x8d, 0x63, 0x7c, 0x60, 0xc1, 0x7c, 0x4f, 0xaa, 0x04, 0x9a, 0x14, 0xa7, 0x81, - 0xac, 0x4d, 0xf7, 0xfd, 0x47, 0xd8, 0x27, 0x1a, 0xf6, 0x01, 0xb1, 0x67, 0x83, 0x7d, 0xa3, 0x4f, - 0xfc, 0xd8, 0x40, 0x7f, 0x43, 0xf8, 0xc6, 0xb9, 0x11, 0x1a, 0x61, 0x5f, 0x99, 0x81, 0x3d, 0x37, - 0x77, 0x17, 0x03, 0x7f, 0xaa, 0xc1, 0x1f, 0x93, 
0xf5, 0x19, 0xc1, 0xd3, 0x81, 0x3b, 0xce, 0x4a, - 0x20, 0x9f, 0x11, 0x5e, 0x28, 0x52, 0xdb, 0x7f, 0x19, 0x91, 0x09, 0xd3, 0x7d, 0x31, 0xee, 0x87, - 0x9a, 0x7b, 0x85, 0xd0, 0x94, 0x3b, 0x7b, 0x14, 0xf3, 0xe8, 0x10, 0x8d, 0x10, 0x27, 0xcd, 0xde, - 0x74, 0x4e, 0x7a, 0x16, 0x3a, 0xed, 0x59, 0xe8, 0x57, 0xcf, 0x42, 0x1f, 0xfa, 0x56, 0xe9, 0xb4, - 0x6f, 0x95, 0x7e, 0xf4, 0xad, 0xd2, 0xab, 0x47, 0xae, 0xa7, 0x76, 0xe3, 0x0e, 0xdd, 0x81, 0x7d, - 0x93, 0xf3, 0x7e, 0x20, 0xd4, 0x11, 0x44, 0x7b, 0xe9, 0x2a, 0xf9, 0x74, 0xb8, 0xc0, 0x02, 0xe8, - 0x8a, 0x7c, 0x97, 0x3a, 0x57, 0xf5, 0xc3, 0xbf, 0xf6, 0x27, 0x00, 0x00, 0xff, 0xff, 0xa4, 0xe5, - 0xcf, 0xfd, 0xb1, 0x06, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // AllProvidersAttributes queries all providers - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - AllProvidersAttributes(ctx context.Context, in *QueryAllProvidersAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) - // ProviderAttributes queries all provider signed attributes - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - ProviderAttributes(ctx context.Context, in *QueryProviderAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) - // ProviderAuditorAttributes queries provider signed attributes by specific auditor - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - ProviderAuditorAttributes(ctx context.Context, in *QueryProviderAuditorRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) - // AuditorAttributes queries all providers signed by this auditor - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - AuditorAttributes(ctx context.Context, in *QueryAuditorAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) AllProvidersAttributes(ctx context.Context, in *QueryAllProvidersAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { - out := new(QueryProvidersResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta2.Query/AllProvidersAttributes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) ProviderAttributes(ctx context.Context, in *QueryProviderAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { - out := new(QueryProvidersResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta2.Query/ProviderAttributes", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) ProviderAuditorAttributes(ctx context.Context, in *QueryProviderAuditorRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { - out := new(QueryProvidersResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta2.Query/ProviderAuditorAttributes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) AuditorAttributes(ctx context.Context, in *QueryAuditorAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { - out := new(QueryProvidersResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta2.Query/AuditorAttributes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // AllProvidersAttributes queries all providers - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - AllProvidersAttributes(context.Context, *QueryAllProvidersAttributesRequest) (*QueryProvidersResponse, error) - // ProviderAttributes queries all provider signed attributes - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - ProviderAttributes(context.Context, *QueryProviderAttributesRequest) (*QueryProvidersResponse, error) - // ProviderAuditorAttributes queries provider signed attributes by specific auditor - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - ProviderAuditorAttributes(context.Context, *QueryProviderAuditorRequest) (*QueryProvidersResponse, error) - // AuditorAttributes queries all providers signed by this auditor - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - AuditorAttributes(context.Context, *QueryAuditorAttributesRequest) (*QueryProvidersResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
-type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) AllProvidersAttributes(ctx context.Context, req *QueryAllProvidersAttributesRequest) (*QueryProvidersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AllProvidersAttributes not implemented") -} -func (*UnimplementedQueryServer) ProviderAttributes(ctx context.Context, req *QueryProviderAttributesRequest) (*QueryProvidersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ProviderAttributes not implemented") -} -func (*UnimplementedQueryServer) ProviderAuditorAttributes(ctx context.Context, req *QueryProviderAuditorRequest) (*QueryProvidersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ProviderAuditorAttributes not implemented") -} -func (*UnimplementedQueryServer) AuditorAttributes(ctx context.Context, req *QueryAuditorAttributesRequest) (*QueryProvidersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AuditorAttributes not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_AllProvidersAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryAllProvidersAttributesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).AllProvidersAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta2.Query/AllProvidersAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).AllProvidersAttributes(ctx, req.(*QueryAllProvidersAttributesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_ProviderAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryProviderAttributesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).ProviderAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta2.Query/ProviderAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).ProviderAttributes(ctx, req.(*QueryProviderAttributesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_ProviderAuditorAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryProviderAuditorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).ProviderAuditorAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta2.Query/ProviderAuditorAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).ProviderAuditorAttributes(ctx, req.(*QueryProviderAuditorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_AuditorAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryAuditorAttributesRequest) - if err := dec(in); err != nil { - return nil, err - } - if 
interceptor == nil { - return srv.(QueryServer).AuditorAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta2.Query/AuditorAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).AuditorAttributes(ctx, req.(*QueryAuditorAttributesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.audit.v1beta2.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "AllProvidersAttributes", - Handler: _Query_AllProvidersAttributes_Handler, - }, - { - MethodName: "ProviderAttributes", - Handler: _Query_ProviderAttributes_Handler, - }, - { - MethodName: "ProviderAuditorAttributes", - Handler: _Query_ProviderAuditorAttributes_Handler, - }, - { - MethodName: "AuditorAttributes", - Handler: _Query_AuditorAttributes_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/audit/v1beta2/query.proto", -} - -func (m *QueryProvidersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProvidersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProvidersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Providers) > 0 { - for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryProviderRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProviderRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProviderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x12 - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryAllProvidersAttributesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryAllProvidersAttributesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryAllProvidersAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - 
if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryProviderAttributesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProviderAttributesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProviderAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryProviderAuditorRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProviderAuditorRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProviderAuditorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x12 - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryAuditorAttributesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryAuditorAttributesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryAuditorAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryProvidersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Providers) > 0 { - for _, e := range m.Providers { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryProviderRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = 
len(m.Auditor) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryAllProvidersAttributesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryProviderAttributesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryProviderAuditorRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryAuditorAttributesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryProvidersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProvidersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProvidersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Providers = append(m.Providers, Provider{}) - if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryProviderRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProviderRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProviderRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryAllProvidersAttributesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryAllProvidersAttributesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryAllProvidersAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryProviderAttributesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProviderAttributesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProviderAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*QueryProviderAuditorRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProviderAuditorRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProviderAuditorRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryAuditorAttributesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryAuditorAttributesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryAuditorAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if 
intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/audit/v1beta2/query.pb.gw.go b/go/node/audit/v1beta2/query.pb.gw.go deleted file mode 100644 index 01a89373..00000000 --- a/go/node/audit/v1beta2/query.pb.gw.go +++ /dev/null @@ -1,532 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/audit/v1beta2/query.proto - -/* -Package v1beta2 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package v1beta2 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_AllProvidersAttributes_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_AllProvidersAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAllProvidersAttributesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AllProvidersAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.AllProvidersAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_AllProvidersAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAllProvidersAttributesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AllProvidersAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.AllProvidersAttributes(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_ProviderAttributes_0 = &utilities.DoubleArray{Encoding: map[string]int{"owner": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} -) - -func request_Query_ProviderAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProviderAttributesRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["owner"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") - } - - protoReq.Owner, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ProviderAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ProviderAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - 
return msg, metadata, err - -} - -func local_request_Query_ProviderAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProviderAttributesRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["owner"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") - } - - protoReq.Owner, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ProviderAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ProviderAttributes(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Query_ProviderAuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProviderAuditorRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["auditor"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") - } - - protoReq.Auditor, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) - } - - val, ok = pathParams["owner"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") - } - - protoReq.Owner, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) - } - - msg, err := client.ProviderAuditorAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_ProviderAuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProviderAuditorRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["auditor"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") - } - - protoReq.Auditor, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) - } - - val, ok = pathParams["owner"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") - } - - protoReq.Owner, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) - } - - msg, err := server.ProviderAuditorAttributes(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_AuditorAttributes_0 = &utilities.DoubleArray{Encoding: map[string]int{"auditor": 
0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} -) - -func request_Query_AuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAuditorAttributesRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["auditor"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") - } - - protoReq.Auditor, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AuditorAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.AuditorAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_AuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAuditorAttributesRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["auditor"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") - } - - protoReq.Auditor, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AuditorAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.AuditorAttributes(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
-func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_AllProvidersAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_AllProvidersAttributes_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_AllProvidersAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ProviderAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_ProviderAttributes_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ProviderAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ProviderAuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_ProviderAuditorAttributes_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ProviderAuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_AuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_AuditorAttributes_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_AuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_AllProvidersAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_AllProvidersAttributes_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_AllProvidersAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_ProviderAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_ProviderAttributes_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ProviderAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ProviderAuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_ProviderAuditorAttributes_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ProviderAuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_AuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_AuditorAttributes_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_AuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Query_AllProvidersAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 1, 2, 3, 2, 4}, []string{"akash", "audit", "v1beta2", "attributes", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_ProviderAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 1, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"akash", "audit", "v1beta2", "attributes", "owner", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_ProviderAuditorAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 1, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5}, []string{"akash", "audit", "v1beta2", "attributes", "auditor", "owner"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_AuditorAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"akash", "provider", "v1beta2", "auditor", "list"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_AllProvidersAttributes_0 = runtime.ForwardResponseMessage - - forward_Query_ProviderAttributes_0 = runtime.ForwardResponseMessage - - forward_Query_ProviderAuditorAttributes_0 = runtime.ForwardResponseMessage - - forward_Query_AuditorAttributes_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/audit/v1beta2/types.go b/go/node/audit/v1beta2/types.go deleted file mode 100644 index 698e8605..00000000 --- a/go/node/audit/v1beta2/types.go +++ /dev/null @@ -1,33 +0,0 @@ -package v1beta2 - -import ( - "bytes" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -type ProviderID struct { - Owner sdk.Address - Auditor sdk.Address -} - -// Providers is the collection of Provider -type Providers []Provider - -// String implements the Stringer interface for a Providers object. -func (obj Providers) String() string { - var buf bytes.Buffer - - const sep = "\n\n" - - for _, p := range obj { - buf.WriteString(p.String()) - buf.WriteString(sep) - } - - if len(obj) > 0 { - buf.Truncate(buf.Len() - len(sep)) - } - - return buf.String() -} diff --git a/go/node/audit/v1beta3/audit.pb.go b/go/node/audit/v1beta3/audit.pb.go deleted file mode 100644 index 21995801..00000000 --- a/go/node/audit/v1beta3/audit.pb.go +++ /dev/null @@ -1,2079 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/audit/v1beta3/audit.proto - -package v1beta3 - -import ( - context "context" - fmt "fmt" - github_com_akash_network_akash_api_go_node_types_v1beta3 "github.com/akash-network/akash-api/go/node/types/v1beta3" - v1beta3 "github.com/akash-network/akash-api/go/node/types/v1beta3" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Provider stores owner auditor and attributes details -type Provider struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta3.Attributes `protobuf:"bytes,4,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta3.Attributes" json:"attributes" yaml:"attributes"` -} - -func (m *Provider) Reset() { *m = Provider{} } -func (m *Provider) String() string { return proto.CompactTextString(m) } -func (*Provider) ProtoMessage() {} -func (*Provider) Descriptor() ([]byte, []int) { - return fileDescriptor_56975f549b197e6e, []int{0} -} -func (m *Provider) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Provider.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Provider) XXX_Merge(src proto.Message) { - xxx_messageInfo_Provider.Merge(m, src) -} -func (m *Provider) XXX_Size() int { - return m.Size() -} -func (m *Provider) XXX_DiscardUnknown() { - xxx_messageInfo_Provider.DiscardUnknown(m) -} - -var xxx_messageInfo_Provider proto.InternalMessageInfo - -func (m *Provider) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *Provider) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *Provider) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta3.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// Attributes -type AuditedAttributes struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta3.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta3.Attributes" json:"attributes" yaml:"attributes"` -} - -func (m *AuditedAttributes) Reset() { *m = AuditedAttributes{} } -func (m *AuditedAttributes) String() string { return proto.CompactTextString(m) } -func (*AuditedAttributes) ProtoMessage() {} -func (*AuditedAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_56975f549b197e6e, []int{1} -} -func (m *AuditedAttributes) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuditedAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AuditedAttributes.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AuditedAttributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuditedAttributes.Merge(m, src) -} -func (m *AuditedAttributes) XXX_Size() int { - return m.Size() -} -func (m *AuditedAttributes) XXX_DiscardUnknown() { - xxx_messageInfo_AuditedAttributes.DiscardUnknown(m) -} - -var xxx_messageInfo_AuditedAttributes proto.InternalMessageInfo - -func (m *AuditedAttributes) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m 
*AuditedAttributes) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *AuditedAttributes) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta3.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// AttributesResponse represents details of deployment along with group details -type AttributesResponse struct { - Attributes []AuditedAttributes `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes" yaml:"attributes"` -} - -func (m *AttributesResponse) Reset() { *m = AttributesResponse{} } -func (m *AttributesResponse) String() string { return proto.CompactTextString(m) } -func (*AttributesResponse) ProtoMessage() {} -func (*AttributesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_56975f549b197e6e, []int{2} -} -func (m *AttributesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AttributesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AttributesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AttributesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttributesResponse.Merge(m, src) -} -func (m *AttributesResponse) XXX_Size() int { - return m.Size() -} -func (m *AttributesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_AttributesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_AttributesResponse proto.InternalMessageInfo - -func (m *AttributesResponse) GetAttributes() []AuditedAttributes { - if m != nil { - return m.Attributes - } - return nil -} - -// AttributesFilters defines filters used to filter deployments -type AttributesFilters struct { - Auditors []string `protobuf:"bytes,1,rep,name=auditors,proto3" json:"auditors" yaml:"auditors"` - Owners []string `protobuf:"bytes,2,rep,name=owners,proto3" json:"owners" yaml:"owners"` -} - -func (m *AttributesFilters) Reset() { *m = AttributesFilters{} } -func (m *AttributesFilters) String() string { return proto.CompactTextString(m) } -func (*AttributesFilters) ProtoMessage() {} -func (*AttributesFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_56975f549b197e6e, []int{3} -} -func (m *AttributesFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AttributesFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AttributesFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AttributesFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttributesFilters.Merge(m, src) -} -func (m *AttributesFilters) XXX_Size() int { - return m.Size() -} -func (m *AttributesFilters) XXX_DiscardUnknown() { - xxx_messageInfo_AttributesFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_AttributesFilters proto.InternalMessageInfo - -func (m *AttributesFilters) GetAuditors() []string { - if m != nil { - return m.Auditors - } - return nil -} - -func (m *AttributesFilters) GetOwners() []string { - if m != nil { - return m.Owners - } - return nil -} - -// MsgSignProviderAttributes defines an SDK message for signing a provider attributes -type MsgSignProviderAttributes struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Auditor string 
`protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta3.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta3.Attributes" json:"attributes" yaml:"attributes"` -} - -func (m *MsgSignProviderAttributes) Reset() { *m = MsgSignProviderAttributes{} } -func (m *MsgSignProviderAttributes) String() string { return proto.CompactTextString(m) } -func (*MsgSignProviderAttributes) ProtoMessage() {} -func (*MsgSignProviderAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_56975f549b197e6e, []int{4} -} -func (m *MsgSignProviderAttributes) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgSignProviderAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgSignProviderAttributes.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgSignProviderAttributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgSignProviderAttributes.Merge(m, src) -} -func (m *MsgSignProviderAttributes) XXX_Size() int { - return m.Size() -} -func (m *MsgSignProviderAttributes) XXX_DiscardUnknown() { - xxx_messageInfo_MsgSignProviderAttributes.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgSignProviderAttributes proto.InternalMessageInfo - -func (m *MsgSignProviderAttributes) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *MsgSignProviderAttributes) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *MsgSignProviderAttributes) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta3.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. 
-type MsgSignProviderAttributesResponse struct { -} - -func (m *MsgSignProviderAttributesResponse) Reset() { *m = MsgSignProviderAttributesResponse{} } -func (m *MsgSignProviderAttributesResponse) String() string { return proto.CompactTextString(m) } -func (*MsgSignProviderAttributesResponse) ProtoMessage() {} -func (*MsgSignProviderAttributesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_56975f549b197e6e, []int{5} -} -func (m *MsgSignProviderAttributesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgSignProviderAttributesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgSignProviderAttributesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgSignProviderAttributesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgSignProviderAttributesResponse.Merge(m, src) -} -func (m *MsgSignProviderAttributesResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgSignProviderAttributesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgSignProviderAttributesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgSignProviderAttributesResponse proto.InternalMessageInfo - -// MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes -type MsgDeleteProviderAttributes struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` - Keys []string `protobuf:"bytes,3,rep,name=keys,proto3" json:"keys" yaml:"keys"` -} - -func (m *MsgDeleteProviderAttributes) Reset() { *m = MsgDeleteProviderAttributes{} } -func (m *MsgDeleteProviderAttributes) String() string { return proto.CompactTextString(m) } -func (*MsgDeleteProviderAttributes) ProtoMessage() {} -func (*MsgDeleteProviderAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_56975f549b197e6e, []int{6} -} -func (m *MsgDeleteProviderAttributes) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDeleteProviderAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDeleteProviderAttributes.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDeleteProviderAttributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDeleteProviderAttributes.Merge(m, src) -} -func (m *MsgDeleteProviderAttributes) XXX_Size() int { - return m.Size() -} -func (m *MsgDeleteProviderAttributes) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDeleteProviderAttributes.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDeleteProviderAttributes proto.InternalMessageInfo - -func (m *MsgDeleteProviderAttributes) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *MsgDeleteProviderAttributes) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *MsgDeleteProviderAttributes) GetKeys() []string { - if m != nil { - return m.Keys - } - return nil -} - -// MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. 
-type MsgDeleteProviderAttributesResponse struct { -} - -func (m *MsgDeleteProviderAttributesResponse) Reset() { *m = MsgDeleteProviderAttributesResponse{} } -func (m *MsgDeleteProviderAttributesResponse) String() string { return proto.CompactTextString(m) } -func (*MsgDeleteProviderAttributesResponse) ProtoMessage() {} -func (*MsgDeleteProviderAttributesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_56975f549b197e6e, []int{7} -} -func (m *MsgDeleteProviderAttributesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDeleteProviderAttributesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDeleteProviderAttributesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDeleteProviderAttributesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDeleteProviderAttributesResponse.Merge(m, src) -} -func (m *MsgDeleteProviderAttributesResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgDeleteProviderAttributesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDeleteProviderAttributesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDeleteProviderAttributesResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Provider)(nil), "akash.audit.v1beta3.Provider") - proto.RegisterType((*AuditedAttributes)(nil), "akash.audit.v1beta3.AuditedAttributes") - proto.RegisterType((*AttributesResponse)(nil), "akash.audit.v1beta3.AttributesResponse") - proto.RegisterType((*AttributesFilters)(nil), "akash.audit.v1beta3.AttributesFilters") - proto.RegisterType((*MsgSignProviderAttributes)(nil), "akash.audit.v1beta3.MsgSignProviderAttributes") - proto.RegisterType((*MsgSignProviderAttributesResponse)(nil), "akash.audit.v1beta3.MsgSignProviderAttributesResponse") - proto.RegisterType((*MsgDeleteProviderAttributes)(nil), "akash.audit.v1beta3.MsgDeleteProviderAttributes") - proto.RegisterType((*MsgDeleteProviderAttributesResponse)(nil), "akash.audit.v1beta3.MsgDeleteProviderAttributesResponse") -} - -func init() { proto.RegisterFile("akash/audit/v1beta3/audit.proto", fileDescriptor_56975f549b197e6e) } - -var fileDescriptor_56975f549b197e6e = []byte{ - // 573 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x6f, 0xd3, 0x40, - 0x14, 0xce, 0xb9, 0xa1, 0x24, 0x57, 0x7e, 0x28, 0x06, 0x41, 0x9a, 0xaa, 0xbe, 0x72, 0x15, 0x50, - 0x09, 0x61, 0x43, 0x23, 0x41, 0x55, 0xa6, 0x1a, 0xc4, 0x16, 0x09, 0x99, 0x8d, 0xcd, 0x21, 0x27, - 0xd7, 0x4a, 0xea, 0x8b, 0x7c, 0x97, 0x56, 0xd9, 0x98, 0x58, 0x81, 0x8d, 0xb1, 0x03, 0x13, 0x12, - 0x33, 0x12, 0xfc, 0x03, 0x1d, 0x3b, 0x32, 0x1d, 0x28, 0x59, 0x90, 0x47, 0xff, 0x05, 0x28, 0x77, - 0xb6, 0xe3, 0xa8, 0x35, 0x6a, 0xa6, 0x32, 0xb0, 0xf9, 0x7d, 0xef, 0x7b, 0xef, 0xbe, 0xf7, 0xf9, - 0x9d, 0x0e, 0x22, 0xb7, 0xeb, 0xb2, 0x5d, 0xcb, 0x1d, 0x74, 0x7c, 0x6e, 0xed, 0x3f, 0x6c, 0x13, - 0xee, 0x36, 0x55, 0x64, 0xf6, 0x43, 0xca, 0xa9, 0x7e, 0x4d, 0x12, 0x4c, 0x05, 0x25, 0x84, 0xc6, - 0x75, 0x8f, 0x7a, 0x54, 0xe6, 0xad, 0xc9, 0x97, 0xa2, 0x36, 0xb0, 0xea, 0xd5, 0x76, 0x19, 0x99, - 0xb6, 0xe2, 0x3c, 0xf4, 0xdb, 0x03, 0x4e, 0x14, 0x07, 0x7f, 0xd2, 0x60, 0xe5, 0x45, 0x48, 0xf7, - 0xfd, 0x0e, 0x09, 0x75, 0x0b, 0x5e, 0xa0, 0x07, 0x01, 0x09, 0xeb, 0x60, 0x0d, 0x6c, 0x54, 0xed, - 0xe5, 0x48, 0x20, 0x05, 0xc4, 0x02, 0x5d, 0x1a, 0xba, 0x7b, 0xbd, 0x6d, 0x2c, 
0x43, 0xec, 0x28, - 0x58, 0x7f, 0x0c, 0x2f, 0x4a, 0x21, 0x34, 0xac, 0x6b, 0xb2, 0x64, 0x35, 0x12, 0x28, 0x85, 0x62, - 0x81, 0xae, 0xa8, 0xa2, 0x04, 0xc0, 0x4e, 0x9a, 0xd2, 0xbf, 0x00, 0x08, 0x33, 0x29, 0xac, 0x5e, - 0x5e, 0x5b, 0xd8, 0x58, 0xda, 0x5c, 0x35, 0xd5, 0x6c, 0x13, 0xc1, 0xe9, 0x68, 0xe6, 0x4e, 0xca, - 0xb2, 0x83, 0x23, 0x81, 0x4a, 0x91, 0x40, 0xb9, 0xc2, 0x58, 0xa0, 0x5a, 0x72, 0x44, 0x86, 0xe1, - 0xcf, 0x3f, 0xd1, 0x53, 0xcf, 0xe7, 0xbb, 0x83, 0xb6, 0xf9, 0x9a, 0xee, 0x59, 0xb2, 0xe7, 0xfd, - 0x80, 0xf0, 0x03, 0x1a, 0x76, 0x93, 0xc8, 0xed, 0xfb, 0x96, 0x47, 0xad, 0x80, 0x76, 0x88, 0xc5, - 0x87, 0x7d, 0xc2, 0xac, 0x13, 0xc7, 0x31, 0x27, 0x77, 0x0e, 0xfe, 0xaa, 0xc1, 0xda, 0xce, 0x44, - 0x3b, 0xe9, 0x4c, 0x19, 0xe7, 0xe7, 0xd7, 0xc2, 0x3f, 0xee, 0xd7, 0x76, 0xe5, 0xe3, 0x21, 0x02, - 0xbf, 0x0f, 0x51, 0x09, 0xbf, 0x03, 0x50, 0xcf, 0x91, 0x08, 0xeb, 0xd3, 0x80, 0x11, 0x9d, 0xce, - 0xcc, 0x03, 0xe4, 0x3c, 0x77, 0xcc, 0x53, 0x76, 0xdb, 0x3c, 0x61, 0xbb, 0x7d, 0xf7, 0x8c, 0x83, - 0x15, 0x2b, 0xaa, 0x4d, 0xbb, 0x3d, 0xf7, 0x7b, 0x9c, 0x84, 0x4c, 0x7f, 0x02, 0x2b, 0x89, 0xd9, - 0x4a, 0x4e, 0xd5, 0x46, 0x91, 0x40, 0x19, 0x16, 0x0b, 0x74, 0x75, 0xe6, 0xe7, 0x30, 0xec, 0x64, - 0x49, 0xbd, 0x09, 0x17, 0xe5, 0x0f, 0x66, 0x75, 0x4d, 0x96, 0xae, 0x44, 0x02, 0x25, 0x48, 0x2c, - 0xd0, 0xe5, 0xdc, 0x2a, 0x30, 0xec, 0x24, 0x89, 0x9c, 0xa2, 0xef, 0x1a, 0x5c, 0x6e, 0x31, 0xef, - 0xa5, 0xef, 0x05, 0xe9, 0x5d, 0xfc, 0xbf, 0x65, 0x67, 0xd8, 0xb2, 0xb2, 0x74, 0x6f, 0x1d, 0xde, - 0x2a, 0x34, 0x2f, 0xdd, 0x37, 0xfc, 0x0d, 0xc0, 0x95, 0x16, 0xf3, 0x9e, 0x91, 0x1e, 0xe1, 0xe4, - 0x5c, 0x4d, 0xbe, 0x07, 0xcb, 0x5d, 0x32, 0x54, 0xee, 0x56, 0xed, 0x9b, 0x91, 0x40, 0x32, 0x8e, - 0x05, 0x5a, 0x52, 0x25, 0x93, 0x08, 0x3b, 0x12, 0x4c, 0x26, 0xbc, 0x0d, 0xd7, 0xff, 0xa2, 0x3d, - 0x9d, 0x71, 0xf3, 0x83, 0x06, 0x17, 0x5a, 0xcc, 0xd3, 0xdf, 0x00, 0x78, 0xa3, 0x60, 0x97, 0xcc, - 0x53, 0xaf, 0x58, 0xa1, 0x7d, 0x8d, 0x47, 0xf3, 0xf1, 0xb3, 0xeb, 0xfd, 0x16, 0xc0, 0x7a, 0xa1, - 0xd7, 0x0f, 0x8a, 0x9a, 0x16, 0x55, 0x34, 0xb6, 0xe6, 0xad, 0x48, 0x85, 0xd8, 0xce, 0xd1, 0xc8, - 0x00, 0xc7, 0x23, 0x03, 0xfc, 0x1a, 0x19, 0xe0, 0xfd, 0xd8, 0x28, 0x1d, 0x8f, 0x8d, 0xd2, 0x8f, - 0xb1, 0x51, 0x7a, 0xb5, 0x35, 0xc7, 0x36, 0xce, 0x3c, 0xc7, 0xed, 0x45, 0xf9, 0x74, 0x36, 0xff, - 0x04, 0x00, 0x00, 0xff, 0xff, 0x10, 0x5f, 0x78, 0xee, 0xac, 0x07, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type MsgClient interface { - // SignProviderAttributes defines a method that signs provider attributes - SignProviderAttributes(ctx context.Context, in *MsgSignProviderAttributes, opts ...grpc.CallOption) (*MsgSignProviderAttributesResponse, error) - // DeleteProviderAttributes defines a method that deletes provider attributes - DeleteProviderAttributes(ctx context.Context, in *MsgDeleteProviderAttributes, opts ...grpc.CallOption) (*MsgDeleteProviderAttributesResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) SignProviderAttributes(ctx context.Context, in *MsgSignProviderAttributes, opts ...grpc.CallOption) (*MsgSignProviderAttributesResponse, error) { - out := new(MsgSignProviderAttributesResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta3.Msg/SignProviderAttributes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) DeleteProviderAttributes(ctx context.Context, in *MsgDeleteProviderAttributes, opts ...grpc.CallOption) (*MsgDeleteProviderAttributesResponse, error) { - out := new(MsgDeleteProviderAttributesResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta3.Msg/DeleteProviderAttributes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. -type MsgServer interface { - // SignProviderAttributes defines a method that signs provider attributes - SignProviderAttributes(context.Context, *MsgSignProviderAttributes) (*MsgSignProviderAttributesResponse, error) - // DeleteProviderAttributes defines a method that deletes provider attributes - DeleteProviderAttributes(context.Context, *MsgDeleteProviderAttributes) (*MsgDeleteProviderAttributesResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
-type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) SignProviderAttributes(ctx context.Context, req *MsgSignProviderAttributes) (*MsgSignProviderAttributesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method SignProviderAttributes not implemented") -} -func (*UnimplementedMsgServer) DeleteProviderAttributes(ctx context.Context, req *MsgDeleteProviderAttributes) (*MsgDeleteProviderAttributesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteProviderAttributes not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_SignProviderAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgSignProviderAttributes) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).SignProviderAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta3.Msg/SignProviderAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).SignProviderAttributes(ctx, req.(*MsgSignProviderAttributes)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_DeleteProviderAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgDeleteProviderAttributes) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).DeleteProviderAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta3.Msg/DeleteProviderAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).DeleteProviderAttributes(ctx, req.(*MsgDeleteProviderAttributes)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.audit.v1beta3.Msg", - HandlerType: (*MsgServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SignProviderAttributes", - Handler: _Msg_SignProviderAttributes_Handler, - }, - { - MethodName: "DeleteProviderAttributes", - Handler: _Msg_DeleteProviderAttributes_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/audit/v1beta3/audit.proto", -} - -func (m *Provider) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Provider) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Provider) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAudit(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 
0xa - } - return len(dAtA) - i, nil -} - -func (m *AuditedAttributes) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuditedAttributes) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuditedAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAudit(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *AttributesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AttributesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AttributesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAudit(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *AttributesFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AttributesFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AttributesFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Owners) > 0 { - for iNdEx := len(m.Owners) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Owners[iNdEx]) - copy(dAtA[i:], m.Owners[iNdEx]) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Owners[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Auditors) > 0 { - for iNdEx := len(m.Auditors) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Auditors[iNdEx]) - copy(dAtA[i:], m.Auditors[iNdEx]) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditors[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *MsgSignProviderAttributes) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgSignProviderAttributes) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgSignProviderAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := 
len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAudit(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgSignProviderAttributesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgSignProviderAttributesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgSignProviderAttributesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgDeleteProviderAttributes) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDeleteProviderAttributes) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDeleteProviderAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Keys) > 0 { - for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Keys[iNdEx]) - copy(dAtA[i:], m.Keys[iNdEx]) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Keys[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgDeleteProviderAttributesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDeleteProviderAttributesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDeleteProviderAttributesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintAudit(dAtA []byte, offset int, v uint64) int { - offset -= sovAudit(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Provider) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *AuditedAttributes) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - 
n += 1 + l + sovAudit(uint64(l)) - } - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *AttributesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *AttributesFilters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Auditors) > 0 { - for _, s := range m.Auditors { - l = len(s) - n += 1 + l + sovAudit(uint64(l)) - } - } - if len(m.Owners) > 0 { - for _, s := range m.Owners { - l = len(s) - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *MsgSignProviderAttributes) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *MsgSignProviderAttributesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgDeleteProviderAttributes) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovAudit(uint64(l)) - } - if len(m.Keys) > 0 { - for _, s := range m.Keys { - l = len(s) - n += 1 + l + sovAudit(uint64(l)) - } - } - return n -} - -func (m *MsgDeleteProviderAttributesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovAudit(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozAudit(x uint64) (n int) { - return sovAudit(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Provider) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Provider: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Provider: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta3.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuditedAttributes) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuditedAttributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuditedAttributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta3.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AttributesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AttributesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AttributesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, AuditedAttributes{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AttributesFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AttributesFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: AttributesFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditors", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditors = append(m.Auditors, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owners", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owners = append(m.Owners, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgSignProviderAttributes) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgSignProviderAttributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgSignProviderAttributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta3.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgSignProviderAttributesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgSignProviderAttributesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgSignProviderAttributesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDeleteProviderAttributes) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDeleteProviderAttributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDeleteProviderAttributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAudit - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAudit - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keys = append(m.Keys, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDeleteProviderAttributesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAudit - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDeleteProviderAttributesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDeleteProviderAttributesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipAudit(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAudit - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAudit(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAudit - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAudit - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAudit - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthAudit - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupAudit - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthAudit - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthAudit = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAudit = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupAudit = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/audit/v1beta3/codec.go b/go/node/audit/v1beta3/codec.go deleted file mode 100644 index fccd976f..00000000 --- a/go/node/audit/v1beta3/codec.go +++ /dev/null @@ -1,43 +0,0 @@ -package v1beta3 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/msgservice" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/audit module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/provider and - // defined at the application level. 
- ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterLegacyAminoCodec register concrete types on codec -func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { - cdc.RegisterConcrete(&MsgSignProviderAttributes{}, ModuleName+"/"+MsgTypeSignProviderAttributes, nil) - cdc.RegisterConcrete(&MsgDeleteProviderAttributes{}, ModuleName+"/"+MsgTypeDeleteProviderAttributes, nil) -} - -// RegisterInterfaces registers the x/provider interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil), - &MsgSignProviderAttributes{}, - &MsgDeleteProviderAttributes{}, - ) - - msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) -} diff --git a/go/node/audit/v1beta3/errors.go b/go/node/audit/v1beta3/errors.go deleted file mode 100644 index dd551e72..00000000 --- a/go/node/audit/v1beta3/errors.go +++ /dev/null @@ -1,22 +0,0 @@ -package v1beta3 - -import ( - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - errProviderNotFound uint32 = iota + 1 - errInvalidAddress - errAttributeNotFound -) - -var ( - // ErrProviderNotFound provider not found - ErrProviderNotFound = sdkerrors.Register(ModuleName, errProviderNotFound, "invalid provider: address not found") - - // ErrInvalidAddress invalid trusted auditor address - ErrInvalidAddress = sdkerrors.Register(ModuleName, errInvalidAddress, "invalid address") - - // ErrAttributeNotFound invalid trusted auditor address - ErrAttributeNotFound = sdkerrors.Register(ModuleName, errAttributeNotFound, "attribute not found") -) diff --git a/go/node/audit/v1beta3/event.go b/go/node/audit/v1beta3/event.go deleted file mode 100644 index 50a5afdb..00000000 --- a/go/node/audit/v1beta3/event.go +++ /dev/null @@ -1,118 +0,0 @@ -package v1beta3 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/akash-network/akash-api/go/sdkutil" -) - -const ( - evActionTrustedAuditorCreated = "audit-trusted-auditor-created" - evActionTrustedAuditorDeleted = "audit-trusted-auditor-deleted" - evOwnerKey = "owner" - evAuditorKey = "auditor" -) - -// EventTrustedAuditorCreated struct -type EventTrustedAuditorCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - Owner sdk.Address `json:"owner"` - Auditor sdk.Address `json:"auditor"` -} - -func NewEventTrustedAuditorCreated(owner sdk.Address, auditor sdk.Address) EventTrustedAuditorCreated { - return EventTrustedAuditorCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionTrustedAuditorCreated, - }, - Owner: owner, - Auditor: auditor, - } -} - -// ToSDKEvent method creates new sdk event for EventProviderCreated struct -func (ev EventTrustedAuditorCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionTrustedAuditorCreated), - }, TrustedAuditorEVAttributes(ev.Owner, ev.Auditor)...)..., - ) -} - -// TrustedAuditorEVAttributes returns event attributes for given Provider -func TrustedAuditorEVAttributes(owner sdk.Address, auditor sdk.Address) []sdk.Attribute { - return []sdk.Attribute{ - sdk.NewAttribute(evOwnerKey, owner.String()), - sdk.NewAttribute(evAuditorKey, auditor.String()), - } -} - -// ParseEVTTrustedAuditor returns provider details for given event attributes -func 
ParseEVTTrustedAuditor(attrs []sdk.Attribute) (sdk.Address, sdk.Address, error) { - owner, err := sdkutil.GetAccAddress(attrs, evOwnerKey) - if err != nil { - return nil, nil, err - } - - auditor, err := sdkutil.GetAccAddress(attrs, evAuditorKey) - if err != nil { - return nil, nil, err - } - - return owner, auditor, nil -} - -type EventTrustedAuditorDeleted struct { - Context sdkutil.BaseModuleEvent `json:"context"` - Owner sdk.Address `json:"owner"` - Auditor sdk.Address `json:"auditor"` -} - -func NewEventTrustedAuditorDeleted(owner sdk.Address, auditor sdk.Address) EventTrustedAuditorDeleted { - return EventTrustedAuditorDeleted{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionTrustedAuditorDeleted, - }, - Owner: owner, - Auditor: auditor, - } -} - -// ToSDKEvent method creates new sdk event for EventProviderCreated struct -func (ev EventTrustedAuditorDeleted) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionTrustedAuditorDeleted), - }, TrustedAuditorEVAttributes(ev.Owner, ev.Auditor)...)..., - ) -} - -// ParseEvent parses event and returns details of event and error if occurred -func ParseEvent(ev sdkutil.Event) (sdkutil.ModuleEvent, error) { - if ev.Type != sdkutil.EventTypeMessage { - return nil, sdkutil.ErrUnknownType - } - if ev.Module != ModuleName { - return nil, sdkutil.ErrUnknownModule - } - switch ev.Action { - case evActionTrustedAuditorCreated: - owner, auditor, err := ParseEVTTrustedAuditor(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventTrustedAuditorCreated(owner, auditor), nil - case evActionTrustedAuditorDeleted: - owner, auditor, err := ParseEVTTrustedAuditor(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventTrustedAuditorDeleted(owner, auditor), nil - default: - return nil, sdkutil.ErrUnknownAction - } -} diff --git a/go/node/audit/v1beta3/genesis.pb.go b/go/node/audit/v1beta3/genesis.pb.go deleted file mode 100644 index 1e277909..00000000 --- a/go/node/audit/v1beta3/genesis.pb.go +++ /dev/null @@ -1,332 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/audit/v1beta3/genesis.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisState defines the basic genesis state used by audit module -type GenesisState struct { - Attributes []AuditedAttributes `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes" yaml:"attributes"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_74163cc28cdac025, []int{0} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetAttributes() []AuditedAttributes { - if m != nil { - return m.Attributes - } - return nil -} - -func init() { - proto.RegisterType((*GenesisState)(nil), "akash.audit.v1beta3.GenesisState") -} - -func init() { proto.RegisterFile("akash/audit/v1beta3/genesis.proto", fileDescriptor_74163cc28cdac025) } - -var fileDescriptor_74163cc28cdac025 = []byte{ - // 235 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x2c, 0x4d, 0xc9, 0x2c, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd6, - 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x06, - 0x2b, 0xd1, 0x03, 0x2b, 0xd1, 0x83, 0x2a, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xcb, 0xeb, - 0x83, 0x58, 0x10, 0xa5, 0x52, 0xf2, 0xd8, 0x4c, 0x83, 0x68, 0x04, 0x2b, 0x50, 0xaa, 0xe7, 0xe2, - 0x71, 0x87, 0x18, 0x1e, 0x5c, 0x92, 0x58, 0x92, 0x2a, 0x94, 0xcf, 0xc5, 0x95, 0x58, 0x52, 0x52, - 0x94, 0x99, 0x54, 0x5a, 0x92, 0x5a, 0x2c, 0xc1, 0xa8, 0xc0, 0xac, 0xc1, 0x6d, 0xa4, 0xa6, 0x87, - 0xc5, 0x42, 0x3d, 0x47, 0x10, 0x2f, 0x35, 0xc5, 0x11, 0xae, 0xda, 0x49, 0xfd, 0xc4, 0x3d, 0x79, - 0x86, 0x57, 0xf7, 0xe4, 0x91, 0x4c, 0xf8, 0x74, 0x4f, 0x5e, 0xb0, 0x32, 0x31, 0x37, 0xc7, 0x4a, - 0x09, 0x21, 0xa6, 0x14, 0x84, 0xa4, 0xc0, 0x29, 0xe8, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, - 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, - 0xe5, 0x18, 0xa2, 0x2c, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xc1, - 0x0e, 0xd0, 0xcd, 0x4b, 0x2d, 0x29, 0xcf, 0x2f, 0xca, 0x86, 0xf2, 0x12, 0x0b, 0x32, 0xf5, 0xd3, - 0xf3, 0xf5, 0xf3, 0xf2, 0x53, 0x52, 0x51, 0x3d, 0x98, 0xc4, 0x06, 0xf6, 0x9b, 0x31, 0x20, 0x00, - 0x00, 0xff, 0xff, 0x54, 0x65, 0x08, 0x68, 0x4c, 0x01, 0x00, 0x00, -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, AuditedAttributes{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift 
:= uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/audit/v1beta3/key.go b/go/node/audit/v1beta3/key.go deleted file mode 100644 index 390ed084..00000000 --- a/go/node/audit/v1beta3/key.go +++ /dev/null @@ -1,16 +0,0 @@ -package v1beta3 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "audit" - - // StoreKey is the store key string for provider - StoreKey = ModuleName - - // RouterKey is the message route for provider - RouterKey = ModuleName -) - -func PrefixProviderID() []byte { - return []byte{0x01} -} diff --git a/go/node/audit/v1beta3/msgs.go b/go/node/audit/v1beta3/msgs.go deleted file mode 100644 index 4a501915..00000000 --- a/go/node/audit/v1beta3/msgs.go +++ /dev/null @@ -1,94 +0,0 @@ -package v1beta3 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - MsgTypeSignProviderAttributes = "audit-sign-provider-attributes" - MsgTypeDeleteProviderAttributes = "audit-delete-provider-attributes" -) - -var ( - _ sdk.Msg = &MsgSignProviderAttributes{} - _ sdk.Msg = &MsgDeleteProviderAttributes{} -) - -// ====MsgSignProviderAttributes==== -// Route implements the sdk.Msg interface -func (m MsgSignProviderAttributes) Route() string { - return RouterKey -} - -// Type implements the sdk.Msg interface -func (m MsgSignProviderAttributes) Type() string { - return MsgTypeSignProviderAttributes -} - -// ValidateBasic does basic validation -func (m MsgSignProviderAttributes) ValidateBasic() error { - if _, err := sdk.AccAddressFromBech32(m.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Owner Address") - } - - if _, err := sdk.AccAddressFromBech32(m.Auditor); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Auditor Address") - } - - return nil -} - -// GetSignBytes encodes the message for signing -func (m MsgSignProviderAttributes) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&m)) -} - -// GetSigners defines whose signature is required -func (m MsgSignProviderAttributes) GetSigners() []sdk.AccAddress { - auditor, err := sdk.AccAddressFromBech32(m.Auditor) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{auditor} -} - -// ====MsgRevokeProviderAttributes==== -// Route implements the sdk.Msg interface -func (m MsgDeleteProviderAttributes) Route() string { - return RouterKey -} - -// Type implements the sdk.Msg interface -func (m MsgDeleteProviderAttributes) Type() string { - return MsgTypeDeleteProviderAttributes -} - -// ValidateBasic does basic validation -func (m MsgDeleteProviderAttributes) ValidateBasic() 
error { - if _, err := sdk.AccAddressFromBech32(m.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Owner Address") - } - - if _, err := sdk.AccAddressFromBech32(m.Auditor); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Auditor Address") - } - - return nil -} - -// GetSignBytes encodes the message for signing -func (m MsgDeleteProviderAttributes) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&m)) -} - -// GetSigners defines whose signature is required -func (m MsgDeleteProviderAttributes) GetSigners() []sdk.AccAddress { - auditor, err := sdk.AccAddressFromBech32(m.Auditor) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{auditor} -} diff --git a/go/node/audit/v1beta3/query.pb.go b/go/node/audit/v1beta3/query.pb.go deleted file mode 100644 index 47ce9a68..00000000 --- a/go/node/audit/v1beta3/query.pb.go +++ /dev/null @@ -1,1718 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/audit/v1beta3/query.proto - -package v1beta3 - -import ( - context "context" - fmt "fmt" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryProvidersResponse is response type for the Query/Providers RPC method -type QueryProvidersResponse struct { - Providers Providers `protobuf:"bytes,1,rep,name=providers,proto3,castrepeated=Providers" json:"providers"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryProvidersResponse) Reset() { *m = QueryProvidersResponse{} } -func (m *QueryProvidersResponse) String() string { return proto.CompactTextString(m) } -func (*QueryProvidersResponse) ProtoMessage() {} -func (*QueryProvidersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_9dd51717cd9dc5fd, []int{0} -} -func (m *QueryProvidersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProvidersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProvidersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProvidersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProvidersResponse.Merge(m, src) -} -func (m *QueryProvidersResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryProvidersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProvidersResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProvidersResponse proto.InternalMessageInfo - -func (m *QueryProvidersResponse) GetProviders() Providers { - if m != nil { - return m.Providers - } - return nil -} - -func (m *QueryProvidersResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProviderRequest is request type for the Query/Provider RPC method -type QueryProviderRequest struct { - Auditor string `protobuf:"bytes,1,opt,name=auditor,proto3" json:"auditor,omitempty"` - Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` -} - -func (m *QueryProviderRequest) Reset() { *m = QueryProviderRequest{} } -func (m *QueryProviderRequest) String() string { return proto.CompactTextString(m) } -func (*QueryProviderRequest) ProtoMessage() {} -func (*QueryProviderRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9dd51717cd9dc5fd, []int{1} -} -func (m *QueryProviderRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProviderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProviderRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProviderRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProviderRequest.Merge(m, src) -} -func (m *QueryProviderRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryProviderRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProviderRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProviderRequest proto.InternalMessageInfo - -func (m *QueryProviderRequest) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *QueryProviderRequest) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -// QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method -type QueryAllProvidersAttributesRequest 
struct { - Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryAllProvidersAttributesRequest) Reset() { *m = QueryAllProvidersAttributesRequest{} } -func (m *QueryAllProvidersAttributesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryAllProvidersAttributesRequest) ProtoMessage() {} -func (*QueryAllProvidersAttributesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9dd51717cd9dc5fd, []int{2} -} -func (m *QueryAllProvidersAttributesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryAllProvidersAttributesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryAllProvidersAttributesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryAllProvidersAttributesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryAllProvidersAttributesRequest.Merge(m, src) -} -func (m *QueryAllProvidersAttributesRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryAllProvidersAttributesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryAllProvidersAttributesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryAllProvidersAttributesRequest proto.InternalMessageInfo - -func (m *QueryAllProvidersAttributesRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProviderAttributesRequest is request type for the Query/Provider RPC method -type QueryProviderAttributesRequest struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner,omitempty"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryProviderAttributesRequest) Reset() { *m = QueryProviderAttributesRequest{} } -func (m *QueryProviderAttributesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryProviderAttributesRequest) ProtoMessage() {} -func (*QueryProviderAttributesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9dd51717cd9dc5fd, []int{3} -} -func (m *QueryProviderAttributesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProviderAttributesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProviderAttributesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProviderAttributesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProviderAttributesRequest.Merge(m, src) -} -func (m *QueryProviderAttributesRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryProviderAttributesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProviderAttributesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProviderAttributesRequest proto.InternalMessageInfo - -func (m *QueryProviderAttributesRequest) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *QueryProviderAttributesRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProviderAuditorRequest is request type for the Query/Providers RPC method -type QueryProviderAuditorRequest struct { - Auditor string 
`protobuf:"bytes,1,opt,name=auditor,proto3" json:"auditor,omitempty"` - Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` -} - -func (m *QueryProviderAuditorRequest) Reset() { *m = QueryProviderAuditorRequest{} } -func (m *QueryProviderAuditorRequest) String() string { return proto.CompactTextString(m) } -func (*QueryProviderAuditorRequest) ProtoMessage() {} -func (*QueryProviderAuditorRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9dd51717cd9dc5fd, []int{4} -} -func (m *QueryProviderAuditorRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProviderAuditorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProviderAuditorRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProviderAuditorRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProviderAuditorRequest.Merge(m, src) -} -func (m *QueryProviderAuditorRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryProviderAuditorRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProviderAuditorRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProviderAuditorRequest proto.InternalMessageInfo - -func (m *QueryProviderAuditorRequest) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *QueryProviderAuditorRequest) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -// QueryAuditorAttributesRequest is request type for the Query/Providers RPC method -type QueryAuditorAttributesRequest struct { - Auditor string `protobuf:"bytes,1,opt,name=auditor,proto3" json:"auditor,omitempty"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryAuditorAttributesRequest) Reset() { *m = QueryAuditorAttributesRequest{} } -func (m *QueryAuditorAttributesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryAuditorAttributesRequest) ProtoMessage() {} -func (*QueryAuditorAttributesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_9dd51717cd9dc5fd, []int{5} -} -func (m *QueryAuditorAttributesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryAuditorAttributesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryAuditorAttributesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryAuditorAttributesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryAuditorAttributesRequest.Merge(m, src) -} -func (m *QueryAuditorAttributesRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryAuditorAttributesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryAuditorAttributesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryAuditorAttributesRequest proto.InternalMessageInfo - -func (m *QueryAuditorAttributesRequest) GetAuditor() string { - if m != nil { - return m.Auditor - } - return "" -} - -func (m *QueryAuditorAttributesRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -func init() { - proto.RegisterType((*QueryProvidersResponse)(nil), "akash.audit.v1beta3.QueryProvidersResponse") - 
proto.RegisterType((*QueryProviderRequest)(nil), "akash.audit.v1beta3.QueryProviderRequest") - proto.RegisterType((*QueryAllProvidersAttributesRequest)(nil), "akash.audit.v1beta3.QueryAllProvidersAttributesRequest") - proto.RegisterType((*QueryProviderAttributesRequest)(nil), "akash.audit.v1beta3.QueryProviderAttributesRequest") - proto.RegisterType((*QueryProviderAuditorRequest)(nil), "akash.audit.v1beta3.QueryProviderAuditorRequest") - proto.RegisterType((*QueryAuditorAttributesRequest)(nil), "akash.audit.v1beta3.QueryAuditorAttributesRequest") -} - -func init() { proto.RegisterFile("akash/audit/v1beta3/query.proto", fileDescriptor_9dd51717cd9dc5fd) } - -var fileDescriptor_9dd51717cd9dc5fd = []byte{ - // 567 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x95, 0x41, 0x6f, 0xd3, 0x30, - 0x14, 0xc7, 0xeb, 0xa2, 0x82, 0xea, 0x9d, 0x66, 0xaa, 0xa9, 0x14, 0x96, 0x56, 0x3d, 0x40, 0x35, - 0xc0, 0xde, 0x52, 0xc4, 0x80, 0x0b, 0xda, 0x0e, 0xe3, 0x04, 0x1a, 0x39, 0x72, 0x73, 0x57, 0x2b, - 0x8b, 0xd6, 0xe5, 0x65, 0xb1, 0xb3, 0x09, 0xa1, 0x21, 0xc1, 0x27, 0x40, 0xe2, 0x5b, 0x20, 0x2e, - 0x70, 0xe5, 0x0b, 0xec, 0x38, 0x89, 0x0b, 0xa7, 0x81, 0x5a, 0x3e, 0x08, 0x8a, 0x9d, 0xb4, 0xcd, - 0xd2, 0xd2, 0x6e, 0x70, 0xab, 0xeb, 0xff, 0x7b, 0xff, 0xdf, 0x7b, 0x7e, 0x76, 0x70, 0x9d, 0xef, - 0x71, 0xb9, 0xcb, 0x78, 0xd4, 0xf5, 0x14, 0x3b, 0x5c, 0xeb, 0x08, 0xc5, 0xdb, 0xec, 0x20, 0x12, - 0xe1, 0x6b, 0x1a, 0x84, 0xa0, 0x80, 0x5c, 0xd7, 0x02, 0xaa, 0x05, 0x34, 0x11, 0xd4, 0x2a, 0x2e, - 0xb8, 0xa0, 0xf7, 0x59, 0xfc, 0xcb, 0x48, 0x6b, 0xb7, 0x5c, 0x00, 0xb7, 0x27, 0x18, 0x0f, 0x3c, - 0xc6, 0x7d, 0x1f, 0x14, 0x57, 0x1e, 0xf8, 0x32, 0xd9, 0x5d, 0xd9, 0x01, 0xb9, 0x0f, 0x92, 0x75, - 0xb8, 0x14, 0xc6, 0x21, 0xf1, 0x5b, 0x63, 0x01, 0x77, 0x3d, 0x5f, 0x8b, 0x13, 0xed, 0x44, 0x2a, - 0x83, 0xa0, 0x05, 0xcd, 0x2f, 0x08, 0x2f, 0xbd, 0x8c, 0x73, 0x6c, 0x87, 0x70, 0xe8, 0x75, 0x45, - 0x28, 0x1d, 0x21, 0x03, 0xf0, 0xa5, 0x20, 0x2f, 0x70, 0x39, 0x48, 0xff, 0xac, 0xa2, 0xc6, 0x95, - 0xd6, 0x82, 0xbd, 0x4c, 0x27, 0x14, 0x41, 0xd3, 0xd0, 0xcd, 0xc5, 0x93, 0xb3, 0x7a, 0xe1, 0xd3, - 0xcf, 0x7a, 0x79, 0x94, 0x6c, 0x94, 0x82, 0x3c, 0xc3, 0x78, 0xc4, 0x57, 0x2d, 0x36, 0x50, 0x6b, - 0xc1, 0xbe, 0x43, 0x4d, 0x31, 0x34, 0x2e, 0x86, 0x9a, 0x76, 0x25, 0xc5, 0xd0, 0x6d, 0xee, 0x8a, - 0x14, 0xc6, 0x19, 0x0b, 0x6d, 0x6e, 0xe1, 0x4a, 0x06, 0xd9, 0x11, 0x07, 0x91, 0x90, 0x8a, 0x54, - 0xf1, 0x35, 0x0d, 0x06, 0x61, 0x15, 0x35, 0x50, 0xab, 0xec, 0xa4, 0x4b, 0x52, 0xc1, 0x25, 0x38, - 0xf2, 0x45, 0xa8, 0x5d, 0xcb, 0x8e, 0x59, 0x34, 0x7b, 0xb8, 0xa9, 0xf3, 0x6c, 0xf4, 0x7a, 0x43, - 0xe0, 0x0d, 0xa5, 0x42, 0xaf, 0x13, 0x29, 0x21, 0xd3, 0xac, 0x5b, 0x19, 0x6c, 0xa4, 0xb1, 0x6f, - 0xcf, 0xc4, 0xd6, 0xb1, 0x19, 0xea, 0xb7, 0xd8, 0xca, 0x50, 0xe7, 0x9d, 0x86, 0x94, 0x68, 0x8c, - 0xf2, 0x9c, 0x7f, 0xf1, 0xd2, 0xfe, 0xcf, 0xf1, 0xcd, 0xac, 0xbf, 0xe9, 0xcd, 0x65, 0x9b, 0xf7, - 0x0e, 0xe1, 0x65, 0xd3, 0x3d, 0x23, 0xcb, 0x97, 0x33, 0x3d, 0xe3, 0x7f, 0x2a, 0xc9, 0x3e, 0x2b, - 0xe1, 0x92, 0x66, 0x20, 0x5f, 0x11, 0x5e, 0x9a, 0x7c, 0x8c, 0x64, 0x7d, 0xe2, 0xcc, 0xce, 0x3e, - 0xf8, 0xda, 0xdd, 0xe9, 0x81, 0xb9, 0xcb, 0xd2, 0xb4, 0xdf, 0x7f, 0xff, 0xfd, 0xb1, 0x78, 0x8f, - 0xac, 0xb0, 0xa9, 0x37, 0x8e, 0xf1, 0xa1, 0x05, 0xeb, 0x79, 0x52, 0xc5, 0xd0, 0x24, 0x3f, 0x0d, - 0xa4, 0x3d, 0xdb, 0xf7, 0x1f, 0x61, 0x9f, 0x68, 0xd8, 0x07, 0xc4, 0x9e, 0x0f, 0xf6, 0x8d, 0x3e, - 0xf1, 0x63, 0x03, 0xfd, 0x0d, 0xe1, 0x1b, 0xe7, 0x46, 0x68, 0x8c, 0x7d, 0x75, 0x0e, 0xf6, 0xcc, - 0xdc, 0x5d, 0x0c, 0xfc, 0xa9, 0x06, 0x7f, 0x4c, 
0xd6, 0xe7, 0x04, 0x4f, 0x06, 0xee, 0x38, 0x2d, - 0x81, 0x7c, 0x46, 0x78, 0x31, 0x4f, 0x6d, 0xff, 0x65, 0x44, 0xa6, 0x4c, 0xf7, 0xc5, 0xb8, 0x1f, - 0x6a, 0xee, 0x55, 0x42, 0x13, 0xee, 0xf4, 0x51, 0xcc, 0xa2, 0x43, 0x38, 0x46, 0x1c, 0x37, 0x7b, - 0xd3, 0x39, 0xe9, 0x5b, 0xe8, 0xb4, 0x6f, 0xa1, 0x5f, 0x7d, 0x0b, 0x7d, 0x18, 0x58, 0x85, 0xd3, - 0x81, 0x55, 0xf8, 0x31, 0xb0, 0x0a, 0xaf, 0x1e, 0xb9, 0x9e, 0xda, 0x8d, 0x3a, 0x74, 0x07, 0xf6, - 0x4d, 0xce, 0xfb, 0xbe, 0x50, 0x47, 0x10, 0xee, 0x25, 0xab, 0xf8, 0xd3, 0xe1, 0x02, 0xf3, 0xa1, - 0x2b, 0xb2, 0x5d, 0xea, 0x5c, 0xd5, 0x0f, 0x7f, 0xfb, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5c, - 0x61, 0xaf, 0x79, 0xb1, 0x06, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // AllProvidersAttributes queries all providers - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - AllProvidersAttributes(ctx context.Context, in *QueryAllProvidersAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) - // ProviderAttributes queries all provider signed attributes - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - ProviderAttributes(ctx context.Context, in *QueryProviderAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) - // ProviderAuditorAttributes queries provider signed attributes by specific auditor - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - ProviderAuditorAttributes(ctx context.Context, in *QueryProviderAuditorRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) - // AuditorAttributes queries all providers signed by this auditor - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - AuditorAttributes(ctx context.Context, in *QueryAuditorAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) AllProvidersAttributes(ctx context.Context, in *QueryAllProvidersAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { - out := new(QueryProvidersResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta3.Query/AllProvidersAttributes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) ProviderAttributes(ctx context.Context, in *QueryProviderAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { - out := new(QueryProvidersResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta3.Query/ProviderAttributes", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) ProviderAuditorAttributes(ctx context.Context, in *QueryProviderAuditorRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { - out := new(QueryProvidersResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta3.Query/ProviderAuditorAttributes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) AuditorAttributes(ctx context.Context, in *QueryAuditorAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { - out := new(QueryProvidersResponse) - err := c.cc.Invoke(ctx, "/akash.audit.v1beta3.Query/AuditorAttributes", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // AllProvidersAttributes queries all providers - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - AllProvidersAttributes(context.Context, *QueryAllProvidersAttributesRequest) (*QueryProvidersResponse, error) - // ProviderAttributes queries all provider signed attributes - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - ProviderAttributes(context.Context, *QueryProviderAttributesRequest) (*QueryProvidersResponse, error) - // ProviderAuditorAttributes queries provider signed attributes by specific auditor - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - ProviderAuditorAttributes(context.Context, *QueryProviderAuditorRequest) (*QueryProvidersResponse, error) - // AuditorAttributes queries all providers signed by this auditor - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - AuditorAttributes(context.Context, *QueryAuditorAttributesRequest) (*QueryProvidersResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
-type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) AllProvidersAttributes(ctx context.Context, req *QueryAllProvidersAttributesRequest) (*QueryProvidersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AllProvidersAttributes not implemented") -} -func (*UnimplementedQueryServer) ProviderAttributes(ctx context.Context, req *QueryProviderAttributesRequest) (*QueryProvidersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ProviderAttributes not implemented") -} -func (*UnimplementedQueryServer) ProviderAuditorAttributes(ctx context.Context, req *QueryProviderAuditorRequest) (*QueryProvidersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ProviderAuditorAttributes not implemented") -} -func (*UnimplementedQueryServer) AuditorAttributes(ctx context.Context, req *QueryAuditorAttributesRequest) (*QueryProvidersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method AuditorAttributes not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_AllProvidersAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryAllProvidersAttributesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).AllProvidersAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta3.Query/AllProvidersAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).AllProvidersAttributes(ctx, req.(*QueryAllProvidersAttributesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_ProviderAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryProviderAttributesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).ProviderAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta3.Query/ProviderAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).ProviderAttributes(ctx, req.(*QueryProviderAttributesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_ProviderAuditorAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryProviderAuditorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).ProviderAuditorAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta3.Query/ProviderAuditorAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).ProviderAuditorAttributes(ctx, req.(*QueryProviderAuditorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_AuditorAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryAuditorAttributesRequest) - if err := dec(in); err != nil { - return nil, err - } - if 
interceptor == nil { - return srv.(QueryServer).AuditorAttributes(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.audit.v1beta3.Query/AuditorAttributes", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).AuditorAttributes(ctx, req.(*QueryAuditorAttributesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.audit.v1beta3.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "AllProvidersAttributes", - Handler: _Query_AllProvidersAttributes_Handler, - }, - { - MethodName: "ProviderAttributes", - Handler: _Query_ProviderAttributes_Handler, - }, - { - MethodName: "ProviderAuditorAttributes", - Handler: _Query_ProviderAuditorAttributes_Handler, - }, - { - MethodName: "AuditorAttributes", - Handler: _Query_AuditorAttributes_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/audit/v1beta3/query.proto", -} - -func (m *QueryProvidersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProvidersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProvidersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Providers) > 0 { - for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryProviderRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProviderRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProviderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x12 - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryAllProvidersAttributesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryAllProvidersAttributesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryAllProvidersAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - 
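The surrounding MarshalToSizedBuffer methods follow the usual gogoproto pattern: fields are written from the end of the buffer backwards, and each field's key byte is emitted after its payload. The key byte is (field_number << 3) | wire_type, so 0xa is field 1 with wire type 2 (length-delimited) and 0x12 is field 2 with the same wire type. A tiny standalone illustration of that encoding (not part of the generated code):

    package main

    import "fmt"

    // key returns the protobuf field key byte for small field numbers:
    // (fieldNumber << 3) | wireType.
    func key(fieldNumber, wireType uint8) uint8 {
        return fieldNumber<<3 | wireType
    }

    func main() {
        const lengthDelimited = 2
        fmt.Printf("field 1: %#x\n", key(1, lengthDelimited)) // 0xa  -> e.g. Providers / Auditor
        fmt.Printf("field 2: %#x\n", key(2, lengthDelimited)) // 0x12 -> e.g. Pagination / Owner
    }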
if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryProviderAttributesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProviderAttributesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProviderAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryProviderAuditorRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProviderAuditorRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProviderAuditorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x12 - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryAuditorAttributesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryAuditorAttributesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryAuditorAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Auditor) > 0 { - i -= len(m.Auditor) - copy(dAtA[i:], m.Auditor) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Auditor))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryProvidersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Providers) > 0 { - for _, e := range m.Providers { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryProviderRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = 
len(m.Auditor) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryAllProvidersAttributesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryProviderAttributesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryProviderAuditorRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryAuditorAttributesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Auditor) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryProvidersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProvidersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProvidersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Providers = append(m.Providers, Provider{}) - if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryProviderRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProviderRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProviderRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryAllProvidersAttributesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryAllProvidersAttributesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryAllProvidersAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryProviderAttributesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProviderAttributesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProviderAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*QueryProviderAuditorRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProviderAuditorRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProviderAuditorRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryAuditorAttributesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryAuditorAttributesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryAuditorAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if 
intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Auditor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/audit/v1beta3/query.pb.gw.go b/go/node/audit/v1beta3/query.pb.gw.go deleted file mode 100644 index 189e7bb0..00000000 --- a/go/node/audit/v1beta3/query.pb.gw.go +++ /dev/null @@ -1,532 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/audit/v1beta3/query.proto - -/* -Package v1beta3 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package v1beta3 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_AllProvidersAttributes_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_AllProvidersAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAllProvidersAttributesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AllProvidersAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.AllProvidersAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_AllProvidersAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAllProvidersAttributesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AllProvidersAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.AllProvidersAttributes(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_ProviderAttributes_0 = &utilities.DoubleArray{Encoding: map[string]int{"owner": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} -) - -func request_Query_ProviderAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProviderAttributesRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["owner"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") - } - - protoReq.Owner, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ProviderAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.ProviderAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - 
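The request_*/local_request_* helpers in this deleted gateway file pull path parameters such as "owner" and "auditor" out of the route and fold everything else (pagination, for example) in from the query string. Over HTTP that corresponds to the REST routes declared by the pattern_Query_* variables near the end of the file. A rough consumer-side sketch, assuming a node that still serves this REST surface on an illustrative address and a placeholder owner address:

    package main

    import (
        "fmt"
        "io"
        "log"
        "net/http"
    )

    func main() {
        base := "http://localhost:1317" // illustrative REST endpoint
        owner := "akash1..."            // bech32 owner address (placeholder)

        // Corresponds to Query/ProviderAttributes: "owner" is a path parameter,
        // pagination is passed as query-string parameters.
        url := fmt.Sprintf("%s/akash/audit/v1beta3/attributes/%s/list?pagination.limit=50", base, owner)

        resp, err := http.Get(url)
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()

        body, _ := io.ReadAll(resp.Body)
        fmt.Println(resp.Status, string(body))
    }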
return msg, metadata, err - -} - -func local_request_Query_ProviderAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProviderAttributesRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["owner"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") - } - - protoReq.Owner, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ProviderAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.ProviderAttributes(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Query_ProviderAuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProviderAuditorRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["auditor"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") - } - - protoReq.Auditor, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) - } - - val, ok = pathParams["owner"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") - } - - protoReq.Owner, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) - } - - msg, err := client.ProviderAuditorAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_ProviderAuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProviderAuditorRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["auditor"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") - } - - protoReq.Auditor, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) - } - - val, ok = pathParams["owner"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") - } - - protoReq.Owner, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) - } - - msg, err := server.ProviderAuditorAttributes(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_AuditorAttributes_0 = &utilities.DoubleArray{Encoding: map[string]int{"auditor": 
0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} -) - -func request_Query_AuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAuditorAttributesRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["auditor"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") - } - - protoReq.Auditor, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AuditorAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.AuditorAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_AuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAuditorAttributesRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["auditor"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") - } - - protoReq.Auditor, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) - } - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AuditorAttributes_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.AuditorAttributes(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
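Before its removal, this file's registration helpers were the usual way to expose the audit Query service over REST: RegisterQueryHandlerFromEndpoint dials the gRPC endpoint and mounts the translated routes on a grpc-gateway runtime.ServeMux. A hedged sketch of that wiring under the old (v1) gateway runtime imported above — addresses, dial options, and the module import path are assumptions:

    package main

    import (
        "context"
        "log"
        "net/http"

        "github.com/grpc-ecosystem/grpc-gateway/runtime"
        "google.golang.org/grpc"

        // import path assumed; use whatever module hosts go/node/audit/v1beta3
        audit "github.com/akash-network/akash-api/go/node/audit/v1beta3"
    )

    func main() {
        ctx, cancel := context.WithCancel(context.Background())
        defer cancel()

        mux := runtime.NewServeMux()

        // Proxy REST requests to a gRPC endpoint (address and dial options are illustrative).
        if err := audit.RegisterQueryHandlerFromEndpoint(ctx, mux, "localhost:9090",
            []grpc.DialOption{grpc.WithInsecure()}); err != nil {
            log.Fatal(err)
        }

        log.Fatal(http.ListenAndServe(":1317", mux))
    }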
-func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_AllProvidersAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_AllProvidersAttributes_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_AllProvidersAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ProviderAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_ProviderAttributes_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ProviderAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ProviderAuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_ProviderAuditorAttributes_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ProviderAuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_AuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_AuditorAttributes_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_AuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_AllProvidersAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_AllProvidersAttributes_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_AllProvidersAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_ProviderAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_ProviderAttributes_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ProviderAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_ProviderAuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_ProviderAuditorAttributes_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_ProviderAuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_AuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_AuditorAttributes_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_AuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Query_AllProvidersAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 1, 2, 3, 2, 4}, []string{"akash", "audit", "v1beta3", "attributes", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_ProviderAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 1, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"akash", "audit", "v1beta3", "attributes", "owner", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_ProviderAuditorAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 1, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5}, []string{"akash", "audit", "v1beta3", "attributes", "auditor", "owner"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_AuditorAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"akash", "provider", "v1beta3", "auditor", "list"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_AllProvidersAttributes_0 = runtime.ForwardResponseMessage - - forward_Query_ProviderAttributes_0 = runtime.ForwardResponseMessage - - forward_Query_ProviderAuditorAttributes_0 = runtime.ForwardResponseMessage - - forward_Query_AuditorAttributes_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/audit/v1beta3/types.go b/go/node/audit/v1beta3/types.go deleted file mode 100644 index afd5ba14..00000000 --- a/go/node/audit/v1beta3/types.go +++ /dev/null @@ -1,33 +0,0 @@ -package v1beta3 - -import ( - "bytes" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -type ProviderID struct { - Owner sdk.Address - Auditor sdk.Address -} - -// Providers is the collection of Provider -type Providers []Provider - -// String implements the Stringer interface for a Providers object. 
-func (obj Providers) String() string { - var buf bytes.Buffer - - const sep = "\n\n" - - for _, p := range obj { - buf.WriteString(p.String()) - buf.WriteString(sep) - } - - if len(obj) > 0 { - buf.Truncate(buf.Len() - len(sep)) - } - - return buf.String() -} diff --git a/go/node/cert/v1/cert.go b/go/node/cert/v1/cert.go new file mode 100644 index 00000000..96171031 --- /dev/null +++ b/go/node/cert/v1/cert.go @@ -0,0 +1,70 @@ +package v1 + +import ( + "crypto/x509" + "encoding/pem" + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func ParseAndValidateCertificate(owner sdk.Address, crt, pub []byte) (*x509.Certificate, error) { + blk, rest := pem.Decode(pub) + if blk == nil || len(rest) > 0 { + return nil, ErrInvalidPubkeyValue + } + + if blk.Type != PemBlkTypeECPublicKey { + return nil, fmt.Errorf("%w: invalid pem block type", ErrInvalidPubkeyValue) + } + + blk, rest = pem.Decode(crt) + if blk == nil || len(rest) > 0 { + return nil, ErrInvalidCertificateValue + } + + if blk.Type != PemBlkTypeCertificate { + return nil, fmt.Errorf("%w: invalid pem block type", ErrInvalidCertificateValue) + } + + cert, err := x509.ParseCertificate(blk.Bytes) + if err != nil { + return nil, err + } + + cowner, err := sdk.AccAddressFromBech32(cert.Subject.CommonName) + if err != nil { + return nil, fmt.Errorf("%w: %s", ErrInvalidCertificateValue, err.Error()) + } + + if !owner.Equals(cowner) { + return nil, fmt.Errorf("%w: CommonName does not match owner", ErrInvalidCertificateValue) + } + + return cert, nil +} + +func (m *ID) String() string { + return fmt.Sprintf("%s/%s", m.Owner, m.Serial) +} + +func (m *ID) Equals(val ID) bool { + return (m.Owner == val.Owner) && (m.Serial == val.Serial) +} + +func (m Certificate) Validate(owner sdk.Address) error { + if m.State != CertificateValid { + return ErrInvalidState + } + + _, err := ParseAndValidateCertificate(owner, m.Cert, m.Pubkey) + if err != nil { + return err + } + + return nil +} + +func (m Certificate) IsState(state State) bool { + return m.State == state +} diff --git a/go/node/cert/v1/cert.pb.go b/go/node/cert/v1/cert.pb.go new file mode 100644 index 00000000..ceb1cacf --- /dev/null +++ b/go/node/cert/v1/cert.pb.go @@ -0,0 +1,678 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/cert/v1/cert.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// State is an enum which refers to state of deployment +type State int32 + +const ( + // Prefix should start with 0 in enum. 
So declaring dummy state + CertificateStateInvalid State = 0 + // CertificateValid denotes state for deployment active + CertificateValid State = 1 + // CertificateRevoked denotes state for deployment closed + CertificateRevoked State = 2 +) + +var State_name = map[int32]string{ + 0: "invalid", + 1: "valid", + 2: "revoked", +} + +var State_value = map[string]int32{ + "invalid": 0, + "valid": 1, + "revoked": 2, +} + +func (x State) String() string { + return proto.EnumName(State_name, int32(x)) +} + +func (State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_aed64ec87f738ef2, []int{0} +} + +// ID stores owner and sequence number +type ID struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial" yaml:"serial"` +} + +func (m *ID) Reset() { *m = ID{} } +func (*ID) ProtoMessage() {} +func (*ID) Descriptor() ([]byte, []int) { + return fileDescriptor_aed64ec87f738ef2, []int{0} +} +func (m *ID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ID) XXX_Merge(src proto.Message) { + xxx_messageInfo_ID.Merge(m, src) +} +func (m *ID) XXX_Size() int { + return m.Size() +} +func (m *ID) XXX_DiscardUnknown() { + xxx_messageInfo_ID.DiscardUnknown(m) +} + +var xxx_messageInfo_ID proto.InternalMessageInfo + +func (m *ID) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *ID) GetSerial() string { + if m != nil { + return m.Serial + } + return "" +} + +// Certificate stores state, certificate and it's public key +type Certificate struct { + State State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.cert.v1.State" json:"state" yaml:"state"` + Cert []byte `protobuf:"bytes,3,opt,name=cert,proto3" json:"cert" yaml:"cert"` + Pubkey []byte `protobuf:"bytes,4,opt,name=pubkey,proto3" json:"pubkey" yaml:"pubkey"` +} + +func (m *Certificate) Reset() { *m = Certificate{} } +func (m *Certificate) String() string { return proto.CompactTextString(m) } +func (*Certificate) ProtoMessage() {} +func (*Certificate) Descriptor() ([]byte, []int) { + return fileDescriptor_aed64ec87f738ef2, []int{1} +} +func (m *Certificate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Certificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Certificate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Certificate) XXX_Merge(src proto.Message) { + xxx_messageInfo_Certificate.Merge(m, src) +} +func (m *Certificate) XXX_Size() int { + return m.Size() +} +func (m *Certificate) XXX_DiscardUnknown() { + xxx_messageInfo_Certificate.DiscardUnknown(m) +} + +var xxx_messageInfo_Certificate proto.InternalMessageInfo + +func (m *Certificate) GetState() State { + if m != nil { + return m.State + } + return CertificateStateInvalid +} + +func (m *Certificate) GetCert() []byte { + if m != nil { + return m.Cert + } + return nil +} + +func (m *Certificate) GetPubkey() []byte { + if m != nil { + return m.Pubkey + } + return nil +} + +func init() { + proto.RegisterEnum("akash.cert.v1.State", State_name, 
State_value) + proto.RegisterType((*ID)(nil), "akash.cert.v1.ID") + proto.RegisterType((*Certificate)(nil), "akash.cert.v1.Certificate") +} + +func init() { proto.RegisterFile("akash/cert/v1/cert.proto", fileDescriptor_aed64ec87f738ef2) } + +var fileDescriptor_aed64ec87f738ef2 = []byte{ + // 428 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0xc1, 0x8b, 0xd3, 0x40, + 0x14, 0xc6, 0x33, 0xb5, 0xdd, 0xd5, 0xd9, 0x5d, 0x29, 0x43, 0x70, 0xb3, 0x29, 0x64, 0xc2, 0x78, + 0x29, 0x8a, 0x09, 0x75, 0xf1, 0xb2, 0x37, 0xa3, 0x22, 0x7b, 0xcd, 0x82, 0x07, 0x2f, 0x32, 0xdb, + 0x8c, 0x31, 0xa4, 0x9b, 0x29, 0x93, 0x31, 0xb2, 0x07, 0xef, 0x52, 0x3c, 0x78, 0xf4, 0x52, 0x58, + 0xf0, 0x5f, 0x10, 0xff, 0x06, 0x8f, 0x8b, 0x27, 0x4f, 0x41, 0xda, 0x8b, 0xf4, 0xd8, 0xbf, 0x40, + 0xf2, 0x26, 0xd2, 0xee, 0x29, 0xf9, 0xbe, 0xef, 0x37, 0x8f, 0xf7, 0xc1, 0xc3, 0x0e, 0xcf, 0x79, + 0xf9, 0x2e, 0x1c, 0x0b, 0xa5, 0xc3, 0x6a, 0x04, 0xdf, 0x60, 0xaa, 0xa4, 0x96, 0xe4, 0x00, 0x92, + 0x00, 0x9c, 0x6a, 0xe4, 0xda, 0xa9, 0x4c, 0x25, 0x24, 0x61, 0xf3, 0x67, 0x20, 0xf7, 0x68, 0x2c, + 0xcb, 0x0b, 0x59, 0xbe, 0x31, 0x81, 0x11, 0x26, 0x62, 0x9f, 0x11, 0xee, 0x9c, 0x3e, 0x27, 0x2f, + 0x71, 0x4f, 0x7e, 0x28, 0x84, 0x72, 0x90, 0x8f, 0x86, 0x77, 0xa2, 0xd1, 0xaa, 0xa6, 0xc6, 0x58, + 0xd7, 0x74, 0xff, 0x92, 0x5f, 0x4c, 0x4e, 0x18, 0x48, 0xf6, 0xeb, 0xfb, 0x23, 0xbb, 0x1d, 0xf0, + 0x34, 0x49, 0x94, 0x28, 0xcb, 0x33, 0xad, 0xb2, 0x22, 0x8d, 0x0d, 0x4e, 0x8e, 0xf1, 0x4e, 0x29, + 0x54, 0xc6, 0x27, 0x4e, 0x07, 0x26, 0x0d, 0x56, 0x35, 0x6d, 0x9d, 0x75, 0x4d, 0x0f, 0xcc, 0x28, + 0xa3, 0x59, 0xdc, 0x06, 0x27, 0xb7, 0xbf, 0x5e, 0x51, 0xeb, 0xef, 0x15, 0xb5, 0xd8, 0x0f, 0x84, + 0xf7, 0x9e, 0x09, 0xa5, 0xb3, 0xb7, 0xd9, 0x98, 0x6b, 0x41, 0x5e, 0xe0, 0x5e, 0xa9, 0xb9, 0x16, + 0x30, 0xed, 0xee, 0x63, 0x3b, 0xb8, 0x51, 0x37, 0x38, 0x6b, 0xb2, 0xe8, 0xa8, 0xd9, 0x16, 0xb0, + 0xcd, 0xb6, 0x20, 0x59, 0x6c, 0x6c, 0xf2, 0x10, 0x77, 0x9b, 0x27, 0xce, 0x2d, 0x1f, 0x0d, 0xf7, + 0xa3, 0xc3, 0x55, 0x4d, 0x41, 0xaf, 0x6b, 0xba, 0x67, 0xf0, 0x46, 0xb1, 0x18, 0xcc, 0xa6, 0xc2, + 0xf4, 0xfd, 0x79, 0x2e, 0x2e, 0x9d, 0x2e, 0xe0, 0x50, 0xc1, 0x38, 0x9b, 0x0a, 0x46, 0xb3, 0xb8, + 0x0d, 0x1e, 0x7c, 0xc4, 0x3d, 0x58, 0x86, 0x0c, 0xf1, 0x6e, 0x56, 0x54, 0x7c, 0x92, 0x25, 0x7d, + 0xcb, 0x1d, 0xcc, 0xe6, 0xfe, 0xe1, 0x56, 0x1f, 0x40, 0x4e, 0x4d, 0x4c, 0x28, 0xee, 0x19, 0x0e, + 0xb9, 0xf6, 0x6c, 0xee, 0xf7, 0xb7, 0xb8, 0x57, 0x00, 0xdc, 0xc7, 0xbb, 0x4a, 0x54, 0x32, 0x17, + 0x49, 0xbf, 0xe3, 0xde, 0x9b, 0xcd, 0x7d, 0xb2, 0x85, 0xc4, 0x26, 0x71, 0xbb, 0x9f, 0xbe, 0x79, + 0x56, 0xf4, 0xe4, 0xe7, 0xc2, 0x43, 0xd7, 0x0b, 0x0f, 0xfd, 0x59, 0x78, 0xe8, 0xcb, 0xd2, 0xb3, + 0xae, 0x97, 0x9e, 0xf5, 0x7b, 0xe9, 0x59, 0xaf, 0x07, 0xd3, 0x3c, 0x0d, 0x78, 0xae, 0x83, 0x44, + 0x54, 0x61, 0x2a, 0xc3, 0x42, 0x26, 0xe2, 0xff, 0x21, 0x9d, 0xef, 0xc0, 0x11, 0x1c, 0xff, 0x0b, + 0x00, 0x00, 0xff, 0xff, 0x3b, 0x17, 0xac, 0x54, 0x60, 0x02, 0x00, 0x00, +} + +func (m *ID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Serial) > 0 { + i -= len(m.Serial) + copy(dAtA[i:], m.Serial) + i = encodeVarintCert(dAtA, i, uint64(len(m.Serial))) + i-- + dAtA[i] = 
0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintCert(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Certificate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Certificate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Certificate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Pubkey) > 0 { + i -= len(m.Pubkey) + copy(dAtA[i:], m.Pubkey) + i = encodeVarintCert(dAtA, i, uint64(len(m.Pubkey))) + i-- + dAtA[i] = 0x22 + } + if len(m.Cert) > 0 { + i -= len(m.Cert) + copy(dAtA[i:], m.Cert) + i = encodeVarintCert(dAtA, i, uint64(len(m.Cert))) + i-- + dAtA[i] = 0x1a + } + if m.State != 0 { + i = encodeVarintCert(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + return len(dAtA) - i, nil +} + +func encodeVarintCert(dAtA []byte, offset int, v uint64) int { + offset -= sovCert(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovCert(uint64(l)) + } + l = len(m.Serial) + if l > 0 { + n += 1 + l + sovCert(uint64(l)) + } + return n +} + +func (m *Certificate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.State != 0 { + n += 1 + sovCert(uint64(m.State)) + } + l = len(m.Cert) + if l > 0 { + n += 1 + l + sovCert(uint64(l)) + } + l = len(m.Pubkey) + if l > 0 { + n += 1 + l + sovCert(uint64(l)) + } + return n +} + +func sovCert(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozCert(x uint64) (n int) { + return sovCert(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCert + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCert + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCert + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCert + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Serial", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowCert + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthCert + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthCert + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Serial = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCert(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCert + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Certificate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCert + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Certificate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Certificate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCert + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cert", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCert + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCert + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCert + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cert = append(m.Cert[:0], dAtA[iNdEx:postIndex]...) + if m.Cert == nil { + m.Cert = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pubkey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCert + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCert + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCert + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pubkey = append(m.Pubkey[:0], dAtA[iNdEx:postIndex]...) 
+			if m.Pubkey == nil {
+				m.Pubkey = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipCert(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if (skippy < 0) || (iNdEx+skippy) < 0 {
+				return ErrInvalidLengthCert
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipCert(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	depth := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowCert
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowCert
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowCert
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthCert
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupCert
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthCert
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthCert        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowCert          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupCert = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/go/node/cert/v1/codec.go b/go/node/cert/v1/codec.go
new file mode 100644
index 00000000..ee530b58
--- /dev/null
+++ b/go/node/cert/v1/codec.go
@@ -0,0 +1,38 @@
+package v1
+
+import (
+	"github.com/cosmos/cosmos-sdk/codec"
+	cdctypes "github.com/cosmos/cosmos-sdk/codec/types"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"github.com/cosmos/cosmos-sdk/types/msgservice"
+)
+
+var (
+	// ModuleCdc references the global x/cert module codec. Note, the codec should
+	// ONLY be used in certain instances of tests and for JSON encoding as Amino is
+	// still used for that purpose.
+	//
+	// The actual codec used for serialization should be provided to x/cert and
+	// defined at the application level.
+	//
+	// Deprecated: ModuleCdc use is deprecated
+	ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry())
+)
+
+// RegisterLegacyAminoCodec registers concrete types on codec
+//
+// Deprecated: RegisterLegacyAminoCodec is deprecated
+func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+	cdc.RegisterConcrete(&MsgCreateCertificate{}, "akash-sdk/x/"+ModuleName+"/"+(&MsgCreateCertificate{}).Type(), nil)
+	cdc.RegisterConcrete(&MsgRevokeCertificate{}, "akash-sdk/x/"+ModuleName+"/"+(&MsgRevokeCertificate{}).Type(), nil)
+}
+
+// RegisterInterfaces registers the x/cert interface types with the interface registry
+func RegisterInterfaces(registry cdctypes.InterfaceRegistry) {
+	registry.RegisterImplementations((*sdk.Msg)(nil),
+		&MsgCreateCertificate{},
+		&MsgRevokeCertificate{},
+	)
+
+	msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc)
+}
diff --git a/go/node/cert/v1/errors.go b/go/node/cert/v1/errors.go
new file mode 100644
index 00000000..876f3357
--- /dev/null
+++ b/go/node/cert/v1/errors.go
@@ -0,0 +1,52 @@
+package v1
+
+import (
+	"errors"
+
+	sdkerrors "cosmossdk.io/errors"
+)
+
+const (
+	errCertificateNotFound uint32 = iota + 1
+	errInvalidAddress
+	errCertificateExists
+	errCertificateAlreadyRevoked
+	errInvalidSerialNumber
+	errInvalidCertificateValue
+	errInvalidPubkeyValue
+	errInvalidState
+	errInvalidKeySize
+)
+
+var (
+	ErrCertificate = errors.New("certificate error")
+)
+
+var (
+	// ErrCertificateNotFound certificate not found
+	ErrCertificateNotFound = sdkerrors.Register(ModuleName, errCertificateNotFound, "certificate not found")
+
+	// ErrInvalidAddress invalid owner address
+	ErrInvalidAddress = sdkerrors.Register(ModuleName, errInvalidAddress, "invalid address")
+
+	// ErrCertificateExists certificate already exists
+	ErrCertificateExists = sdkerrors.Register(ModuleName, errCertificateExists, "certificate exists")
+
+	// ErrCertificateAlreadyRevoked certificate already revoked
+	ErrCertificateAlreadyRevoked = sdkerrors.Register(ModuleName, errCertificateAlreadyRevoked, "certificate already revoked")
+
+	// ErrInvalidSerialNumber invalid serial number
+	ErrInvalidSerialNumber = sdkerrors.Register(ModuleName, errInvalidSerialNumber, "invalid serial number")
+
+	// ErrInvalidCertificateValue certificate content is not valid
+	ErrInvalidCertificateValue = sdkerrors.Register(ModuleName, errInvalidCertificateValue, "invalid certificate value")
+
+	// ErrInvalidPubkeyValue public key is not valid
+	ErrInvalidPubkeyValue = sdkerrors.Register(ModuleName, errInvalidPubkeyValue, "invalid pubkey value")
+
+	// ErrInvalidState invalid certificate state
+	ErrInvalidState = sdkerrors.Register(ModuleName, errInvalidState, "invalid state")
+
+	// ErrInvalidKeySize invalid key size
+	ErrInvalidKeySize = sdkerrors.Register(ModuleName, errInvalidKeySize, "invalid key size")
+)
diff --git a/go/node/cert/v1/filters.pb.go b/go/node/cert/v1/filters.pb.go
new file mode 100644
index 00000000..c7c0033b
--- /dev/null
+++ b/go/node/cert/v1/filters.pb.go
@@ -0,0 +1,427 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: akash/cert/v1/filters.proto
+
+package v1
+
+import (
+	fmt "fmt"
+	_ "github.com/cosmos/cosmos-proto"
+	_ "github.com/cosmos/gogoproto/gogoproto"
+	proto "github.com/cosmos/gogoproto/proto"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// CertificateFilter defines filters used to filter certificates +type CertificateFilter struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial" yaml:"serial"` + State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state" yaml:"state"` +} + +func (m *CertificateFilter) Reset() { *m = CertificateFilter{} } +func (m *CertificateFilter) String() string { return proto.CompactTextString(m) } +func (*CertificateFilter) ProtoMessage() {} +func (*CertificateFilter) Descriptor() ([]byte, []int) { + return fileDescriptor_b8f3ca07a7278a9e, []int{0} +} +func (m *CertificateFilter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CertificateFilter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CertificateFilter) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateFilter.Merge(m, src) +} +func (m *CertificateFilter) XXX_Size() int { + return m.Size() +} +func (m *CertificateFilter) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateFilter.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateFilter proto.InternalMessageInfo + +func (m *CertificateFilter) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *CertificateFilter) GetSerial() string { + if m != nil { + return m.Serial + } + return "" +} + +func (m *CertificateFilter) GetState() string { + if m != nil { + return m.State + } + return "" +} + +func init() { + proto.RegisterType((*CertificateFilter)(nil), "akash.cert.v1.CertificateFilter") +} + +func init() { proto.RegisterFile("akash/cert/v1/filters.proto", fileDescriptor_b8f3ca07a7278a9e) } + +var fileDescriptor_b8f3ca07a7278a9e = []byte{ + // 287 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x4e, 0x2d, 0x2a, 0xd1, 0x2f, 0x33, 0xd4, 0x4f, 0xcb, 0xcc, 0x29, 0x49, 0x2d, + 0x2a, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x05, 0x4b, 0xea, 0x81, 0x24, 0xf5, 0xca, + 0x0c, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x32, 0xfa, 0x20, 0x16, 0x44, 0x91, 0x94, 0x64, + 0x72, 0x7e, 0x71, 0x6e, 0x7e, 0x71, 0x3c, 0x44, 0x02, 0xc2, 0x81, 0x48, 0x29, 0x9d, 0x60, 0xe4, + 0x12, 0x74, 0x4e, 0x2d, 0x2a, 0xc9, 0x4c, 0xcb, 0x4c, 0x4e, 0x2c, 0x49, 0x75, 0x03, 0x1b, 0x2e, + 0xe4, 0xce, 0xc5, 0x9a, 0x5f, 0x9e, 0x97, 0x5a, 0x24, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xe9, 0x64, + 0xf8, 0xea, 0x9e, 0x3c, 0x44, 0xe0, 0xd3, 0x3d, 0x79, 0x9e, 0xca, 0xc4, 0xdc, 0x1c, 0x2b, 0x25, + 0x30, 0x57, 0xe9, 0xd2, 0x16, 0x5d, 0x11, 0xa8, 0x79, 0x8e, 0x29, 0x29, 0x45, 0xa9, 0xc5, 0xc5, + 0xc1, 0x25, 0x45, 0x99, 0x79, 0xe9, 0x41, 0x10, 0xe5, 0x42, 0xc6, 0x5c, 0x6c, 0xc5, 0xa9, 0x45, + 0x99, 0x89, 0x39, 0x12, 0x4c, 0x60, 0x93, 0xa4, 0x5f, 0xdd, 0x93, 0x87, 0x8a, 0x7c, 0xba, 0x27, + 0xcf, 0x0b, 0x31, 0x0a, 0xc2, 0x57, 0x0a, 
0x82, 0x4a, 0x08, 0xe9, 0x73, 0xb1, 0x16, 0x97, 0x24, + 0x96, 0xa4, 0x4a, 0x30, 0x83, 0xf5, 0x48, 0x82, 0x6c, 0x07, 0x0b, 0x20, 0x6c, 0x07, 0x73, 0x95, + 0x82, 0x20, 0xc2, 0x56, 0x2c, 0x2f, 0x16, 0xc8, 0x33, 0x38, 0x99, 0x9e, 0x78, 0x24, 0xc7, 0x78, + 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, + 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x74, 0x41, 0x76, 0xba, 0x5e, 0x62, 0x76, 0x89, 0x5e, 0x4a, + 0x6a, 0x99, 0x7e, 0x7a, 0xbe, 0x7e, 0x5e, 0x7e, 0x4a, 0x2a, 0x2c, 0x3c, 0x93, 0xd8, 0xc0, 0x01, + 0x61, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x94, 0x07, 0xc6, 0x2b, 0x67, 0x01, 0x00, 0x00, +} + +func (m *CertificateFilter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateFilter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintFilters(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x1a + } + if len(m.Serial) > 0 { + i -= len(m.Serial) + copy(dAtA[i:], m.Serial) + i = encodeVarintFilters(dAtA, i, uint64(len(m.Serial))) + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintFilters(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintFilters(dAtA []byte, offset int, v uint64) int { + offset -= sovFilters(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CertificateFilter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovFilters(uint64(l)) + } + l = len(m.Serial) + if l > 0 { + n += 1 + l + sovFilters(uint64(l)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovFilters(uint64(l)) + } + return n +} + +func sovFilters(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozFilters(x uint64) (n int) { + return sovFilters(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CertificateFilter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateFilter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateFilter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFilters + } + 
postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFilters + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Serial", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFilters + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFilters + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Serial = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFilters + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFilters + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFilters(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthFilters + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFilters(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilters + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilters + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilters + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthFilters + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupFilters + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthFilters + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthFilters = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFilters = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupFilters = fmt.Errorf("proto: unexpected end of group") +) diff --git 
a/go/node/cert/v1/genesis.go b/go/node/cert/v1/genesis.go new file mode 100644 index 00000000..6433543b --- /dev/null +++ b/go/node/cert/v1/genesis.go @@ -0,0 +1,58 @@ +package v1 + +import ( + "bytes" + "encoding/json" + + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type GenesisCertificates []GenesisCertificate + +func (obj GenesisCertificates) Contains(cert GenesisCertificate) bool { + for _, c := range obj { + if c.Owner == cert.Owner { + return true + } + + if bytes.Equal(c.Certificate.Cert, cert.Certificate.Cert) { + return true + } + } + + return false +} + +func (m GenesisCertificate) Validate() error { + owner, err := sdk.AccAddressFromBech32(m.Owner) + if err != nil { + return err + } + if err := m.Certificate.Validate(owner); err != nil { + return err + } + + return nil +} + +func (m *GenesisState) Validate() error { + for _, cert := range m.Certificates { + if err := cert.Validate(); err != nil { + return err + } + } + return nil +} + +// GetGenesisStateFromAppState returns x/cert GenesisState given raw application +// genesis state. +func GetGenesisStateFromAppState(cdc codec.JSONCodec, appState map[string]json.RawMessage) *GenesisState { + var genesisState GenesisState + + if appState[ModuleName] != nil { + cdc.MustUnmarshalJSON(appState[ModuleName], &genesisState) + } + + return &genesisState +} diff --git a/go/node/cert/v1/genesis.pb.go b/go/node/cert/v1/genesis.pb.go new file mode 100644 index 00000000..27eb028c --- /dev/null +++ b/go/node/cert/v1/genesis.pb.go @@ -0,0 +1,563 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/cert/v1/genesis.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisCertificate defines certificate entry at genesis +type GenesisCertificate struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + Certificate Certificate `protobuf:"bytes,2,opt,name=certificate,proto3" json:"certificate" yaml:"certificate"` +} + +func (m *GenesisCertificate) Reset() { *m = GenesisCertificate{} } +func (m *GenesisCertificate) String() string { return proto.CompactTextString(m) } +func (*GenesisCertificate) ProtoMessage() {} +func (*GenesisCertificate) Descriptor() ([]byte, []int) { + return fileDescriptor_079f259b80baefa9, []int{0} +} +func (m *GenesisCertificate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisCertificate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisCertificate) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisCertificate.Merge(m, src) +} +func (m *GenesisCertificate) XXX_Size() int { + return m.Size() +} +func (m *GenesisCertificate) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisCertificate.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisCertificate proto.InternalMessageInfo + +func (m *GenesisCertificate) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *GenesisCertificate) GetCertificate() Certificate { + if m != nil { + return m.Certificate + } + return Certificate{} +} + +// GenesisState defines the basic genesis state used by cert module +type GenesisState struct { + Certificates GenesisCertificates `protobuf:"bytes,1,rep,name=certificates,proto3,castrepeated=GenesisCertificates" json:"certificates" yaml:"certificates"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_079f259b80baefa9, []int{1} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetCertificates() GenesisCertificates { + if m != nil { + return m.Certificates + } + return nil +} + +func init() { + proto.RegisterType((*GenesisCertificate)(nil), "akash.cert.v1.GenesisCertificate") + proto.RegisterType((*GenesisState)(nil), "akash.cert.v1.GenesisState") +} + +func init() { proto.RegisterFile("akash/cert/v1/genesis.proto", fileDescriptor_079f259b80baefa9) } + +var fileDescriptor_079f259b80baefa9 = []byte{ + // 333 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x51, 0xbd, 0x4e, 0xeb, 0x30, + 
0x14, 0x8e, 0xef, 0x15, 0x48, 0xa4, 0x65, 0x49, 0x3b, 0x84, 0x56, 0x8a, 0x8b, 0xa7, 0x32, 0x60, + 0xab, 0x45, 0x2c, 0x6c, 0x84, 0xa1, 0x7b, 0xbb, 0xb1, 0xa0, 0x90, 0x18, 0x13, 0x95, 0xc6, 0x95, + 0x6d, 0x15, 0xf1, 0x08, 0x6c, 0xcc, 0x3c, 0x02, 0x33, 0xcf, 0x80, 0x3a, 0x56, 0x4c, 0x4c, 0x06, + 0xb5, 0x5b, 0xc7, 0x3e, 0x01, 0xaa, 0x1d, 0x44, 0x42, 0x36, 0xfb, 0x7c, 0x7f, 0xe7, 0xd3, 0x71, + 0xdb, 0xd1, 0x38, 0x92, 0xb7, 0x24, 0xa6, 0x42, 0x91, 0x59, 0x8f, 0x30, 0x9a, 0x51, 0x99, 0x4a, + 0x3c, 0x15, 0x5c, 0x71, 0x6f, 0xdf, 0x80, 0x78, 0x0b, 0xe2, 0x59, 0xaf, 0xd5, 0x64, 0x9c, 0x71, + 0x83, 0x90, 0xed, 0xcb, 0x92, 0x5a, 0x07, 0x31, 0x97, 0x13, 0x2e, 0xaf, 0x2c, 0x60, 0x3f, 0x39, + 0xe4, 0x97, 0xcd, 0x8d, 0x8f, 0x41, 0xd0, 0x1b, 0x70, 0xbd, 0x81, 0xcd, 0xba, 0xa0, 0x42, 0xa5, + 0x37, 0x69, 0x1c, 0x29, 0xea, 0x0d, 0xdc, 0x1d, 0x7e, 0x9f, 0x51, 0xe1, 0x83, 0x0e, 0xe8, 0xee, + 0x85, 0xbd, 0xb5, 0x86, 0x76, 0xb0, 0xd1, 0xb0, 0xfe, 0x10, 0x4d, 0xee, 0xce, 0x90, 0xf9, 0xa2, + 0xf7, 0xd7, 0xe3, 0x66, 0x1e, 0x75, 0x9e, 0x24, 0x82, 0x4a, 0x39, 0x52, 0x22, 0xcd, 0xd8, 0xd0, + 0xd2, 0x3d, 0xe6, 0xd6, 0xe2, 0x5f, 0x5f, 0xff, 0x5f, 0x07, 0x74, 0x6b, 0xfd, 0x16, 0x2e, 0xf5, + 0xc1, 0x85, 0xe4, 0xf0, 0x68, 0xae, 0xa1, 0xb3, 0xd6, 0xb0, 0x28, 0xdb, 0x68, 0xe8, 0xd9, 0xd0, + 0xc2, 0x10, 0x0d, 0x8b, 0x14, 0xf4, 0x0c, 0xdc, 0x7a, 0x5e, 0x64, 0xa4, 0xb6, 0x15, 0x1e, 0x81, + 0x5b, 0x2f, 0x10, 0xa4, 0x0f, 0x3a, 0xff, 0xbb, 0xb5, 0xfe, 0xe1, 0x9f, 0xec, 0x6a, 0xf9, 0x30, + 0xcc, 0x57, 0x28, 0xc9, 0x37, 0x1a, 0x36, 0x2a, 0x3b, 0x48, 0xf4, 0xf2, 0x09, 0x1b, 0x55, 0x0b, + 0x39, 0x2c, 0x69, 0xc3, 0xd3, 0xf9, 0x32, 0x00, 0x8b, 0x65, 0x00, 0xbe, 0x96, 0x01, 0x78, 0x5a, + 0x05, 0xce, 0x62, 0x15, 0x38, 0x1f, 0xab, 0xc0, 0xb9, 0x6c, 0x4f, 0xc7, 0x0c, 0x47, 0x63, 0x85, + 0x13, 0x3a, 0x23, 0x8c, 0x93, 0x8c, 0x27, 0xf4, 0xe7, 0x4e, 0xd7, 0xbb, 0xe6, 0x46, 0x27, 0xdf, + 0x01, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x0d, 0xb5, 0xc1, 0x1c, 0x02, 0x00, 0x00, +} + +func (m *GenesisCertificate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisCertificate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisCertificate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Certificate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Certificates) > 0 { + for iNdEx := len(m.Certificates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Certificates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + 
i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisCertificate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + l = m.Certificate.Size() + n += 1 + l + sovGenesis(uint64(l)) + return n +} + +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Certificates) > 0 { + for _, e := range m.Certificates { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisCertificate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisCertificate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisCertificate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Certificates = append(m.Certificates, GenesisCertificate{}) + if err := m.Certificates[len(m.Certificates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/cert/v1/key.go b/go/node/cert/v1/key.go new file mode 100644 index 00000000..d76da27c --- /dev/null +++ b/go/node/cert/v1/key.go @@ -0,0 +1,16 @@ +package v1 + +const ( + // ModuleName is the module name constant used in many places + ModuleName = "cert" + + // StoreKey is the store key string for provider + StoreKey = 
ModuleName + + // RouterKey is the message route for provider + RouterKey = ModuleName +) + +func PrefixCertificateID() []byte { + return []byte{0x01} +} diff --git a/go/node/cert/v1/msg.pb.go b/go/node/cert/v1/msg.pb.go new file mode 100644 index 00000000..25c3f83f --- /dev/null +++ b/go/node/cert/v1/msg.pb.go @@ -0,0 +1,852 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/cert/v1/msg.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgCreateCertificate defines an SDK message for creating certificate +type MsgCreateCertificate struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + Cert []byte `protobuf:"bytes,2,opt,name=cert,proto3" json:"cert" yaml:"cert"` + Pubkey []byte `protobuf:"bytes,3,opt,name=pubkey,proto3" json:"pubkey" yaml:"pubkey"` +} + +func (m *MsgCreateCertificate) Reset() { *m = MsgCreateCertificate{} } +func (m *MsgCreateCertificate) String() string { return proto.CompactTextString(m) } +func (*MsgCreateCertificate) ProtoMessage() {} +func (*MsgCreateCertificate) Descriptor() ([]byte, []int) { + return fileDescriptor_d3964b4ee733a8c2, []int{0} +} +func (m *MsgCreateCertificate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateCertificate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateCertificate) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateCertificate.Merge(m, src) +} +func (m *MsgCreateCertificate) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateCertificate) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateCertificate.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateCertificate proto.InternalMessageInfo + +func (m *MsgCreateCertificate) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *MsgCreateCertificate) GetCert() []byte { + if m != nil { + return m.Cert + } + return nil +} + +func (m *MsgCreateCertificate) GetPubkey() []byte { + if m != nil { + return m.Pubkey + } + return nil +} + +// MsgCreateCertificateResponse defines the Msg/CreateCertificate response type. 
+type MsgCreateCertificateResponse struct { +} + +func (m *MsgCreateCertificateResponse) Reset() { *m = MsgCreateCertificateResponse{} } +func (m *MsgCreateCertificateResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateCertificateResponse) ProtoMessage() {} +func (*MsgCreateCertificateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_d3964b4ee733a8c2, []int{1} +} +func (m *MsgCreateCertificateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateCertificateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateCertificateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateCertificateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateCertificateResponse.Merge(m, src) +} +func (m *MsgCreateCertificateResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateCertificateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateCertificateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateCertificateResponse proto.InternalMessageInfo + +// MsgRevokeCertificate defines an SDK message for revoking certificate +type MsgRevokeCertificate struct { + ID ID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *MsgRevokeCertificate) Reset() { *m = MsgRevokeCertificate{} } +func (m *MsgRevokeCertificate) String() string { return proto.CompactTextString(m) } +func (*MsgRevokeCertificate) ProtoMessage() {} +func (*MsgRevokeCertificate) Descriptor() ([]byte, []int) { + return fileDescriptor_d3964b4ee733a8c2, []int{2} +} +func (m *MsgRevokeCertificate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRevokeCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRevokeCertificate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRevokeCertificate) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRevokeCertificate.Merge(m, src) +} +func (m *MsgRevokeCertificate) XXX_Size() int { + return m.Size() +} +func (m *MsgRevokeCertificate) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRevokeCertificate.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRevokeCertificate proto.InternalMessageInfo + +func (m *MsgRevokeCertificate) GetID() ID { + if m != nil { + return m.ID + } + return ID{} +} + +// MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type. 
+type MsgRevokeCertificateResponse struct { +} + +func (m *MsgRevokeCertificateResponse) Reset() { *m = MsgRevokeCertificateResponse{} } +func (m *MsgRevokeCertificateResponse) String() string { return proto.CompactTextString(m) } +func (*MsgRevokeCertificateResponse) ProtoMessage() {} +func (*MsgRevokeCertificateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_d3964b4ee733a8c2, []int{3} +} +func (m *MsgRevokeCertificateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRevokeCertificateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRevokeCertificateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRevokeCertificateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRevokeCertificateResponse.Merge(m, src) +} +func (m *MsgRevokeCertificateResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgRevokeCertificateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRevokeCertificateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRevokeCertificateResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgCreateCertificate)(nil), "akash.cert.v1.MsgCreateCertificate") + proto.RegisterType((*MsgCreateCertificateResponse)(nil), "akash.cert.v1.MsgCreateCertificateResponse") + proto.RegisterType((*MsgRevokeCertificate)(nil), "akash.cert.v1.MsgRevokeCertificate") + proto.RegisterType((*MsgRevokeCertificateResponse)(nil), "akash.cert.v1.MsgRevokeCertificateResponse") +} + +func init() { proto.RegisterFile("akash/cert/v1/msg.proto", fileDescriptor_d3964b4ee733a8c2) } + +var fileDescriptor_d3964b4ee733a8c2 = []byte{ + // 399 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x31, 0x0e, 0xd3, 0x30, + 0x14, 0x86, 0x93, 0x00, 0x15, 0x4d, 0x5b, 0xa4, 0x46, 0x95, 0x1a, 0x5a, 0x88, 0x2b, 0x4f, 0x15, + 0x08, 0x47, 0xa5, 0x62, 0xe9, 0x04, 0x69, 0x25, 0xd4, 0x81, 0x25, 0x6c, 0x2c, 0x28, 0xad, 0x4d, + 0xb0, 0x42, 0xe3, 0x28, 0x0e, 0x41, 0x5d, 0x39, 0x01, 0x47, 0xe0, 0x08, 0x0c, 0x1c, 0xa2, 0x63, + 0x61, 0x62, 0xb2, 0x50, 0x3a, 0x80, 0x32, 0xe6, 0x04, 0x28, 0x76, 0x2a, 0x11, 0xc1, 0xe6, 0xff, + 0x7d, 0xcf, 0xef, 0xff, 0x9f, 0x9e, 0x39, 0x0e, 0xa2, 0x80, 0xbf, 0x75, 0xf7, 0x24, 0xcd, 0xdc, + 0x7c, 0xe1, 0x1e, 0x78, 0x88, 0x92, 0x94, 0x65, 0xcc, 0x1a, 0x48, 0x80, 0x6a, 0x80, 0xf2, 0xc5, + 0x64, 0x14, 0xb2, 0x90, 0x49, 0xe2, 0xd6, 0x2f, 0xd5, 0x34, 0xb9, 0xbb, 0x67, 0xfc, 0xc0, 0xf8, + 0x6b, 0x05, 0x94, 0x68, 0xd0, 0x58, 0xa9, 0x7a, 0x62, 0x6b, 0xf0, 0xc4, 0x6e, 0x3b, 0x4a, 0x03, + 0x49, 0xe0, 0x37, 0xdd, 0x1c, 0xbd, 0xe0, 0xe1, 0x3a, 0x25, 0x41, 0x46, 0xd6, 0x24, 0xcd, 0xe8, + 0x1b, 0xba, 0x0f, 0x32, 0x62, 0x3d, 0x37, 0x6f, 0xb1, 0x0f, 0x31, 0x49, 0x6d, 0x7d, 0xa6, 0xcf, + 0xbb, 0xde, 0xa2, 0x14, 0x40, 0x15, 0x2a, 0x01, 0xfa, 0xc7, 0xe0, 0xf0, 0x6e, 0x05, 0xa5, 0x84, + 0xdf, 0xbf, 0x3e, 0x1a, 0x35, 0x29, 0x9e, 0x61, 0x9c, 0x12, 0xce, 0x5f, 0x66, 0x29, 0x8d, 0x43, + 0x5f, 0xb5, 0x5b, 0x0f, 0xcd, 0x9b, 0xb5, 0x9f, 0x6d, 0xcc, 0xf4, 0x79, 0xdf, 0x1b, 0x97, 0x02, + 0x48, 0x5d, 0x09, 0xd0, 0x53, 0x63, 0x6a, 0x05, 0x7d, 0x59, 0xb4, 0x96, 0x66, 0x27, 0x79, 0xbf, + 0x8b, 0xc8, 0xd1, 0xbe, 0x21, 0xdb, 0xa7, 0xa5, 0x00, 0x4d, 0xa5, 0x12, 0x60, 0xa0, 0x3e, 0x28, + 0x0d, 0xfd, 0x06, 0xac, 0xee, 0xfc, 0xfe, 0x0c, 0xb4, 0x8f, 0xbf, 0xbe, 0x3c, 0x50, 0x8e, 0xd0, + 0x31, 0xef, 
0xfd, 0x6f, 0x25, 0x9f, 0xf0, 0x84, 0xc5, 0x9c, 0xc0, 0x48, 0xae, 0xec, 0x93, 0x9c, + 0x45, 0xad, 0x95, 0x9f, 0x9a, 0x06, 0xc5, 0x72, 0xdf, 0xde, 0xe3, 0x21, 0x6a, 0xdd, 0x02, 0x6d, + 0x37, 0xde, 0xfd, 0x93, 0x00, 0x5a, 0x21, 0x80, 0xb1, 0xdd, 0x94, 0x02, 0x18, 0x14, 0x57, 0x02, + 0x74, 0x55, 0x22, 0x8a, 0xa1, 0x6f, 0x50, 0xbc, 0x1a, 0x5e, 0x93, 0xdc, 0xa6, 0x18, 0xfd, 0x1d, + 0xe6, 0x1f, 0xb3, 0x6b, 0x18, 0xef, 0xc9, 0xa9, 0x70, 0xf4, 0x73, 0xe1, 0xe8, 0x3f, 0x0b, 0x47, + 0xff, 0x74, 0x71, 0xb4, 0xf3, 0xc5, 0xd1, 0x7e, 0x5c, 0x1c, 0xed, 0xd5, 0x34, 0x89, 0x42, 0x14, + 0x44, 0x19, 0xc2, 0x24, 0x77, 0x43, 0xe6, 0xc6, 0x0c, 0x93, 0xeb, 0x09, 0x77, 0x1d, 0x79, 0xbe, + 0xe5, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf0, 0xaa, 0x90, 0x81, 0x4c, 0x02, 0x00, 0x00, +} + +func (m *MsgCreateCertificate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateCertificate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateCertificate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Pubkey) > 0 { + i -= len(m.Pubkey) + copy(dAtA[i:], m.Pubkey) + i = encodeVarintMsg(dAtA, i, uint64(len(m.Pubkey))) + i-- + dAtA[i] = 0x1a + } + if len(m.Cert) > 0 { + i -= len(m.Cert) + copy(dAtA[i:], m.Cert) + i = encodeVarintMsg(dAtA, i, uint64(len(m.Cert))) + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintMsg(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateCertificateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateCertificateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateCertificateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgRevokeCertificate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRevokeCertificate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRevokeCertificate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgRevokeCertificateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRevokeCertificateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRevokeCertificateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, 
nil +} + +func encodeVarintMsg(dAtA []byte, offset int, v uint64) int { + offset -= sovMsg(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgCreateCertificate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovMsg(uint64(l)) + } + l = len(m.Cert) + if l > 0 { + n += 1 + l + sovMsg(uint64(l)) + } + l = len(m.Pubkey) + if l > 0 { + n += 1 + l + sovMsg(uint64(l)) + } + return n +} + +func (m *MsgCreateCertificateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgRevokeCertificate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovMsg(uint64(l)) + return n +} + +func (m *MsgRevokeCertificateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovMsg(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMsg(x uint64) (n int) { + return sovMsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgCreateCertificate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateCertificate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateCertificate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cert", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cert = append(m.Cert[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Cert == nil { + m.Cert = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pubkey", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Pubkey = append(m.Pubkey[:0], dAtA[iNdEx:postIndex]...) + if m.Pubkey == nil { + m.Pubkey = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateCertificateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateCertificateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateCertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRevokeCertificate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRevokeCertificate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRevokeCertificate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipMsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRevokeCertificateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRevokeCertificateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRevokeCertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMsg(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMsg + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMsg + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMsg + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMsg = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMsg = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMsg = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/cert/v1/msgs.go b/go/node/cert/v1/msgs.go new file mode 100644 index 00000000..03eed21e --- /dev/null +++ b/go/node/cert/v1/msgs.go @@ -0,0 +1,122 @@ +package v1 + +import ( + "math/big" + "reflect" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +var ( + _ sdk.Msg = &MsgCreateCertificate{} + _ sdk.Msg = &MsgRevokeCertificate{} +) + +var ( + MsgTypeCreateCertificate = "" + MsgTypeRevokeCertificate = "" +) + +func init() { + MsgTypeCreateCertificate = reflect.TypeOf(&MsgCreateCertificate{}).Elem().Name() + MsgTypeRevokeCertificate = 
reflect.TypeOf(&MsgRevokeCertificate{}).Elem().Name() +} + +// ====MsgCreateCertificate==== + +// Type implements the sdk.Msg interface +func (m *MsgCreateCertificate) Type() string { + return MsgTypeCreateCertificate +} + +// ValidateBasic does basic validation +func (m *MsgCreateCertificate) ValidateBasic() error { + owner, err := sdk.AccAddressFromBech32(m.Owner) + if err != nil { + return sdkerrors.ErrInvalidAddress.Wrap("MsgCreateCertificate: Invalid Owner Address") + } + + _, err = ParseAndValidateCertificate(owner, m.Cert, m.Pubkey) + if err != nil { + return err + } + + return nil +} + +// GetSigners defines whose signature is required +func (m *MsgCreateCertificate) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(m.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// ====MsgRevokeCertificate==== + +// Type implements the sdk.Msg interface +func (m *MsgRevokeCertificate) Type() string { + return MsgTypeRevokeCertificate +} + +// ValidateBasic does basic validation +func (m *MsgRevokeCertificate) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(m.ID.Owner); err != nil { + return sdkerrors.ErrInvalidAddress.Wrap("MsgRevoke: Invalid Owner Address") + } + + if _, valid := new(big.Int).SetString(m.ID.Serial, 10); !valid { + return ErrInvalidSerialNumber + } + + return nil +} + +// GetSigners defines whose signature is required +func (m *MsgRevokeCertificate) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(m.ID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// ============= GetSignBytes ============= +// ModuleCdc is defined in codec.go +// TODO @troian to check if we need them at all + +// GetSignBytes encodes the message for signing +// +// Deprecated: GetSignBytes is deprecated +func (m *MsgCreateCertificate) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(m)) +} + +// GetSignBytes encodes the message for signing +// +// Deprecated: GetSignBytes is deprecated +func (m *MsgRevokeCertificate) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(m)) +} + +// ============= Route ============= +// ModuleCdc is defined in codec.go +// TODO @troian to check if we need them at all since sdk.Msg does not not have Route defined anymore + +// Route implements the sdk.Msg interface +// +// Deprecated: Route is deprecated +func (m *MsgCreateCertificate) Route() string { + return RouterKey +} + +// Route implements the sdk.Msg interface +// +// Deprecated: Route is deprecated +func (m *MsgRevokeCertificate) Route() string { + return RouterKey +} diff --git a/go/node/cert/v1/query.pb.go b/go/node/cert/v1/query.pb.go new file mode 100644 index 00000000..58340375 --- /dev/null +++ b/go/node/cert/v1/query.pb.go @@ -0,0 +1,953 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/cert/v1/query.proto + +package v1 + +import ( + context "context" + fmt "fmt" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
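
The msgs.go additions above implement sdk.Msg for both certificate messages, which lets a client run the same stateless checks the chain applies before a transaction is broadcast. A minimal, illustrative sketch of that flow (not part of the generated code), assuming PEM-encoded certificate and public-key bytes are already in hand:

package example

import (
	"fmt"

	certv1 "pkg.akt.dev/go/node/cert/v1"
)

// buildCreateCertificateMsg assembles MsgCreateCertificate and runs the same
// stateless validation (owner address, certificate/pubkey parsing) that
// ValidateBasic performs when the message is handled on-chain.
func buildCreateCertificateMsg(owner string, certPEM, pubkeyPEM []byte) (*certv1.MsgCreateCertificate, error) {
	msg := &certv1.MsgCreateCertificate{
		Owner:  owner,     // bech32 account address of the certificate owner
		Cert:   certPEM,   // PEM-encoded x509 certificate
		Pubkey: pubkeyPEM, // PEM-encoded EC public key
	}

	if err := msg.ValidateBasic(); err != nil {
		return nil, fmt.Errorf("create certificate message rejected: %w", err)
	}

	return msg, nil
}
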
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// CertificateResponse contains a single X509 certificate and its serial number +type CertificateResponse struct { + Certificate Certificate `protobuf:"bytes,1,opt,name=certificate,proto3" json:"certificate" yaml:"certificate"` + Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial" yaml:"serial"` +} + +func (m *CertificateResponse) Reset() { *m = CertificateResponse{} } +func (m *CertificateResponse) String() string { return proto.CompactTextString(m) } +func (*CertificateResponse) ProtoMessage() {} +func (*CertificateResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2e5d14d61992fa41, []int{0} +} +func (m *CertificateResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CertificateResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CertificateResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateResponse.Merge(m, src) +} +func (m *CertificateResponse) XXX_Size() int { + return m.Size() +} +func (m *CertificateResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateResponse proto.InternalMessageInfo + +func (m *CertificateResponse) GetCertificate() Certificate { + if m != nil { + return m.Certificate + } + return Certificate{} +} + +func (m *CertificateResponse) GetSerial() string { + if m != nil { + return m.Serial + } + return "" +} + +// QueryDeploymentsRequest is request type for the Query/Deployments RPC method +type QueryCertificatesRequest struct { + Filter CertificateFilter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter"` + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryCertificatesRequest) Reset() { *m = QueryCertificatesRequest{} } +func (m *QueryCertificatesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryCertificatesRequest) ProtoMessage() {} +func (*QueryCertificatesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2e5d14d61992fa41, []int{1} +} +func (m *QueryCertificatesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryCertificatesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryCertificatesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryCertificatesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryCertificatesRequest.Merge(m, src) +} +func (m *QueryCertificatesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryCertificatesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryCertificatesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryCertificatesRequest proto.InternalMessageInfo + +func (m 
*QueryCertificatesRequest) GetFilter() CertificateFilter { + if m != nil { + return m.Filter + } + return CertificateFilter{} +} + +func (m *QueryCertificatesRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryCertificatesResponse is response type for the Query/Certificates RPC method +type QueryCertificatesResponse struct { + Certificates CertificatesResponse `protobuf:"bytes,1,rep,name=certificates,proto3,castrepeated=CertificatesResponse" json:"certificates"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryCertificatesResponse) Reset() { *m = QueryCertificatesResponse{} } +func (m *QueryCertificatesResponse) String() string { return proto.CompactTextString(m) } +func (*QueryCertificatesResponse) ProtoMessage() {} +func (*QueryCertificatesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_2e5d14d61992fa41, []int{2} +} +func (m *QueryCertificatesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryCertificatesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryCertificatesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryCertificatesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryCertificatesResponse.Merge(m, src) +} +func (m *QueryCertificatesResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryCertificatesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryCertificatesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryCertificatesResponse proto.InternalMessageInfo + +func (m *QueryCertificatesResponse) GetCertificates() CertificatesResponse { + if m != nil { + return m.Certificates + } + return nil +} + +func (m *QueryCertificatesResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +func init() { + proto.RegisterType((*CertificateResponse)(nil), "akash.cert.v1.CertificateResponse") + proto.RegisterType((*QueryCertificatesRequest)(nil), "akash.cert.v1.QueryCertificatesRequest") + proto.RegisterType((*QueryCertificatesResponse)(nil), "akash.cert.v1.QueryCertificatesResponse") +} + +func init() { proto.RegisterFile("akash/cert/v1/query.proto", fileDescriptor_2e5d14d61992fa41) } + +var fileDescriptor_2e5d14d61992fa41 = []byte{ + // 476 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x52, 0x31, 0x6f, 0xd3, 0x40, + 0x14, 0xce, 0x15, 0x88, 0xc4, 0xa5, 0x5d, 0x8e, 0x0e, 0x6e, 0x52, 0xd9, 0xd6, 0x0d, 0xd4, 0x30, + 0xdc, 0x29, 0xa9, 0x58, 0x18, 0x18, 0x8c, 0x54, 0x56, 0xf0, 0xc8, 0x76, 0x49, 0xae, 0x87, 0x15, + 0xd7, 0xe7, 0xfa, 0xae, 0x91, 0xba, 0xb2, 0xb2, 0x20, 0x21, 0xfe, 0x00, 0x23, 0x2b, 0x7f, 0xa2, + 0x62, 0xaa, 0xc4, 0xc2, 0x64, 0x50, 0xc2, 0xd4, 0xb1, 0xbf, 0x00, 0xd9, 0x77, 0x51, 0xcf, 0x90, + 0x28, 0x9b, 0xfd, 0xbe, 0xef, 0x7b, 0xef, 0x7b, 0xef, 0x3b, 0x78, 0xc0, 0x66, 0x4c, 0xbd, 0xa3, + 0x13, 0x5e, 0x6a, 0x3a, 0x1f, 0xd2, 0xf3, 0x0b, 0x5e, 0x5e, 0x92, 0xa2, 0x94, 0x5a, 0xa2, 0xbd, + 0x06, 0x22, 0x35, 0x44, 0xe6, 0xc3, 0xfe, 0xbe, 0x90, 0x42, 0x36, 0x08, 0xad, 0xbf, 0x0c, 0xa9, + 0x7f, 0x28, 0xa4, 0x14, 0x19, 0xa7, 0xac, 0x48, 0x29, 0xcb, 0x73, 0xa9, 0x99, 0x4e, 0x65, 0xae, + 0x2c, 0xfa, 0x74, 0x22, 0xd5, 0x99, 0x54, 0x74, 0xcc, 0x14, 0x37, 0xbd, 
0xe9, 0x7c, 0x38, 0xe6, + 0x9a, 0x0d, 0x69, 0xc1, 0x44, 0x9a, 0x37, 0x64, 0xcb, 0xf5, 0xda, 0x4e, 0x9a, 0xb1, 0x06, 0x19, + 0xb4, 0x91, 0xd3, 0x34, 0xd3, 0xbc, 0xb4, 0x23, 0xf0, 0x37, 0x00, 0x1f, 0xbd, 0xe4, 0xa5, 0x4e, + 0x4f, 0xd3, 0x09, 0xd3, 0x3c, 0xe1, 0xaa, 0x90, 0xb9, 0xe2, 0x48, 0xc0, 0xde, 0xe4, 0xae, 0xec, + 0x81, 0x10, 0x44, 0xbd, 0x51, 0x9f, 0xb4, 0x76, 0x22, 0x8e, 0x30, 0x7e, 0x72, 0x55, 0x05, 0x9d, + 0x9b, 0x2a, 0x70, 0x65, 0xb7, 0x55, 0x80, 0x2e, 0xd9, 0x59, 0xf6, 0x1c, 0x3b, 0x45, 0x9c, 0xb8, + 0x14, 0x74, 0x0c, 0xbb, 0x8a, 0x97, 0x29, 0xcb, 0xbc, 0x9d, 0x10, 0x44, 0x0f, 0xe3, 0xc1, 0x4d, + 0x15, 0xd8, 0xca, 0x6d, 0x15, 0xec, 0x19, 0xb9, 0xf9, 0xc7, 0x89, 0x05, 0xf0, 0x17, 0x00, 0xbd, + 0x37, 0xf5, 0x3d, 0x1c, 0x07, 0x2a, 0xe1, 0xe7, 0x17, 0x5c, 0x69, 0xf4, 0x02, 0x76, 0xcd, 0x8e, + 0xd6, 0x75, 0xb8, 0xd9, 0xf5, 0x49, 0xc3, 0x8b, 0xef, 0xd7, 0xde, 0x13, 0xab, 0x42, 0x27, 0x10, + 0xde, 0x5d, 0xb7, 0x71, 0xd5, 0x1b, 0x3d, 0x26, 0x26, 0x0a, 0x52, 0x47, 0x41, 0x4c, 0xcc, 0x36, + 0x0a, 0xf2, 0x9a, 0x09, 0x6e, 0x67, 0x27, 0x8e, 0x12, 0x7f, 0x07, 0xf0, 0x60, 0x8d, 0x49, 0x7b, + 0xe0, 0x29, 0xdc, 0x75, 0xce, 0xa0, 0x3c, 0x10, 0xde, 0x8b, 0x7a, 0x23, 0xbc, 0xd9, 0xeb, 0x4a, + 0x19, 0x1f, 0xd6, 0x6e, 0xbf, 0xfe, 0x0a, 0xf6, 0xd7, 0xf5, 0x4d, 0x5a, 0x5d, 0xd1, 0xab, 0x35, + 0xbb, 0x1c, 0x6d, 0xdd, 0xc5, 0xb6, 0x72, 0xa4, 0xa3, 0xcf, 0x00, 0x3e, 0x68, 0x96, 0x41, 0x1f, + 0x00, 0xdc, 0x75, 0x27, 0xa3, 0xa3, 0x7f, 0x3c, 0x6f, 0x0a, 0xa6, 0x1f, 0x6d, 0x27, 0x9a, 0xc9, + 0x38, 0x7a, 0xff, 0xe3, 0xcf, 0xa7, 0x1d, 0x8c, 0x42, 0xfa, 0xff, 0xab, 0x5e, 0x91, 0x69, 0x96, + 0x2a, 0x1d, 0x3f, 0xbb, 0x5a, 0xf8, 0xe0, 0x7a, 0xe1, 0x83, 0xdf, 0x0b, 0x1f, 0x7c, 0x5c, 0xfa, + 0x9d, 0xeb, 0xa5, 0xdf, 0xf9, 0xb9, 0xf4, 0x3b, 0x6f, 0x07, 0xc5, 0x4c, 0x10, 0x36, 0xd3, 0x64, + 0xca, 0xe7, 0x54, 0x48, 0x9a, 0xcb, 0x29, 0x5f, 0x35, 0x1a, 0x77, 0x9b, 0xd7, 0x7f, 0xfc, 0x37, + 0x00, 0x00, 0xff, 0xff, 0xc5, 0xd6, 0xea, 0x4d, 0xc0, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Certificates queries certificates + Certificates(ctx context.Context, in *QueryCertificatesRequest, opts ...grpc.CallOption) (*QueryCertificatesResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Certificates(ctx context.Context, in *QueryCertificatesRequest, opts ...grpc.CallOption) (*QueryCertificatesResponse, error) { + out := new(QueryCertificatesResponse) + err := c.cc.Invoke(ctx, "/akash.cert.v1.Query/Certificates", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Certificates queries certificates + Certificates(context.Context, *QueryCertificatesRequest) (*QueryCertificatesResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
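
Against a running node, the generated QueryClient above is the simplest way to exercise the Certificates RPC. A rough sketch (illustrative only), with the gRPC endpoint and owner address passed in as placeholders:

package example

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	certv1 "pkg.akt.dev/go/node/cert/v1"
)

// listCertificates prints the serial numbers of all certificates owned by the
// given bech32 address.
func listCertificates(ctx context.Context, grpcEndpoint, owner string) error {
	conn, err := grpc.Dial(grpcEndpoint, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return err
	}
	defer conn.Close()

	res, err := certv1.NewQueryClient(conn).Certificates(ctx, &certv1.QueryCertificatesRequest{
		Filter: certv1.CertificateFilter{Owner: owner},
	})
	if err != nil {
		return err
	}

	for _, c := range res.Certificates {
		fmt.Println("serial:", c.Serial)
	}

	return nil
}
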
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Certificates(ctx context.Context, req *QueryCertificatesRequest) (*QueryCertificatesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Certificates not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Certificates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryCertificatesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Certificates(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.cert.v1.Query/Certificates", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Certificates(ctx, req.(*QueryCertificatesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.cert.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Certificates", + Handler: _Query_Certificates_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/cert/v1/query.proto", +} + +func (m *CertificateResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Serial) > 0 { + i -= len(m.Serial) + copy(dAtA[i:], m.Serial) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Serial))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Certificate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryCertificatesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryCertificatesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryCertificatesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryCertificatesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryCertificatesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*QueryCertificatesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Certificates) > 0 { + for iNdEx := len(m.Certificates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Certificates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CertificateResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Certificate.Size() + n += 1 + l + sovQuery(uint64(l)) + l = len(m.Serial) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryCertificatesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Filter.Size() + n += 1 + l + sovQuery(uint64(l)) + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryCertificatesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Certificates) > 0 { + for _, e := range m.Certificates { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CertificateResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Serial", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Serial = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryCertificatesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryCertificatesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryCertificatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryCertificatesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire 
|= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryCertificatesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryCertificatesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Certificates = append(m.Certificates, CertificateResponse{}) + if err := m.Certificates[len(m.Certificates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, 
io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/cert/v1/query.pb.gw.go b/go/node/cert/v1/query.pb.gw.go new file mode 100644 index 00000000..e546ef43 --- /dev/null +++ b/go/node/cert/v1/query.pb.gw.go @@ -0,0 +1,171 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: akash/cert/v1/query.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +var ( + filter_Query_Certificates_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Certificates_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryCertificatesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Certificates_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Certificates(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Certificates_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryCertificatesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Certificates_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Certificates(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
+func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Certificates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Certificates_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Certificates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Certificates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Certificates_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Certificates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Query_Certificates_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "cert", "v1", "certificates", "list"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_Certificates_0 = runtime.ForwardResponseMessage +) diff --git a/go/node/cert/v1/service.pb.go b/go/node/cert/v1/service.pb.go new file mode 100644 index 00000000..2a708270 --- /dev/null +++ b/go/node/cert/v1/service.pb.go @@ -0,0 +1,167 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/cert/v1/service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("akash/cert/v1/service.proto", fileDescriptor_b880d9d2b395389a) } + +var fileDescriptor_b880d9d2b395389a = []byte{ + // 223 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x4e, 0x2d, 0x2a, 0xd1, 0x2f, 0x33, 0xd4, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, + 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x05, 0x4b, 0xea, 0x81, 0x24, 0xf5, 0xca, + 0x0c, 0xa5, 0xc4, 0x93, 0xf3, 0x8b, 0x73, 0xf3, 0x8b, 0xf5, 0x73, 0x8b, 0xd3, 0x41, 0x6a, 0x73, + 0x8b, 0xd3, 0x21, 0xea, 0xa4, 0xc4, 0x51, 0x0d, 0x81, 0x4b, 0x18, 0xdd, 0x62, 0xe4, 0x62, 0xf6, + 0x2d, 0x4e, 0x17, 0x4a, 0xe5, 0x12, 0x74, 0x2e, 0x4a, 0x4d, 0x2c, 0x49, 0x75, 0x4e, 0x2d, 0x2a, + 0xc9, 0x4c, 0xcb, 0x4c, 0x4e, 0x2c, 0x49, 0x15, 0x52, 0xd6, 0x43, 0x31, 0x5e, 0xcf, 0xb7, 0x38, + 0x1d, 0x43, 0x91, 0x94, 0x36, 0x11, 0x8a, 0x82, 0x52, 0x8b, 0x0b, 0xf2, 0xf3, 0x8a, 0x53, 0x41, + 0xd6, 0x04, 0xa5, 0x96, 0xe5, 0x67, 0x13, 0xb2, 0x06, 0x43, 0x11, 0x36, 0x6b, 0x30, 0x14, 0xc1, + 0xac, 0x91, 0x62, 0x6d, 0x78, 0xbe, 0x41, 0x8b, 0xd1, 0xc9, 0xf4, 0xc4, 0x23, 0x39, 0xc6, 0x0b, + 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, + 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0xa4, 0x0b, 0xb2, 0xd3, 0xf5, 0x12, 0xb3, 0x4b, 0xf4, 0x52, 0x52, + 0xcb, 0xf4, 0xd3, 0xf3, 0xf5, 0xf3, 0xf2, 0x53, 0x52, 0x61, 0xa1, 0x93, 0xc4, 0x06, 0x0e, 0x1a, + 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf9, 0xaf, 0x08, 0xf0, 0x7a, 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
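
The gateway code in query.pb.gw.go above exposes the same RPC over REST at GET /akash/cert/v1/certificates/list. A rough sketch (illustrative only) of mounting that gateway in front of a node's gRPC endpoint, with both addresses as placeholders:

package example

import (
	"context"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"

	certv1 "pkg.akt.dev/go/node/cert/v1"
)

// serveCertGateway proxies REST requests to the cert query service over gRPC.
func serveCertGateway(ctx context.Context, grpcEndpoint, listenAddr string) error {
	mux := runtime.NewServeMux()

	// Plaintext dial option, suitable for local testing only.
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := certv1.RegisterQueryHandlerFromEndpoint(ctx, mux, grpcEndpoint, opts); err != nil {
		return err
	}

	// GET http://<listenAddr>/akash/cert/v1/certificates/list now forwards to gRPC.
	return http.ListenAndServe(listenAddr, mux)
}
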
+type MsgClient interface { + // CreateCertificate defines a method to create new certificate given proper inputs. + CreateCertificate(ctx context.Context, in *MsgCreateCertificate, opts ...grpc.CallOption) (*MsgCreateCertificateResponse, error) + // RevokeCertificate defines a method to revoke the certificate + RevokeCertificate(ctx context.Context, in *MsgRevokeCertificate, opts ...grpc.CallOption) (*MsgRevokeCertificateResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) CreateCertificate(ctx context.Context, in *MsgCreateCertificate, opts ...grpc.CallOption) (*MsgCreateCertificateResponse, error) { + out := new(MsgCreateCertificateResponse) + err := c.cc.Invoke(ctx, "/akash.cert.v1.Msg/CreateCertificate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) RevokeCertificate(ctx context.Context, in *MsgRevokeCertificate, opts ...grpc.CallOption) (*MsgRevokeCertificateResponse, error) { + out := new(MsgRevokeCertificateResponse) + err := c.cc.Invoke(ctx, "/akash.cert.v1.Msg/RevokeCertificate", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // CreateCertificate defines a method to create new certificate given proper inputs. + CreateCertificate(context.Context, *MsgCreateCertificate) (*MsgCreateCertificateResponse, error) + // RevokeCertificate defines a method to revoke the certificate + RevokeCertificate(context.Context, *MsgRevokeCertificate) (*MsgRevokeCertificateResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
+type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) CreateCertificate(ctx context.Context, req *MsgCreateCertificate) (*MsgCreateCertificateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateCertificate not implemented") +} +func (*UnimplementedMsgServer) RevokeCertificate(ctx context.Context, req *MsgRevokeCertificate) (*MsgRevokeCertificateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RevokeCertificate not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_CreateCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCreateCertificate) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CreateCertificate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.cert.v1.Msg/CreateCertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CreateCertificate(ctx, req.(*MsgCreateCertificate)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgRevokeCertificate) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).RevokeCertificate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.cert.v1.Msg/RevokeCertificate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).RevokeCertificate(ctx, req.(*MsgRevokeCertificate)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.cert.v1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateCertificate", + Handler: _Msg_CreateCertificate_Handler, + }, + { + MethodName: "RevokeCertificate", + Handler: _Msg_RevokeCertificate_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/cert/v1/service.proto", +} diff --git a/go/node/cert/v1/types.go b/go/node/cert/v1/types.go new file mode 100644 index 00000000..878976c0 --- /dev/null +++ b/go/node/cert/v1/types.go @@ -0,0 +1,70 @@ +package v1 + +import ( + "bytes" + "math/big" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +const ( + PemBlkTypeCertificate = "CERTIFICATE" + PemBlkTypeECPrivateKey = "EC PRIVATE KEY" + PemBlkTypeECPublicKey = "EC PUBLIC KEY" +) + +type CertID struct { + Owner sdk.Address + Serial big.Int +} + +func ToCertID(id ID) (CertID, error) { + addr, err := sdk.AccAddressFromBech32(id.Owner) + if err != nil { + return CertID{}, err + } + + serial, valid := new(big.Int).SetString(id.Serial, 10) + if !valid { + return CertID{}, ErrInvalidSerialNumber + } + + return CertID{ + Owner: addr, + Serial: *serial, + }, nil +} + +// Certificates is the collection of Certificate +type Certificates []Certificate + +type CertificatesResponse []CertificateResponse + +// String implements the Stringer interface for a Certificates object. 
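
Revocation through the Msg service registered above follows the same pattern: ValidateBasic checks that the owner is a valid bech32 address and that the serial parses as a base-10 integer. A rough client-side sketch (illustrative only):

package example

import (
	"fmt"

	certv1 "pkg.akt.dev/go/node/cert/v1"
)

// buildRevokeCertificateMsg assembles MsgRevokeCertificate for a given owner
// and certificate serial number (serial is a base-10 string).
func buildRevokeCertificateMsg(owner, serial string) (*certv1.MsgRevokeCertificate, error) {
	var msg certv1.MsgRevokeCertificate
	msg.ID.Owner = owner
	msg.ID.Serial = serial

	// Rejects malformed owner addresses and non-numeric serials before broadcast.
	if err := msg.ValidateBasic(); err != nil {
		return nil, fmt.Errorf("revoke certificate message rejected: %w", err)
	}

	return &msg, nil
}
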
+func (obj Certificates) String() string { + var buf bytes.Buffer + + const sep = "\n\n" + + for _, p := range obj { + buf.WriteString(p.String()) + buf.WriteString(sep) + } + + if len(obj) > 0 { + buf.Truncate(buf.Len() - len(sep)) + } + + return buf.String() +} + +func (obj Certificates) Contains(cert Certificate) bool { + for _, c := range obj { + // fixme is bytes.Equal right way to do it? + if bytes.Equal(c.Cert, cert.Cert) { + return true + } + } + + return false +} diff --git a/go/node/cert/v1beta3/utils/constants.go b/go/node/cert/v1/utils/constants.go similarity index 100% rename from go/node/cert/v1beta3/utils/constants.go rename to go/node/cert/v1/utils/constants.go diff --git a/go/node/cert/v1/utils/key_pair_manager.go b/go/node/cert/v1/utils/key_pair_manager.go new file mode 100644 index 00000000..738b6c2e --- /dev/null +++ b/go/node/cert/v1/utils/key_pair_manager.go @@ -0,0 +1,310 @@ +package utils + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "net" + "os" + "time" + + "go.step.sm/crypto/pemutil" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + + types "pkg.akt.dev/go/node/cert/v1" +) + +var ( + errCertificateNotFoundInPEM = fmt.Errorf("%w: certificate not found in PEM", types.ErrCertificate) + errPrivateKeyNotFoundInPEM = fmt.Errorf("%w: private key not found in PEM", types.ErrCertificate) + errPublicKeyNotFoundInPEM = fmt.Errorf("%w: public key not found in PEM", types.ErrCertificate) + errUnsupportedEncryptedPEM = errors.New("unsupported encrypted PEM") +) + +type KeyPairManager interface { + KeyExists() (bool, error) + Generate(notBefore, notAfter time.Time, domains []string) error + + // Read the PEM blocks, containing the cert, private key, & public key + Read(fin ...io.Reader) ([]byte, []byte, []byte, error) + + ReadX509KeyPair(fin ...io.Reader) (*x509.Certificate, tls.Certificate, error) +} + +type keyPairManager struct { + addr sdk.AccAddress + passwordBytes []byte + passwordLegacy []byte + homeDir string +} + +func NewKeyPairManager(cctx sdkclient.Context, fromAddress sdk.AccAddress) (KeyPairManager, error) { + sig, _, err := cctx.Keyring.SignByAddress(fromAddress, []byte(fromAddress.String())) + if err != nil { + return nil, err + } + + // ignore error if ledger device is being used + // due to its jsonparser not liking bech address sent as data in binary format + // if test or file keyring used it will allow to decode old private keys for the mTLS cert + sigLegacy, _, _ := cctx.Keyring.SignByAddress(fromAddress, fromAddress.Bytes()) + + return &keyPairManager{ + addr: fromAddress, + passwordBytes: sig, + passwordLegacy: sigLegacy, + homeDir: cctx.HomeDir, + }, nil +} + +func (kpm *keyPairManager) getKeyPath() string { + return kpm.homeDir + "/" + kpm.addr.String() + ".pem" +} + +func (kpm *keyPairManager) ReadX509KeyPair(fin ...io.Reader) (*x509.Certificate, tls.Certificate, error) { + certData, privKeyData, _, err := kpm.Read(fin...) 
+ if err != nil { + return nil, tls.Certificate{}, err + } + + x509cert, err := x509.ParseCertificate(certData) + if err != nil { + return nil, tls.Certificate{}, fmt.Errorf("could not parse x509 cert: %w", err) + } + + result := tls.Certificate{ + Certificate: [][]byte{certData}, + } + + result.PrivateKey, err = x509.ParsePKCS8PrivateKey(privKeyData) + if err != nil { + return nil, tls.Certificate{}, fmt.Errorf("%w: failed parsing private key data", err) + } + + return x509cert, result, err +} + +func (kpm *keyPairManager) KeyExists() (bool, error) { + _, err := os.Stat(kpm.getKeyPath()) + if err == nil { + return true, nil + } + + if os.IsNotExist(err) { + return false, nil + } + + return false, err +} + +func (kpm *keyPairManager) Generate(notBefore, notAfter time.Time, domains []string) error { + var err error + var pemOut *os.File + if pemOut, err = os.OpenFile(kpm.getKeyPath(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600); err != nil { + return err + } + + err = kpm.generateImpl(notBefore, notAfter, domains, pemOut) + + closeErr := pemOut.Close() + if closeErr != nil { + return closeErr + } + + return err +} + +func (kpm *keyPairManager) generateImpl(notBefore, notAfter time.Time, domains []string, fout io.Writer) error { + var err error + // Generate the private key + var priv *ecdsa.PrivateKey + if priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader); err != nil { + return fmt.Errorf("could not generate key: %w", err) + } + + serialNumber := new(big.Int).SetInt64(time.Now().UTC().UnixNano()) + + extKeyUsage := []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + } + + if len(domains) != 0 { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: kpm.addr.String(), + ExtraNames: []pkix.AttributeTypeAndValue{ + { + Type: AuthVersionOID, + Value: "v0.0.1", + }, + }, + }, + Issuer: pkix.Name{ + CommonName: kpm.addr.String(), + }, + NotBefore: notBefore, + NotAfter: notAfter, + KeyUsage: x509.KeyUsageDataEncipherment | x509.KeyUsageKeyEncipherment, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: true, + } + + var ips []net.IP + + for i := len(domains) - 1; i >= 0; i-- { + if ip := net.ParseIP(domains[i]); ip != nil { + ips = append(ips, ip) + domains = append(domains[:i], domains[i+1:]...) 
+ } + } + + if len(domains) != 0 || len(ips) != 0 { + template.PermittedDNSDomainsCritical = true + template.PermittedDNSDomains = domains + template.DNSNames = domains + template.IPAddresses = ips + } + + var certDer []byte + if certDer, err = x509.CreateCertificate(rand.Reader, &template, &template, priv.Public(), priv); err != nil { + return fmt.Errorf("could not create certificate: %w", err) + } + + var keyDer []byte + if keyDer, err = x509.MarshalPKCS8PrivateKey(priv); err != nil { + return fmt.Errorf("could not create private key: %w", err) + } + + var blk *pem.Block + blk, err = pemutil.EncryptPKCS8PrivateKey(rand.Reader, keyDer, kpm.passwordBytes, x509.PEMCipherAES256) + if err != nil { + return fmt.Errorf("could not encrypt private key as PEM: %w", err) + } + + // Write the certificate + if err = pem.Encode(fout, &pem.Block{Type: types.PemBlkTypeCertificate, Bytes: certDer}); err != nil { + return fmt.Errorf("could not encode certificate as PEM: %w", err) + } + + // Write the encrypted private key + if err = pem.Encode(fout, blk); err != nil { + return fmt.Errorf("could not encode private key as PEM: %w", err) + } + + return nil +} + +func (kpm *keyPairManager) Read(fin ...io.Reader) ([]byte, []byte, []byte, error) { + var pemIn io.Reader + var closeMe io.ReadCloser + + if len(fin) != 0 { + if len(fin) != 1 { + return nil, nil, nil, fmt.Errorf("%w: Read() takes exactly 1 or 0 arguments, not %d", types.ErrCertificate, len(fin)) + } + pemIn = fin[0] + } + + if pemIn == nil { + fopen, err := os.OpenFile(kpm.getKeyPath(), os.O_RDONLY, 0x0) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not open certificate PEM file: %w", err) + } + closeMe = fopen + pemIn = fopen + } + + cert, privKey, pubKey, err := kpm.readImpl(pemIn) + + if closeMe != nil { + closeErr := closeMe.Close() + if closeErr != nil { + return nil, nil, nil, fmt.Errorf("could not close PEM file: %w", closeErr) + } + } + + return cert, privKey, pubKey, err +} + +func (kpm *keyPairManager) readImpl(fin io.Reader) ([]byte, []byte, []byte, error) { + buf := &bytes.Buffer{} + _, err := io.Copy(buf, fin) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed reading certificate PEM file: %w", err) + } + data := buf.Bytes() + + // Read certificate + block, remaining := pem.Decode(data) + if block == nil { + return nil, nil, nil, errCertificateNotFoundInPEM + } + cert := block.Bytes + + // Read private key + block, _ = pem.Decode(remaining) + if block == nil { + return nil, nil, nil, errPrivateKeyNotFoundInPEM + } + + var privKeyPlaintext []byte + var privKeyI interface{} + + // PKCS#8 header defined in RFC7468 section 11 + // nolint: gocritic + if block.Type == "ENCRYPTED PRIVATE KEY" { + privKeyPlaintext, err = pemutil.DecryptPKCS8PrivateKey(block.Bytes, kpm.passwordBytes) + } else if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + // nolint:staticcheck + privKeyPlaintext, _ = x509.DecryptPEMBlock(block, kpm.passwordBytes) + + // DecryptPEMBlock may not return IncorrectPasswordError. 
+ // Try parse private key instead and if it fails give another try with legacy password + privKeyI, err = x509.ParsePKCS8PrivateKey(privKeyPlaintext) + if err != nil { + // nolint:staticcheck + privKeyPlaintext, err = x509.DecryptPEMBlock(block, kpm.passwordLegacy) + } + } else { + return nil, nil, nil, errUnsupportedEncryptedPEM + } + if err != nil { + return nil, nil, nil, fmt.Errorf("%w: failed decrypting x509 block with private key", err) + } + + if privKeyI == nil { + if privKeyI, err = x509.ParsePKCS8PrivateKey(privKeyPlaintext); err != nil { + return nil, nil, nil, fmt.Errorf("%w: failed parsing private key data", err) + } + } + + eckey, valid := privKeyI.(*ecdsa.PrivateKey) + if !valid { + return nil, nil, nil, fmt.Errorf("%w: unexpected private key type, expected %T but got %T", + errPublicKeyNotFoundInPEM, + &ecdsa.PrivateKey{}, + privKeyI) + } + + var pubKey []byte + if pubKey, err = x509.MarshalPKIXPublicKey(eckey.Public()); err != nil { + return nil, nil, nil, fmt.Errorf("%w: failed extracting public key", err) + } + + return cert, privKeyPlaintext, pubKey, nil +} diff --git a/go/node/cert/v1/utils/utils.go b/go/node/cert/v1/utils/utils.go new file mode 100644 index 00000000..796e8780 --- /dev/null +++ b/go/node/cert/v1/utils/utils.go @@ -0,0 +1,58 @@ +package utils + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "time" + + "github.com/cosmos/cosmos-sdk/client" + + ctypes "pkg.akt.dev/go/node/cert/v1" +) + +// LoadAndQueryCertificateForAccount wraps LoadAndQueryPEMForAccount and tls.X509KeyPair +func LoadAndQueryCertificateForAccount(ctx context.Context, cctx client.Context, fin io.Reader) (tls.Certificate, error) { + kpm, err := NewKeyPairManager(cctx, cctx.FromAddress) + if err != nil { + return tls.Certificate{}, err + } + + x509cert, tlsCert, err := kpm.ReadX509KeyPair(fin) + if err != nil { + return tls.Certificate{}, err + } + + // Check if valid according to time + if x509cert.NotBefore.After(time.Now().UTC()) { + return tls.Certificate{}, fmt.Errorf("%w: certificate is not yet active, start ts %s", ctypes.ErrCertificate, x509cert.NotBefore) + } + + if time.Now().UTC().After(x509cert.NotAfter) { + return tls.Certificate{}, fmt.Errorf("%w: certificate has been expired since %s", ctypes.ErrCertificate, x509cert.NotAfter) + } + + params := &ctypes.QueryCertificatesRequest{ + Filter: ctypes.CertificateFilter{ + Owner: x509cert.Subject.CommonName, + Serial: x509cert.SerialNumber.String(), + }, + } + + certs, err := ctypes.NewQueryClient(cctx).Certificates(ctx, params) + if err != nil { + return tls.Certificate{}, err + } + + if len(certs.Certificates) == 0 { + return tls.Certificate{}, fmt.Errorf("%w: certificate has not been committed to blockchain", ctypes.ErrCertificate) + } + + foundCert := certs.Certificates[0] + if foundCert.GetCertificate().State != ctypes.CertificateValid { + return tls.Certificate{}, fmt.Errorf("%w: certificate is not valid", ctypes.ErrCertificate) + } + + return tlsCert, nil +} diff --git a/go/node/cert/v1beta1/cert.go b/go/node/cert/v1beta1/cert.go deleted file mode 100644 index 8f088267..00000000 --- a/go/node/cert/v1beta1/cert.go +++ /dev/null @@ -1,71 +0,0 @@ -package v1beta1 - -import ( - "crypto/x509" - "encoding/pem" - "fmt" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" -) - -func ParseAndValidateCertificate(owner sdk.Address, crt, pub []byte) (*x509.Certificate, error) { - blk, rest := pem.Decode(pub) - if blk == nil || len(rest) > 0 { - return nil, ErrInvalidPubkeyValue - } - - if blk.Type != 
PemBlkTypeECPublicKey { - return nil, errors.Wrap(ErrInvalidPubkeyValue, "invalid pem block type") - } - - blk, rest = pem.Decode(crt) - if blk == nil || len(rest) > 0 { - return nil, ErrInvalidCertificateValue - } - - if blk.Type != PemBlkTypeCertificate { - return nil, errors.Wrap(ErrInvalidCertificateValue, "invalid pem block type") - } - - cert, err := x509.ParseCertificate(blk.Bytes) - if err != nil { - return nil, err - } - - cowner, err := sdk.AccAddressFromBech32(cert.Subject.CommonName) - if err != nil { - return nil, errors.Wrap(ErrInvalidCertificateValue, err.Error()) - } - - if !owner.Equals(cowner) { - return nil, errors.Wrap(ErrInvalidCertificateValue, "CommonName does not match owner") - } - - return cert, nil -} - -func (m *CertificateID) String() string { - return fmt.Sprintf("%s/%s", m.Owner, m.Serial) -} - -func (m *CertificateID) Equals(val CertificateID) bool { - return (m.Owner == val.Owner) && (m.Serial == val.Serial) -} - -func (m Certificate) Validate(owner sdk.Address) error { - if val, exists := Certificate_State_name[int32(m.State)]; !exists || val == "invalid" { - return ErrInvalidState - } - - _, err := ParseAndValidateCertificate(owner, m.Cert, m.Pubkey) - if err != nil { - return err - } - - return nil -} - -func (m Certificate) IsState(state Certificate_State) bool { - return m.State == state -} diff --git a/go/node/cert/v1beta1/cert.pb.go b/go/node/cert/v1beta1/cert.pb.go deleted file mode 100644 index edb7680c..00000000 --- a/go/node/cert/v1beta1/cert.pb.go +++ /dev/null @@ -1,1775 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/cert/v1beta1/cert.proto - -package v1beta1 - -import ( - context "context" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of deployment -type Certificate_State int32 - -const ( - // Prefix should start with 0 in enum. 
So declaring dummy state - CertificateStateInvalid Certificate_State = 0 - // CertificateValid denotes state for deployment active - CertificateValid Certificate_State = 1 - // CertificateRevoked denotes state for deployment closed - CertificateRevoked Certificate_State = 2 -) - -var Certificate_State_name = map[int32]string{ - 0: "invalid", - 1: "valid", - 2: "revoked", -} - -var Certificate_State_value = map[string]int32{ - "invalid": 0, - "valid": 1, - "revoked": 2, -} - -func (x Certificate_State) String() string { - return proto.EnumName(Certificate_State_name, int32(x)) -} - -func (Certificate_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_6bba68168592156e, []int{1, 0} -} - -// CertificateID stores owner and sequence number -type CertificateID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial" yaml:"serial"` -} - -func (m *CertificateID) Reset() { *m = CertificateID{} } -func (*CertificateID) ProtoMessage() {} -func (*CertificateID) Descriptor() ([]byte, []int) { - return fileDescriptor_6bba68168592156e, []int{0} -} -func (m *CertificateID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CertificateID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CertificateID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CertificateID) XXX_Merge(src proto.Message) { - xxx_messageInfo_CertificateID.Merge(m, src) -} -func (m *CertificateID) XXX_Size() int { - return m.Size() -} -func (m *CertificateID) XXX_DiscardUnknown() { - xxx_messageInfo_CertificateID.DiscardUnknown(m) -} - -var xxx_messageInfo_CertificateID proto.InternalMessageInfo - -func (m *CertificateID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *CertificateID) GetSerial() string { - if m != nil { - return m.Serial - } - return "" -} - -// Certificate stores state, certificate and it's public key -type Certificate struct { - State Certificate_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.cert.v1beta1.Certificate_State" json:"state" yaml:"state"` - Cert []byte `protobuf:"bytes,3,opt,name=cert,proto3" json:"cert" yaml:"cert"` - Pubkey []byte `protobuf:"bytes,4,opt,name=pubkey,proto3" json:"pubkey" yaml:"pubkey"` -} - -func (m *Certificate) Reset() { *m = Certificate{} } -func (m *Certificate) String() string { return proto.CompactTextString(m) } -func (*Certificate) ProtoMessage() {} -func (*Certificate) Descriptor() ([]byte, []int) { - return fileDescriptor_6bba68168592156e, []int{1} -} -func (m *Certificate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Certificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Certificate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Certificate) XXX_Merge(src proto.Message) { - xxx_messageInfo_Certificate.Merge(m, src) -} -func (m *Certificate) XXX_Size() int { - return m.Size() -} -func (m *Certificate) XXX_DiscardUnknown() { - xxx_messageInfo_Certificate.DiscardUnknown(m) -} - -var xxx_messageInfo_Certificate proto.InternalMessageInfo - -func (m *Certificate) GetState() Certificate_State { - if m != nil { - 
return m.State - } - return CertificateStateInvalid -} - -func (m *Certificate) GetCert() []byte { - if m != nil { - return m.Cert - } - return nil -} - -func (m *Certificate) GetPubkey() []byte { - if m != nil { - return m.Pubkey - } - return nil -} - -// CertificateFilter defines filters used to filter certificates -type CertificateFilter struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial" yaml:"serial"` - State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *CertificateFilter) Reset() { *m = CertificateFilter{} } -func (m *CertificateFilter) String() string { return proto.CompactTextString(m) } -func (*CertificateFilter) ProtoMessage() {} -func (*CertificateFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_6bba68168592156e, []int{2} -} -func (m *CertificateFilter) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CertificateFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CertificateFilter.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CertificateFilter) XXX_Merge(src proto.Message) { - xxx_messageInfo_CertificateFilter.Merge(m, src) -} -func (m *CertificateFilter) XXX_Size() int { - return m.Size() -} -func (m *CertificateFilter) XXX_DiscardUnknown() { - xxx_messageInfo_CertificateFilter.DiscardUnknown(m) -} - -var xxx_messageInfo_CertificateFilter proto.InternalMessageInfo - -func (m *CertificateFilter) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *CertificateFilter) GetSerial() string { - if m != nil { - return m.Serial - } - return "" -} - -func (m *CertificateFilter) GetState() string { - if m != nil { - return m.State - } - return "" -} - -// MsgCreateCertificate defines an SDK message for creating certificate -type MsgCreateCertificate struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Cert []byte `protobuf:"bytes,2,opt,name=cert,proto3" json:"cert" yaml:"cert"` - Pubkey []byte `protobuf:"bytes,3,opt,name=pubkey,proto3" json:"pubkey" yaml:"pubkey"` -} - -func (m *MsgCreateCertificate) Reset() { *m = MsgCreateCertificate{} } -func (m *MsgCreateCertificate) String() string { return proto.CompactTextString(m) } -func (*MsgCreateCertificate) ProtoMessage() {} -func (*MsgCreateCertificate) Descriptor() ([]byte, []int) { - return fileDescriptor_6bba68168592156e, []int{3} -} -func (m *MsgCreateCertificate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateCertificate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateCertificate) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateCertificate.Merge(m, src) -} -func (m *MsgCreateCertificate) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateCertificate) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateCertificate.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateCertificate proto.InternalMessageInfo - -func (m *MsgCreateCertificate) GetOwner() string { - if m != nil { - return m.Owner - } 
- return "" -} - -func (m *MsgCreateCertificate) GetCert() []byte { - if m != nil { - return m.Cert - } - return nil -} - -func (m *MsgCreateCertificate) GetPubkey() []byte { - if m != nil { - return m.Pubkey - } - return nil -} - -// MsgCreateCertificateResponse defines the Msg/CreateCertificate response type. -type MsgCreateCertificateResponse struct { -} - -func (m *MsgCreateCertificateResponse) Reset() { *m = MsgCreateCertificateResponse{} } -func (m *MsgCreateCertificateResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateCertificateResponse) ProtoMessage() {} -func (*MsgCreateCertificateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_6bba68168592156e, []int{4} -} -func (m *MsgCreateCertificateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateCertificateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateCertificateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateCertificateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateCertificateResponse.Merge(m, src) -} -func (m *MsgCreateCertificateResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateCertificateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateCertificateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateCertificateResponse proto.InternalMessageInfo - -// MsgRevokeCertificate defines an SDK message for revoking certificate -type MsgRevokeCertificate struct { - ID CertificateID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` -} - -func (m *MsgRevokeCertificate) Reset() { *m = MsgRevokeCertificate{} } -func (m *MsgRevokeCertificate) String() string { return proto.CompactTextString(m) } -func (*MsgRevokeCertificate) ProtoMessage() {} -func (*MsgRevokeCertificate) Descriptor() ([]byte, []int) { - return fileDescriptor_6bba68168592156e, []int{5} -} -func (m *MsgRevokeCertificate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgRevokeCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgRevokeCertificate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgRevokeCertificate) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgRevokeCertificate.Merge(m, src) -} -func (m *MsgRevokeCertificate) XXX_Size() int { - return m.Size() -} -func (m *MsgRevokeCertificate) XXX_DiscardUnknown() { - xxx_messageInfo_MsgRevokeCertificate.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgRevokeCertificate proto.InternalMessageInfo - -func (m *MsgRevokeCertificate) GetID() CertificateID { - if m != nil { - return m.ID - } - return CertificateID{} -} - -// MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type. 
-type MsgRevokeCertificateResponse struct { -} - -func (m *MsgRevokeCertificateResponse) Reset() { *m = MsgRevokeCertificateResponse{} } -func (m *MsgRevokeCertificateResponse) String() string { return proto.CompactTextString(m) } -func (*MsgRevokeCertificateResponse) ProtoMessage() {} -func (*MsgRevokeCertificateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_6bba68168592156e, []int{6} -} -func (m *MsgRevokeCertificateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgRevokeCertificateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgRevokeCertificateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgRevokeCertificateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgRevokeCertificateResponse.Merge(m, src) -} -func (m *MsgRevokeCertificateResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgRevokeCertificateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgRevokeCertificateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgRevokeCertificateResponse proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("akash.cert.v1beta1.Certificate_State", Certificate_State_name, Certificate_State_value) - proto.RegisterType((*CertificateID)(nil), "akash.cert.v1beta1.CertificateID") - proto.RegisterType((*Certificate)(nil), "akash.cert.v1beta1.Certificate") - proto.RegisterType((*CertificateFilter)(nil), "akash.cert.v1beta1.CertificateFilter") - proto.RegisterType((*MsgCreateCertificate)(nil), "akash.cert.v1beta1.MsgCreateCertificate") - proto.RegisterType((*MsgCreateCertificateResponse)(nil), "akash.cert.v1beta1.MsgCreateCertificateResponse") - proto.RegisterType((*MsgRevokeCertificate)(nil), "akash.cert.v1beta1.MsgRevokeCertificate") - proto.RegisterType((*MsgRevokeCertificateResponse)(nil), "akash.cert.v1beta1.MsgRevokeCertificateResponse") -} - -func init() { proto.RegisterFile("akash/cert/v1beta1/cert.proto", fileDescriptor_6bba68168592156e) } - -var fileDescriptor_6bba68168592156e = []byte{ - // 580 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0xb1, 0x6f, 0xda, 0x4e, - 0x14, 0xb6, 0x0d, 0x24, 0xbf, 0x1c, 0xc9, 0x4f, 0xc4, 0x42, 0x0d, 0x75, 0x1a, 0x1f, 0xbd, 0xaa, - 0x12, 0x52, 0x2b, 0xbb, 0x21, 0x1b, 0x23, 0x41, 0x91, 0x18, 0x22, 0x55, 0xae, 0x94, 0xa1, 0x9b, - 0xc1, 0x57, 0x72, 0x82, 0x60, 0x64, 0x5f, 0x68, 0x19, 0xaa, 0xae, 0x15, 0x53, 0xc7, 0x2e, 0x48, - 0x91, 0xfa, 0x07, 0xb4, 0x7f, 0x45, 0x95, 0x31, 0x63, 0xa7, 0x53, 0x05, 0x43, 0x2b, 0x46, 0xfe, - 0x82, 0xca, 0xef, 0x40, 0xb8, 0x32, 0xb4, 0xc9, 0xd2, 0xcd, 0xef, 0x7d, 0xdf, 0xf9, 0x7d, 0xdf, - 0x7d, 0xa7, 0x87, 0x0e, 0xdc, 0xb6, 0x1b, 0x9e, 0xdb, 0x4d, 0x1a, 0x70, 0xbb, 0x7f, 0xd8, 0xa0, - 0xdc, 0x3d, 0x84, 0xc2, 0xea, 0x05, 0x3e, 0xf7, 0x75, 0x1d, 0x60, 0x0b, 0x3a, 0x73, 0xd8, 0xc8, - 0xb7, 0xfc, 0x96, 0x0f, 0xb0, 0x1d, 0x7d, 0x49, 0x26, 0x79, 0x87, 0x76, 0x8e, 0x69, 0xc0, 0xd9, - 0x2b, 0xd6, 0x74, 0x39, 0xad, 0xd7, 0x74, 0x1b, 0x65, 0xfc, 0xd7, 0x5d, 0x1a, 0x14, 0xd4, 0xa2, - 0x5a, 0xda, 0xaa, 0xde, 0x9f, 0x0a, 0x2c, 0x1b, 0x33, 0x81, 0xb7, 0x07, 0xee, 0x45, 0xa7, 0x42, - 0xa0, 0x24, 0x8e, 0x6c, 0xeb, 0x47, 0x68, 0x23, 0xa4, 0x01, 0x73, 0x3b, 0x05, 0x0d, 0x4e, 0xec, - 0x4f, 0x05, 0x9e, 0x77, 0x66, 0x02, 0xef, 0xc8, 0x23, 0xb2, 0x26, 0xce, 0x1c, 0xa8, 0xfc, 0xf7, - 0xf1, 0x0a, 0x2b, 0x3f, 
0xaf, 0xb0, 0x42, 0xbe, 0x6a, 0x28, 0x1b, 0x53, 0xa0, 0x9f, 0xa1, 0x4c, - 0xc8, 0x5d, 0x4e, 0xe1, 0x6f, 0xff, 0x97, 0x1f, 0x5b, 0x49, 0x2b, 0x56, 0x8c, 0x6f, 0xbd, 0x88, - 0xc8, 0x52, 0x26, 0x9c, 0x5b, 0xca, 0x84, 0x92, 0x38, 0xb2, 0xad, 0x3f, 0x41, 0xe9, 0xe8, 0x1f, - 0x85, 0x54, 0x51, 0x2d, 0x6d, 0x57, 0xf7, 0xa6, 0x02, 0x43, 0x3d, 0x13, 0x38, 0x2b, 0xe9, 0x51, - 0x45, 0x1c, 0x68, 0x46, 0x9e, 0x7a, 0x97, 0x8d, 0x36, 0x1d, 0x14, 0xd2, 0x40, 0x07, 0x4f, 0xb2, - 0xb3, 0xf4, 0x24, 0x6b, 0xe2, 0xcc, 0x01, 0xf2, 0x16, 0x65, 0x40, 0x8c, 0x5e, 0x42, 0x9b, 0xac, - 0xdb, 0x77, 0x3b, 0xcc, 0xcb, 0x29, 0xc6, 0xfe, 0x70, 0x54, 0xdc, 0x8b, 0x09, 0x06, 0x4a, 0x5d, - 0xc2, 0x3a, 0x46, 0x19, 0xc9, 0x53, 0x8d, 0xfc, 0x70, 0x54, 0xcc, 0xc5, 0x78, 0x67, 0x40, 0x78, - 0x84, 0x36, 0x03, 0xda, 0xf7, 0xdb, 0xd4, 0xcb, 0x69, 0xc6, 0xbd, 0xe1, 0xa8, 0xa8, 0xc7, 0x28, - 0x8e, 0x44, 0x8c, 0xf4, 0xfb, 0x4f, 0xa6, 0x42, 0xbe, 0xa8, 0x68, 0x37, 0x06, 0x9e, 0xb0, 0x0e, - 0xa7, 0xc1, 0xbf, 0x89, 0x33, 0x9a, 0x22, 0x43, 0x4b, 0x2d, 0xa7, 0xfc, 0x29, 0x8d, 0x4a, 0x1a, - 0xb2, 0xff, 0xac, 0xa2, 0xfc, 0x69, 0xd8, 0x3a, 0x0e, 0xa8, 0xcb, 0x69, 0xfc, 0x11, 0xdc, 0x59, - 0xf5, 0x22, 0x5d, 0xed, 0x6e, 0xe9, 0xa6, 0x6e, 0x9d, 0xee, 0x5c, 0xb1, 0x89, 0x1e, 0xac, 0x12, - 0xec, 0xd0, 0xb0, 0xe7, 0x77, 0x43, 0x4a, 0xba, 0x60, 0x48, 0x06, 0x13, 0x37, 0xf4, 0x1c, 0x69, - 0xcc, 0x03, 0x37, 0xd9, 0xf2, 0xc3, 0xbf, 0x3c, 0xe9, 0x7a, 0xad, 0x7a, 0x70, 0x2d, 0xb0, 0x32, - 0x16, 0x58, 0xab, 0xd7, 0xa6, 0x02, 0x6b, 0xcc, 0x9b, 0x09, 0xbc, 0x25, 0x75, 0x31, 0x8f, 0x38, - 0x1a, 0xf3, 0x7e, 0xd3, 0x93, 0x98, 0xb7, 0xd0, 0x53, 0xfe, 0xa1, 0xa2, 0xd4, 0x69, 0xd8, 0xd2, - 0x7d, 0xb4, 0x9b, 0xbc, 0xe5, 0xd2, 0x2a, 0x21, 0xab, 0xec, 0x19, 0xcf, 0x6e, 0xcb, 0x5c, 0x0c, - 0x8e, 0x06, 0x26, 0x6f, 0x61, 0xdd, 0xc0, 0x04, 0x73, 0xed, 0xc0, 0xb5, 0x4e, 0xab, 0x27, 0xd7, - 0x63, 0x53, 0xbd, 0x19, 0x9b, 0xea, 0xf7, 0xb1, 0xa9, 0x7e, 0x98, 0x98, 0xca, 0xcd, 0xc4, 0x54, - 0xbe, 0x4d, 0x4c, 0xe5, 0xe5, 0xd3, 0x16, 0xe3, 0xe7, 0x97, 0x0d, 0xab, 0xe9, 0x5f, 0xd8, 0x7e, - 0x3f, 0x68, 0x76, 0xda, 0xb6, 0xdc, 0x9e, 0x6f, 0xe4, 0xfe, 0xe4, 0x83, 0x1e, 0x0d, 0x17, 0x5b, - 0xb4, 0xb1, 0x01, 0x7b, 0xf1, 0xe8, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc3, 0x23, 0x39, 0x51, - 0x62, 0x05, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MsgClient interface { - // CreateCertificate defines a method to create new certificate given proper inputs. 
- CreateCertificate(ctx context.Context, in *MsgCreateCertificate, opts ...grpc.CallOption) (*MsgCreateCertificateResponse, error) - // RevokeCertificate defines a method to revoke the certificate - RevokeCertificate(ctx context.Context, in *MsgRevokeCertificate, opts ...grpc.CallOption) (*MsgRevokeCertificateResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) CreateCertificate(ctx context.Context, in *MsgCreateCertificate, opts ...grpc.CallOption) (*MsgCreateCertificateResponse, error) { - out := new(MsgCreateCertificateResponse) - err := c.cc.Invoke(ctx, "/akash.cert.v1beta1.Msg/CreateCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) RevokeCertificate(ctx context.Context, in *MsgRevokeCertificate, opts ...grpc.CallOption) (*MsgRevokeCertificateResponse, error) { - out := new(MsgRevokeCertificateResponse) - err := c.cc.Invoke(ctx, "/akash.cert.v1beta1.Msg/RevokeCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. -type MsgServer interface { - // CreateCertificate defines a method to create new certificate given proper inputs. - CreateCertificate(context.Context, *MsgCreateCertificate) (*MsgCreateCertificateResponse, error) - // RevokeCertificate defines a method to revoke the certificate - RevokeCertificate(context.Context, *MsgRevokeCertificate) (*MsgRevokeCertificateResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. -type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) CreateCertificate(ctx context.Context, req *MsgCreateCertificate) (*MsgCreateCertificateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateCertificate not implemented") -} -func (*UnimplementedMsgServer) RevokeCertificate(ctx context.Context, req *MsgRevokeCertificate) (*MsgRevokeCertificateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RevokeCertificate not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_CreateCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateCertificate) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateCertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.cert.v1beta1.Msg/CreateCertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CreateCertificate(ctx, req.(*MsgCreateCertificate)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgRevokeCertificate) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).RevokeCertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.cert.v1beta1.Msg/RevokeCertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).RevokeCertificate(ctx, 
req.(*MsgRevokeCertificate)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.cert.v1beta1.Msg", - HandlerType: (*MsgServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateCertificate", - Handler: _Msg_CreateCertificate_Handler, - }, - { - MethodName: "RevokeCertificate", - Handler: _Msg_RevokeCertificate_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/cert/v1beta1/cert.proto", -} - -func (m *CertificateID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CertificateID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Serial) > 0 { - i -= len(m.Serial) - copy(dAtA[i:], m.Serial) - i = encodeVarintCert(dAtA, i, uint64(len(m.Serial))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintCert(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Certificate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Certificate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Certificate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Pubkey) > 0 { - i -= len(m.Pubkey) - copy(dAtA[i:], m.Pubkey) - i = encodeVarintCert(dAtA, i, uint64(len(m.Pubkey))) - i-- - dAtA[i] = 0x22 - } - if len(m.Cert) > 0 { - i -= len(m.Cert) - copy(dAtA[i:], m.Cert) - i = encodeVarintCert(dAtA, i, uint64(len(m.Cert))) - i-- - dAtA[i] = 0x1a - } - if m.State != 0 { - i = encodeVarintCert(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - return len(dAtA) - i, nil -} - -func (m *CertificateFilter) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateFilter) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CertificateFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintCert(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x1a - } - if len(m.Serial) > 0 { - i -= len(m.Serial) - copy(dAtA[i:], m.Serial) - i = encodeVarintCert(dAtA, i, uint64(len(m.Serial))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintCert(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateCertificate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateCertificate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - 
-func (m *MsgCreateCertificate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Pubkey) > 0 { - i -= len(m.Pubkey) - copy(dAtA[i:], m.Pubkey) - i = encodeVarintCert(dAtA, i, uint64(len(m.Pubkey))) - i-- - dAtA[i] = 0x1a - } - if len(m.Cert) > 0 { - i -= len(m.Cert) - copy(dAtA[i:], m.Cert) - i = encodeVarintCert(dAtA, i, uint64(len(m.Cert))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintCert(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateCertificateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateCertificateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateCertificateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgRevokeCertificate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgRevokeCertificate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgRevokeCertificate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCert(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgRevokeCertificateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgRevokeCertificateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgRevokeCertificateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintCert(dAtA []byte, offset int, v uint64) int { - offset -= sovCert(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CertificateID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.Serial) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - return n -} - -func (m *Certificate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.State != 0 { - n += 1 + sovCert(uint64(m.State)) - } - l = len(m.Cert) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.Pubkey) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - return n -} - -func (m *CertificateFilter) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.Serial) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - return n -} - -func (m 
*MsgCreateCertificate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.Cert) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.Pubkey) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - return n -} - -func (m *MsgCreateCertificateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgRevokeCertificate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovCert(uint64(l)) - return n -} - -func (m *MsgRevokeCertificateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovCert(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozCert(x uint64) (n int) { - return sovCert(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CertificateID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Serial", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Serial = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Certificate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - 
b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Certificate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Certificate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Certificate_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cert", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cert = append(m.Cert[:0], dAtA[iNdEx:postIndex]...) - if m.Cert == nil { - m.Cert = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pubkey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pubkey = append(m.Pubkey[:0], dAtA[iNdEx:postIndex]...) 
- if m.Pubkey == nil { - m.Pubkey = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CertificateFilter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateFilter: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateFilter: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Serial", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Serial = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateCertificate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for 
iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateCertificate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateCertificate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cert", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cert = append(m.Cert[:0], dAtA[iNdEx:postIndex]...) - if m.Cert == nil { - m.Cert = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pubkey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pubkey = append(m.Pubkey[:0], dAtA[iNdEx:postIndex]...) 
- if m.Pubkey == nil { - m.Pubkey = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateCertificateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateCertificateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateCertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgRevokeCertificate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgRevokeCertificate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgRevokeCertificate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgRevokeCertificateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum 
:= int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgRevokeCertificateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgRevokeCertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipCert(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCert - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCert - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCert - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthCert - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupCert - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthCert - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthCert = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCert = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupCert = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/cert/v1beta1/codec.go b/go/node/cert/v1beta1/codec.go deleted file mode 100644 index 99c3bc2e..00000000 --- a/go/node/cert/v1beta1/codec.go +++ /dev/null @@ -1,43 +0,0 @@ -package v1beta1 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/msgservice" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/provider module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/provider and - // defined at the application level. 
- ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterLegacyAminoCodec register concrete types on codec -func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { - cdc.RegisterConcrete(&MsgCreateCertificate{}, ModuleName+"/"+MsgTypeCreateCertificate, nil) - cdc.RegisterConcrete(&MsgRevokeCertificate{}, ModuleName+"/"+MsgTypeRevokeCertificate, nil) -} - -// RegisterInterfaces registers the x/provider interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil), - &MsgCreateCertificate{}, - &MsgRevokeCertificate{}, - ) - - msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) -} diff --git a/go/node/cert/v1beta1/errors.go b/go/node/cert/v1beta1/errors.go deleted file mode 100644 index 16670e1e..00000000 --- a/go/node/cert/v1beta1/errors.go +++ /dev/null @@ -1,19 +0,0 @@ -package v1beta1 - -import ( - "github.com/pkg/errors" -) - -var ( - // ErrInvalidSerialNumber invalid serial number - ErrInvalidSerialNumber = errors.New("invalid serial number") - - // ErrInvalidCertificateValue certificate content is not valid - ErrInvalidCertificateValue = errors.New("invalid certificate value") - - // ErrInvalidPubkeyValue public key is not valid - ErrInvalidPubkeyValue = errors.New("invalid pubkey value") - - // ErrInvalidState invalid certificate state - ErrInvalidState = errors.New("invalid state") -) diff --git a/go/node/cert/v1beta1/genesis.go b/go/node/cert/v1beta1/genesis.go deleted file mode 100644 index c245a5b7..00000000 --- a/go/node/cert/v1beta1/genesis.go +++ /dev/null @@ -1,58 +0,0 @@ -package v1beta1 - -import ( - "bytes" - "encoding/json" - - "github.com/cosmos/cosmos-sdk/codec" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -type GenesisCertificates []GenesisCertificate - -func (obj GenesisCertificates) Contains(cert GenesisCertificate) bool { - for _, c := range obj { - if c.Owner == cert.Owner { - return true - } - - if bytes.Equal(c.Certificate.Cert, cert.Certificate.Cert) { - return true - } - } - - return false -} - -func (m GenesisCertificate) Validate() error { - owner, err := sdk.AccAddressFromBech32(m.Owner) - if err != nil { - return err - } - if err := m.Certificate.Validate(owner); err != nil { - return err - } - - return nil -} - -func (m *GenesisState) Validate() error { - for _, cert := range m.Certificates { - if err := cert.Validate(); err != nil { - return err - } - } - return nil -} - -// GetGenesisStateFromAppState returns x/cert GenesisState given raw application -// genesis state. -func GetGenesisStateFromAppState(cdc codec.JSONCodec, appState map[string]json.RawMessage) *GenesisState { - var genesisState GenesisState - - if appState[ModuleName] != nil { - cdc.MustUnmarshalJSON(appState[ModuleName], &genesisState) - } - - return &genesisState -} diff --git a/go/node/cert/v1beta1/genesis.pb.go b/go/node/cert/v1beta1/genesis.pb.go deleted file mode 100644 index a2b97b62..00000000 --- a/go/node/cert/v1beta1/genesis.pb.go +++ /dev/null @@ -1,561 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/cert/v1beta1/genesis.proto - -package v1beta1 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisCertificate defines certificate entry at genesis -type GenesisCertificate struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Certificate Certificate `protobuf:"bytes,2,opt,name=certificate,proto3" json:"certificate" yaml:"certificate"` -} - -func (m *GenesisCertificate) Reset() { *m = GenesisCertificate{} } -func (m *GenesisCertificate) String() string { return proto.CompactTextString(m) } -func (*GenesisCertificate) ProtoMessage() {} -func (*GenesisCertificate) Descriptor() ([]byte, []int) { - return fileDescriptor_4ec22c8e0afb2f99, []int{0} -} -func (m *GenesisCertificate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisCertificate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisCertificate) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisCertificate.Merge(m, src) -} -func (m *GenesisCertificate) XXX_Size() int { - return m.Size() -} -func (m *GenesisCertificate) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisCertificate.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisCertificate proto.InternalMessageInfo - -func (m *GenesisCertificate) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *GenesisCertificate) GetCertificate() Certificate { - if m != nil { - return m.Certificate - } - return Certificate{} -} - -// GenesisState defines the basic genesis state used by cert module -type GenesisState struct { - Certificates GenesisCertificates `protobuf:"bytes,1,rep,name=certificates,proto3,castrepeated=GenesisCertificates" json:"certificates" yaml:"certificates"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_4ec22c8e0afb2f99, []int{1} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetCertificates() GenesisCertificates { - if m != nil { - return m.Certificates - } - return nil -} - -func init() { - proto.RegisterType((*GenesisCertificate)(nil), "akash.cert.v1beta1.GenesisCertificate") - proto.RegisterType((*GenesisState)(nil), 
"akash.cert.v1beta1.GenesisState") -} - -func init() { proto.RegisterFile("akash/cert/v1beta1/genesis.proto", fileDescriptor_4ec22c8e0afb2f99) } - -var fileDescriptor_4ec22c8e0afb2f99 = []byte{ - // 315 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x4e, 0x2d, 0x2a, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0x4f, - 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x02, 0xab, - 0xd0, 0x03, 0xa9, 0xd0, 0x83, 0xaa, 0x90, 0x92, 0xc5, 0xa2, 0x0b, 0xac, 0x00, 0xac, 0x45, 0x4a, - 0x24, 0x3d, 0x3f, 0x3d, 0x1f, 0xcc, 0xd4, 0x07, 0xb1, 0x20, 0xa2, 0x4a, 0x9b, 0x19, 0xb9, 0x84, - 0xdc, 0x21, 0x46, 0x3b, 0xa7, 0x16, 0x95, 0x64, 0xa6, 0x65, 0x26, 0x27, 0x96, 0xa4, 0x0a, 0xe9, - 0x73, 0xb1, 0xe6, 0x97, 0xe7, 0xa5, 0x16, 0x49, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x3a, 0x49, 0xbe, - 0xba, 0x27, 0x0f, 0x11, 0xf8, 0x74, 0x4f, 0x9e, 0xa7, 0x32, 0x31, 0x37, 0xc7, 0x4a, 0x09, 0xcc, - 0x55, 0x0a, 0x82, 0x08, 0x0b, 0xe5, 0x70, 0x71, 0x27, 0x23, 0xf4, 0x4b, 0x30, 0x29, 0x30, 0x6a, - 0x70, 0x1b, 0xc9, 0xeb, 0x61, 0x3a, 0x53, 0x0f, 0xc9, 0x1a, 0x27, 0xcd, 0x13, 0xf7, 0xe4, 0x19, - 0x5e, 0xdd, 0x93, 0x47, 0xd6, 0xfb, 0xe9, 0x9e, 0xbc, 0x10, 0xc4, 0x06, 0x24, 0x41, 0xa5, 0x20, - 0x64, 0x25, 0x4a, 0xf3, 0x19, 0xb9, 0x78, 0xa0, 0xae, 0x0e, 0x2e, 0x01, 0xb9, 0xb7, 0x8f, 0x91, - 0x8b, 0x07, 0x49, 0x41, 0xb1, 0x04, 0xa3, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x1a, 0x36, 0x07, 0x60, - 0x7a, 0xd7, 0xc9, 0x09, 0xea, 0x0e, 0x14, 0x33, 0x3e, 0xdd, 0x93, 0x17, 0xc6, 0x70, 0x48, 0xb1, - 0xd2, 0xaa, 0xfb, 0xf2, 0xc2, 0x98, 0x46, 0x14, 0x07, 0xa1, 0xe8, 0x75, 0x72, 0x3b, 0xf1, 0x48, - 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, - 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0x9d, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, - 0xe4, 0xfc, 0x5c, 0xfd, 0xfc, 0xb2, 0xa2, 0xe4, 0x9c, 0x6c, 0x7d, 0x48, 0xc4, 0x55, 0x40, 0xa2, - 0xae, 0xa4, 0xb2, 0x20, 0xb5, 0x18, 0x16, 0x81, 0x49, 0x6c, 0xe0, 0x68, 0x32, 0x06, 0x04, 0x00, - 0x00, 0xff, 0xff, 0xfe, 0x8e, 0x65, 0xd0, 0x13, 0x02, 0x00, 0x00, -} - -func (m *GenesisCertificate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisCertificate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisCertificate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Certificate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintGenesis(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Certificates) > 0 { - for 
iNdEx := len(m.Certificates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Certificates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisCertificate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovGenesis(uint64(l)) - } - l = m.Certificate.Size() - n += 1 + l + sovGenesis(uint64(l)) - return n -} - -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Certificates) > 0 { - for _, e := range m.Certificates { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisCertificate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisCertificate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisCertificate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - 
return io.ErrUnexpectedEOF - } - return nil -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Certificates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Certificates = append(m.Certificates, GenesisCertificate{}) - if err := m.Certificates[len(m.Certificates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/cert/v1beta1/key.go b/go/node/cert/v1beta1/key.go deleted file mode 100644 index 
690af193..00000000 --- a/go/node/cert/v1beta1/key.go +++ /dev/null @@ -1,16 +0,0 @@ -package v1beta1 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "cert" - - // StoreKey is the store key string for provider - StoreKey = ModuleName - - // RouterKey is the message route for provider - RouterKey = ModuleName -) - -func PrefixCertificateID() []byte { - return []byte{0x01} -} diff --git a/go/node/cert/v1beta1/msgs.go b/go/node/cert/v1beta1/msgs.go deleted file mode 100644 index a5988bc9..00000000 --- a/go/node/cert/v1beta1/msgs.go +++ /dev/null @@ -1,98 +0,0 @@ -package v1beta1 - -import ( - "math/big" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - MsgTypeCreateCertificate = "cert-create-certificate" - MsgTypeRevokeCertificate = "cert-revoke-certificate" -) - -var ( - _ sdk.Msg = &MsgCreateCertificate{} - _ sdk.Msg = &MsgRevokeCertificate{} -) - -// ====MsgCreateCertificate==== -// Route implements the sdk.Msg interface -func (m MsgCreateCertificate) Route() string { - return RouterKey -} - -// Type implements the sdk.Msg interface -func (m MsgCreateCertificate) Type() string { - return MsgTypeCreateCertificate -} - -// ValidateBasic does basic validation -func (m MsgCreateCertificate) ValidateBasic() error { - owner, err := sdk.AccAddressFromBech32(m.Owner) - if err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Owner Address") - } - - _, err = ParseAndValidateCertificate(owner, m.Cert, m.Pubkey) - if err != nil { - return err - } - - return nil -} - -// GetSignBytes encodes the message for signing -func (m MsgCreateCertificate) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&m)) -} - -// GetSigners defines whose signature is required -func (m MsgCreateCertificate) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(m.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// ====MsgRevokeCertificate==== -// Route implements the sdk.Msg interface -func (m MsgRevokeCertificate) Route() string { - return RouterKey -} - -// Type implements the sdk.Msg interface -func (m MsgRevokeCertificate) Type() string { - return MsgTypeRevokeCertificate -} - -// ValidateBasic does basic validation -func (m MsgRevokeCertificate) ValidateBasic() error { - if _, err := sdk.AccAddressFromBech32(m.ID.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgRevoke: Invalid Owner Address") - } - - if _, valid := new(big.Int).SetString(m.ID.Serial, 10); !valid { - return ErrInvalidSerialNumber - } - - return nil -} - -// GetSignBytes encodes the message for signing -func (m MsgRevokeCertificate) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&m)) -} - -// GetSigners defines whose signature is required -func (m MsgRevokeCertificate) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(m.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} diff --git a/go/node/cert/v1beta1/query.pb.go b/go/node/cert/v1beta1/query.pb.go deleted file mode 100644 index 850b2d8d..00000000 --- a/go/node/cert/v1beta1/query.pb.go +++ /dev/null @@ -1,953 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: akash/cert/v1beta1/query.proto - -package v1beta1 - -import ( - context "context" - fmt "fmt" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// CertificateResponse is used by QueryCertificatesResponse -type CertificateResponse struct { - Certificate Certificate `protobuf:"bytes,1,opt,name=certificate,proto3" json:"certificate" yaml:"certificate"` - Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial" yaml:"serial"` -} - -func (m *CertificateResponse) Reset() { *m = CertificateResponse{} } -func (m *CertificateResponse) String() string { return proto.CompactTextString(m) } -func (*CertificateResponse) ProtoMessage() {} -func (*CertificateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_253641229681779f, []int{0} -} -func (m *CertificateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CertificateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CertificateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CertificateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CertificateResponse.Merge(m, src) -} -func (m *CertificateResponse) XXX_Size() int { - return m.Size() -} -func (m *CertificateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CertificateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CertificateResponse proto.InternalMessageInfo - -func (m *CertificateResponse) GetCertificate() Certificate { - if m != nil { - return m.Certificate - } - return Certificate{} -} - -func (m *CertificateResponse) GetSerial() string { - if m != nil { - return m.Serial - } - return "" -} - -// QueryDeploymentsRequest is request type for the Query/Deployments RPC method -type QueryCertificatesRequest struct { - Filter CertificateFilter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryCertificatesRequest) Reset() { *m = QueryCertificatesRequest{} } -func (m *QueryCertificatesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryCertificatesRequest) ProtoMessage() {} -func (*QueryCertificatesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_253641229681779f, []int{1} -} -func (m *QueryCertificatesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryCertificatesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryCertificatesRequest.Marshal(b, m, 
deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryCertificatesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryCertificatesRequest.Merge(m, src) -} -func (m *QueryCertificatesRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryCertificatesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryCertificatesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryCertificatesRequest proto.InternalMessageInfo - -func (m *QueryCertificatesRequest) GetFilter() CertificateFilter { - if m != nil { - return m.Filter - } - return CertificateFilter{} -} - -func (m *QueryCertificatesRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryCertificatesResponse is response type for the Query/Certificates RPC method -type QueryCertificatesResponse struct { - Certificates CertificatesResponse `protobuf:"bytes,1,rep,name=certificates,proto3,castrepeated=CertificatesResponse" json:"certificates"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryCertificatesResponse) Reset() { *m = QueryCertificatesResponse{} } -func (m *QueryCertificatesResponse) String() string { return proto.CompactTextString(m) } -func (*QueryCertificatesResponse) ProtoMessage() {} -func (*QueryCertificatesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_253641229681779f, []int{2} -} -func (m *QueryCertificatesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryCertificatesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryCertificatesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryCertificatesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryCertificatesResponse.Merge(m, src) -} -func (m *QueryCertificatesResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryCertificatesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryCertificatesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryCertificatesResponse proto.InternalMessageInfo - -func (m *QueryCertificatesResponse) GetCertificates() CertificatesResponse { - if m != nil { - return m.Certificates - } - return nil -} - -func (m *QueryCertificatesResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -func init() { - proto.RegisterType((*CertificateResponse)(nil), "akash.cert.v1beta1.CertificateResponse") - proto.RegisterType((*QueryCertificatesRequest)(nil), "akash.cert.v1beta1.QueryCertificatesRequest") - proto.RegisterType((*QueryCertificatesResponse)(nil), "akash.cert.v1beta1.QueryCertificatesResponse") -} - -func init() { proto.RegisterFile("akash/cert/v1beta1/query.proto", fileDescriptor_253641229681779f) } - -var fileDescriptor_253641229681779f = []byte{ - // 478 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xb1, 0x6e, 0xd4, 0x30, - 0x18, 0xc7, 0xcf, 0x05, 0x4e, 0xc2, 0x57, 0x16, 0xd3, 0xe1, 0x38, 0x4a, 0x72, 0x8a, 0x54, 0xee, - 0x40, 0xad, 0xad, 0x5e, 0x37, 0xc6, 0x54, 0x3a, 0x56, 0xc8, 0xc8, 0xe6, 0x44, 0x6e, 0x6a, 0x35, - 0x17, 0xa7, 0xb1, 0xaf, 0xe2, 0x56, 0x9e, 0x00, 0x89, 0x8d, 0x19, 0x09, 0x89, 0x27, 
0xe0, 0x11, - 0xba, 0x51, 0x89, 0x85, 0x29, 0xa0, 0x3b, 0xa6, 0x8e, 0x7d, 0x02, 0x14, 0xdb, 0xa1, 0x46, 0xa4, - 0xba, 0x6e, 0x89, 0xff, 0xdf, 0xff, 0xff, 0xfd, 0xbe, 0xcf, 0x09, 0xf4, 0xe8, 0x09, 0x95, 0xc7, - 0x24, 0x61, 0xa5, 0x22, 0x67, 0xfb, 0x31, 0x53, 0x74, 0x9f, 0x9c, 0xce, 0x59, 0xb9, 0xc0, 0x45, - 0x29, 0x94, 0x40, 0x48, 0xeb, 0xb8, 0xd6, 0xb1, 0xd5, 0x07, 0x5b, 0xa9, 0x48, 0x85, 0x96, 0x49, - 0xfd, 0x64, 0x2a, 0x07, 0xdb, 0xa9, 0x10, 0x69, 0xc6, 0x08, 0x2d, 0x38, 0xa1, 0x79, 0x2e, 0x14, - 0x55, 0x5c, 0xe4, 0xd2, 0xaa, 0xcf, 0x13, 0x21, 0x67, 0x42, 0x92, 0x98, 0x4a, 0x66, 0x1a, 0xfc, - 0x6d, 0x57, 0xd0, 0x94, 0xe7, 0xba, 0xd8, 0xd6, 0x3e, 0x69, 0x61, 0xd2, 0x00, 0x5a, 0x0e, 0xbe, - 0x02, 0xf8, 0xf0, 0x90, 0x95, 0x8a, 0x1f, 0xf1, 0x84, 0x2a, 0x16, 0x31, 0x59, 0x88, 0x5c, 0x32, - 0x94, 0xc1, 0x5e, 0x72, 0x7d, 0xdc, 0x07, 0x43, 0x30, 0xee, 0x4d, 0x7c, 0xfc, 0xff, 0x00, 0xd8, - 0x71, 0x87, 0xcf, 0xce, 0x2b, 0xbf, 0x73, 0x59, 0xf9, 0xae, 0xf7, 0xaa, 0xf2, 0xd1, 0x82, 0xce, - 0xb2, 0x17, 0x81, 0x73, 0x18, 0x44, 0x6e, 0x09, 0x3a, 0x80, 0x5d, 0xc9, 0x4a, 0x4e, 0xb3, 0xfe, - 0xc6, 0x10, 0x8c, 0xef, 0x87, 0x8f, 0x2f, 0x2b, 0xdf, 0x9e, 0x5c, 0x55, 0xfe, 0x03, 0x63, 0x37, - 0xef, 0x41, 0x64, 0x85, 0xe0, 0x33, 0x80, 0xfd, 0xd7, 0xf5, 0xf0, 0x0e, 0x81, 0x8c, 0xd8, 0xe9, - 0x9c, 0x49, 0x85, 0x0e, 0x61, 0xf7, 0x88, 0x67, 0x8a, 0x95, 0x16, 0x7d, 0x67, 0x0d, 0xfa, 0x54, - 0x17, 0x87, 0x77, 0xeb, 0x01, 0x22, 0x6b, 0x45, 0x53, 0x08, 0xaf, 0xf7, 0xa9, 0xd1, 0x7a, 0x93, - 0xa7, 0xd8, 0x2c, 0x1f, 0xd7, 0xcb, 0xc7, 0xe6, 0x76, 0x9b, 0xbc, 0x57, 0x34, 0x65, 0x16, 0x20, - 0x72, 0x9c, 0xc1, 0x37, 0x00, 0x1f, 0xb5, 0x90, 0xda, 0x55, 0x73, 0xb8, 0xe9, 0xec, 0x42, 0xf6, - 0xc1, 0xf0, 0xce, 0xb8, 0x37, 0x19, 0xad, 0x01, 0x6e, 0xec, 0xe1, 0x76, 0x8d, 0xfc, 0xe5, 0xa7, - 0xbf, 0xd5, 0x16, 0x1e, 0xfd, 0x13, 0x8d, 0x5e, 0xb6, 0x0c, 0x34, 0x5a, 0x3b, 0x90, 0x8d, 0x72, - 0xac, 0x93, 0x4f, 0x00, 0xde, 0xd3, 0x13, 0xa1, 0x8f, 0x00, 0x6e, 0xba, 0x9d, 0xd1, 0x6e, 0x1b, - 0xf8, 0x4d, 0xf7, 0x34, 0xd8, 0xbb, 0x65, 0xb5, 0x61, 0x08, 0xf6, 0xde, 0x7d, 0xff, 0xfd, 0x61, - 0x63, 0x84, 0x76, 0xc8, 0x0d, 0x9f, 0x75, 0xe3, 0x20, 0x19, 0x97, 0x2a, 0x9c, 0x9e, 0x2f, 0x3d, - 0x70, 0xb1, 0xf4, 0xc0, 0xaf, 0xa5, 0x07, 0xde, 0xaf, 0xbc, 0xce, 0xc5, 0xca, 0xeb, 0xfc, 0x58, - 0x79, 0x9d, 0x37, 0xbb, 0x29, 0x57, 0xc7, 0xf3, 0x18, 0x27, 0x62, 0x46, 0xc4, 0x59, 0x99, 0x64, - 0x27, 0x36, 0xf1, 0xad, 0xc9, 0x54, 0x8b, 0x82, 0xc9, 0x26, 0x39, 0xee, 0xea, 0x9f, 0xe5, 0xe0, - 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x95, 0x96, 0xca, 0xe1, 0x03, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type QueryClient interface { - // Certificates queries certificates - Certificates(ctx context.Context, in *QueryCertificatesRequest, opts ...grpc.CallOption) (*QueryCertificatesResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Certificates(ctx context.Context, in *QueryCertificatesRequest, opts ...grpc.CallOption) (*QueryCertificatesResponse, error) { - out := new(QueryCertificatesResponse) - err := c.cc.Invoke(ctx, "/akash.cert.v1beta1.Query/Certificates", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // Certificates queries certificates - Certificates(context.Context, *QueryCertificatesRequest) (*QueryCertificatesResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. -type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Certificates(ctx context.Context, req *QueryCertificatesRequest) (*QueryCertificatesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Certificates not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Certificates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryCertificatesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Certificates(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.cert.v1beta1.Query/Certificates", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Certificates(ctx, req.(*QueryCertificatesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.cert.v1beta1.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Certificates", - Handler: _Query_Certificates_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/cert/v1beta1/query.proto", -} - -func (m *CertificateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CertificateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Serial) > 0 { - i -= len(m.Serial) - copy(dAtA[i:], m.Serial) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Serial))) - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Certificate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryCertificatesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryCertificatesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() 
- return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryCertificatesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryCertificatesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryCertificatesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryCertificatesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Certificates) > 0 { - for iNdEx := len(m.Certificates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Certificates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CertificateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Certificate.Size() - n += 1 + l + sovQuery(uint64(l)) - l = len(m.Serial) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryCertificatesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filter.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryCertificatesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Certificates) > 0 { - for _, e := range m.Certificates { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CertificateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
CertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Serial", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Serial = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryCertificatesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryCertificatesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryCertificatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryCertificatesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryCertificatesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryCertificatesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Certificates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Certificates = append(m.Certificates, CertificateResponse{}) - if err := m.Certificates[len(m.Certificates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= 
(uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/cert/v1beta1/query.pb.gw.go b/go/node/cert/v1beta1/query.pb.gw.go deleted file mode 100644 index ee54fd8d..00000000 --- a/go/node/cert/v1beta1/query.pb.gw.go +++ /dev/null @@ -1,171 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/cert/v1beta2/query.proto - -/* -Package v1beta1 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package v1beta1 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_Certificates_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Certificates_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryCertificatesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Certificates_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Certificates(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Certificates_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryCertificatesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Certificates_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Certificates(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. -func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_Certificates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Certificates_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Certificates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. 
-func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_Certificates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Certificates_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Certificates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Query_Certificates_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "cert", "v1beta2", "certificates", "list"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_Certificates_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/cert/v1beta1/types.go b/go/node/cert/v1beta1/types.go deleted file mode 100644 index 2458610a..00000000 --- a/go/node/cert/v1beta1/types.go +++ /dev/null @@ -1,70 +0,0 @@ -package v1beta1 - -import ( - "bytes" - "math/big" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -const ( - PemBlkTypeCertificate = "CERTIFICATE" - PemBlkTypeECPrivateKey = "EC PRIVATE KEY" - PemBlkTypeECPublicKey = "EC PUBLIC KEY" -) - -type CertID struct { - Owner sdk.Address - Serial big.Int -} - -func ToCertID(id CertificateID) (CertID, error) { - addr, err := sdk.AccAddressFromBech32(id.Owner) - if err != nil { - return CertID{}, err - } - - serial, valid := new(big.Int).SetString(id.Serial, 10) - if !valid { - return CertID{}, ErrInvalidSerialNumber - } - - return CertID{ - Owner: addr, - Serial: *serial, - }, nil -} - -// Certificates is the collection of Certificate -type Certificates []Certificate - -type CertificatesResponse []CertificateResponse - -// String implements the Stringer interface for a Certificates object. -func (obj Certificates) String() string { - var buf bytes.Buffer - - const sep = "\n\n" - - for _, p := range obj { - buf.WriteString(p.String()) - buf.WriteString(sep) - } - - if len(obj) > 0 { - buf.Truncate(buf.Len() - len(sep)) - } - - return buf.String() -} - -func (obj Certificates) Contains(cert Certificate) bool { - for _, c := range obj { - // fixme is bytes.Equal right way to do it? 
- if bytes.Equal(c.Cert, cert.Cert) { - return true - } - } - - return false -} diff --git a/go/node/cert/v1beta2/cert.go b/go/node/cert/v1beta2/cert.go deleted file mode 100644 index adbc6aca..00000000 --- a/go/node/cert/v1beta2/cert.go +++ /dev/null @@ -1,71 +0,0 @@ -package v1beta2 - -import ( - "crypto/x509" - "encoding/pem" - "fmt" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" -) - -func ParseAndValidateCertificate(owner sdk.Address, crt, pub []byte) (*x509.Certificate, error) { - blk, rest := pem.Decode(pub) - if blk == nil || len(rest) > 0 { - return nil, ErrInvalidPubkeyValue - } - - if blk.Type != PemBlkTypeECPublicKey { - return nil, errors.Wrap(ErrInvalidPubkeyValue, "invalid pem block type") - } - - blk, rest = pem.Decode(crt) - if blk == nil || len(rest) > 0 { - return nil, ErrInvalidCertificateValue - } - - if blk.Type != PemBlkTypeCertificate { - return nil, errors.Wrap(ErrInvalidCertificateValue, "invalid pem block type") - } - - cert, err := x509.ParseCertificate(blk.Bytes) - if err != nil { - return nil, err - } - - cowner, err := sdk.AccAddressFromBech32(cert.Subject.CommonName) - if err != nil { - return nil, errors.Wrap(ErrInvalidCertificateValue, err.Error()) - } - - if !owner.Equals(cowner) { - return nil, errors.Wrap(ErrInvalidCertificateValue, "CommonName does not match owner") - } - - return cert, nil -} - -func (m *CertificateID) String() string { - return fmt.Sprintf("%s/%s", m.Owner, m.Serial) -} - -func (m *CertificateID) Equals(val CertificateID) bool { - return (m.Owner == val.Owner) && (m.Serial == val.Serial) -} - -func (m Certificate) Validate(owner sdk.Address) error { - if val, exists := Certificate_State_name[int32(m.State)]; !exists || val == "invalid" { - return ErrInvalidState - } - - _, err := ParseAndValidateCertificate(owner, m.Cert, m.Pubkey) - if err != nil { - return err - } - - return nil -} - -func (m Certificate) IsState(state Certificate_State) bool { - return m.State == state -} diff --git a/go/node/cert/v1beta2/cert.pb.go b/go/node/cert/v1beta2/cert.pb.go deleted file mode 100644 index a58d05d8..00000000 --- a/go/node/cert/v1beta2/cert.pb.go +++ /dev/null @@ -1,1775 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/cert/v1beta2/cert.proto - -package v1beta2 - -import ( - context "context" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of deployment -type Certificate_State int32 - -const ( - // Prefix should start with 0 in enum. 
So declaring dummy state - CertificateStateInvalid Certificate_State = 0 - // CertificateValid denotes state for deployment active - CertificateValid Certificate_State = 1 - // CertificateRevoked denotes state for deployment closed - CertificateRevoked Certificate_State = 2 -) - -var Certificate_State_name = map[int32]string{ - 0: "invalid", - 1: "valid", - 2: "revoked", -} - -var Certificate_State_value = map[string]int32{ - "invalid": 0, - "valid": 1, - "revoked": 2, -} - -func (x Certificate_State) String() string { - return proto.EnumName(Certificate_State_name, int32(x)) -} - -func (Certificate_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_1bfc773547637d6c, []int{1, 0} -} - -// CertificateID stores owner and sequence number -type CertificateID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial" yaml:"serial"` -} - -func (m *CertificateID) Reset() { *m = CertificateID{} } -func (*CertificateID) ProtoMessage() {} -func (*CertificateID) Descriptor() ([]byte, []int) { - return fileDescriptor_1bfc773547637d6c, []int{0} -} -func (m *CertificateID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CertificateID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CertificateID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CertificateID) XXX_Merge(src proto.Message) { - xxx_messageInfo_CertificateID.Merge(m, src) -} -func (m *CertificateID) XXX_Size() int { - return m.Size() -} -func (m *CertificateID) XXX_DiscardUnknown() { - xxx_messageInfo_CertificateID.DiscardUnknown(m) -} - -var xxx_messageInfo_CertificateID proto.InternalMessageInfo - -func (m *CertificateID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *CertificateID) GetSerial() string { - if m != nil { - return m.Serial - } - return "" -} - -// Certificate stores state, certificate and it's public key -type Certificate struct { - State Certificate_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.cert.v1beta2.Certificate_State" json:"state" yaml:"state"` - Cert []byte `protobuf:"bytes,3,opt,name=cert,proto3" json:"cert" yaml:"cert"` - Pubkey []byte `protobuf:"bytes,4,opt,name=pubkey,proto3" json:"pubkey" yaml:"pubkey"` -} - -func (m *Certificate) Reset() { *m = Certificate{} } -func (m *Certificate) String() string { return proto.CompactTextString(m) } -func (*Certificate) ProtoMessage() {} -func (*Certificate) Descriptor() ([]byte, []int) { - return fileDescriptor_1bfc773547637d6c, []int{1} -} -func (m *Certificate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Certificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Certificate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Certificate) XXX_Merge(src proto.Message) { - xxx_messageInfo_Certificate.Merge(m, src) -} -func (m *Certificate) XXX_Size() int { - return m.Size() -} -func (m *Certificate) XXX_DiscardUnknown() { - xxx_messageInfo_Certificate.DiscardUnknown(m) -} - -var xxx_messageInfo_Certificate proto.InternalMessageInfo - -func (m *Certificate) GetState() Certificate_State { - if m != nil { - 
return m.State - } - return CertificateStateInvalid -} - -func (m *Certificate) GetCert() []byte { - if m != nil { - return m.Cert - } - return nil -} - -func (m *Certificate) GetPubkey() []byte { - if m != nil { - return m.Pubkey - } - return nil -} - -// CertificateFilter defines filters used to filter certificates -type CertificateFilter struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial" yaml:"serial"` - State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *CertificateFilter) Reset() { *m = CertificateFilter{} } -func (m *CertificateFilter) String() string { return proto.CompactTextString(m) } -func (*CertificateFilter) ProtoMessage() {} -func (*CertificateFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_1bfc773547637d6c, []int{2} -} -func (m *CertificateFilter) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CertificateFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CertificateFilter.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CertificateFilter) XXX_Merge(src proto.Message) { - xxx_messageInfo_CertificateFilter.Merge(m, src) -} -func (m *CertificateFilter) XXX_Size() int { - return m.Size() -} -func (m *CertificateFilter) XXX_DiscardUnknown() { - xxx_messageInfo_CertificateFilter.DiscardUnknown(m) -} - -var xxx_messageInfo_CertificateFilter proto.InternalMessageInfo - -func (m *CertificateFilter) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *CertificateFilter) GetSerial() string { - if m != nil { - return m.Serial - } - return "" -} - -func (m *CertificateFilter) GetState() string { - if m != nil { - return m.State - } - return "" -} - -// MsgCreateCertificate defines an SDK message for creating certificate -type MsgCreateCertificate struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Cert []byte `protobuf:"bytes,2,opt,name=cert,proto3" json:"cert" yaml:"cert"` - Pubkey []byte `protobuf:"bytes,3,opt,name=pubkey,proto3" json:"pubkey" yaml:"pubkey"` -} - -func (m *MsgCreateCertificate) Reset() { *m = MsgCreateCertificate{} } -func (m *MsgCreateCertificate) String() string { return proto.CompactTextString(m) } -func (*MsgCreateCertificate) ProtoMessage() {} -func (*MsgCreateCertificate) Descriptor() ([]byte, []int) { - return fileDescriptor_1bfc773547637d6c, []int{3} -} -func (m *MsgCreateCertificate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateCertificate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateCertificate) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateCertificate.Merge(m, src) -} -func (m *MsgCreateCertificate) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateCertificate) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateCertificate.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateCertificate proto.InternalMessageInfo - -func (m *MsgCreateCertificate) GetOwner() string { - if m != nil { - return m.Owner - } 
- return "" -} - -func (m *MsgCreateCertificate) GetCert() []byte { - if m != nil { - return m.Cert - } - return nil -} - -func (m *MsgCreateCertificate) GetPubkey() []byte { - if m != nil { - return m.Pubkey - } - return nil -} - -// MsgCreateCertificateResponse defines the Msg/CreateCertificate response type. -type MsgCreateCertificateResponse struct { -} - -func (m *MsgCreateCertificateResponse) Reset() { *m = MsgCreateCertificateResponse{} } -func (m *MsgCreateCertificateResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateCertificateResponse) ProtoMessage() {} -func (*MsgCreateCertificateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1bfc773547637d6c, []int{4} -} -func (m *MsgCreateCertificateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateCertificateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateCertificateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateCertificateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateCertificateResponse.Merge(m, src) -} -func (m *MsgCreateCertificateResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateCertificateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateCertificateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateCertificateResponse proto.InternalMessageInfo - -// MsgRevokeCertificate defines an SDK message for revoking certificate -type MsgRevokeCertificate struct { - ID CertificateID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` -} - -func (m *MsgRevokeCertificate) Reset() { *m = MsgRevokeCertificate{} } -func (m *MsgRevokeCertificate) String() string { return proto.CompactTextString(m) } -func (*MsgRevokeCertificate) ProtoMessage() {} -func (*MsgRevokeCertificate) Descriptor() ([]byte, []int) { - return fileDescriptor_1bfc773547637d6c, []int{5} -} -func (m *MsgRevokeCertificate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgRevokeCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgRevokeCertificate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgRevokeCertificate) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgRevokeCertificate.Merge(m, src) -} -func (m *MsgRevokeCertificate) XXX_Size() int { - return m.Size() -} -func (m *MsgRevokeCertificate) XXX_DiscardUnknown() { - xxx_messageInfo_MsgRevokeCertificate.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgRevokeCertificate proto.InternalMessageInfo - -func (m *MsgRevokeCertificate) GetID() CertificateID { - if m != nil { - return m.ID - } - return CertificateID{} -} - -// MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type. 
-type MsgRevokeCertificateResponse struct { -} - -func (m *MsgRevokeCertificateResponse) Reset() { *m = MsgRevokeCertificateResponse{} } -func (m *MsgRevokeCertificateResponse) String() string { return proto.CompactTextString(m) } -func (*MsgRevokeCertificateResponse) ProtoMessage() {} -func (*MsgRevokeCertificateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_1bfc773547637d6c, []int{6} -} -func (m *MsgRevokeCertificateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgRevokeCertificateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgRevokeCertificateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgRevokeCertificateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgRevokeCertificateResponse.Merge(m, src) -} -func (m *MsgRevokeCertificateResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgRevokeCertificateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgRevokeCertificateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgRevokeCertificateResponse proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("akash.cert.v1beta2.Certificate_State", Certificate_State_name, Certificate_State_value) - proto.RegisterType((*CertificateID)(nil), "akash.cert.v1beta2.CertificateID") - proto.RegisterType((*Certificate)(nil), "akash.cert.v1beta2.Certificate") - proto.RegisterType((*CertificateFilter)(nil), "akash.cert.v1beta2.CertificateFilter") - proto.RegisterType((*MsgCreateCertificate)(nil), "akash.cert.v1beta2.MsgCreateCertificate") - proto.RegisterType((*MsgCreateCertificateResponse)(nil), "akash.cert.v1beta2.MsgCreateCertificateResponse") - proto.RegisterType((*MsgRevokeCertificate)(nil), "akash.cert.v1beta2.MsgRevokeCertificate") - proto.RegisterType((*MsgRevokeCertificateResponse)(nil), "akash.cert.v1beta2.MsgRevokeCertificateResponse") -} - -func init() { proto.RegisterFile("akash/cert/v1beta2/cert.proto", fileDescriptor_1bfc773547637d6c) } - -var fileDescriptor_1bfc773547637d6c = []byte{ - // 581 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0x31, 0x6f, 0xd3, 0x40, - 0x18, 0xb5, 0x9d, 0xa4, 0xa5, 0x97, 0x16, 0xa5, 0x56, 0x44, 0x83, 0x4b, 0x7d, 0xe1, 0x10, 0x52, - 0x24, 0x84, 0x0d, 0xe9, 0x80, 0x94, 0x31, 0x8d, 0x90, 0x32, 0x54, 0x02, 0x23, 0x75, 0x60, 0x73, - 0xe2, 0xc3, 0x3d, 0x25, 0xf5, 0x45, 0xb6, 0x9b, 0xaa, 0x03, 0x62, 0x45, 0x99, 0x18, 0x59, 0x22, - 0x55, 0xe2, 0x07, 0xc0, 0xaf, 0x40, 0x1d, 0x3b, 0x32, 0x59, 0x28, 0x19, 0x40, 0x19, 0xf3, 0x0b, - 0x90, 0xbf, 0x73, 0x14, 0x57, 0x49, 0xa0, 0x5d, 0xd8, 0xfc, 0xbe, 0xf7, 0xee, 0xbe, 0xf7, 0xfc, - 0x2c, 0xa3, 0x3d, 0xbb, 0x63, 0x07, 0xc7, 0x66, 0x9b, 0xfa, 0xa1, 0xd9, 0x7f, 0xde, 0xa2, 0xa1, - 0x5d, 0x05, 0x60, 0xf4, 0x7c, 0x1e, 0x72, 0x55, 0x05, 0xda, 0x80, 0x49, 0x42, 0x6b, 0x45, 0x97, - 0xbb, 0x1c, 0x68, 0x33, 0x7e, 0x12, 0x4a, 0xf2, 0x01, 0x6d, 0x1d, 0x50, 0x3f, 0x64, 0xef, 0x58, - 0xdb, 0x0e, 0x69, 0xb3, 0xa1, 0x9a, 0x28, 0xc7, 0xcf, 0x3c, 0xea, 0x97, 0xe4, 0xb2, 0x5c, 0xd9, - 0xa8, 0xdf, 0x9f, 0x44, 0x58, 0x0c, 0xa6, 0x11, 0xde, 0x3c, 0xb7, 0x4f, 0xba, 0x35, 0x02, 0x90, - 0x58, 0x62, 0xac, 0xee, 0xa3, 0xb5, 0x80, 0xfa, 0xcc, 0xee, 0x96, 0x14, 0x38, 0xb1, 0x3b, 0x89, - 0x70, 0x32, 0x99, 0x46, 0x78, 0x4b, 0x1c, 0x11, 0x98, 0x58, 0x09, 0x51, 0xbb, 0xf3, 0xf9, 0x02, - 0x4b, 0xbf, 0x2f, 0xb0, 
0x44, 0xbe, 0x2b, 0x28, 0x9f, 0x72, 0xa0, 0x1e, 0xa1, 0x5c, 0x10, 0xda, - 0x21, 0x85, 0xdb, 0xee, 0x56, 0x1f, 0x1b, 0x8b, 0x51, 0x8c, 0x94, 0xde, 0x78, 0x13, 0x8b, 0x85, - 0x4d, 0x38, 0x37, 0xb7, 0x09, 0x90, 0x58, 0x62, 0xac, 0x3e, 0x41, 0xd9, 0xf8, 0x8e, 0x52, 0xa6, - 0x2c, 0x57, 0x36, 0xeb, 0x3b, 0x93, 0x08, 0x03, 0x9e, 0x46, 0x38, 0x2f, 0xe4, 0x31, 0x22, 0x16, - 0x0c, 0xe3, 0x4c, 0xbd, 0xd3, 0x56, 0x87, 0x9e, 0x97, 0xb2, 0x20, 0x87, 0x4c, 0x62, 0x32, 0xcf, - 0x24, 0x30, 0xb1, 0x12, 0x82, 0xbc, 0x47, 0x39, 0x30, 0xa3, 0x56, 0xd0, 0x3a, 0xf3, 0xfa, 0x76, - 0x97, 0x39, 0x05, 0x49, 0xdb, 0x1d, 0x0c, 0xcb, 0x3b, 0x29, 0xc3, 0x20, 0x69, 0x0a, 0x5a, 0xc5, - 0x28, 0x27, 0x74, 0xb2, 0x56, 0x1c, 0x0c, 0xcb, 0x85, 0x94, 0xee, 0x08, 0x04, 0x8f, 0xd0, 0xba, - 0x4f, 0xfb, 0xbc, 0x43, 0x9d, 0x82, 0xa2, 0xdd, 0x1b, 0x0c, 0xcb, 0x6a, 0x4a, 0x62, 0x09, 0x46, - 0xcb, 0x7e, 0xfc, 0xa2, 0x4b, 0xe4, 0x9b, 0x8c, 0xb6, 0x53, 0xe4, 0x4b, 0xd6, 0x0d, 0xa9, 0xff, - 0x7f, 0xea, 0x8c, 0xb7, 0x88, 0xd2, 0x32, 0xf3, 0x2d, 0x7f, 0x6b, 0xa3, 0x96, 0x85, 0xee, 0xbf, - 0xca, 0xa8, 0x78, 0x18, 0xb8, 0x07, 0x3e, 0xb5, 0x43, 0x9a, 0xfe, 0x08, 0x6e, 0xed, 0x7a, 0xd6, - 0xae, 0x72, 0xbb, 0x76, 0x33, 0x37, 0x6e, 0x37, 0x71, 0xac, 0xa3, 0x07, 0xcb, 0x0c, 0x5b, 0x34, - 0xe8, 0x71, 0x2f, 0xa0, 0xc4, 0x83, 0x40, 0xa2, 0x98, 0x74, 0xa0, 0x57, 0x48, 0x61, 0x0e, 0xa4, - 0xc9, 0x57, 0x1f, 0xfe, 0xe3, 0x93, 0x6e, 0x36, 0xea, 0x7b, 0x97, 0x11, 0x96, 0x46, 0x11, 0x56, - 0x9a, 0x8d, 0x49, 0x84, 0x15, 0xe6, 0x4c, 0x23, 0xbc, 0x21, 0x7c, 0x31, 0x87, 0x58, 0x0a, 0x73, - 0xae, 0xf9, 0x59, 0xd8, 0x37, 0xf3, 0x53, 0xfd, 0x25, 0xa3, 0xcc, 0x61, 0xe0, 0xaa, 0x1c, 0x6d, - 0x2f, 0xbe, 0xe5, 0xca, 0x32, 0x23, 0xcb, 0xe2, 0x69, 0xcf, 0x6e, 0xaa, 0x9c, 0x2d, 0x8e, 0x17, - 0x2e, 0xbe, 0x85, 0x55, 0x0b, 0x17, 0x94, 0x2b, 0x17, 0xae, 0x4c, 0x5a, 0x7f, 0x7d, 0x39, 0xd2, - 0xe5, 0xab, 0x91, 0x2e, 0xff, 0x1c, 0xe9, 0xf2, 0xa7, 0xb1, 0x2e, 0x5d, 0x8d, 0x75, 0xe9, 0xc7, - 0x58, 0x97, 0xde, 0xbe, 0x70, 0x59, 0x78, 0x7c, 0xda, 0x32, 0xda, 0xfc, 0xc4, 0x84, 0x5b, 0x9f, - 0x7a, 0x34, 0x3c, 0xe3, 0x7e, 0x27, 0x41, 0x76, 0x8f, 0x99, 0x2e, 0x37, 0x3d, 0xee, 0xd0, 0x6b, - 0x3f, 0xd4, 0xd6, 0x1a, 0xfc, 0x22, 0xf7, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0xac, 0xf1, 0x22, - 0xfa, 0x6d, 0x05, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MsgClient interface { - // CreateCertificate defines a method to create new certificate given proper inputs. 
- CreateCertificate(ctx context.Context, in *MsgCreateCertificate, opts ...grpc.CallOption) (*MsgCreateCertificateResponse, error) - // RevokeCertificate defines a method to revoke the certificate - RevokeCertificate(ctx context.Context, in *MsgRevokeCertificate, opts ...grpc.CallOption) (*MsgRevokeCertificateResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) CreateCertificate(ctx context.Context, in *MsgCreateCertificate, opts ...grpc.CallOption) (*MsgCreateCertificateResponse, error) { - out := new(MsgCreateCertificateResponse) - err := c.cc.Invoke(ctx, "/akash.cert.v1beta2.Msg/CreateCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) RevokeCertificate(ctx context.Context, in *MsgRevokeCertificate, opts ...grpc.CallOption) (*MsgRevokeCertificateResponse, error) { - out := new(MsgRevokeCertificateResponse) - err := c.cc.Invoke(ctx, "/akash.cert.v1beta2.Msg/RevokeCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. -type MsgServer interface { - // CreateCertificate defines a method to create new certificate given proper inputs. - CreateCertificate(context.Context, *MsgCreateCertificate) (*MsgCreateCertificateResponse, error) - // RevokeCertificate defines a method to revoke the certificate - RevokeCertificate(context.Context, *MsgRevokeCertificate) (*MsgRevokeCertificateResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. -type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) CreateCertificate(ctx context.Context, req *MsgCreateCertificate) (*MsgCreateCertificateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateCertificate not implemented") -} -func (*UnimplementedMsgServer) RevokeCertificate(ctx context.Context, req *MsgRevokeCertificate) (*MsgRevokeCertificateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RevokeCertificate not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_CreateCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateCertificate) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateCertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.cert.v1beta2.Msg/CreateCertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CreateCertificate(ctx, req.(*MsgCreateCertificate)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgRevokeCertificate) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).RevokeCertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.cert.v1beta2.Msg/RevokeCertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).RevokeCertificate(ctx, 
req.(*MsgRevokeCertificate)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.cert.v1beta2.Msg", - HandlerType: (*MsgServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateCertificate", - Handler: _Msg_CreateCertificate_Handler, - }, - { - MethodName: "RevokeCertificate", - Handler: _Msg_RevokeCertificate_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/cert/v1beta2/cert.proto", -} - -func (m *CertificateID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CertificateID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Serial) > 0 { - i -= len(m.Serial) - copy(dAtA[i:], m.Serial) - i = encodeVarintCert(dAtA, i, uint64(len(m.Serial))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintCert(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Certificate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Certificate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Certificate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Pubkey) > 0 { - i -= len(m.Pubkey) - copy(dAtA[i:], m.Pubkey) - i = encodeVarintCert(dAtA, i, uint64(len(m.Pubkey))) - i-- - dAtA[i] = 0x22 - } - if len(m.Cert) > 0 { - i -= len(m.Cert) - copy(dAtA[i:], m.Cert) - i = encodeVarintCert(dAtA, i, uint64(len(m.Cert))) - i-- - dAtA[i] = 0x1a - } - if m.State != 0 { - i = encodeVarintCert(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - return len(dAtA) - i, nil -} - -func (m *CertificateFilter) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateFilter) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CertificateFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintCert(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x1a - } - if len(m.Serial) > 0 { - i -= len(m.Serial) - copy(dAtA[i:], m.Serial) - i = encodeVarintCert(dAtA, i, uint64(len(m.Serial))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintCert(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateCertificate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateCertificate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - 
-func (m *MsgCreateCertificate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Pubkey) > 0 { - i -= len(m.Pubkey) - copy(dAtA[i:], m.Pubkey) - i = encodeVarintCert(dAtA, i, uint64(len(m.Pubkey))) - i-- - dAtA[i] = 0x1a - } - if len(m.Cert) > 0 { - i -= len(m.Cert) - copy(dAtA[i:], m.Cert) - i = encodeVarintCert(dAtA, i, uint64(len(m.Cert))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintCert(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateCertificateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateCertificateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateCertificateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgRevokeCertificate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgRevokeCertificate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgRevokeCertificate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCert(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgRevokeCertificateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgRevokeCertificateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgRevokeCertificateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintCert(dAtA []byte, offset int, v uint64) int { - offset -= sovCert(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CertificateID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.Serial) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - return n -} - -func (m *Certificate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.State != 0 { - n += 1 + sovCert(uint64(m.State)) - } - l = len(m.Cert) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.Pubkey) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - return n -} - -func (m *CertificateFilter) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.Serial) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - return n -} - -func (m 
*MsgCreateCertificate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.Cert) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.Pubkey) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - return n -} - -func (m *MsgCreateCertificateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgRevokeCertificate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovCert(uint64(l)) - return n -} - -func (m *MsgRevokeCertificateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovCert(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozCert(x uint64) (n int) { - return sovCert(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CertificateID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Serial", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Serial = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Certificate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - 
b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Certificate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Certificate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Certificate_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cert", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cert = append(m.Cert[:0], dAtA[iNdEx:postIndex]...) - if m.Cert == nil { - m.Cert = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pubkey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pubkey = append(m.Pubkey[:0], dAtA[iNdEx:postIndex]...) 
- if m.Pubkey == nil { - m.Pubkey = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CertificateFilter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateFilter: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateFilter: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Serial", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Serial = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateCertificate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for 
iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateCertificate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateCertificate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cert", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cert = append(m.Cert[:0], dAtA[iNdEx:postIndex]...) - if m.Cert == nil { - m.Cert = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pubkey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pubkey = append(m.Pubkey[:0], dAtA[iNdEx:postIndex]...) 
- if m.Pubkey == nil { - m.Pubkey = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateCertificateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateCertificateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateCertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgRevokeCertificate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgRevokeCertificate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgRevokeCertificate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgRevokeCertificateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum 
:= int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgRevokeCertificateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgRevokeCertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipCert(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCert - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCert - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCert - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthCert - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupCert - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthCert - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthCert = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCert = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupCert = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/cert/v1beta2/codec.go b/go/node/cert/v1beta2/codec.go deleted file mode 100644 index 0689b574..00000000 --- a/go/node/cert/v1beta2/codec.go +++ /dev/null @@ -1,43 +0,0 @@ -package v1beta2 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/msgservice" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/provider module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/provider and - // defined at the application level. 
- ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterCodec register concrete types on codec -func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { - cdc.RegisterConcrete(&MsgCreateCertificate{}, ModuleName+"/"+MsgTypeCreateCertificate, nil) - cdc.RegisterConcrete(&MsgRevokeCertificate{}, ModuleName+"/"+MsgTypeRevokeCertificate, nil) -} - -// RegisterInterfaces registers the x/provider interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil), - &MsgCreateCertificate{}, - &MsgRevokeCertificate{}, - ) - - msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) -} diff --git a/go/node/cert/v1beta2/errors.go b/go/node/cert/v1beta2/errors.go deleted file mode 100644 index a8a76ce3..00000000 --- a/go/node/cert/v1beta2/errors.go +++ /dev/null @@ -1,34 +0,0 @@ -package v1beta2 - -import ( - "errors" -) - -var ( - // ErrCertificateNotFound certificate not found - ErrCertificateNotFound = errors.New("certificate not found") - - // ErrInvalidAddress invalid trusted auditor address - ErrInvalidAddress = errors.New("invalid address") - - // ErrCertificateExists certificate already exists - ErrCertificateExists = errors.New("certificate exists") - - // ErrCertificateAlreadyRevoked certificate already revoked - ErrCertificateAlreadyRevoked = errors.New("certificate already revoked") - - // ErrInvalidSerialNumber invalid serial number - ErrInvalidSerialNumber = errors.New("invalid serial number") - - // ErrInvalidCertificateValue certificate content is not valid - ErrInvalidCertificateValue = errors.New("invalid certificate value") - - // ErrInvalidPubkeyValue public key is not valid - ErrInvalidPubkeyValue = errors.New("invalid pubkey value") - - // ErrInvalidState invalid certificate state - ErrInvalidState = errors.New("invalid state") - - // ErrInvalidKeySize invalid certificate state - ErrInvalidKeySize = errors.New("invalid key size") -) diff --git a/go/node/cert/v1beta2/genesis.go b/go/node/cert/v1beta2/genesis.go deleted file mode 100644 index fb866daf..00000000 --- a/go/node/cert/v1beta2/genesis.go +++ /dev/null @@ -1,58 +0,0 @@ -package v1beta2 - -import ( - "bytes" - "encoding/json" - - "github.com/cosmos/cosmos-sdk/codec" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -type GenesisCertificates []GenesisCertificate - -func (obj GenesisCertificates) Contains(cert GenesisCertificate) bool { - for _, c := range obj { - if c.Owner == cert.Owner { - return true - } - - if bytes.Equal(c.Certificate.Cert, cert.Certificate.Cert) { - return true - } - } - - return false -} - -func (m GenesisCertificate) Validate() error { - owner, err := sdk.AccAddressFromBech32(m.Owner) - if err != nil { - return err - } - if err := m.Certificate.Validate(owner); err != nil { - return err - } - - return nil -} - -func (m *GenesisState) Validate() error { - for _, cert := range m.Certificates { - if err := cert.Validate(); err != nil { - return err - } - } - return nil -} - -// GetGenesisStateFromAppState returns x/cert GenesisState given raw application -// genesis state. 
-func GetGenesisStateFromAppState(cdc codec.JSONCodec, appState map[string]json.RawMessage) *GenesisState { - var genesisState GenesisState - - if appState[ModuleName] != nil { - cdc.MustUnmarshalJSON(appState[ModuleName], &genesisState) - } - - return &genesisState -} diff --git a/go/node/cert/v1beta2/genesis.pb.go b/go/node/cert/v1beta2/genesis.pb.go deleted file mode 100644 index c460a9ba..00000000 --- a/go/node/cert/v1beta2/genesis.pb.go +++ /dev/null @@ -1,561 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/cert/v1beta2/genesis.proto - -package v1beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisCertificate defines certificate entry at genesis -type GenesisCertificate struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Certificate Certificate `protobuf:"bytes,2,opt,name=certificate,proto3" json:"certificate" yaml:"certificate"` -} - -func (m *GenesisCertificate) Reset() { *m = GenesisCertificate{} } -func (m *GenesisCertificate) String() string { return proto.CompactTextString(m) } -func (*GenesisCertificate) ProtoMessage() {} -func (*GenesisCertificate) Descriptor() ([]byte, []int) { - return fileDescriptor_1cd7023feaa22f4a, []int{0} -} -func (m *GenesisCertificate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisCertificate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisCertificate) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisCertificate.Merge(m, src) -} -func (m *GenesisCertificate) XXX_Size() int { - return m.Size() -} -func (m *GenesisCertificate) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisCertificate.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisCertificate proto.InternalMessageInfo - -func (m *GenesisCertificate) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *GenesisCertificate) GetCertificate() Certificate { - if m != nil { - return m.Certificate - } - return Certificate{} -} - -// GenesisState defines the basic genesis state used by cert module -type GenesisState struct { - Certificates GenesisCertificates `protobuf:"bytes,1,rep,name=certificates,proto3,castrepeated=GenesisCertificates" json:"certificates" yaml:"certificates"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_1cd7023feaa22f4a, []int{1} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if 
deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetCertificates() GenesisCertificates { - if m != nil { - return m.Certificates - } - return nil -} - -func init() { - proto.RegisterType((*GenesisCertificate)(nil), "akash.cert.v1beta2.GenesisCertificate") - proto.RegisterType((*GenesisState)(nil), "akash.cert.v1beta2.GenesisState") -} - -func init() { proto.RegisterFile("akash/cert/v1beta2/genesis.proto", fileDescriptor_1cd7023feaa22f4a) } - -var fileDescriptor_1cd7023feaa22f4a = []byte{ - // 320 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x4e, 0x2d, 0x2a, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd2, 0x4f, - 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x02, 0xab, - 0xd0, 0x03, 0xa9, 0xd0, 0x83, 0xaa, 0x90, 0x92, 0xc5, 0xa2, 0x0b, 0xac, 0x00, 0xac, 0x45, 0x4a, - 0x24, 0x3d, 0x3f, 0x3d, 0x1f, 0xcc, 0xd4, 0x07, 0xb1, 0x20, 0xa2, 0x4a, 0x9b, 0x19, 0xb9, 0x84, - 0xdc, 0x21, 0x46, 0x3b, 0xa7, 0x16, 0x95, 0x64, 0xa6, 0x65, 0x26, 0x27, 0x96, 0xa4, 0x0a, 0xe9, - 0x73, 0xb1, 0xe6, 0x97, 0xe7, 0xa5, 0x16, 0x49, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x3a, 0x49, 0xbe, - 0xba, 0x27, 0x0f, 0x11, 0xf8, 0x74, 0x4f, 0x9e, 0xa7, 0x32, 0x31, 0x37, 0xc7, 0x4a, 0x09, 0xcc, - 0x55, 0x0a, 0x82, 0x08, 0x0b, 0xe5, 0x70, 0x71, 0x27, 0x23, 0xf4, 0x4b, 0x30, 0x29, 0x30, 0x6a, - 0x70, 0x1b, 0xc9, 0xeb, 0x61, 0x3a, 0x53, 0x0f, 0xc9, 0x1a, 0x27, 0xcd, 0x13, 0xf7, 0xe4, 0x19, - 0x5e, 0xdd, 0x93, 0x47, 0xd6, 0xfb, 0xe9, 0x9e, 0xbc, 0x10, 0xc4, 0x06, 0x24, 0x41, 0xa5, 0x20, - 0x64, 0x25, 0x4a, 0xf3, 0x19, 0xb9, 0x78, 0xa0, 0xae, 0x0e, 0x2e, 0x01, 0xb9, 0xb7, 0x8f, 0x91, - 0x8b, 0x07, 0x49, 0x41, 0xb1, 0x04, 0xa3, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x1a, 0x36, 0x07, 0x60, - 0x7a, 0xd7, 0xc9, 0x09, 0xea, 0x0e, 0x14, 0x33, 0x3e, 0xdd, 0x93, 0x17, 0xc6, 0x70, 0x48, 0xb1, - 0xd2, 0xaa, 0xfb, 0xf2, 0xc2, 0x98, 0x46, 0x14, 0x07, 0xa1, 0xe8, 0x75, 0x0a, 0x3c, 0xf1, 0x48, - 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, - 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xf3, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, - 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0xeb, 0x74, 0xf3, 0x52, 0x4b, 0xca, 0xf3, 0x8b, 0xb2, 0xa1, 0xbc, - 0xc4, 0x82, 0x4c, 0xfd, 0xf4, 0x7c, 0xfd, 0xbc, 0xfc, 0x94, 0x54, 0x94, 0xb8, 0x4c, 0x62, 0x03, - 0xc7, 0x98, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xfa, 0xf6, 0xbf, 0x38, 0x1e, 0x02, 0x00, 0x00, -} - -func (m *GenesisCertificate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisCertificate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisCertificate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := 
m.Certificate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintGenesis(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Certificates) > 0 { - for iNdEx := len(m.Certificates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Certificates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisCertificate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovGenesis(uint64(l)) - } - l = m.Certificate.Size() - n += 1 + l + sovGenesis(uint64(l)) - return n -} - -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Certificates) > 0 { - for _, e := range m.Certificates { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisCertificate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisCertificate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisCertificate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Certificate", 
wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Certificates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Certificates = append(m.Certificates, GenesisCertificate{}) - if err := m.Certificates[len(m.Certificates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/cert/v1beta2/key.go b/go/node/cert/v1beta2/key.go deleted file mode 100644 index 97f23e3b..00000000 --- a/go/node/cert/v1beta2/key.go +++ /dev/null @@ -1,16 +0,0 @@ -package v1beta2 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "cert" - - // StoreKey is the store key string for provider - StoreKey = ModuleName - - // RouterKey is the message route for provider - RouterKey = ModuleName -) - -func PrefixCertificateID() []byte { - return []byte{0x01} -} diff --git a/go/node/cert/v1beta2/msgs.go b/go/node/cert/v1beta2/msgs.go deleted file mode 100644 index de1abb2f..00000000 --- a/go/node/cert/v1beta2/msgs.go +++ /dev/null @@ -1,98 +0,0 @@ -package v1beta2 - -import ( - "math/big" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - MsgTypeCreateCertificate = "cert-create-certificate" - MsgTypeRevokeCertificate = "cert-revoke-certificate" -) - -var ( - _ sdk.Msg = &MsgCreateCertificate{} - _ sdk.Msg = &MsgRevokeCertificate{} -) - -// ====MsgCreateCertificate==== -// Route implements the sdk.Msg interface -func (m MsgCreateCertificate) Route() string { - return RouterKey -} - -// Type implements the sdk.Msg interface -func (m MsgCreateCertificate) Type() string { - return MsgTypeCreateCertificate -} - -// ValidateBasic does basic validation -func (m MsgCreateCertificate) ValidateBasic() error { - owner, err := sdk.AccAddressFromBech32(m.Owner) - if err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Owner Address") - } - - _, err = ParseAndValidateCertificate(owner, m.Cert, m.Pubkey) - if err != nil { - return err - } - - return nil -} - -// GetSignBytes encodes the message for signing -func (m MsgCreateCertificate) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&m)) -} - -// GetSigners defines whose signature is required -func (m MsgCreateCertificate) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(m.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// ====MsgRevokeCertificate==== -// Route implements the sdk.Msg interface -func (m MsgRevokeCertificate) Route() string { - return RouterKey -} - -// Type implements the sdk.Msg interface -func (m MsgRevokeCertificate) Type() string { - return MsgTypeRevokeCertificate -} - -// ValidateBasic does basic validation -func (m MsgRevokeCertificate) ValidateBasic() error { - if _, err := sdk.AccAddressFromBech32(m.ID.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgRevoke: Invalid Owner Address") - } - - if _, valid := new(big.Int).SetString(m.ID.Serial, 10); !valid { - return ErrInvalidSerialNumber - } - - return nil -} - -// 
GetSignBytes encodes the message for signing -func (m MsgRevokeCertificate) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&m)) -} - -// GetSigners defines whose signature is required -func (m MsgRevokeCertificate) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(m.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} diff --git a/go/node/cert/v1beta2/query.pb.go b/go/node/cert/v1beta2/query.pb.go deleted file mode 100644 index 9ce28e7a..00000000 --- a/go/node/cert/v1beta2/query.pb.go +++ /dev/null @@ -1,954 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/cert/v1beta2/query.proto - -package v1beta2 - -import ( - context "context" - fmt "fmt" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// CertificateResponse contains a single X509 certificate and its serial number -type CertificateResponse struct { - Certificate Certificate `protobuf:"bytes,1,opt,name=certificate,proto3" json:"certificate" yaml:"certificate"` - Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial" yaml:"serial"` -} - -func (m *CertificateResponse) Reset() { *m = CertificateResponse{} } -func (m *CertificateResponse) String() string { return proto.CompactTextString(m) } -func (*CertificateResponse) ProtoMessage() {} -func (*CertificateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_56dee19acb66f387, []int{0} -} -func (m *CertificateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CertificateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CertificateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CertificateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CertificateResponse.Merge(m, src) -} -func (m *CertificateResponse) XXX_Size() int { - return m.Size() -} -func (m *CertificateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CertificateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CertificateResponse proto.InternalMessageInfo - -func (m *CertificateResponse) GetCertificate() Certificate { - if m != nil { - return m.Certificate - } - return Certificate{} -} - -func (m *CertificateResponse) GetSerial() string { - if m != nil { - return m.Serial - } - return "" -} - -// QueryDeploymentsRequest is request type for the Query/Deployments RPC method -type QueryCertificatesRequest struct { - Filter CertificateFilter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter"` - Pagination *query.PageRequest 
`protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryCertificatesRequest) Reset() { *m = QueryCertificatesRequest{} } -func (m *QueryCertificatesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryCertificatesRequest) ProtoMessage() {} -func (*QueryCertificatesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_56dee19acb66f387, []int{1} -} -func (m *QueryCertificatesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryCertificatesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryCertificatesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryCertificatesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryCertificatesRequest.Merge(m, src) -} -func (m *QueryCertificatesRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryCertificatesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryCertificatesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryCertificatesRequest proto.InternalMessageInfo - -func (m *QueryCertificatesRequest) GetFilter() CertificateFilter { - if m != nil { - return m.Filter - } - return CertificateFilter{} -} - -func (m *QueryCertificatesRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryCertificatesResponse is response type for the Query/Certificates RPC method -type QueryCertificatesResponse struct { - Certificates CertificatesResponse `protobuf:"bytes,1,rep,name=certificates,proto3,castrepeated=CertificatesResponse" json:"certificates"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryCertificatesResponse) Reset() { *m = QueryCertificatesResponse{} } -func (m *QueryCertificatesResponse) String() string { return proto.CompactTextString(m) } -func (*QueryCertificatesResponse) ProtoMessage() {} -func (*QueryCertificatesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_56dee19acb66f387, []int{2} -} -func (m *QueryCertificatesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryCertificatesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryCertificatesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryCertificatesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryCertificatesResponse.Merge(m, src) -} -func (m *QueryCertificatesResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryCertificatesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryCertificatesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryCertificatesResponse proto.InternalMessageInfo - -func (m *QueryCertificatesResponse) GetCertificates() CertificatesResponse { - if m != nil { - return m.Certificates - } - return nil -} - -func (m *QueryCertificatesResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -func init() { - proto.RegisterType((*CertificateResponse)(nil), "akash.cert.v1beta2.CertificateResponse") - proto.RegisterType((*QueryCertificatesRequest)(nil), 
"akash.cert.v1beta2.QueryCertificatesRequest") - proto.RegisterType((*QueryCertificatesResponse)(nil), "akash.cert.v1beta2.QueryCertificatesResponse") -} - -func init() { proto.RegisterFile("akash/cert/v1beta2/query.proto", fileDescriptor_56dee19acb66f387) } - -var fileDescriptor_56dee19acb66f387 = []byte{ - // 486 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xc1, 0x6e, 0xd3, 0x30, - 0x18, 0xc7, 0xeb, 0x01, 0x95, 0x70, 0xc7, 0xc5, 0xec, 0x50, 0xca, 0x48, 0xaa, 0x48, 0xa3, 0x05, - 0x51, 0x5b, 0x4b, 0x0f, 0x48, 0x1c, 0x33, 0x69, 0x5c, 0x59, 0x8e, 0xdc, 0xdc, 0xf0, 0x2d, 0xb3, - 0x96, 0xc6, 0x5d, 0xec, 0x82, 0x76, 0xe5, 0x09, 0x90, 0xb8, 0x71, 0x46, 0x42, 0xe2, 0x09, 0x78, - 0x84, 0xdd, 0x98, 0xc4, 0x85, 0x53, 0x40, 0x2d, 0xa7, 0x1d, 0xf7, 0x04, 0x28, 0x8e, 0xd1, 0x3c, - 0x35, 0xa8, 0xbb, 0xc5, 0xfe, 0x7f, 0xff, 0xcf, 0xbf, 0xff, 0xe7, 0x18, 0x7b, 0xfc, 0x98, 0xab, - 0x23, 0x96, 0x40, 0xa1, 0xd9, 0xdb, 0xdd, 0x09, 0x68, 0x1e, 0xb2, 0x93, 0x39, 0x14, 0xa7, 0x74, - 0x56, 0x48, 0x2d, 0x09, 0x31, 0x3a, 0xad, 0x74, 0x6a, 0xf5, 0xde, 0x56, 0x2a, 0x53, 0x69, 0x64, - 0x56, 0x7d, 0xd5, 0x95, 0xbd, 0xed, 0x54, 0xca, 0x34, 0x03, 0xc6, 0x67, 0x82, 0xf1, 0x3c, 0x97, - 0x9a, 0x6b, 0x21, 0x73, 0x65, 0xd5, 0xa7, 0x89, 0x54, 0x53, 0xa9, 0xd8, 0x84, 0x2b, 0xa8, 0x0f, - 0xb0, 0xc7, 0xed, 0xb2, 0x19, 0x4f, 0x45, 0x6e, 0x8a, 0x6d, 0xed, 0xa3, 0x06, 0x26, 0x03, 0x60, - 0xe4, 0xe0, 0x1b, 0xc2, 0xf7, 0xf7, 0xa0, 0xd0, 0xe2, 0x50, 0x24, 0x5c, 0x43, 0x0c, 0x6a, 0x26, - 0x73, 0x05, 0x24, 0xc3, 0x9d, 0xe4, 0x6a, 0xbb, 0x8b, 0xfa, 0x68, 0xd8, 0x09, 0x7d, 0xba, 0x1a, - 0x80, 0x3a, 0xee, 0xe8, 0xc9, 0x59, 0xe9, 0xb7, 0x2e, 0x4a, 0xdf, 0xf5, 0x5e, 0x96, 0x3e, 0x39, - 0xe5, 0xd3, 0xec, 0x45, 0xe0, 0x6c, 0x06, 0xb1, 0x5b, 0x42, 0xc6, 0xb8, 0xad, 0xa0, 0x10, 0x3c, - 0xeb, 0x6e, 0xf4, 0xd1, 0xf0, 0x6e, 0xf4, 0xf0, 0xa2, 0xf4, 0xed, 0xce, 0x65, 0xe9, 0xdf, 0xab, - 0xed, 0xf5, 0x3a, 0x88, 0xad, 0x10, 0x7c, 0x41, 0xb8, 0x7b, 0x50, 0x85, 0x77, 0x08, 0x54, 0x0c, - 0x27, 0x73, 0x50, 0x9a, 0xec, 0xe1, 0xf6, 0xa1, 0xc8, 0x34, 0x14, 0x16, 0x7d, 0x67, 0x0d, 0xfa, - 0xbe, 0x29, 0x8e, 0x6e, 0x57, 0x01, 0x62, 0x6b, 0x25, 0xfb, 0x18, 0x5f, 0xcd, 0xd3, 0xa0, 0x75, - 0xc2, 0xc7, 0xb4, 0x1e, 0x3e, 0xad, 0x86, 0x4f, 0xeb, 0xdb, 0xb5, 0xc3, 0xa7, 0xaf, 0x78, 0x0a, - 0x16, 0x20, 0x76, 0x9c, 0xc1, 0x77, 0x84, 0x1f, 0x34, 0x90, 0xda, 0x51, 0x0b, 0xbc, 0xe9, 0xcc, - 0x42, 0x75, 0x51, 0xff, 0xd6, 0xb0, 0x13, 0x0e, 0xd6, 0x00, 0xff, 0xb3, 0x47, 0xdb, 0x15, 0xf2, - 0xd7, 0x5f, 0xfe, 0x56, 0x53, 0xf3, 0xf8, 0x5a, 0x6b, 0xf2, 0xb2, 0x21, 0xd0, 0x60, 0x6d, 0x20, - 0xdb, 0xca, 0xb1, 0x86, 0x9f, 0x11, 0xbe, 0x63, 0x12, 0x91, 0x4f, 0x08, 0x6f, 0xba, 0x27, 0x93, - 0x67, 0x4d, 0xe0, 0xff, 0xbb, 0xa7, 0xde, 0xe8, 0x86, 0xd5, 0x35, 0x43, 0x30, 0x7a, 0xff, 0xe3, - 0xcf, 0xc7, 0x8d, 0x01, 0xd9, 0x61, 0x2b, 0xbf, 0xf5, 0x98, 0xb9, 0x51, 0x59, 0x26, 0x94, 0x8e, - 0x0e, 0xce, 0x16, 0x1e, 0x3a, 0x5f, 0x78, 0xe8, 0xf7, 0xc2, 0x43, 0x1f, 0x96, 0x5e, 0xeb, 0x7c, - 0xe9, 0xb5, 0x7e, 0x2e, 0xbd, 0xd6, 0xeb, 0xe7, 0xa9, 0xd0, 0x47, 0xf3, 0x09, 0x4d, 0xe4, 0xb4, - 0x6e, 0x35, 0xca, 0x41, 0xbf, 0x93, 0xc5, 0xb1, 0x5d, 0x55, 0x0f, 0x2f, 0x95, 0x2c, 0x97, 0x6f, - 0xe0, 0xda, 0xdb, 0x99, 0xb4, 0xcd, 0xbb, 0x19, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x1c, 0xf4, - 0xa0, 0xc2, 0xec, 0x03, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // Certificates queries certificates - Certificates(ctx context.Context, in *QueryCertificatesRequest, opts ...grpc.CallOption) (*QueryCertificatesResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Certificates(ctx context.Context, in *QueryCertificatesRequest, opts ...grpc.CallOption) (*QueryCertificatesResponse, error) { - out := new(QueryCertificatesResponse) - err := c.cc.Invoke(ctx, "/akash.cert.v1beta2.Query/Certificates", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // Certificates queries certificates - Certificates(context.Context, *QueryCertificatesRequest) (*QueryCertificatesResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. -type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Certificates(ctx context.Context, req *QueryCertificatesRequest) (*QueryCertificatesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Certificates not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Certificates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryCertificatesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Certificates(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.cert.v1beta2.Query/Certificates", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Certificates(ctx, req.(*QueryCertificatesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.cert.v1beta2.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Certificates", - Handler: _Query_Certificates_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/cert/v1beta2/query.proto", -} - -func (m *CertificateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CertificateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Serial) > 0 { - i -= len(m.Serial) - copy(dAtA[i:], m.Serial) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Serial))) - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Certificate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= 
size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryCertificatesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryCertificatesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryCertificatesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryCertificatesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryCertificatesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryCertificatesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Certificates) > 0 { - for iNdEx := len(m.Certificates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Certificates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CertificateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Certificate.Size() - n += 1 + l + sovQuery(uint64(l)) - l = len(m.Serial) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryCertificatesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filter.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryCertificatesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Certificates) > 0 { - for _, e := range m.Certificates { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CertificateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Serial", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Serial = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryCertificatesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryCertificatesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryCertificatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryCertificatesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryCertificatesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryCertificatesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Certificates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Certificates = append(m.Certificates, CertificateResponse{}) - if err := m.Certificates[len(m.Certificates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/cert/v1beta2/query.pb.gw.go b/go/node/cert/v1beta2/query.pb.gw.go deleted file mode 100644 index bf0f7790..00000000 --- a/go/node/cert/v1beta2/query.pb.gw.go +++ /dev/null @@ -1,171 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/cert/v1beta2/query.proto - -/* -Package v1beta2 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package v1beta2 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_Certificates_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Certificates_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryCertificatesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Certificates_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Certificates(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Certificates_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryCertificatesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Certificates_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Certificates(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
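Editorial note (illustrative, not part of the change set): the gateway glue being deleted here was typically wired up exactly as its own comment suggests, via RegisterQueryHandlerFromEndpoint. A minimal sketch follows; the listen address, gRPC endpoint, and import path are assumptions, not values from this change.

// Illustrative sketch: exposing the gRPC Query service as REST/JSON through the
// generated grpc-gateway registration helper.
package main

import (
	"context"
	"log"
	"net/http"

	certtypes "github.com/akash-network/akash-api/go/node/cert/v1beta2" // assumed import path
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}

	// "localhost:9090" stands in for the node's gRPC endpoint.
	if err := certtypes.RegisterQueryHandlerFromEndpoint(ctx, mux, "localhost:9090", opts); err != nil {
		log.Fatal(err)
	}

	log.Fatal(http.ListenAndServe(":8080", mux))
}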
-func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_Certificates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Certificates_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Certificates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_Certificates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Certificates_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Certificates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Query_Certificates_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "cert", "v1beta3", "certificates", "list"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_Certificates_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/cert/v1beta2/types.go b/go/node/cert/v1beta2/types.go deleted file mode 100644 index 38b541f7..00000000 --- a/go/node/cert/v1beta2/types.go +++ /dev/null @@ -1,70 +0,0 @@ -package v1beta2 - -import ( - "bytes" - "math/big" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -const ( - PemBlkTypeCertificate = "CERTIFICATE" - PemBlkTypeECPrivateKey = "EC PRIVATE KEY" - PemBlkTypeECPublicKey = "EC PUBLIC KEY" -) - -type CertID struct { - Owner sdk.Address - Serial big.Int -} - -func ToCertID(id CertificateID) (CertID, error) { - addr, err := sdk.AccAddressFromBech32(id.Owner) - if err != nil { - return CertID{}, err - } - - serial, valid := new(big.Int).SetString(id.Serial, 10) - if !valid { - return CertID{}, ErrInvalidSerialNumber - } - - return CertID{ - Owner: addr, - Serial: *serial, - }, nil -} - -// Certificates is the collection of Certificate -type Certificates []Certificate - -type CertificatesResponse []CertificateResponse - -// String implements the Stringer interface for a Certificates object. -func (obj Certificates) String() string { - var buf bytes.Buffer - - const sep = "\n\n" - - for _, p := range obj { - buf.WriteString(p.String()) - buf.WriteString(sep) - } - - if len(obj) > 0 { - buf.Truncate(buf.Len() - len(sep)) - } - - return buf.String() -} - -func (obj Certificates) Contains(cert Certificate) bool { - for _, c := range obj { - // fixme is bytes.Equal right way to do it? - if bytes.Equal(c.Cert, cert.Cert) { - return true - } - } - - return false -} diff --git a/go/node/cert/v1beta3/cert.go b/go/node/cert/v1beta3/cert.go deleted file mode 100644 index 404d14f7..00000000 --- a/go/node/cert/v1beta3/cert.go +++ /dev/null @@ -1,71 +0,0 @@ -package v1beta3 - -import ( - "crypto/x509" - "encoding/pem" - "fmt" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" -) - -func ParseAndValidateCertificate(owner sdk.Address, crt, pub []byte) (*x509.Certificate, error) { - blk, rest := pem.Decode(pub) - if blk == nil || len(rest) > 0 { - return nil, ErrInvalidPubkeyValue - } - - if blk.Type != PemBlkTypeECPublicKey { - return nil, errors.Wrap(ErrInvalidPubkeyValue, "invalid pem block type") - } - - blk, rest = pem.Decode(crt) - if blk == nil || len(rest) > 0 { - return nil, ErrInvalidCertificateValue - } - - if blk.Type != PemBlkTypeCertificate { - return nil, errors.Wrap(ErrInvalidCertificateValue, "invalid pem block type") - } - - cert, err := x509.ParseCertificate(blk.Bytes) - if err != nil { - return nil, err - } - - cowner, err := sdk.AccAddressFromBech32(cert.Subject.CommonName) - if err != nil { - return nil, errors.Wrap(ErrInvalidCertificateValue, err.Error()) - } - - if !owner.Equals(cowner) { - return nil, errors.Wrap(ErrInvalidCertificateValue, "CommonName does not match owner") - } - - return cert, nil -} - -func (m *CertificateID) String() string { - return fmt.Sprintf("%s/%s", m.Owner, m.Serial) -} - -func (m *CertificateID) Equals(val CertificateID) bool { - return (m.Owner == val.Owner) && (m.Serial == val.Serial) -} - -func (m Certificate) Validate(owner sdk.Address) error { - if val, exists := Certificate_State_name[int32(m.State)]; !exists || val == "invalid" { - return ErrInvalidState - } - - _, err := 
ParseAndValidateCertificate(owner, m.Cert, m.Pubkey) - if err != nil { - return err - } - - return nil -} - -func (m Certificate) IsState(state Certificate_State) bool { - return m.State == state -} diff --git a/go/node/cert/v1beta3/cert.pb.go b/go/node/cert/v1beta3/cert.pb.go deleted file mode 100644 index 1e44aa58..00000000 --- a/go/node/cert/v1beta3/cert.pb.go +++ /dev/null @@ -1,1775 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/cert/v1beta3/cert.proto - -package v1beta3 - -import ( - context "context" - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of deployment -type Certificate_State int32 - -const ( - // Prefix should start with 0 in enum. So declaring dummy state - CertificateStateInvalid Certificate_State = 0 - // CertificateValid denotes state for deployment active - CertificateValid Certificate_State = 1 - // CertificateRevoked denotes state for deployment closed - CertificateRevoked Certificate_State = 2 -) - -var Certificate_State_name = map[int32]string{ - 0: "invalid", - 1: "valid", - 2: "revoked", -} - -var Certificate_State_value = map[string]int32{ - "invalid": 0, - "valid": 1, - "revoked": 2, -} - -func (x Certificate_State) String() string { - return proto.EnumName(Certificate_State_name, int32(x)) -} - -func (Certificate_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_a906cf3717502e3d, []int{1, 0} -} - -// CertificateID stores owner and sequence number -type CertificateID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial" yaml:"serial"` -} - -func (m *CertificateID) Reset() { *m = CertificateID{} } -func (*CertificateID) ProtoMessage() {} -func (*CertificateID) Descriptor() ([]byte, []int) { - return fileDescriptor_a906cf3717502e3d, []int{0} -} -func (m *CertificateID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CertificateID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CertificateID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CertificateID) XXX_Merge(src proto.Message) { - xxx_messageInfo_CertificateID.Merge(m, src) -} -func (m *CertificateID) XXX_Size() int { - return m.Size() -} -func (m *CertificateID) XXX_DiscardUnknown() { - xxx_messageInfo_CertificateID.DiscardUnknown(m) -} - -var xxx_messageInfo_CertificateID proto.InternalMessageInfo - -func (m *CertificateID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *CertificateID) GetSerial() string { - if m != nil { - return m.Serial - } - return "" -} - -// 
Certificate stores state, certificate and it's public key -type Certificate struct { - State Certificate_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.cert.v1beta3.Certificate_State" json:"state" yaml:"state"` - Cert []byte `protobuf:"bytes,3,opt,name=cert,proto3" json:"cert" yaml:"cert"` - Pubkey []byte `protobuf:"bytes,4,opt,name=pubkey,proto3" json:"pubkey" yaml:"pubkey"` -} - -func (m *Certificate) Reset() { *m = Certificate{} } -func (m *Certificate) String() string { return proto.CompactTextString(m) } -func (*Certificate) ProtoMessage() {} -func (*Certificate) Descriptor() ([]byte, []int) { - return fileDescriptor_a906cf3717502e3d, []int{1} -} -func (m *Certificate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Certificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Certificate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Certificate) XXX_Merge(src proto.Message) { - xxx_messageInfo_Certificate.Merge(m, src) -} -func (m *Certificate) XXX_Size() int { - return m.Size() -} -func (m *Certificate) XXX_DiscardUnknown() { - xxx_messageInfo_Certificate.DiscardUnknown(m) -} - -var xxx_messageInfo_Certificate proto.InternalMessageInfo - -func (m *Certificate) GetState() Certificate_State { - if m != nil { - return m.State - } - return CertificateStateInvalid -} - -func (m *Certificate) GetCert() []byte { - if m != nil { - return m.Cert - } - return nil -} - -func (m *Certificate) GetPubkey() []byte { - if m != nil { - return m.Pubkey - } - return nil -} - -// CertificateFilter defines filters used to filter certificates -type CertificateFilter struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial" yaml:"serial"` - State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *CertificateFilter) Reset() { *m = CertificateFilter{} } -func (m *CertificateFilter) String() string { return proto.CompactTextString(m) } -func (*CertificateFilter) ProtoMessage() {} -func (*CertificateFilter) Descriptor() ([]byte, []int) { - return fileDescriptor_a906cf3717502e3d, []int{2} -} -func (m *CertificateFilter) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CertificateFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CertificateFilter.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CertificateFilter) XXX_Merge(src proto.Message) { - xxx_messageInfo_CertificateFilter.Merge(m, src) -} -func (m *CertificateFilter) XXX_Size() int { - return m.Size() -} -func (m *CertificateFilter) XXX_DiscardUnknown() { - xxx_messageInfo_CertificateFilter.DiscardUnknown(m) -} - -var xxx_messageInfo_CertificateFilter proto.InternalMessageInfo - -func (m *CertificateFilter) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *CertificateFilter) GetSerial() string { - if m != nil { - return m.Serial - } - return "" -} - -func (m *CertificateFilter) GetState() string { - if m != nil { - return m.State - } - return "" -} - -// MsgCreateCertificate defines an SDK message for creating certificate -type MsgCreateCertificate struct { - 
Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Cert []byte `protobuf:"bytes,2,opt,name=cert,proto3" json:"cert" yaml:"cert"` - Pubkey []byte `protobuf:"bytes,3,opt,name=pubkey,proto3" json:"pubkey" yaml:"pubkey"` -} - -func (m *MsgCreateCertificate) Reset() { *m = MsgCreateCertificate{} } -func (m *MsgCreateCertificate) String() string { return proto.CompactTextString(m) } -func (*MsgCreateCertificate) ProtoMessage() {} -func (*MsgCreateCertificate) Descriptor() ([]byte, []int) { - return fileDescriptor_a906cf3717502e3d, []int{3} -} -func (m *MsgCreateCertificate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateCertificate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateCertificate) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateCertificate.Merge(m, src) -} -func (m *MsgCreateCertificate) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateCertificate) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateCertificate.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateCertificate proto.InternalMessageInfo - -func (m *MsgCreateCertificate) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *MsgCreateCertificate) GetCert() []byte { - if m != nil { - return m.Cert - } - return nil -} - -func (m *MsgCreateCertificate) GetPubkey() []byte { - if m != nil { - return m.Pubkey - } - return nil -} - -// MsgCreateCertificateResponse defines the Msg/CreateCertificate response type. -type MsgCreateCertificateResponse struct { -} - -func (m *MsgCreateCertificateResponse) Reset() { *m = MsgCreateCertificateResponse{} } -func (m *MsgCreateCertificateResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateCertificateResponse) ProtoMessage() {} -func (*MsgCreateCertificateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a906cf3717502e3d, []int{4} -} -func (m *MsgCreateCertificateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateCertificateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateCertificateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateCertificateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateCertificateResponse.Merge(m, src) -} -func (m *MsgCreateCertificateResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateCertificateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateCertificateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateCertificateResponse proto.InternalMessageInfo - -// MsgRevokeCertificate defines an SDK message for revoking certificate -type MsgRevokeCertificate struct { - ID CertificateID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` -} - -func (m *MsgRevokeCertificate) Reset() { *m = MsgRevokeCertificate{} } -func (m *MsgRevokeCertificate) String() string { return proto.CompactTextString(m) } -func (*MsgRevokeCertificate) ProtoMessage() {} -func (*MsgRevokeCertificate) Descriptor() ([]byte, []int) { - return fileDescriptor_a906cf3717502e3d, []int{5} -} 
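Editorial note (illustrative, not part of the change set): the validation rules in the cert.go deleted above are easiest to see from the producing side. ParseAndValidateCertificate expects a PEM block of type PemBlkTypeCertificate, a public key block of type PemBlkTypeECPublicKey, no trailing bytes, and a certificate CommonName equal to the owner's bech32 address. The sketch below generates a pair shaped to satisfy those checks; it is a minimal illustration, not the helper the SDK actually ships.

// Illustrative sketch: building a certificate/public-key PEM pair acceptable to
// ParseAndValidateCertificate. "owner" must be the account's bech32 address.
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"math/big"
	"time"
)

func buildCertPair(owner string) (certPEM, pubPEM []byte, err error) {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, nil, err
	}

	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: owner}, // CommonName must match the owner address
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(365 * 24 * time.Hour),
	}

	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &priv.PublicKey, priv)
	if err != nil {
		return nil, nil, err
	}

	pubDER, err := x509.MarshalPKIXPublicKey(&priv.PublicKey)
	if err != nil {
		return nil, nil, err
	}

	// Block types mirror PemBlkTypeCertificate / PemBlkTypeECPublicKey from types.go.
	certPEM = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
	pubPEM = pem.EncodeToMemory(&pem.Block{Type: "EC PUBLIC KEY", Bytes: pubDER})
	return certPEM, pubPEM, nil
}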
-func (m *MsgRevokeCertificate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgRevokeCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgRevokeCertificate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgRevokeCertificate) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgRevokeCertificate.Merge(m, src) -} -func (m *MsgRevokeCertificate) XXX_Size() int { - return m.Size() -} -func (m *MsgRevokeCertificate) XXX_DiscardUnknown() { - xxx_messageInfo_MsgRevokeCertificate.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgRevokeCertificate proto.InternalMessageInfo - -func (m *MsgRevokeCertificate) GetID() CertificateID { - if m != nil { - return m.ID - } - return CertificateID{} -} - -// MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type. -type MsgRevokeCertificateResponse struct { -} - -func (m *MsgRevokeCertificateResponse) Reset() { *m = MsgRevokeCertificateResponse{} } -func (m *MsgRevokeCertificateResponse) String() string { return proto.CompactTextString(m) } -func (*MsgRevokeCertificateResponse) ProtoMessage() {} -func (*MsgRevokeCertificateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_a906cf3717502e3d, []int{6} -} -func (m *MsgRevokeCertificateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgRevokeCertificateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgRevokeCertificateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgRevokeCertificateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgRevokeCertificateResponse.Merge(m, src) -} -func (m *MsgRevokeCertificateResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgRevokeCertificateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgRevokeCertificateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgRevokeCertificateResponse proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("akash.cert.v1beta3.Certificate_State", Certificate_State_name, Certificate_State_value) - proto.RegisterType((*CertificateID)(nil), "akash.cert.v1beta3.CertificateID") - proto.RegisterType((*Certificate)(nil), "akash.cert.v1beta3.Certificate") - proto.RegisterType((*CertificateFilter)(nil), "akash.cert.v1beta3.CertificateFilter") - proto.RegisterType((*MsgCreateCertificate)(nil), "akash.cert.v1beta3.MsgCreateCertificate") - proto.RegisterType((*MsgCreateCertificateResponse)(nil), "akash.cert.v1beta3.MsgCreateCertificateResponse") - proto.RegisterType((*MsgRevokeCertificate)(nil), "akash.cert.v1beta3.MsgRevokeCertificate") - proto.RegisterType((*MsgRevokeCertificateResponse)(nil), "akash.cert.v1beta3.MsgRevokeCertificateResponse") -} - -func init() { proto.RegisterFile("akash/cert/v1beta3/cert.proto", fileDescriptor_a906cf3717502e3d) } - -var fileDescriptor_a906cf3717502e3d = []byte{ - // 581 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0x31, 0x6f, 0xd3, 0x40, - 0x18, 0xb5, 0x9d, 0xa4, 0xa5, 0xd7, 0x16, 0xa5, 0x56, 0x44, 0x83, 0x4b, 0x7d, 0xe1, 0x10, 0x52, - 0x24, 0x84, 0x0d, 0xc9, 0x80, 0x94, 0x31, 0x8d, 0x90, 0x32, 0x54, 0x02, 0x23, 0x75, 0x60, 0x73, - 0xe2, 0xc3, 0x3d, 
0x25, 0xf5, 0x45, 0xb6, 0x9b, 0xaa, 0x03, 0x62, 0x45, 0x99, 0x18, 0x59, 0x22, - 0x55, 0xe2, 0x07, 0xc0, 0xaf, 0x40, 0x1d, 0x3b, 0x32, 0x59, 0x28, 0x19, 0x40, 0x19, 0xf3, 0x0b, - 0x90, 0xbf, 0x73, 0x14, 0x57, 0x49, 0xa0, 0x5d, 0xd8, 0xfc, 0xbe, 0xf7, 0xee, 0xbe, 0xf7, 0xfc, - 0x2c, 0xa3, 0x7d, 0xbb, 0x63, 0x07, 0xc7, 0x66, 0x9b, 0xfa, 0xa1, 0xd9, 0x7f, 0xde, 0xa2, 0xa1, - 0x5d, 0x05, 0x60, 0xf4, 0x7c, 0x1e, 0x72, 0x55, 0x05, 0xda, 0x80, 0x49, 0x42, 0x6b, 0x05, 0x97, - 0xbb, 0x1c, 0x68, 0x33, 0x7e, 0x12, 0x4a, 0xf2, 0x01, 0x6d, 0x1f, 0x50, 0x3f, 0x64, 0xef, 0x58, - 0xdb, 0x0e, 0x69, 0xb3, 0xa1, 0x9a, 0x28, 0xc7, 0xcf, 0x3c, 0xea, 0x17, 0xe5, 0x92, 0x5c, 0xde, - 0xa8, 0xdf, 0x9f, 0x44, 0x58, 0x0c, 0xa6, 0x11, 0xde, 0x3a, 0xb7, 0x4f, 0xba, 0x35, 0x02, 0x90, - 0x58, 0x62, 0xac, 0x56, 0xd1, 0x5a, 0x40, 0x7d, 0x66, 0x77, 0x8b, 0x0a, 0x9c, 0xd8, 0x9b, 0x44, - 0x38, 0x99, 0x4c, 0x23, 0xbc, 0x2d, 0x8e, 0x08, 0x4c, 0xac, 0x84, 0xa8, 0xdd, 0xf9, 0x7c, 0x81, - 0xa5, 0xdf, 0x17, 0x58, 0x22, 0xdf, 0x15, 0xb4, 0x99, 0x72, 0xa0, 0x1e, 0xa1, 0x5c, 0x10, 0xda, - 0x21, 0x85, 0xdb, 0xee, 0x56, 0x1e, 0x1b, 0x8b, 0x51, 0x8c, 0x94, 0xde, 0x78, 0x13, 0x8b, 0x85, - 0x4d, 0x38, 0x37, 0xb7, 0x09, 0x90, 0x58, 0x62, 0xac, 0x3e, 0x41, 0xd9, 0xf8, 0x8e, 0x62, 0xa6, - 0x24, 0x97, 0xb7, 0xea, 0xbb, 0x93, 0x08, 0x03, 0x9e, 0x46, 0x78, 0x53, 0xc8, 0x63, 0x44, 0x2c, - 0x18, 0xc6, 0x99, 0x7a, 0xa7, 0xad, 0x0e, 0x3d, 0x2f, 0x66, 0x41, 0x0e, 0x99, 0xc4, 0x64, 0x9e, - 0x49, 0x60, 0x62, 0x25, 0x04, 0x79, 0x8f, 0x72, 0x60, 0x46, 0x2d, 0xa3, 0x75, 0xe6, 0xf5, 0xed, - 0x2e, 0x73, 0xf2, 0x92, 0xb6, 0x37, 0x18, 0x96, 0x76, 0x53, 0x86, 0x41, 0xd2, 0x14, 0xb4, 0x8a, - 0x51, 0x4e, 0xe8, 0x64, 0xad, 0x30, 0x18, 0x96, 0xf2, 0x29, 0xdd, 0x11, 0x08, 0x1e, 0xa1, 0x75, - 0x9f, 0xf6, 0x79, 0x87, 0x3a, 0x79, 0x45, 0xbb, 0x37, 0x18, 0x96, 0xd4, 0x94, 0xc4, 0x12, 0x8c, - 0x96, 0xfd, 0xf8, 0x45, 0x97, 0xc8, 0x37, 0x19, 0xed, 0xa4, 0xc8, 0x97, 0xac, 0x1b, 0x52, 0xff, - 0xff, 0xd4, 0x19, 0x6f, 0x11, 0xa5, 0x65, 0xe6, 0x5b, 0xfe, 0xd6, 0x46, 0x2d, 0x0b, 0xdd, 0x7f, - 0x95, 0x51, 0xe1, 0x30, 0x70, 0x0f, 0x7c, 0x6a, 0x87, 0x34, 0xfd, 0x11, 0xdc, 0xda, 0xf5, 0xac, - 0x5d, 0xe5, 0x76, 0xed, 0x66, 0x6e, 0xdc, 0x6e, 0xe2, 0x58, 0x47, 0x0f, 0x96, 0x19, 0xb6, 0x68, - 0xd0, 0xe3, 0x5e, 0x40, 0x89, 0x07, 0x81, 0x44, 0x31, 0xe9, 0x40, 0xaf, 0x90, 0xc2, 0x1c, 0x48, - 0xb3, 0x59, 0x79, 0xf8, 0x8f, 0x4f, 0xba, 0xd9, 0xa8, 0xef, 0x5f, 0x46, 0x58, 0x1a, 0x45, 0x58, - 0x69, 0x36, 0x26, 0x11, 0x56, 0x98, 0x33, 0x8d, 0xf0, 0x86, 0xf0, 0xc5, 0x1c, 0x62, 0x29, 0xcc, - 0xb9, 0xe6, 0x67, 0x61, 0xdf, 0xcc, 0x4f, 0xe5, 0x97, 0x8c, 0x32, 0x87, 0x81, 0xab, 0x72, 0xb4, - 0xb3, 0xf8, 0x96, 0xcb, 0xcb, 0x8c, 0x2c, 0x8b, 0xa7, 0x3d, 0xbb, 0xa9, 0x72, 0xb6, 0x38, 0x5e, - 0xb8, 0xf8, 0x16, 0x56, 0x2d, 0x5c, 0x50, 0xae, 0x5c, 0xb8, 0x32, 0x69, 0xfd, 0xf5, 0xe5, 0x48, - 0x97, 0xaf, 0x46, 0xba, 0xfc, 0x73, 0xa4, 0xcb, 0x9f, 0xc6, 0xba, 0x74, 0x35, 0xd6, 0xa5, 0x1f, - 0x63, 0x5d, 0x7a, 0xfb, 0xc2, 0x65, 0xe1, 0xf1, 0x69, 0xcb, 0x68, 0xf3, 0x13, 0x13, 0x6e, 0x7d, - 0xea, 0xd1, 0xf0, 0x8c, 0xfb, 0x9d, 0x04, 0xd9, 0x3d, 0x66, 0xba, 0xdc, 0xf4, 0xb8, 0x43, 0xaf, - 0xfd, 0x50, 0x5b, 0x6b, 0xf0, 0x8b, 0xac, 0xfe, 0x09, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x35, 0x32, - 0x11, 0x6d, 0x05, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. 
-const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MsgClient interface { - // CreateCertificate defines a method to create new certificate given proper inputs. - CreateCertificate(ctx context.Context, in *MsgCreateCertificate, opts ...grpc.CallOption) (*MsgCreateCertificateResponse, error) - // RevokeCertificate defines a method to revoke the certificate - RevokeCertificate(ctx context.Context, in *MsgRevokeCertificate, opts ...grpc.CallOption) (*MsgRevokeCertificateResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) CreateCertificate(ctx context.Context, in *MsgCreateCertificate, opts ...grpc.CallOption) (*MsgCreateCertificateResponse, error) { - out := new(MsgCreateCertificateResponse) - err := c.cc.Invoke(ctx, "/akash.cert.v1beta3.Msg/CreateCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) RevokeCertificate(ctx context.Context, in *MsgRevokeCertificate, opts ...grpc.CallOption) (*MsgRevokeCertificateResponse, error) { - out := new(MsgRevokeCertificateResponse) - err := c.cc.Invoke(ctx, "/akash.cert.v1beta3.Msg/RevokeCertificate", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. -type MsgServer interface { - // CreateCertificate defines a method to create new certificate given proper inputs. - CreateCertificate(context.Context, *MsgCreateCertificate) (*MsgCreateCertificateResponse, error) - // RevokeCertificate defines a method to revoke the certificate - RevokeCertificate(context.Context, *MsgRevokeCertificate) (*MsgRevokeCertificateResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
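Editorial note (illustrative, not part of the change set): the embedding note above describes the usual pattern for keeping a module's Msg service forward compatible: implement only the handlers you need and let UnimplementedMsgServer answer anything new with codes.Unimplemented. A minimal sketch, with the package name, import path, and handler body assumed:

// Illustrative sketch: a Msg service implementation that stays forward
// compatible by embedding UnimplementedMsgServer.
package keeper

import (
	"context"

	types "github.com/akash-network/akash-api/go/node/cert/v1beta3" // assumed import path
)

type msgServer struct {
	types.UnimplementedMsgServer
	// keeper dependencies would live here
}

// RevokeCertificate is intentionally not implemented; the embedded
// UnimplementedMsgServer supplies it, so *msgServer still satisfies MsgServer.
var _ types.MsgServer = (*msgServer)(nil)

func (ms *msgServer) CreateCertificate(_ context.Context, msg *types.MsgCreateCertificate) (*types.MsgCreateCertificateResponse, error) {
	// A real handler would validate and persist msg.Cert / msg.Pubkey under msg.Owner.
	return &types.MsgCreateCertificateResponse{}, nil
}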
-type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) CreateCertificate(ctx context.Context, req *MsgCreateCertificate) (*MsgCreateCertificateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateCertificate not implemented") -} -func (*UnimplementedMsgServer) RevokeCertificate(ctx context.Context, req *MsgRevokeCertificate) (*MsgRevokeCertificateResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method RevokeCertificate not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_CreateCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateCertificate) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateCertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.cert.v1beta3.Msg/CreateCertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CreateCertificate(ctx, req.(*MsgCreateCertificate)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_RevokeCertificate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgRevokeCertificate) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).RevokeCertificate(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.cert.v1beta3.Msg/RevokeCertificate", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).RevokeCertificate(ctx, req.(*MsgRevokeCertificate)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.cert.v1beta3.Msg", - HandlerType: (*MsgServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateCertificate", - Handler: _Msg_CreateCertificate_Handler, - }, - { - MethodName: "RevokeCertificate", - Handler: _Msg_RevokeCertificate_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/cert/v1beta3/cert.proto", -} - -func (m *CertificateID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CertificateID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Serial) > 0 { - i -= len(m.Serial) - copy(dAtA[i:], m.Serial) - i = encodeVarintCert(dAtA, i, uint64(len(m.Serial))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintCert(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Certificate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Certificate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Certificate) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Pubkey) > 0 { - i -= len(m.Pubkey) - copy(dAtA[i:], m.Pubkey) - i = encodeVarintCert(dAtA, i, uint64(len(m.Pubkey))) - i-- - dAtA[i] = 0x22 - } - if len(m.Cert) > 0 { - i -= len(m.Cert) - copy(dAtA[i:], m.Cert) - i = encodeVarintCert(dAtA, i, uint64(len(m.Cert))) - i-- - dAtA[i] = 0x1a - } - if m.State != 0 { - i = encodeVarintCert(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - return len(dAtA) - i, nil -} - -func (m *CertificateFilter) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateFilter) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CertificateFilter) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintCert(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x1a - } - if len(m.Serial) > 0 { - i -= len(m.Serial) - copy(dAtA[i:], m.Serial) - i = encodeVarintCert(dAtA, i, uint64(len(m.Serial))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintCert(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateCertificate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateCertificate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateCertificate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Pubkey) > 0 { - i -= len(m.Pubkey) - copy(dAtA[i:], m.Pubkey) - i = encodeVarintCert(dAtA, i, uint64(len(m.Pubkey))) - i-- - dAtA[i] = 0x1a - } - if len(m.Cert) > 0 { - i -= len(m.Cert) - copy(dAtA[i:], m.Cert) - i = encodeVarintCert(dAtA, i, uint64(len(m.Cert))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintCert(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateCertificateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateCertificateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateCertificateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgRevokeCertificate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgRevokeCertificate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgRevokeCertificate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := 
m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCert(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgRevokeCertificateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgRevokeCertificateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgRevokeCertificateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintCert(dAtA []byte, offset int, v uint64) int { - offset -= sovCert(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CertificateID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.Serial) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - return n -} - -func (m *Certificate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.State != 0 { - n += 1 + sovCert(uint64(m.State)) - } - l = len(m.Cert) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.Pubkey) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - return n -} - -func (m *CertificateFilter) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.Serial) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - return n -} - -func (m *MsgCreateCertificate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.Cert) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - l = len(m.Pubkey) - if l > 0 { - n += 1 + l + sovCert(uint64(l)) - } - return n -} - -func (m *MsgCreateCertificateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgRevokeCertificate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovCert(uint64(l)) - return n -} - -func (m *MsgRevokeCertificateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovCert(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozCert(x uint64) (n int) { - return sovCert(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CertificateID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", 
wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Serial", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Serial = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Certificate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Certificate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Certificate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Certificate_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cert", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cert = append(m.Cert[:0], dAtA[iNdEx:postIndex]...) 
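(Editor's aside, not part of the diff: the repeated 7-bit shift loops in these generated Unmarshal methods are plain base-128 varint decoding. Below is a minimal, self-contained sketch of the same decode step using only the standard library, for readers unfamiliar with the gogo wire format; it is illustrative only and does not belong to the deleted file.)

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads an unsigned base-128 varint from buf, mirroring the
// shift/accumulate loop emitted by protoc-gen-gogo in the code above.
func decodeVarint(buf []byte) (uint64, int, error) {
	var v uint64
	for i, shift := 0, uint(0); i < len(buf); i, shift = i+1, shift+7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows uint64")
		}
		b := buf[i]
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of the varint
			return v, i + 1, nil
		}
	}
	return 0, 0, errors.New("unexpected end of buffer")
}

func main() {
	// 0xAC 0x02 encodes 300: 0x2C | (0x02 << 7).
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}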
- if m.Cert == nil { - m.Cert = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pubkey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pubkey = append(m.Pubkey[:0], dAtA[iNdEx:postIndex]...) - if m.Pubkey == nil { - m.Pubkey = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CertificateFilter) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateFilter: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateFilter: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Serial", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Serial = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 
0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateCertificate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateCertificate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateCertificate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cert", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cert = append(m.Cert[:0], dAtA[iNdEx:postIndex]...) - if m.Cert == nil { - m.Cert = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pubkey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pubkey = append(m.Pubkey[:0], dAtA[iNdEx:postIndex]...) 
- if m.Pubkey == nil { - m.Pubkey = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateCertificateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateCertificateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateCertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgRevokeCertificate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgRevokeCertificate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgRevokeCertificate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCert - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCert - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgRevokeCertificateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCert - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum 
:= int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgRevokeCertificateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgRevokeCertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipCert(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCert - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipCert(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCert - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCert - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCert - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthCert - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupCert - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthCert - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthCert = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCert = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupCert = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/cert/v1beta3/codec.go b/go/node/cert/v1beta3/codec.go deleted file mode 100644 index faef3ce3..00000000 --- a/go/node/cert/v1beta3/codec.go +++ /dev/null @@ -1,43 +0,0 @@ -package v1beta3 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/msgservice" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/provider module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/provider and - // defined at the application level. 
- ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterCodec register concrete types on codec -func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { - cdc.RegisterConcrete(&MsgCreateCertificate{}, ModuleName+"/"+MsgTypeCreateCertificate, nil) - cdc.RegisterConcrete(&MsgRevokeCertificate{}, ModuleName+"/"+MsgTypeRevokeCertificate, nil) -} - -// RegisterInterfaces registers the x/provider interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil), - &MsgCreateCertificate{}, - &MsgRevokeCertificate{}, - ) - - msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) -} diff --git a/go/node/cert/v1beta3/errors.go b/go/node/cert/v1beta3/errors.go deleted file mode 100644 index daa527a8..00000000 --- a/go/node/cert/v1beta3/errors.go +++ /dev/null @@ -1,52 +0,0 @@ -package v1beta3 - -import ( - "errors" - - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - errCertificateNotFound uint32 = iota + 1 - errInvalidAddress - errCertificateExists - errCertificateAlreadyRevoked - errInvalidSerialNumber - errInvalidCertificateValue - errInvalidPubkeyValue - errInvalidState - errInvalidKeySize -) - -var ( - ErrCertificate = errors.New("certificate error") -) - -var ( - // ErrCertificateNotFound certificate not found - ErrCertificateNotFound = sdkerrors.Register(ModuleName, errCertificateNotFound, "certificate not found") - - // ErrInvalidAddress invalid trusted auditor address - ErrInvalidAddress = sdkerrors.Register(ModuleName, errInvalidAddress, "invalid address") - - // ErrCertificateExists certificate already exists - ErrCertificateExists = sdkerrors.Register(ModuleName, errCertificateExists, "certificate exists") - - // ErrCertificateAlreadyRevoked certificate already revoked - ErrCertificateAlreadyRevoked = sdkerrors.Register(ModuleName, errCertificateAlreadyRevoked, "certificate already revoked") - - // ErrInvalidSerialNumber invalid serial number - ErrInvalidSerialNumber = sdkerrors.Register(ModuleName, errInvalidSerialNumber, "invalid serial number") - - // ErrInvalidCertificateValue certificate content is not valid - ErrInvalidCertificateValue = sdkerrors.Register(ModuleName, errInvalidCertificateValue, "invalid certificate value") - - // ErrInvalidPubkeyValue public key is not valid - ErrInvalidPubkeyValue = sdkerrors.Register(ModuleName, errInvalidPubkeyValue, "invalid pubkey value") - - // ErrInvalidState invalid certificate state - ErrInvalidState = sdkerrors.Register(ModuleName, errInvalidState, "invalid state") - - // ErrInvalidKeySize invalid certificate state - ErrInvalidKeySize = sdkerrors.Register(ModuleName, errInvalidKeySize, "invalid key size") -) diff --git a/go/node/cert/v1beta3/genesis.go b/go/node/cert/v1beta3/genesis.go deleted file mode 100644 index 9b49f8cc..00000000 --- a/go/node/cert/v1beta3/genesis.go +++ /dev/null @@ -1,58 +0,0 @@ -package v1beta3 - -import ( - "bytes" - "encoding/json" - - "github.com/cosmos/cosmos-sdk/codec" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -type GenesisCertificates []GenesisCertificate - -func (obj GenesisCertificates) Contains(cert GenesisCertificate) bool { - for _, c := range obj { - if c.Owner == cert.Owner { - return true - } - - if bytes.Equal(c.Certificate.Cert, cert.Certificate.Cert) { - return true - } - } - - return false -} - -func (m GenesisCertificate) Validate() error 
{ - owner, err := sdk.AccAddressFromBech32(m.Owner) - if err != nil { - return err - } - if err := m.Certificate.Validate(owner); err != nil { - return err - } - - return nil -} - -func (m *GenesisState) Validate() error { - for _, cert := range m.Certificates { - if err := cert.Validate(); err != nil { - return err - } - } - return nil -} - -// GetGenesisStateFromAppState returns x/cert GenesisState given raw application -// genesis state. -func GetGenesisStateFromAppState(cdc codec.JSONCodec, appState map[string]json.RawMessage) *GenesisState { - var genesisState GenesisState - - if appState[ModuleName] != nil { - cdc.MustUnmarshalJSON(appState[ModuleName], &genesisState) - } - - return &genesisState -} diff --git a/go/node/cert/v1beta3/genesis.pb.go b/go/node/cert/v1beta3/genesis.pb.go deleted file mode 100644 index de4b528c..00000000 --- a/go/node/cert/v1beta3/genesis.pb.go +++ /dev/null @@ -1,561 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/cert/v1beta3/genesis.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisCertificate defines certificate entry at genesis -type GenesisCertificate struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - Certificate Certificate `protobuf:"bytes,2,opt,name=certificate,proto3" json:"certificate" yaml:"certificate"` -} - -func (m *GenesisCertificate) Reset() { *m = GenesisCertificate{} } -func (m *GenesisCertificate) String() string { return proto.CompactTextString(m) } -func (*GenesisCertificate) ProtoMessage() {} -func (*GenesisCertificate) Descriptor() ([]byte, []int) { - return fileDescriptor_b9698bf259a960a0, []int{0} -} -func (m *GenesisCertificate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisCertificate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisCertificate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisCertificate) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisCertificate.Merge(m, src) -} -func (m *GenesisCertificate) XXX_Size() int { - return m.Size() -} -func (m *GenesisCertificate) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisCertificate.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisCertificate proto.InternalMessageInfo - -func (m *GenesisCertificate) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *GenesisCertificate) GetCertificate() Certificate { - if m != nil { - return m.Certificate - } - return Certificate{} -} - -// GenesisState defines the basic genesis state used by cert module -type GenesisState struct { - Certificates GenesisCertificates `protobuf:"bytes,1,rep,name=certificates,proto3,castrepeated=GenesisCertificates" json:"certificates" yaml:"certificates"` -} - -func (m 
*GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_b9698bf259a960a0, []int{1} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetCertificates() GenesisCertificates { - if m != nil { - return m.Certificates - } - return nil -} - -func init() { - proto.RegisterType((*GenesisCertificate)(nil), "akash.cert.v1beta3.GenesisCertificate") - proto.RegisterType((*GenesisState)(nil), "akash.cert.v1beta3.GenesisState") -} - -func init() { proto.RegisterFile("akash/cert/v1beta3/genesis.proto", fileDescriptor_b9698bf259a960a0) } - -var fileDescriptor_b9698bf259a960a0 = []byte{ - // 320 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x31, 0x4e, 0xc3, 0x30, - 0x18, 0x85, 0x63, 0x10, 0x48, 0xb8, 0x9d, 0x5c, 0x86, 0x52, 0x09, 0x3b, 0xf2, 0x80, 0xca, 0x80, - 0x2d, 0xda, 0x01, 0x89, 0x31, 0x0c, 0xcc, 0x94, 0x8d, 0xcd, 0x0d, 0x26, 0x8d, 0xda, 0xc6, 0x55, - 0x6c, 0xa8, 0xb8, 0x04, 0xe2, 0x06, 0xec, 0xac, 0x5c, 0xa2, 0x63, 0x47, 0x26, 0x83, 0xda, 0xad, - 0x63, 0x4f, 0x80, 0xe2, 0x44, 0x22, 0x51, 0xba, 0xf9, 0x7f, 0x7a, 0xef, 0xfd, 0x9f, 0xf5, 0x43, - 0x5f, 0x8c, 0x85, 0x1e, 0xf1, 0x50, 0xa6, 0x86, 0xbf, 0x5c, 0x0e, 0xa5, 0x11, 0x7d, 0x1e, 0xc9, - 0x44, 0xea, 0x58, 0xb3, 0x59, 0xaa, 0x8c, 0x42, 0xc8, 0x39, 0x58, 0xe6, 0x60, 0x85, 0xa3, 0x73, - 0xba, 0x23, 0xe5, 0x0c, 0x2e, 0xd2, 0x39, 0x8e, 0x54, 0xa4, 0xdc, 0x93, 0x67, 0xaf, 0x5c, 0xa5, - 0x5f, 0x00, 0xa2, 0xdb, 0xbc, 0xfa, 0x46, 0xa6, 0x26, 0x7e, 0x8a, 0x43, 0x61, 0x24, 0xe2, 0xf0, - 0x40, 0xcd, 0x13, 0x99, 0xb6, 0x81, 0x0f, 0xba, 0x47, 0xc1, 0xc9, 0xc6, 0x92, 0x5c, 0xd8, 0x5a, - 0xd2, 0x7c, 0x15, 0xd3, 0xc9, 0x35, 0x75, 0x23, 0x1d, 0xe4, 0x32, 0x9a, 0xc0, 0x46, 0xf8, 0x9f, - 0x6f, 0xef, 0xf9, 0xa0, 0xdb, 0xe8, 0x11, 0x56, 0xc7, 0x64, 0xa5, 0x35, 0xc1, 0xf9, 0xc2, 0x12, - 0x6f, 0x63, 0x49, 0x39, 0xbb, 0xb5, 0x04, 0xe5, 0x1b, 0x4a, 0x22, 0x1d, 0x94, 0x2d, 0xf4, 0x03, - 0xc0, 0x66, 0x41, 0x7d, 0x6f, 0x32, 0xde, 0x37, 0x00, 0x9b, 0x25, 0x83, 0x6e, 0x03, 0x7f, 0xbf, - 0xdb, 0xe8, 0x9d, 0xed, 0x02, 0xa8, 0x7f, 0x37, 0x08, 0x0a, 0x8e, 0x4a, 0xc7, 0xd6, 0x92, 0x56, - 0x0d, 0x44, 0xd3, 0xcf, 0x1f, 0xd2, 0xaa, 0x57, 0xe8, 0x41, 0x25, 0x1b, 0xdc, 0x2d, 0x56, 0x18, - 0x2c, 0x57, 0x18, 0xfc, 0xae, 0x30, 0x78, 0x5f, 0x63, 0x6f, 0xb9, 0xc6, 0xde, 0xf7, 0x1a, 0x7b, - 0x0f, 0x57, 0x51, 0x6c, 0x46, 0xcf, 0x43, 0x16, 0xaa, 0x29, 0x77, 0x74, 0x17, 0x89, 0x34, 0x73, - 0x95, 0x8e, 0x8b, 0x49, 0xcc, 0x62, 0x1e, 0x29, 0x9e, 0xa8, 0x47, 0x59, 0xb9, 0xe5, 0xf0, 0xd0, - 0x5d, 0xac, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x86, 0x5f, 0x41, 0xfe, 0x1e, 0x02, 0x00, 0x00, -} - -func (m *GenesisCertificate) Marshal() (dAtA []byte, err error) { - size := m.Size() 
- dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisCertificate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisCertificate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Certificate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintGenesis(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Certificates) > 0 { - for iNdEx := len(m.Certificates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Certificates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisCertificate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovGenesis(uint64(l)) - } - l = m.Certificate.Size() - n += 1 + l + sovGenesis(uint64(l)) - return n -} - -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Certificates) > 0 { - for _, e := range m.Certificates { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisCertificate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisCertificate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisCertificate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - 
break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Certificates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Certificates = append(m.Certificates, GenesisCertificate{}) - if err := m.Certificates[len(m.Certificates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType 
{ - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/cert/v1beta3/key.go b/go/node/cert/v1beta3/key.go deleted file mode 100644 index ace5bb82..00000000 --- a/go/node/cert/v1beta3/key.go +++ /dev/null @@ -1,16 +0,0 @@ -package v1beta3 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "cert" - - // StoreKey is the store key string for provider - StoreKey = ModuleName - - // RouterKey is the message route for provider - RouterKey = ModuleName -) - -func PrefixCertificateID() []byte { - return []byte{0x01} -} diff --git a/go/node/cert/v1beta3/msgs.go b/go/node/cert/v1beta3/msgs.go deleted file mode 100644 index 86dd0d94..00000000 --- a/go/node/cert/v1beta3/msgs.go +++ /dev/null @@ -1,98 +0,0 @@ -package v1beta3 - -import ( - "math/big" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - MsgTypeCreateCertificate = "cert-create-certificate" - MsgTypeRevokeCertificate = "cert-revoke-certificate" -) - -var ( - _ sdk.Msg = &MsgCreateCertificate{} - _ sdk.Msg = &MsgRevokeCertificate{} -) - -// ====MsgCreateCertificate==== -// Route implements the sdk.Msg interface -func (m MsgCreateCertificate) Route() string { - return RouterKey -} - -// Type implements the sdk.Msg interface -func (m MsgCreateCertificate) Type() string { - return MsgTypeCreateCertificate -} - -// ValidateBasic does basic validation -func (m MsgCreateCertificate) ValidateBasic() error { - owner, err := sdk.AccAddressFromBech32(m.Owner) - if err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Owner Address") - } - - _, err = ParseAndValidateCertificate(owner, m.Cert, m.Pubkey) - if err != nil { - return err - } - - return nil -} - -// GetSignBytes encodes the message for signing -func (m MsgCreateCertificate) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&m)) -} - -// GetSigners defines whose signature is required -func (m MsgCreateCertificate) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(m.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// ====MsgRevokeCertificate==== -// Route implements the sdk.Msg interface -func (m MsgRevokeCertificate) Route() string { - return RouterKey -} - -// Type implements the sdk.Msg interface -func (m MsgRevokeCertificate) Type() string { - return 
MsgTypeRevokeCertificate -} - -// ValidateBasic does basic validation -func (m MsgRevokeCertificate) ValidateBasic() error { - if _, err := sdk.AccAddressFromBech32(m.ID.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgRevoke: Invalid Owner Address") - } - - if _, valid := new(big.Int).SetString(m.ID.Serial, 10); !valid { - return ErrInvalidSerialNumber - } - - return nil -} - -// GetSignBytes encodes the message for signing -func (m MsgRevokeCertificate) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&m)) -} - -// GetSigners defines whose signature is required -func (m MsgRevokeCertificate) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(m.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} diff --git a/go/node/cert/v1beta3/query.pb.go b/go/node/cert/v1beta3/query.pb.go deleted file mode 100644 index 7f4c09c0..00000000 --- a/go/node/cert/v1beta3/query.pb.go +++ /dev/null @@ -1,954 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/cert/v1beta3/query.proto - -package v1beta3 - -import ( - context "context" - fmt "fmt" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// CertificateResponse contains a single X509 certificate and its serial number -type CertificateResponse struct { - Certificate Certificate `protobuf:"bytes,1,opt,name=certificate,proto3" json:"certificate" yaml:"certificate"` - Serial string `protobuf:"bytes,2,opt,name=serial,proto3" json:"serial" yaml:"serial"` -} - -func (m *CertificateResponse) Reset() { *m = CertificateResponse{} } -func (m *CertificateResponse) String() string { return proto.CompactTextString(m) } -func (*CertificateResponse) ProtoMessage() {} -func (*CertificateResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2bbd23f37f87a8be, []int{0} -} -func (m *CertificateResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CertificateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CertificateResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CertificateResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_CertificateResponse.Merge(m, src) -} -func (m *CertificateResponse) XXX_Size() int { - return m.Size() -} -func (m *CertificateResponse) XXX_DiscardUnknown() { - xxx_messageInfo_CertificateResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_CertificateResponse proto.InternalMessageInfo - -func (m *CertificateResponse) GetCertificate() Certificate { - if m != nil { - return m.Certificate - } - return Certificate{} -} - -func (m *CertificateResponse) GetSerial() string { - if m != nil { - return m.Serial - } - return "" -} - -// QueryDeploymentsRequest is request type for the Query/Deployments RPC method -type QueryCertificatesRequest struct { - Filter CertificateFilter `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryCertificatesRequest) Reset() { *m = QueryCertificatesRequest{} } -func (m *QueryCertificatesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryCertificatesRequest) ProtoMessage() {} -func (*QueryCertificatesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2bbd23f37f87a8be, []int{1} -} -func (m *QueryCertificatesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryCertificatesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryCertificatesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryCertificatesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryCertificatesRequest.Merge(m, src) -} -func (m *QueryCertificatesRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryCertificatesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryCertificatesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryCertificatesRequest proto.InternalMessageInfo - -func (m *QueryCertificatesRequest) GetFilter() CertificateFilter { - if m != nil { - return m.Filter - } - return CertificateFilter{} -} - -func (m *QueryCertificatesRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryCertificatesResponse is response type for the 
Query/Certificates RPC method -type QueryCertificatesResponse struct { - Certificates CertificatesResponse `protobuf:"bytes,1,rep,name=certificates,proto3,castrepeated=CertificatesResponse" json:"certificates"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryCertificatesResponse) Reset() { *m = QueryCertificatesResponse{} } -func (m *QueryCertificatesResponse) String() string { return proto.CompactTextString(m) } -func (*QueryCertificatesResponse) ProtoMessage() {} -func (*QueryCertificatesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2bbd23f37f87a8be, []int{2} -} -func (m *QueryCertificatesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryCertificatesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryCertificatesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryCertificatesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryCertificatesResponse.Merge(m, src) -} -func (m *QueryCertificatesResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryCertificatesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryCertificatesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryCertificatesResponse proto.InternalMessageInfo - -func (m *QueryCertificatesResponse) GetCertificates() CertificatesResponse { - if m != nil { - return m.Certificates - } - return nil -} - -func (m *QueryCertificatesResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -func init() { - proto.RegisterType((*CertificateResponse)(nil), "akash.cert.v1beta3.CertificateResponse") - proto.RegisterType((*QueryCertificatesRequest)(nil), "akash.cert.v1beta3.QueryCertificatesRequest") - proto.RegisterType((*QueryCertificatesResponse)(nil), "akash.cert.v1beta3.QueryCertificatesResponse") -} - -func init() { proto.RegisterFile("akash/cert/v1beta3/query.proto", fileDescriptor_2bbd23f37f87a8be) } - -var fileDescriptor_2bbd23f37f87a8be = []byte{ - // 485 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x31, 0x6f, 0xd3, 0x40, - 0x14, 0xc7, 0x73, 0x05, 0x22, 0x71, 0x29, 0xcb, 0xd1, 0x21, 0x84, 0x62, 0x47, 0x96, 0x4a, 0x02, - 0x22, 0x77, 0x6a, 0x32, 0x20, 0x31, 0xba, 0x52, 0x59, 0xa9, 0x47, 0xb6, 0x8b, 0x79, 0x75, 0x4f, - 0x75, 0x7c, 0xae, 0xef, 0x02, 0xea, 0xca, 0x27, 0x40, 0x62, 0x63, 0x46, 0x42, 0xe2, 0x13, 0xf0, - 0x11, 0xba, 0x51, 0x89, 0x85, 0xc9, 0xa0, 0x84, 0xa9, 0x63, 0x3f, 0x01, 0xf2, 0xf9, 0xaa, 0x5e, - 0x85, 0xa3, 0xb0, 0xf9, 0xde, 0xff, 0xfd, 0xdf, 0xfb, 0xbd, 0x77, 0x3e, 0xec, 0xf1, 0x63, 0xae, - 0x8e, 0x58, 0x0c, 0x85, 0x66, 0x6f, 0x77, 0xa7, 0xa0, 0xf9, 0x84, 0x9d, 0xcc, 0xa1, 0x38, 0xa5, - 0x79, 0x21, 0xb5, 0x24, 0xc4, 0xe8, 0xb4, 0xd2, 0xa9, 0xd5, 0x7b, 0x5b, 0x89, 0x4c, 0xa4, 0x91, - 0x59, 0xf5, 0x55, 0x67, 0xf6, 0xb6, 0x13, 0x29, 0x93, 0x14, 0x18, 0xcf, 0x05, 0xe3, 0x59, 0x26, - 0x35, 0xd7, 0x42, 0x66, 0xca, 0xaa, 0x4f, 0x63, 0xa9, 0x66, 0x52, 0xb1, 0x29, 0x57, 0x50, 0x37, - 0xb0, 0xed, 0x76, 0x59, 0xce, 0x13, 0x91, 0x99, 0x64, 0x9b, 0xfb, 0xa8, 0x81, 0xc9, 0x00, 0x18, - 0x39, 0xf8, 0x86, 0xf0, 0xfd, 0x3d, 0x28, 0xb4, 0x38, 0x14, 0x31, 0xd7, 0x10, 0x81, 0xca, 0x65, - 0xa6, 0x80, 0xa4, 0xb8, 0x13, 0x5f, 0x87, 0xbb, 0xa8, 0x8f, 0x86, 0x9d, 0xb1, 
0x4f, 0xff, 0x1d, - 0x80, 0x3a, 0xee, 0xf0, 0xc9, 0x59, 0xe9, 0xb7, 0x2e, 0x4a, 0xdf, 0xf5, 0x5e, 0x96, 0x3e, 0x39, - 0xe5, 0xb3, 0xf4, 0x45, 0xe0, 0x04, 0x83, 0xc8, 0x4d, 0x21, 0x13, 0xdc, 0x56, 0x50, 0x08, 0x9e, - 0x76, 0x37, 0xfa, 0x68, 0x78, 0x37, 0x7c, 0x78, 0x51, 0xfa, 0x36, 0x72, 0x59, 0xfa, 0xf7, 0x6a, - 0x7b, 0x7d, 0x0e, 0x22, 0x2b, 0x04, 0x5f, 0x10, 0xee, 0x1e, 0x54, 0xc3, 0x3b, 0x04, 0x2a, 0x82, - 0x93, 0x39, 0x28, 0x4d, 0xf6, 0x70, 0xfb, 0x50, 0xa4, 0x1a, 0x0a, 0x8b, 0xbe, 0xb3, 0x06, 0x7d, - 0xdf, 0x24, 0x87, 0xb7, 0xab, 0x01, 0x22, 0x6b, 0x25, 0xfb, 0x18, 0x5f, 0xef, 0xd3, 0xa0, 0x75, - 0xc6, 0x8f, 0x69, 0xbd, 0x7c, 0x5a, 0x2d, 0x9f, 0xd6, 0xb7, 0x6b, 0x97, 0x4f, 0x5f, 0xf1, 0x04, - 0x2c, 0x40, 0xe4, 0x38, 0x83, 0xef, 0x08, 0x3f, 0x68, 0x20, 0xb5, 0xab, 0x16, 0x78, 0xd3, 0xd9, - 0x85, 0xea, 0xa2, 0xfe, 0xad, 0x61, 0x67, 0x3c, 0x58, 0x03, 0x7c, 0x65, 0x0f, 0xb7, 0x2b, 0xe4, - 0xaf, 0xbf, 0xfc, 0xad, 0xa6, 0xe2, 0xd1, 0x8d, 0xd2, 0xe4, 0x65, 0xc3, 0x40, 0x83, 0xb5, 0x03, - 0xd9, 0x52, 0x8e, 0x75, 0xfc, 0x19, 0xe1, 0x3b, 0x66, 0x22, 0xf2, 0x09, 0xe1, 0x4d, 0xb7, 0x33, - 0x79, 0xd6, 0x04, 0xbe, 0xea, 0x9e, 0x7a, 0xa3, 0xff, 0xcc, 0xae, 0x19, 0x82, 0xd1, 0xfb, 0x1f, - 0x7f, 0x3e, 0x6e, 0x0c, 0xc8, 0x0e, 0x5b, 0xf1, 0x5b, 0x5f, 0x39, 0x58, 0x2a, 0x94, 0x0e, 0x0f, - 0xce, 0x16, 0x1e, 0x3a, 0x5f, 0x78, 0xe8, 0xf7, 0xc2, 0x43, 0x1f, 0x96, 0x5e, 0xeb, 0x7c, 0xe9, - 0xb5, 0x7e, 0x2e, 0xbd, 0xd6, 0xeb, 0xe7, 0x89, 0xd0, 0x47, 0xf3, 0x29, 0x8d, 0xe5, 0xac, 0x2e, - 0x35, 0xca, 0x40, 0xbf, 0x93, 0xc5, 0xb1, 0x3d, 0x55, 0x0f, 0x2f, 0x91, 0x2c, 0x93, 0x6f, 0xe0, - 0x46, 0x93, 0x69, 0xdb, 0xbc, 0x9b, 0xc9, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5b, 0x57, 0xec, - 0x65, 0xec, 0x03, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // Certificates queries certificates - Certificates(ctx context.Context, in *QueryCertificatesRequest, opts ...grpc.CallOption) (*QueryCertificatesResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Certificates(ctx context.Context, in *QueryCertificatesRequest, opts ...grpc.CallOption) (*QueryCertificatesResponse, error) { - out := new(QueryCertificatesResponse) - err := c.cc.Invoke(ctx, "/akash.cert.v1beta3.Query/Certificates", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // Certificates queries certificates - Certificates(context.Context, *QueryCertificatesRequest) (*QueryCertificatesResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
-type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Certificates(ctx context.Context, req *QueryCertificatesRequest) (*QueryCertificatesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Certificates not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Certificates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryCertificatesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Certificates(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.cert.v1beta3.Query/Certificates", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Certificates(ctx, req.(*QueryCertificatesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.cert.v1beta3.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Certificates", - Handler: _Query_Certificates_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/cert/v1beta3/query.proto", -} - -func (m *CertificateResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CertificateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Serial) > 0 { - i -= len(m.Serial) - copy(dAtA[i:], m.Serial) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Serial))) - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Certificate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryCertificatesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryCertificatesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryCertificatesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filter.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryCertificatesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryCertificatesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} 
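(Editor's aside, not part of the diff: for readers tracking what the removal of query.pb.go takes away, this is roughly how the v1beta3 Certificates query client was consumed. QueryClient, NewQueryClient, QueryCertificatesRequest and CertificateFilter are the names defined in the file deleted here; the import path, gRPC endpoint and owner address are placeholder assumptions for illustration.)

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Assumed import path for the (now removed) generated package.
	certv1beta3 "github.com/akash-network/akash-api/go/node/cert/v1beta3"
)

func main() {
	// Dial a node's gRPC endpoint; the address below is a placeholder.
	conn, err := grpc.Dial("localhost:9090",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	qc := certv1beta3.NewQueryClient(conn)

	// Query certificates for one owner; Serial and State may be left empty.
	res, err := qc.Certificates(context.Background(), &certv1beta3.QueryCertificatesRequest{
		Filter: certv1beta3.CertificateFilter{Owner: "akash1..."},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("certificates:", len(res.Certificates))
}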
- -func (m *QueryCertificatesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Certificates) > 0 { - for iNdEx := len(m.Certificates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Certificates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CertificateResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Certificate.Size() - n += 1 + l + sovQuery(uint64(l)) - l = len(m.Serial) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryCertificatesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filter.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryCertificatesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Certificates) > 0 { - for _, e := range m.Certificates { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CertificateResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Certificate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Serial", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx 
>= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Serial = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryCertificatesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryCertificatesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryCertificatesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filter.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryCertificatesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryCertificatesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryCertificatesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Certificates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Certificates = append(m.Certificates, CertificateResponse{}) - if err := m.Certificates[len(m.Certificates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - 
return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/cert/v1beta3/query.pb.gw.go b/go/node/cert/v1beta3/query.pb.gw.go deleted file mode 100644 index 4694c126..00000000 --- a/go/node/cert/v1beta3/query.pb.gw.go +++ /dev/null @@ -1,171 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/cert/v1beta3/query.proto - -/* -Package v1beta3 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package v1beta3 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_Certificates_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Certificates_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryCertificatesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Certificates_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Certificates(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Certificates_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryCertificatesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Certificates_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Certificates(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
-func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_Certificates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Certificates_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Certificates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_Certificates_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Certificates_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Certificates_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Query_Certificates_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "cert", "v1beta3", "certificates", "list"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_Certificates_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/cert/v1beta3/types.go b/go/node/cert/v1beta3/types.go deleted file mode 100644 index 4402f697..00000000 --- a/go/node/cert/v1beta3/types.go +++ /dev/null @@ -1,70 +0,0 @@ -package v1beta3 - -import ( - "bytes" - "math/big" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -const ( - PemBlkTypeCertificate = "CERTIFICATE" - PemBlkTypeECPrivateKey = "EC PRIVATE KEY" - PemBlkTypeECPublicKey = "EC PUBLIC KEY" -) - -type CertID struct { - Owner sdk.Address - Serial big.Int -} - -func ToCertID(id CertificateID) (CertID, error) { - addr, err := sdk.AccAddressFromBech32(id.Owner) - if err != nil { - return CertID{}, err - } - - serial, valid := new(big.Int).SetString(id.Serial, 10) - if !valid { - return CertID{}, ErrInvalidSerialNumber - } - - return CertID{ - Owner: addr, - Serial: *serial, - }, nil -} - -// Certificates is the collection of Certificate -type Certificates []Certificate - -type CertificatesResponse []CertificateResponse - -// String implements the Stringer interface for a Certificates object. -func (obj Certificates) String() string { - var buf bytes.Buffer - - const sep = "\n\n" - - for _, p := range obj { - buf.WriteString(p.String()) - buf.WriteString(sep) - } - - if len(obj) > 0 { - buf.Truncate(buf.Len() - len(sep)) - } - - return buf.String() -} - -func (obj Certificates) Contains(cert Certificate) bool { - for _, c := range obj { - // fixme is bytes.Equal right way to do it? - if bytes.Equal(c.Cert, cert.Cert) { - return true - } - } - - return false -} diff --git a/go/node/cert/v1beta3/utils/key_pair_manager.go b/go/node/cert/v1beta3/utils/key_pair_manager.go deleted file mode 100644 index 4d27026a..00000000 --- a/go/node/cert/v1beta3/utils/key_pair_manager.go +++ /dev/null @@ -1,310 +0,0 @@ -package utils - -import ( - "bytes" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/tls" - "crypto/x509" - "crypto/x509/pkix" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" - "net" - "os" - "time" - - "go.step.sm/crypto/pemutil" - - sdkclient "github.com/cosmos/cosmos-sdk/client" - sdk "github.com/cosmos/cosmos-sdk/types" - - types "github.com/akash-network/akash-api/go/node/cert/v1beta3" -) - -var ( - errCertificateNotFoundInPEM = fmt.Errorf("%w: certificate not found in PEM", types.ErrCertificate) - errPrivateKeyNotFoundInPEM = fmt.Errorf("%w: private key not found in PEM", types.ErrCertificate) - errPublicKeyNotFoundInPEM = fmt.Errorf("%w: public key not found in PEM", types.ErrCertificate) - errUnsupportedEncryptedPEM = errors.New("unsupported encrypted PEM") -) - -type KeyPairManager interface { - KeyExists() (bool, error) - Generate(notBefore, notAfter time.Time, domains []string) error - - // Read the PEM blocks, containing the cert, private key, & public key - Read(fin ...io.Reader) ([]byte, []byte, []byte, error) - - ReadX509KeyPair(fin ...io.Reader) (*x509.Certificate, tls.Certificate, error) -} - -type keyPairManager struct { - addr sdk.AccAddress - passwordBytes []byte - passwordLegacy []byte - homeDir string -} - -func NewKeyPairManager(cctx sdkclient.Context, fromAddress sdk.AccAddress) (KeyPairManager, error) { - sig, _, err := cctx.Keyring.SignByAddress(fromAddress, []byte(fromAddress.String())) - if err 
!= nil { - return nil, err - } - - // ignore error if ledger device is being used - // due to its jsonparser not liking bech address sent as data in binary format - // if test or file keyring used it will allow to decode old private keys for the mTLS cert - sigLegacy, _, _ := cctx.Keyring.SignByAddress(fromAddress, fromAddress.Bytes()) - - return &keyPairManager{ - addr: fromAddress, - passwordBytes: sig, - passwordLegacy: sigLegacy, - homeDir: cctx.HomeDir, - }, nil -} - -func (kpm *keyPairManager) getKeyPath() string { - return kpm.homeDir + "/" + kpm.addr.String() + ".pem" -} - -func (kpm *keyPairManager) ReadX509KeyPair(fin ...io.Reader) (*x509.Certificate, tls.Certificate, error) { - certData, privKeyData, _, err := kpm.Read(fin...) - if err != nil { - return nil, tls.Certificate{}, err - } - - x509cert, err := x509.ParseCertificate(certData) - if err != nil { - return nil, tls.Certificate{}, fmt.Errorf("could not parse x509 cert: %w", err) - } - - result := tls.Certificate{ - Certificate: [][]byte{certData}, - } - - result.PrivateKey, err = x509.ParsePKCS8PrivateKey(privKeyData) - if err != nil { - return nil, tls.Certificate{}, fmt.Errorf("%w: failed parsing private key data", err) - } - - return x509cert, result, err -} - -func (kpm *keyPairManager) KeyExists() (bool, error) { - _, err := os.Stat(kpm.getKeyPath()) - if err == nil { - return true, nil - } - - if os.IsNotExist(err) { - return false, nil - } - - return false, err -} - -func (kpm *keyPairManager) Generate(notBefore, notAfter time.Time, domains []string) error { - var err error - var pemOut *os.File - if pemOut, err = os.OpenFile(kpm.getKeyPath(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600); err != nil { - return err - } - - err = kpm.generateImpl(notBefore, notAfter, domains, pemOut) - - closeErr := pemOut.Close() - if closeErr != nil { - return closeErr - } - - return err -} - -func (kpm *keyPairManager) generateImpl(notBefore, notAfter time.Time, domains []string, fout io.Writer) error { - var err error - // Generate the private key - var priv *ecdsa.PrivateKey - if priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader); err != nil { - return fmt.Errorf("could not generate key: %w", err) - } - - serialNumber := new(big.Int).SetInt64(time.Now().UTC().UnixNano()) - - extKeyUsage := []x509.ExtKeyUsage{ - x509.ExtKeyUsageClientAuth, - } - - if len(domains) != 0 { - extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) - } - - template := x509.Certificate{ - SerialNumber: serialNumber, - Subject: pkix.Name{ - CommonName: kpm.addr.String(), - ExtraNames: []pkix.AttributeTypeAndValue{ - { - Type: AuthVersionOID, - Value: "v0.0.1", - }, - }, - }, - Issuer: pkix.Name{ - CommonName: kpm.addr.String(), - }, - NotBefore: notBefore, - NotAfter: notAfter, - KeyUsage: x509.KeyUsageDataEncipherment | x509.KeyUsageKeyEncipherment, - ExtKeyUsage: extKeyUsage, - BasicConstraintsValid: true, - } - - var ips []net.IP - - for i := len(domains) - 1; i >= 0; i-- { - if ip := net.ParseIP(domains[i]); ip != nil { - ips = append(ips, ip) - domains = append(domains[:i], domains[i+1:]...) 
- } - } - - if len(domains) != 0 || len(ips) != 0 { - template.PermittedDNSDomainsCritical = true - template.PermittedDNSDomains = domains - template.DNSNames = domains - template.IPAddresses = ips - } - - var certDer []byte - if certDer, err = x509.CreateCertificate(rand.Reader, &template, &template, priv.Public(), priv); err != nil { - return fmt.Errorf("could not create certificate: %w", err) - } - - var keyDer []byte - if keyDer, err = x509.MarshalPKCS8PrivateKey(priv); err != nil { - return fmt.Errorf("could not create private key: %w", err) - } - - var blk *pem.Block - blk, err = pemutil.EncryptPKCS8PrivateKey(rand.Reader, keyDer, kpm.passwordBytes, x509.PEMCipherAES256) - if err != nil { - return fmt.Errorf("could not encrypt private key as PEM: %w", err) - } - - // Write the certificate - if err = pem.Encode(fout, &pem.Block{Type: types.PemBlkTypeCertificate, Bytes: certDer}); err != nil { - return fmt.Errorf("could not encode certificate as PEM: %w", err) - } - - // Write the encrypted private key - if err = pem.Encode(fout, blk); err != nil { - return fmt.Errorf("could not encode private key as PEM: %w", err) - } - - return nil -} - -func (kpm *keyPairManager) Read(fin ...io.Reader) ([]byte, []byte, []byte, error) { - var pemIn io.Reader - var closeMe io.ReadCloser - - if len(fin) != 0 { - if len(fin) != 1 { - return nil, nil, nil, fmt.Errorf("%w: Read() takes exactly 1 or 0 arguments, not %d", types.ErrCertificate, len(fin)) - } - pemIn = fin[0] - } - - if pemIn == nil { - fopen, err := os.OpenFile(kpm.getKeyPath(), os.O_RDONLY, 0x0) - if err != nil { - return nil, nil, nil, fmt.Errorf("could not open certificate PEM file: %w", err) - } - closeMe = fopen - pemIn = fopen - } - - cert, privKey, pubKey, err := kpm.readImpl(pemIn) - - if closeMe != nil { - closeErr := closeMe.Close() - if closeErr != nil { - return nil, nil, nil, fmt.Errorf("could not close PEM file: %w", closeErr) - } - } - - return cert, privKey, pubKey, err -} - -func (kpm *keyPairManager) readImpl(fin io.Reader) ([]byte, []byte, []byte, error) { - buf := &bytes.Buffer{} - _, err := io.Copy(buf, fin) - if err != nil { - return nil, nil, nil, fmt.Errorf("failed reading certificate PEM file: %w", err) - } - data := buf.Bytes() - - // Read certificate - block, remaining := pem.Decode(data) - if block == nil { - return nil, nil, nil, errCertificateNotFoundInPEM - } - cert := block.Bytes - - // Read private key - block, _ = pem.Decode(remaining) - if block == nil { - return nil, nil, nil, errPrivateKeyNotFoundInPEM - } - - var privKeyPlaintext []byte - var privKeyI interface{} - - // PKCS#8 header defined in RFC7468 section 11 - // nolint: gocritic - if block.Type == "ENCRYPTED PRIVATE KEY" { - privKeyPlaintext, err = pemutil.DecryptPKCS8PrivateKey(block.Bytes, kpm.passwordBytes) - } else if block.Headers["Proc-Type"] == "4,ENCRYPTED" { - // nolint: staticcheck - privKeyPlaintext, _ = x509.DecryptPEMBlock(block, kpm.passwordBytes) - - // DecryptPEMBlock may not return IncorrectPasswordError. 
- // Try parse private key instead and if it fails give another try with legacy password - privKeyI, err = x509.ParsePKCS8PrivateKey(privKeyPlaintext) - if err != nil { - // nolint: staticcheck - privKeyPlaintext, err = x509.DecryptPEMBlock(block, kpm.passwordLegacy) - } - } else { - return nil, nil, nil, errUnsupportedEncryptedPEM - } - if err != nil { - return nil, nil, nil, fmt.Errorf("%w: failed decrypting x509 block with private key", err) - } - - if privKeyI == nil { - if privKeyI, err = x509.ParsePKCS8PrivateKey(privKeyPlaintext); err != nil { - return nil, nil, nil, fmt.Errorf("%w: failed parsing private key data", err) - } - } - - eckey, valid := privKeyI.(*ecdsa.PrivateKey) - if !valid { - return nil, nil, nil, fmt.Errorf("%w: unexpected private key type, expected %T but got %T", - errPublicKeyNotFoundInPEM, - &ecdsa.PrivateKey{}, - privKeyI) - } - - var pubKey []byte - if pubKey, err = x509.MarshalPKIXPublicKey(eckey.Public()); err != nil { - return nil, nil, nil, fmt.Errorf("%w: failed extracting public key", err) - } - - return cert, privKeyPlaintext, pubKey, nil -} diff --git a/go/node/cert/v1beta3/utils/utils.go b/go/node/cert/v1beta3/utils/utils.go deleted file mode 100644 index c8ea0e95..00000000 --- a/go/node/cert/v1beta3/utils/utils.go +++ /dev/null @@ -1,58 +0,0 @@ -package utils - -import ( - "context" - "crypto/tls" - "fmt" - "io" - "time" - - "github.com/cosmos/cosmos-sdk/client" - - ctypes "github.com/akash-network/akash-api/go/node/cert/v1beta3" -) - -// LoadAndQueryCertificateForAccount wraps LoadAndQueryPEMForAccount and tls.X509KeyPair -func LoadAndQueryCertificateForAccount(ctx context.Context, cctx client.Context, fin io.Reader) (tls.Certificate, error) { - kpm, err := NewKeyPairManager(cctx, cctx.FromAddress) - if err != nil { - return tls.Certificate{}, err - } - - x509cert, tlsCert, err := kpm.ReadX509KeyPair(fin) - if err != nil { - return tls.Certificate{}, err - } - - // Check if valid according to time - if x509cert.NotBefore.After(time.Now().UTC()) { - return tls.Certificate{}, fmt.Errorf("%w: certificate is not yet active, start ts %s", ctypes.ErrCertificate, x509cert.NotBefore) - } - - if time.Now().UTC().After(x509cert.NotAfter) { - return tls.Certificate{}, fmt.Errorf("%w: certificate has been expired since %s", ctypes.ErrCertificate, x509cert.NotAfter) - } - - params := &ctypes.QueryCertificatesRequest{ - Filter: ctypes.CertificateFilter{ - Owner: x509cert.Subject.CommonName, - Serial: x509cert.SerialNumber.String(), - }, - } - - certs, err := ctypes.NewQueryClient(cctx).Certificates(ctx, params) - if err != nil { - return tls.Certificate{}, err - } - - if len(certs.Certificates) == 0 { - return tls.Certificate{}, fmt.Errorf("%w: certificate has not been committed to blockchain", ctypes.ErrCertificate) - } - - foundCert := certs.Certificates[0] - if foundCert.GetCertificate().State != ctypes.CertificateValid { - return tls.Certificate{}, fmt.Errorf("%w: certificate is not valid", ctypes.ErrCertificate) - } - - return tlsCert, nil -} diff --git a/go/node/client/akash.pb.go b/go/node/client/akash.pb.go index 503fbd7e..9ddf2681 100644 --- a/go/node/client/akash.pb.go +++ b/go/node/client/akash.pb.go @@ -5,8 +5,8 @@ package client import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -75,7 +75,7 @@ func init() { func init() { 
proto.RegisterFile("akash/discovery/v1/akash.proto", fileDescriptor_bf31dd3d85bbd20d) } var fileDescriptor_bf31dd3d85bbd20d = []byte{ - // 225 bytes of a gzipped FileDescriptorProto + // 210 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcc, 0x4e, 0x2c, 0xce, 0xd0, 0x4f, 0xc9, 0x2c, 0x4e, 0xce, 0x2f, 0x4b, 0x2d, 0xaa, 0xd4, 0x2f, 0x33, 0xd4, 0x07, 0x0b, 0xe9, 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0x09, 0x41, 0x38, 0x70, 0x79, 0xbd, 0x32, 0x43, @@ -84,13 +84,12 @@ var fileDescriptor_bf31dd3d85bbd20d = []byte{ 0xc5, 0xea, 0x08, 0xd2, 0x2d, 0x54, 0xc2, 0xc5, 0x8d, 0xa4, 0x47, 0x82, 0x51, 0x81, 0x51, 0x83, 0xdb, 0x48, 0x4e, 0x0f, 0xd3, 0x3a, 0x3d, 0x67, 0xb0, 0x32, 0xcf, 0xbc, 0xb4, 0x7c, 0x27, 0xe3, 0x47, 0xf7, 0xe4, 0xb9, 0x10, 0xfc, 0x57, 0xf7, 0xe4, 0x91, 0x0d, 0xf9, 0x74, 0x4f, 0x5e, 0xa8, - 0x32, 0x31, 0x37, 0xc7, 0x4a, 0x09, 0x49, 0x50, 0x29, 0x88, 0x2b, 0x19, 0x61, 0x80, 0xf7, 0x89, + 0x32, 0x31, 0x37, 0xc7, 0x4a, 0x09, 0x49, 0x50, 0x29, 0x88, 0x2b, 0x19, 0x61, 0x80, 0xc9, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, - 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x19, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, - 0xe9, 0x25, 0xe7, 0xe7, 0x42, 0x02, 0x40, 0x37, 0x2f, 0xb5, 0xa4, 0x3c, 0xbf, 0x28, 0x1b, 0xca, - 0x4b, 0x2c, 0xc8, 0xd4, 0x4f, 0xcf, 0xd7, 0xcf, 0xcb, 0x4f, 0x49, 0x85, 0x7a, 0x36, 0x89, 0x0d, - 0xec, 0x25, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x54, 0xd8, 0x43, 0x8d, 0x44, 0x01, 0x00, - 0x00, + 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x49, 0x15, 0x64, 0xa7, 0xeb, 0x25, 0x66, + 0x97, 0xe8, 0xa5, 0xa4, 0x96, 0xe9, 0xa7, 0xe7, 0xeb, 0xe7, 0xe5, 0xa7, 0xa4, 0x42, 0x7d, 0x95, + 0xc4, 0x06, 0x76, 0xbb, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x5b, 0xc3, 0xe7, 0xa0, 0x2d, 0x01, + 0x00, 0x00, } func (m *Akash) Marshal() (dAtA []byte, err error) { diff --git a/go/node/client/client.go b/go/node/client/client.go index 7a232ea1..7a58f223 100644 --- a/go/node/client/client.go +++ b/go/node/client/client.go @@ -4,11 +4,13 @@ import ( "context" "errors" + tmjclient "github.com/cometbft/cometbft/rpc/jsonrpc/client" + cmtrpctypes "github.com/cometbft/cometbft/rpc/jsonrpc/types" + sdkclient "github.com/cosmos/cosmos-sdk/client" - tmjclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" - cltypes "github.com/akash-network/akash-api/go/node/client/types" - "github.com/akash-network/akash-api/go/node/client/v1beta2" + cltypes "pkg.akt.dev/go/node/client/types" + "pkg.akt.dev/go/node/client/v1beta3" ) var ( @@ -16,8 +18,9 @@ var ( ) const ( - // DefaultClientApiVersion indicates the default ApiVersion of the client. - DefaultClientApiVersion = "v1beta2" + // DefaultClientAPIVersion indicates the default ApiVersion of the client. + DefaultClientAPIVersion = "v1beta3" + VersionV1beta3 = "v1beta3" ) // SetupFn defines a function that takes a parameter, ideally a Client or QueryClient. @@ -30,29 +33,79 @@ type SetupFn func(interface{}) error // DefaultClientApiVersion will be used. // An error is returned if client discovery is not successful. 
func DiscoverClient(ctx context.Context, cctx sdkclient.Context, setup SetupFn, opts ...cltypes.ClientOption) error { - rpc, err := tmjclient.New(cctx.NodeURI) + result := new(Akash) + + if cctx.Client == nil { + rpc, err := tmjclient.New(cctx.NodeURI) + if err != nil { + return err + } + + if !cctx.Offline { + params := make(map[string]interface{}) + _, _ = rpc.Call(ctx, "akash", params, result) + } + + // if client info is nil, mostly likely "akash" endpoint is not yet supported on the node + // fallback to manually set version to DefaultClientApiVersion + if result.ClientInfo == nil || cctx.Offline { + result.ClientInfo = &ClientInfo{ApiVersion: DefaultClientAPIVersion} + } + } else { + result.ClientInfo = &ClientInfo{ApiVersion: DefaultClientAPIVersion} + } + + var cl interface{} + + var err error + + switch result.ClientInfo.ApiVersion { + case VersionV1beta3: + cl, err = v1beta3.NewClient(ctx, cctx, opts...) + default: + err = ErrUnknownClientVersion + } + if err != nil { return err } - result := new(Akash) - - if !cctx.Offline { - params := make(map[string]interface{}) - _, _ = rpc.Call(ctx, "akash", params, result) + if err = setup(cl); err != nil { + return err } - // if client info is nil, mostly likely "akash" endpoint is not yet supported on the node - // fallback to manually set version to DefaultClientApiVersion - if result.ClientInfo == nil || cctx.Offline { - result.ClientInfo = &ClientInfo{ApiVersion: DefaultClientApiVersion} + return nil +} + +func DiscoverLightClient(ctx context.Context, cctx sdkclient.Context, setup SetupFn) error { + result := new(Akash) + + if cctx.Client == nil { + rpc, err := tmjclient.New(cctx.NodeURI) + if err != nil { + return err + } + + if !cctx.Offline { + params := make(map[string]interface{}) + _, _ = rpc.Call(ctx, "akash", params, result) + } + + // if client info is nil, mostly likely "akash" endpoint is not yet supported on the node + // fallback to manually set version to DefaultClientApiVersion + if result.ClientInfo == nil || cctx.Offline { + result.ClientInfo = &ClientInfo{ApiVersion: DefaultClientAPIVersion} + } + } else { + result.ClientInfo = &ClientInfo{ApiVersion: DefaultClientAPIVersion} } var cl interface{} + var err error switch result.ClientInfo.ApiVersion { - case "v1beta2": - cl, err = v1beta2.NewClient(ctx, cctx, opts...) 
+ case VersionV1beta3: + cl, err = v1beta3.NewLightClient(cctx) default: err = ErrUnknownClientVersion } @@ -87,14 +140,14 @@ func DiscoverQueryClient(ctx context.Context, cctx sdkclient.Context, setup Setu } if result.ClientInfo == nil { - result.ClientInfo = &ClientInfo{ApiVersion: DefaultClientApiVersion} + result.ClientInfo = &ClientInfo{ApiVersion: DefaultClientAPIVersion} } var cl interface{} switch result.ClientInfo.ApiVersion { - case "v1beta2": - cl = v1beta2.NewQueryClient(cctx) + case VersionV1beta3: + cl = v1beta3.NewQueryClient(cctx) default: err = ErrUnknownClientVersion } @@ -109,3 +162,13 @@ func DiscoverQueryClient(ctx context.Context, cctx sdkclient.Context, setup Setu return nil } + +func RPCAkash(_ *cmtrpctypes.Context) (*Akash, error) { + result := &Akash{ + ClientInfo: &ClientInfo{ + ApiVersion: "v1beta3", + }, + } + + return result, nil +} diff --git a/go/node/client/client_info.pb.go b/go/node/client/client_info.pb.go index 8218fcf8..7a304072 100644 --- a/go/node/client/client_info.pb.go +++ b/go/node/client/client_info.pb.go @@ -5,8 +5,8 @@ package client import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -77,7 +77,7 @@ func init() { } var fileDescriptor_d0e1ef320145891a = []byte{ - // 223 bytes of a gzipped FileDescriptorProto + // 208 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcc, 0x4e, 0x2c, 0xce, 0xd0, 0x4f, 0xc9, 0x2c, 0x4e, 0xce, 0x2f, 0x4b, 0x2d, 0xaa, 0xd4, 0x2f, 0x33, 0xd4, 0x4f, 0xce, 0xc9, 0x4c, 0xcd, 0x2b, 0x89, 0xcf, 0xcc, 0x4b, 0xcb, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, @@ -86,12 +86,11 @@ var fileDescriptor_d0e1ef320145891a = []byte{ 0x69, 0xf9, 0x42, 0x21, 0x5c, 0xdc, 0x89, 0x05, 0x99, 0xf1, 0x65, 0xa9, 0x45, 0xc5, 0x99, 0xf9, 0x79, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x4e, 0xc6, 0x8f, 0xee, 0xc9, 0x73, 0x39, 0x16, 0x64, 0x86, 0x41, 0x44, 0x5f, 0xdd, 0x93, 0x47, 0x56, 0xf4, 0xe9, 0x9e, 0xbc, 0x50, 0x65, 0x62, 0x6e, - 0x8e, 0x95, 0x12, 0x92, 0xa0, 0x52, 0x10, 0x57, 0x22, 0x5c, 0x83, 0x93, 0xf7, 0x89, 0x47, 0x72, + 0x8e, 0x95, 0x12, 0x92, 0xa0, 0x52, 0x10, 0x57, 0x22, 0x5c, 0x83, 0x93, 0xc9, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, - 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x19, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, - 0xe7, 0xe7, 0xea, 0x83, 0x9d, 0xac, 0x9b, 0x97, 0x5a, 0x52, 0x9e, 0x5f, 0x94, 0x0d, 0xe5, 0x25, - 0x16, 0x64, 0xea, 0xa7, 0xe7, 0xeb, 0xe7, 0xe5, 0xa7, 0xa4, 0x42, 0x7d, 0x99, 0xc4, 0x06, 0x76, - 0xb7, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x3d, 0x00, 0x57, 0x09, 0x01, 0x00, 0x00, + 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x49, 0x15, 0x64, 0xa7, 0xeb, 0x25, 0x66, 0x97, 0xe8, + 0xa5, 0xa4, 0x96, 0xe9, 0xa7, 0xe7, 0xeb, 0xe7, 0xe5, 0xa7, 0xa4, 0x42, 0xbd, 0x93, 0xc4, 0x06, + 0x76, 0xa0, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x68, 0x55, 0x48, 0xbb, 0xf2, 0x00, 0x00, 0x00, } func (m *ClientInfo) Marshal() (dAtA []byte, err error) { diff --git a/go/node/client/types/options.go b/go/node/client/types/options.go index 6be6ce5e..0d8b1496 100644 --- a/go/node/client/types/options.go +++ b/go/node/client/types/options.go @@ -1,26 +1,52 @@ package types import ( + "fmt" + "strconv" "time" - "github.com/spf13/pflag" - "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/client/flags" 
"github.com/cosmos/cosmos-sdk/client/tx" "github.com/cosmos/cosmos-sdk/types/tx/signing" ) +const ( + // SignModeDirect is the value of the --sign-mode flag for SIGN_MODE_DIRECT + SignModeDirect = "direct" + // SignModeLegacyAminoJSON is the value of the --sign-mode flag for SIGN_MODE_LEGACY_AMINO_JSON + SignModeLegacyAminoJSON = "amino-json" + // SignModeDirectAux is the value of the --sign-mode flag for SIGN_MODE_DIRECT_AUX + SignModeDirectAux = "direct-aux" + // SignModeEIP191 is the value of the --sign-mode flag for SIGN_MODE_EIP_191 + SignModeEIP191 = "eip-191" +) + +// GasSetting encapsulates the possible values passed through the --gas flag. +type GasSetting struct { + Simulate bool + Gas uint64 +} + +func (v *GasSetting) String() string { + if v.Simulate { + return "auto" + } + + return strconv.FormatUint(v.Gas, 10) +} + type ClientOptions struct { AccountNumber uint64 AccountSequence uint64 GasAdjustment float64 - Gas flags.GasSetting + Gas GasSetting GasPrices string Fees string Note string TimeoutHeight uint64 BroadcastTimeout time.Duration + SkipConfirm bool + SignMode string } type ClientOption func(options *ClientOptions) error @@ -35,19 +61,8 @@ func NewTxFactory(cctx client.Context, opts ...ClientOption) (tx.Factory, error) } } - signMode := signing.SignMode_SIGN_MODE_UNSPECIFIED - switch cctx.SignModeStr { - case flags.SignModeDirect: - signMode = signing.SignMode_SIGN_MODE_DIRECT - case flags.SignModeLegacyAminoJSON: - signMode = signing.SignMode_SIGN_MODE_LEGACY_AMINO_JSON - case flags.SignModeEIP191: - signMode = signing.SignMode_SIGN_MODE_EIP_191 - } - - txf := tx.Factory{} - - txf = txf.WithTxConfig(cctx.TxConfig). + txf := tx.Factory{}. + WithTxConfig(cctx.TxConfig). WithAccountRetriever(cctx.AccountRetriever). WithAccountNumber(clOpts.AccountNumber). WithSequence(clOpts.AccountSequence). @@ -59,8 +74,32 @@ func NewTxFactory(cctx client.Context, opts ...ClientOption) (tx.Factory, error) WithSimulateAndExecute(clOpts.Gas.Simulate). WithTimeoutHeight(clOpts.TimeoutHeight). WithMemo(clOpts.Note). - WithSignMode(signMode). - WithFees(clOpts.Fees) + WithFees(clOpts.Fees). + WithFromName(cctx.FromName) + + if !cctx.GenerateOnly { + var signMode signing.SignMode + + switch cctx.SignModeStr { + case SignModeDirect: + signMode = signing.SignMode_SIGN_MODE_DIRECT + case SignModeDirectAux: + signMode = signing.SignMode_SIGN_MODE_DIRECT_AUX + case SignModeLegacyAminoJSON: + signMode = signing.SignMode_SIGN_MODE_LEGACY_AMINO_JSON + case SignModeEIP191: + signMode = signing.SignMode_SIGN_MODE_EIP_191 + default: + return tx.Factory{}, fmt.Errorf("invalid sign mode \"%s\". 
expected %s|%s|%s|%s", + cctx.SignModeStr, + SignModeDirect, + SignModeDirectAux, + SignModeLegacyAminoJSON, + SignModeEIP191) + } + + txf = txf.WithSignMode(signMode) + } if !cctx.Offline { address := cctx.GetFromAddress() @@ -110,7 +149,7 @@ func WithNote(val string) ClientOption { } } -func WithGas(val flags.GasSetting) ClientOption { +func WithGas(val GasSetting) ClientOption { return func(options *ClientOptions) error { options.Gas = val return nil @@ -138,41 +177,16 @@ func WithTimeoutHeight(val uint64) ClientOption { } } -func ClientOptionsFromFlags(flagSet *pflag.FlagSet) ([]ClientOption, error) { - opts := make([]ClientOption, 0) - - if flagSet.Changed(flags.FlagAccountNumber) { - accNum, _ := flagSet.GetUint64(flags.FlagAccountNumber) - opts = append(opts, WithAccountNumber(accNum)) - } - - if flagSet.Changed(flags.FlagSequence) { - accSeq, _ := flagSet.GetUint64(flags.FlagSequence) - opts = append(opts, WithAccountSequence(accSeq)) - } - - gasAdj, _ := flagSet.GetFloat64(flags.FlagGasAdjustment) - opts = append(opts, WithGasAdjustment(gasAdj)) - - if flagSet.Changed(flags.FlagNote) { - memo, _ := flagSet.GetString(flags.FlagNote) - opts = append(opts, WithNote(memo)) +func WithSkipConfirm(val bool) ClientOption { + return func(options *ClientOptions) error { + options.SkipConfirm = val + return nil } +} - if flagSet.Changed(flags.FlagTimeoutHeight) { - timeoutHeight, _ := flagSet.GetUint64(flags.FlagTimeoutHeight) - opts = append(opts, WithTimeoutHeight(timeoutHeight)) +func WithSignMode(val string) ClientOption { + return func(options *ClientOptions) error { + options.SignMode = val + return nil } - - gasStr, _ := flagSet.GetString(flags.FlagGas) - gasSetting, _ := flags.ParseGasSetting(gasStr) - opts = append(opts, WithGas(gasSetting)) - - feesStr, _ := flagSet.GetString(flags.FlagFees) - opts = append(opts, WithFees(feesStr)) - - gasPrices, _ := flagSet.GetString(flags.FlagGasPrices) - opts = append(opts, WithGasPrices(gasPrices)) - - return opts, nil } diff --git a/go/node/client/v1beta1/client.go b/go/node/client/v1beta1/client.go deleted file mode 100644 index 10e23672..00000000 --- a/go/node/client/v1beta1/client.go +++ /dev/null @@ -1,189 +0,0 @@ -package v1beta1 - -import ( - "context" - - "github.com/pkg/errors" - "google.golang.org/grpc" - - tmrpc "github.com/tendermint/tendermint/rpc/core/types" - - sdk "github.com/cosmos/cosmos-sdk/types" - - atypes "github.com/akash-network/akash-api/go/node/audit/v1beta3" - ctypes "github.com/akash-network/akash-api/go/node/cert/v1beta3" - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - mtypes "github.com/akash-network/akash-api/go/node/market/v1beta3" - ptypes "github.com/akash-network/akash-api/go/node/provider/v1beta3" -) - -var ( - ErrClientNotFound = errors.New("client not found") -) - -//go:generate mockery --name Query --output ./mocks -type Query interface { - dtypes.QueryClient - mtypes.QueryClient - ptypes.QueryClient - atypes.QueryClient - ctypes.QueryClient -} - -//go:generate mockery --name Tx --output ./mocks -type Tx interface { - Broadcast(context.Context, ...sdk.Msg) error -} - -//go:generate mockery --name Node --output ./mocks -type Node interface { - SyncInfo(ctx context.Context) (*tmrpc.SyncInfo, error) -} - -//go:generate mockery --name Client --output ./mocks -type Client interface { - Query() Query - Tx() Tx - Node() Node -} - -type qclient struct { - dclient dtypes.QueryClient - mclient mtypes.QueryClient - pclient ptypes.QueryClient - aclient atypes.QueryClient - cclient 
ctypes.QueryClient -} - -// NewQueryClient creates new query client instance -func NewQueryClient( - dclient dtypes.QueryClient, - mclient mtypes.QueryClient, - pclient ptypes.QueryClient, - aclient atypes.QueryClient, - cclient ctypes.QueryClient, -) Query { - return &qclient{ - dclient: dclient, - mclient: mclient, - pclient: pclient, - aclient: aclient, - cclient: cclient, - } -} - -func (c *qclient) Deployments(ctx context.Context, in *dtypes.QueryDeploymentsRequest, opts ...grpc.CallOption) (*dtypes.QueryDeploymentsResponse, error) { - if c.dclient == nil { - return &dtypes.QueryDeploymentsResponse{}, ErrClientNotFound - } - return c.dclient.Deployments(ctx, in, opts...) -} - -func (c *qclient) Deployment(ctx context.Context, in *dtypes.QueryDeploymentRequest, opts ...grpc.CallOption) (*dtypes.QueryDeploymentResponse, error) { - if c.dclient == nil { - return &dtypes.QueryDeploymentResponse{}, ErrClientNotFound - } - return c.dclient.Deployment(ctx, in, opts...) -} - -func (c *qclient) Group(ctx context.Context, in *dtypes.QueryGroupRequest, opts ...grpc.CallOption) (*dtypes.QueryGroupResponse, error) { - if c.dclient == nil { - return &dtypes.QueryGroupResponse{}, ErrClientNotFound - } - return c.dclient.Group(ctx, in, opts...) -} - -func (c *qclient) Orders(ctx context.Context, in *mtypes.QueryOrdersRequest, opts ...grpc.CallOption) (*mtypes.QueryOrdersResponse, error) { - if c.mclient == nil { - return &mtypes.QueryOrdersResponse{}, ErrClientNotFound - } - return c.mclient.Orders(ctx, in, opts...) -} - -func (c *qclient) Order(ctx context.Context, in *mtypes.QueryOrderRequest, opts ...grpc.CallOption) (*mtypes.QueryOrderResponse, error) { - if c.mclient == nil { - return &mtypes.QueryOrderResponse{}, ErrClientNotFound - } - return c.mclient.Order(ctx, in, opts...) -} - -func (c *qclient) Bids(ctx context.Context, in *mtypes.QueryBidsRequest, opts ...grpc.CallOption) (*mtypes.QueryBidsResponse, error) { - if c.mclient == nil { - return &mtypes.QueryBidsResponse{}, ErrClientNotFound - } - return c.mclient.Bids(ctx, in, opts...) -} - -func (c *qclient) Bid(ctx context.Context, in *mtypes.QueryBidRequest, opts ...grpc.CallOption) (*mtypes.QueryBidResponse, error) { - if c.mclient == nil { - return &mtypes.QueryBidResponse{}, ErrClientNotFound - } - return c.mclient.Bid(ctx, in, opts...) -} - -func (c *qclient) Leases(ctx context.Context, in *mtypes.QueryLeasesRequest, opts ...grpc.CallOption) (*mtypes.QueryLeasesResponse, error) { - if c.mclient == nil { - return &mtypes.QueryLeasesResponse{}, ErrClientNotFound - } - return c.mclient.Leases(ctx, in, opts...) -} - -func (c *qclient) Lease(ctx context.Context, in *mtypes.QueryLeaseRequest, opts ...grpc.CallOption) (*mtypes.QueryLeaseResponse, error) { - if c.mclient == nil { - return &mtypes.QueryLeaseResponse{}, ErrClientNotFound - } - return c.mclient.Lease(ctx, in, opts...) -} - -func (c *qclient) Providers(ctx context.Context, in *ptypes.QueryProvidersRequest, opts ...grpc.CallOption) (*ptypes.QueryProvidersResponse, error) { - if c.pclient == nil { - return &ptypes.QueryProvidersResponse{}, ErrClientNotFound - } - return c.pclient.Providers(ctx, in, opts...) -} - -func (c *qclient) Provider(ctx context.Context, in *ptypes.QueryProviderRequest, opts ...grpc.CallOption) (*ptypes.QueryProviderResponse, error) { - if c.pclient == nil { - return &ptypes.QueryProviderResponse{}, ErrClientNotFound - } - return c.pclient.Provider(ctx, in, opts...) 
-} - -// AllProvidersAttributes queries all providers -func (c *qclient) AllProvidersAttributes(ctx context.Context, in *atypes.QueryAllProvidersAttributesRequest, opts ...grpc.CallOption) (*atypes.QueryProvidersResponse, error) { - if c.aclient == nil { - return &atypes.QueryProvidersResponse{}, ErrClientNotFound - } - return c.aclient.AllProvidersAttributes(ctx, in, opts...) -} - -// ProviderAttributes queries all provider signed attributes -func (c *qclient) ProviderAttributes(ctx context.Context, in *atypes.QueryProviderAttributesRequest, opts ...grpc.CallOption) (*atypes.QueryProvidersResponse, error) { - if c.aclient == nil { - return &atypes.QueryProvidersResponse{}, ErrClientNotFound - } - return c.aclient.ProviderAttributes(ctx, in, opts...) -} - -// ProviderAuditorAttributes queries provider signed attributes by specific validator -func (c *qclient) ProviderAuditorAttributes(ctx context.Context, in *atypes.QueryProviderAuditorRequest, opts ...grpc.CallOption) (*atypes.QueryProvidersResponse, error) { - if c.aclient == nil { - return &atypes.QueryProvidersResponse{}, ErrClientNotFound - } - return c.aclient.ProviderAuditorAttributes(ctx, in, opts...) -} - -// AuditorAttributes queries all providers signed by this validator -func (c *qclient) AuditorAttributes(ctx context.Context, in *atypes.QueryAuditorAttributesRequest, opts ...grpc.CallOption) (*atypes.QueryProvidersResponse, error) { - if c.aclient == nil { - return &atypes.QueryProvidersResponse{}, ErrClientNotFound - } - return c.aclient.AuditorAttributes(ctx, in, opts...) -} - -func (c *qclient) Certificates(ctx context.Context, in *ctypes.QueryCertificatesRequest, opts ...grpc.CallOption) (*ctypes.QueryCertificatesResponse, error) { - if c.cclient == nil { - return &ctypes.QueryCertificatesResponse{}, ErrClientNotFound - } - return c.cclient.Certificates(ctx, in, opts...) -} diff --git a/go/node/client/v1beta1/mocks/client.go b/go/node/client/v1beta1/mocks/client.go deleted file mode 100644 index d77d89e4..00000000 --- a/go/node/client/v1beta1/mocks/client.go +++ /dev/null @@ -1,176 +0,0 @@ -// Code generated by mockery v2.42.0. DO NOT EDIT. 
- -package mocks - -import ( - v1beta1 "github.com/akash-network/akash-api/go/node/client/v1beta1" - mock "github.com/stretchr/testify/mock" -) - -// Client is an autogenerated mock type for the Client type -type Client struct { - mock.Mock -} - -type Client_Expecter struct { - mock *mock.Mock -} - -func (_m *Client) EXPECT() *Client_Expecter { - return &Client_Expecter{mock: &_m.Mock} -} - -// Node provides a mock function with given fields: -func (_m *Client) Node() v1beta1.Node { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Node") - } - - var r0 v1beta1.Node - if rf, ok := ret.Get(0).(func() v1beta1.Node); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(v1beta1.Node) - } - } - - return r0 -} - -// Client_Node_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Node' -type Client_Node_Call struct { - *mock.Call -} - -// Node is a helper method to define mock.On call -func (_e *Client_Expecter) Node() *Client_Node_Call { - return &Client_Node_Call{Call: _e.mock.On("Node")} -} - -func (_c *Client_Node_Call) Run(run func()) *Client_Node_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Client_Node_Call) Return(_a0 v1beta1.Node) *Client_Node_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *Client_Node_Call) RunAndReturn(run func() v1beta1.Node) *Client_Node_Call { - _c.Call.Return(run) - return _c -} - -// Query provides a mock function with given fields: -func (_m *Client) Query() v1beta1.Query { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Query") - } - - var r0 v1beta1.Query - if rf, ok := ret.Get(0).(func() v1beta1.Query); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(v1beta1.Query) - } - } - - return r0 -} - -// Client_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' -type Client_Query_Call struct { - *mock.Call -} - -// Query is a helper method to define mock.On call -func (_e *Client_Expecter) Query() *Client_Query_Call { - return &Client_Query_Call{Call: _e.mock.On("Query")} -} - -func (_c *Client_Query_Call) Run(run func()) *Client_Query_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Client_Query_Call) Return(_a0 v1beta1.Query) *Client_Query_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *Client_Query_Call) RunAndReturn(run func() v1beta1.Query) *Client_Query_Call { - _c.Call.Return(run) - return _c -} - -// Tx provides a mock function with given fields: -func (_m *Client) Tx() v1beta1.Tx { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Tx") - } - - var r0 v1beta1.Tx - if rf, ok := ret.Get(0).(func() v1beta1.Tx); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(v1beta1.Tx) - } - } - - return r0 -} - -// Client_Tx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Tx' -type Client_Tx_Call struct { - *mock.Call -} - -// Tx is a helper method to define mock.On call -func (_e *Client_Expecter) Tx() *Client_Tx_Call { - return &Client_Tx_Call{Call: _e.mock.On("Tx")} -} - -func (_c *Client_Tx_Call) Run(run func()) *Client_Tx_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Client_Tx_Call) Return(_a0 v1beta1.Tx) *Client_Tx_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *Client_Tx_Call) RunAndReturn(run func() v1beta1.Tx) 
*Client_Tx_Call { - _c.Call.Return(run) - return _c -} - -// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewClient(t interface { - mock.TestingT - Cleanup(func()) -}) *Client { - mock := &Client{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/go/node/client/v1beta1/mocks/node.go b/go/node/client/v1beta1/mocks/node.go deleted file mode 100644 index 47f5a823..00000000 --- a/go/node/client/v1beta1/mocks/node.go +++ /dev/null @@ -1,95 +0,0 @@ -// Code generated by mockery v2.42.0. DO NOT EDIT. - -package mocks - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" - coretypes "github.com/tendermint/tendermint/rpc/core/types" -) - -// Node is an autogenerated mock type for the Node type -type Node struct { - mock.Mock -} - -type Node_Expecter struct { - mock *mock.Mock -} - -func (_m *Node) EXPECT() *Node_Expecter { - return &Node_Expecter{mock: &_m.Mock} -} - -// SyncInfo provides a mock function with given fields: ctx -func (_m *Node) SyncInfo(ctx context.Context) (*coretypes.SyncInfo, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for SyncInfo") - } - - var r0 *coretypes.SyncInfo - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.SyncInfo, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.SyncInfo); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.SyncInfo) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Node_SyncInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SyncInfo' -type Node_SyncInfo_Call struct { - *mock.Call -} - -// SyncInfo is a helper method to define mock.On call -// - ctx context.Context -func (_e *Node_Expecter) SyncInfo(ctx interface{}) *Node_SyncInfo_Call { - return &Node_SyncInfo_Call{Call: _e.mock.On("SyncInfo", ctx)} -} - -func (_c *Node_SyncInfo_Call) Run(run func(ctx context.Context)) *Node_SyncInfo_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *Node_SyncInfo_Call) Return(_a0 *coretypes.SyncInfo, _a1 error) *Node_SyncInfo_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Node_SyncInfo_Call) RunAndReturn(run func(context.Context) (*coretypes.SyncInfo, error)) *Node_SyncInfo_Call { - _c.Call.Return(run) - return _c -} - -// NewNode creates a new instance of Node. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewNode(t interface { - mock.TestingT - Cleanup(func()) -}) *Node { - mock := &Node{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/go/node/client/v1beta1/mocks/query.go b/go/node/client/v1beta1/mocks/query.go deleted file mode 100644 index 32763e64..00000000 --- a/go/node/client/v1beta1/mocks/query.go +++ /dev/null @@ -1,1232 +0,0 @@ -// Code generated by mockery v2.42.0. DO NOT EDIT. 
- -package mocks - -import ( - context "context" - - certv1beta3 "github.com/akash-network/akash-api/go/node/cert/v1beta3" - - deploymentv1beta3 "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - - grpc "google.golang.org/grpc" - - marketv1beta3 "github.com/akash-network/akash-api/go/node/market/v1beta3" - - mock "github.com/stretchr/testify/mock" - - providerv1beta3 "github.com/akash-network/akash-api/go/node/provider/v1beta3" - - v1beta3 "github.com/akash-network/akash-api/go/node/audit/v1beta3" -) - -// Query is an autogenerated mock type for the Query type -type Query struct { - mock.Mock -} - -type Query_Expecter struct { - mock *mock.Mock -} - -func (_m *Query) EXPECT() *Query_Expecter { - return &Query_Expecter{mock: &_m.Mock} -} - -// AllProvidersAttributes provides a mock function with given fields: ctx, in, opts -func (_m *Query) AllProvidersAttributes(ctx context.Context, in *v1beta3.QueryAllProvidersAttributesRequest, opts ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for AllProvidersAttributes") - } - - var r0 *v1beta3.QueryProvidersResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *v1beta3.QueryAllProvidersAttributesRequest, ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *v1beta3.QueryAllProvidersAttributesRequest, ...grpc.CallOption) *v1beta3.QueryProvidersResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1beta3.QueryProvidersResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *v1beta3.QueryAllProvidersAttributesRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query_AllProvidersAttributes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AllProvidersAttributes' -type Query_AllProvidersAttributes_Call struct { - *mock.Call -} - -// AllProvidersAttributes is a helper method to define mock.On call -// - ctx context.Context -// - in *v1beta3.QueryAllProvidersAttributesRequest -// - opts ...grpc.CallOption -func (_e *Query_Expecter) AllProvidersAttributes(ctx interface{}, in interface{}, opts ...interface{}) *Query_AllProvidersAttributes_Call { - return &Query_AllProvidersAttributes_Call{Call: _e.mock.On("AllProvidersAttributes", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *Query_AllProvidersAttributes_Call) Run(run func(ctx context.Context, in *v1beta3.QueryAllProvidersAttributesRequest, opts ...grpc.CallOption)) *Query_AllProvidersAttributes_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*v1beta3.QueryAllProvidersAttributesRequest), variadicArgs...) 
- }) - return _c -} - -func (_c *Query_AllProvidersAttributes_Call) Return(_a0 *v1beta3.QueryProvidersResponse, _a1 error) *Query_AllProvidersAttributes_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Query_AllProvidersAttributes_Call) RunAndReturn(run func(context.Context, *v1beta3.QueryAllProvidersAttributesRequest, ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error)) *Query_AllProvidersAttributes_Call { - _c.Call.Return(run) - return _c -} - -// AuditorAttributes provides a mock function with given fields: ctx, in, opts -func (_m *Query) AuditorAttributes(ctx context.Context, in *v1beta3.QueryAuditorAttributesRequest, opts ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for AuditorAttributes") - } - - var r0 *v1beta3.QueryProvidersResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *v1beta3.QueryAuditorAttributesRequest, ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *v1beta3.QueryAuditorAttributesRequest, ...grpc.CallOption) *v1beta3.QueryProvidersResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1beta3.QueryProvidersResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *v1beta3.QueryAuditorAttributesRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query_AuditorAttributes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AuditorAttributes' -type Query_AuditorAttributes_Call struct { - *mock.Call -} - -// AuditorAttributes is a helper method to define mock.On call -// - ctx context.Context -// - in *v1beta3.QueryAuditorAttributesRequest -// - opts ...grpc.CallOption -func (_e *Query_Expecter) AuditorAttributes(ctx interface{}, in interface{}, opts ...interface{}) *Query_AuditorAttributes_Call { - return &Query_AuditorAttributes_Call{Call: _e.mock.On("AuditorAttributes", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *Query_AuditorAttributes_Call) Run(run func(ctx context.Context, in *v1beta3.QueryAuditorAttributesRequest, opts ...grpc.CallOption)) *Query_AuditorAttributes_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*v1beta3.QueryAuditorAttributesRequest), variadicArgs...) 
- }) - return _c -} - -func (_c *Query_AuditorAttributes_Call) Return(_a0 *v1beta3.QueryProvidersResponse, _a1 error) *Query_AuditorAttributes_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Query_AuditorAttributes_Call) RunAndReturn(run func(context.Context, *v1beta3.QueryAuditorAttributesRequest, ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error)) *Query_AuditorAttributes_Call { - _c.Call.Return(run) - return _c -} - -// Bid provides a mock function with given fields: ctx, in, opts -func (_m *Query) Bid(ctx context.Context, in *marketv1beta3.QueryBidRequest, opts ...grpc.CallOption) (*marketv1beta3.QueryBidResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Bid") - } - - var r0 *marketv1beta3.QueryBidResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *marketv1beta3.QueryBidRequest, ...grpc.CallOption) (*marketv1beta3.QueryBidResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *marketv1beta3.QueryBidRequest, ...grpc.CallOption) *marketv1beta3.QueryBidResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*marketv1beta3.QueryBidResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *marketv1beta3.QueryBidRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query_Bid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Bid' -type Query_Bid_Call struct { - *mock.Call -} - -// Bid is a helper method to define mock.On call -// - ctx context.Context -// - in *marketv1beta3.QueryBidRequest -// - opts ...grpc.CallOption -func (_e *Query_Expecter) Bid(ctx interface{}, in interface{}, opts ...interface{}) *Query_Bid_Call { - return &Query_Bid_Call{Call: _e.mock.On("Bid", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *Query_Bid_Call) Run(run func(ctx context.Context, in *marketv1beta3.QueryBidRequest, opts ...grpc.CallOption)) *Query_Bid_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*marketv1beta3.QueryBidRequest), variadicArgs...) - }) - return _c -} - -func (_c *Query_Bid_Call) Return(_a0 *marketv1beta3.QueryBidResponse, _a1 error) *Query_Bid_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Query_Bid_Call) RunAndReturn(run func(context.Context, *marketv1beta3.QueryBidRequest, ...grpc.CallOption) (*marketv1beta3.QueryBidResponse, error)) *Query_Bid_Call { - _c.Call.Return(run) - return _c -} - -// Bids provides a mock function with given fields: ctx, in, opts -func (_m *Query) Bids(ctx context.Context, in *marketv1beta3.QueryBidsRequest, opts ...grpc.CallOption) (*marketv1beta3.QueryBidsResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - if len(ret) == 0 { - panic("no return value specified for Bids") - } - - var r0 *marketv1beta3.QueryBidsResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *marketv1beta3.QueryBidsRequest, ...grpc.CallOption) (*marketv1beta3.QueryBidsResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *marketv1beta3.QueryBidsRequest, ...grpc.CallOption) *marketv1beta3.QueryBidsResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*marketv1beta3.QueryBidsResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *marketv1beta3.QueryBidsRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query_Bids_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Bids' -type Query_Bids_Call struct { - *mock.Call -} - -// Bids is a helper method to define mock.On call -// - ctx context.Context -// - in *marketv1beta3.QueryBidsRequest -// - opts ...grpc.CallOption -func (_e *Query_Expecter) Bids(ctx interface{}, in interface{}, opts ...interface{}) *Query_Bids_Call { - return &Query_Bids_Call{Call: _e.mock.On("Bids", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *Query_Bids_Call) Run(run func(ctx context.Context, in *marketv1beta3.QueryBidsRequest, opts ...grpc.CallOption)) *Query_Bids_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*marketv1beta3.QueryBidsRequest), variadicArgs...) - }) - return _c -} - -func (_c *Query_Bids_Call) Return(_a0 *marketv1beta3.QueryBidsResponse, _a1 error) *Query_Bids_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Query_Bids_Call) RunAndReturn(run func(context.Context, *marketv1beta3.QueryBidsRequest, ...grpc.CallOption) (*marketv1beta3.QueryBidsResponse, error)) *Query_Bids_Call { - _c.Call.Return(run) - return _c -} - -// Certificates provides a mock function with given fields: ctx, in, opts -func (_m *Query) Certificates(ctx context.Context, in *certv1beta3.QueryCertificatesRequest, opts ...grpc.CallOption) (*certv1beta3.QueryCertificatesResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Certificates") - } - - var r0 *certv1beta3.QueryCertificatesResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *certv1beta3.QueryCertificatesRequest, ...grpc.CallOption) (*certv1beta3.QueryCertificatesResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *certv1beta3.QueryCertificatesRequest, ...grpc.CallOption) *certv1beta3.QueryCertificatesResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*certv1beta3.QueryCertificatesResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *certv1beta3.QueryCertificatesRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query_Certificates_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Certificates' -type Query_Certificates_Call struct { - *mock.Call -} - -// Certificates is a helper method to define mock.On call -// - ctx context.Context -// - in *certv1beta3.QueryCertificatesRequest -// - opts ...grpc.CallOption -func (_e *Query_Expecter) Certificates(ctx interface{}, in interface{}, opts ...interface{}) *Query_Certificates_Call { - return &Query_Certificates_Call{Call: _e.mock.On("Certificates", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *Query_Certificates_Call) Run(run func(ctx context.Context, in *certv1beta3.QueryCertificatesRequest, opts ...grpc.CallOption)) *Query_Certificates_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*certv1beta3.QueryCertificatesRequest), variadicArgs...) - }) - return _c -} - -func (_c *Query_Certificates_Call) Return(_a0 *certv1beta3.QueryCertificatesResponse, _a1 error) *Query_Certificates_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Query_Certificates_Call) RunAndReturn(run func(context.Context, *certv1beta3.QueryCertificatesRequest, ...grpc.CallOption) (*certv1beta3.QueryCertificatesResponse, error)) *Query_Certificates_Call { - _c.Call.Return(run) - return _c -} - -// Deployment provides a mock function with given fields: ctx, in, opts -func (_m *Query) Deployment(ctx context.Context, in *deploymentv1beta3.QueryDeploymentRequest, opts ...grpc.CallOption) (*deploymentv1beta3.QueryDeploymentResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Deployment") - } - - var r0 *deploymentv1beta3.QueryDeploymentResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta3.QueryDeploymentRequest, ...grpc.CallOption) (*deploymentv1beta3.QueryDeploymentResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta3.QueryDeploymentRequest, ...grpc.CallOption) *deploymentv1beta3.QueryDeploymentResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*deploymentv1beta3.QueryDeploymentResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *deploymentv1beta3.QueryDeploymentRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query_Deployment_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Deployment' -type Query_Deployment_Call struct { - *mock.Call -} - -// Deployment is a helper method to define mock.On call -// - ctx context.Context -// - in *deploymentv1beta3.QueryDeploymentRequest -// - opts ...grpc.CallOption -func (_e *Query_Expecter) Deployment(ctx interface{}, in interface{}, opts ...interface{}) *Query_Deployment_Call { - return &Query_Deployment_Call{Call: _e.mock.On("Deployment", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *Query_Deployment_Call) Run(run func(ctx context.Context, in *deploymentv1beta3.QueryDeploymentRequest, opts ...grpc.CallOption)) *Query_Deployment_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*deploymentv1beta3.QueryDeploymentRequest), variadicArgs...) - }) - return _c -} - -func (_c *Query_Deployment_Call) Return(_a0 *deploymentv1beta3.QueryDeploymentResponse, _a1 error) *Query_Deployment_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Query_Deployment_Call) RunAndReturn(run func(context.Context, *deploymentv1beta3.QueryDeploymentRequest, ...grpc.CallOption) (*deploymentv1beta3.QueryDeploymentResponse, error)) *Query_Deployment_Call { - _c.Call.Return(run) - return _c -} - -// Deployments provides a mock function with given fields: ctx, in, opts -func (_m *Query) Deployments(ctx context.Context, in *deploymentv1beta3.QueryDeploymentsRequest, opts ...grpc.CallOption) (*deploymentv1beta3.QueryDeploymentsResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Deployments") - } - - var r0 *deploymentv1beta3.QueryDeploymentsResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta3.QueryDeploymentsRequest, ...grpc.CallOption) (*deploymentv1beta3.QueryDeploymentsResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta3.QueryDeploymentsRequest, ...grpc.CallOption) *deploymentv1beta3.QueryDeploymentsResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*deploymentv1beta3.QueryDeploymentsResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *deploymentv1beta3.QueryDeploymentsRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query_Deployments_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Deployments' -type Query_Deployments_Call struct { - *mock.Call -} - -// Deployments is a helper method to define mock.On call -// - ctx context.Context -// - in *deploymentv1beta3.QueryDeploymentsRequest -// - opts ...grpc.CallOption -func (_e *Query_Expecter) Deployments(ctx interface{}, in interface{}, opts ...interface{}) *Query_Deployments_Call { - return &Query_Deployments_Call{Call: _e.mock.On("Deployments", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *Query_Deployments_Call) Run(run func(ctx context.Context, in *deploymentv1beta3.QueryDeploymentsRequest, opts ...grpc.CallOption)) *Query_Deployments_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*deploymentv1beta3.QueryDeploymentsRequest), variadicArgs...) - }) - return _c -} - -func (_c *Query_Deployments_Call) Return(_a0 *deploymentv1beta3.QueryDeploymentsResponse, _a1 error) *Query_Deployments_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Query_Deployments_Call) RunAndReturn(run func(context.Context, *deploymentv1beta3.QueryDeploymentsRequest, ...grpc.CallOption) (*deploymentv1beta3.QueryDeploymentsResponse, error)) *Query_Deployments_Call { - _c.Call.Return(run) - return _c -} - -// Group provides a mock function with given fields: ctx, in, opts -func (_m *Query) Group(ctx context.Context, in *deploymentv1beta3.QueryGroupRequest, opts ...grpc.CallOption) (*deploymentv1beta3.QueryGroupResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Group") - } - - var r0 *deploymentv1beta3.QueryGroupResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta3.QueryGroupRequest, ...grpc.CallOption) (*deploymentv1beta3.QueryGroupResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta3.QueryGroupRequest, ...grpc.CallOption) *deploymentv1beta3.QueryGroupResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*deploymentv1beta3.QueryGroupResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *deploymentv1beta3.QueryGroupRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query_Group_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Group' -type Query_Group_Call struct { - *mock.Call -} - -// Group is a helper method to define mock.On call -// - ctx context.Context -// - in *deploymentv1beta3.QueryGroupRequest -// - opts ...grpc.CallOption -func (_e *Query_Expecter) Group(ctx interface{}, in interface{}, opts ...interface{}) *Query_Group_Call { - return &Query_Group_Call{Call: _e.mock.On("Group", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *Query_Group_Call) Run(run func(ctx context.Context, in *deploymentv1beta3.QueryGroupRequest, opts ...grpc.CallOption)) *Query_Group_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*deploymentv1beta3.QueryGroupRequest), variadicArgs...) - }) - return _c -} - -func (_c *Query_Group_Call) Return(_a0 *deploymentv1beta3.QueryGroupResponse, _a1 error) *Query_Group_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Query_Group_Call) RunAndReturn(run func(context.Context, *deploymentv1beta3.QueryGroupRequest, ...grpc.CallOption) (*deploymentv1beta3.QueryGroupResponse, error)) *Query_Group_Call { - _c.Call.Return(run) - return _c -} - -// Lease provides a mock function with given fields: ctx, in, opts -func (_m *Query) Lease(ctx context.Context, in *marketv1beta3.QueryLeaseRequest, opts ...grpc.CallOption) (*marketv1beta3.QueryLeaseResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Lease") - } - - var r0 *marketv1beta3.QueryLeaseResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *marketv1beta3.QueryLeaseRequest, ...grpc.CallOption) (*marketv1beta3.QueryLeaseResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *marketv1beta3.QueryLeaseRequest, ...grpc.CallOption) *marketv1beta3.QueryLeaseResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*marketv1beta3.QueryLeaseResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *marketv1beta3.QueryLeaseRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query_Lease_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Lease' -type Query_Lease_Call struct { - *mock.Call -} - -// Lease is a helper method to define mock.On call -// - ctx context.Context -// - in *marketv1beta3.QueryLeaseRequest -// - opts ...grpc.CallOption -func (_e *Query_Expecter) Lease(ctx interface{}, in interface{}, opts ...interface{}) *Query_Lease_Call { - return &Query_Lease_Call{Call: _e.mock.On("Lease", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *Query_Lease_Call) Run(run func(ctx context.Context, in *marketv1beta3.QueryLeaseRequest, opts ...grpc.CallOption)) *Query_Lease_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*marketv1beta3.QueryLeaseRequest), variadicArgs...) - }) - return _c -} - -func (_c *Query_Lease_Call) Return(_a0 *marketv1beta3.QueryLeaseResponse, _a1 error) *Query_Lease_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Query_Lease_Call) RunAndReturn(run func(context.Context, *marketv1beta3.QueryLeaseRequest, ...grpc.CallOption) (*marketv1beta3.QueryLeaseResponse, error)) *Query_Lease_Call { - _c.Call.Return(run) - return _c -} - -// Leases provides a mock function with given fields: ctx, in, opts -func (_m *Query) Leases(ctx context.Context, in *marketv1beta3.QueryLeasesRequest, opts ...grpc.CallOption) (*marketv1beta3.QueryLeasesResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Leases") - } - - var r0 *marketv1beta3.QueryLeasesResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *marketv1beta3.QueryLeasesRequest, ...grpc.CallOption) (*marketv1beta3.QueryLeasesResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *marketv1beta3.QueryLeasesRequest, ...grpc.CallOption) *marketv1beta3.QueryLeasesResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*marketv1beta3.QueryLeasesResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *marketv1beta3.QueryLeasesRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query_Leases_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Leases' -type Query_Leases_Call struct { - *mock.Call -} - -// Leases is a helper method to define mock.On call -// - ctx context.Context -// - in *marketv1beta3.QueryLeasesRequest -// - opts ...grpc.CallOption -func (_e *Query_Expecter) Leases(ctx interface{}, in interface{}, opts ...interface{}) *Query_Leases_Call { - return &Query_Leases_Call{Call: _e.mock.On("Leases", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *Query_Leases_Call) Run(run func(ctx context.Context, in *marketv1beta3.QueryLeasesRequest, opts ...grpc.CallOption)) *Query_Leases_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*marketv1beta3.QueryLeasesRequest), variadicArgs...) - }) - return _c -} - -func (_c *Query_Leases_Call) Return(_a0 *marketv1beta3.QueryLeasesResponse, _a1 error) *Query_Leases_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Query_Leases_Call) RunAndReturn(run func(context.Context, *marketv1beta3.QueryLeasesRequest, ...grpc.CallOption) (*marketv1beta3.QueryLeasesResponse, error)) *Query_Leases_Call { - _c.Call.Return(run) - return _c -} - -// Order provides a mock function with given fields: ctx, in, opts -func (_m *Query) Order(ctx context.Context, in *marketv1beta3.QueryOrderRequest, opts ...grpc.CallOption) (*marketv1beta3.QueryOrderResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Order") - } - - var r0 *marketv1beta3.QueryOrderResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *marketv1beta3.QueryOrderRequest, ...grpc.CallOption) (*marketv1beta3.QueryOrderResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *marketv1beta3.QueryOrderRequest, ...grpc.CallOption) *marketv1beta3.QueryOrderResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*marketv1beta3.QueryOrderResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *marketv1beta3.QueryOrderRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query_Order_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Order' -type Query_Order_Call struct { - *mock.Call -} - -// Order is a helper method to define mock.On call -// - ctx context.Context -// - in *marketv1beta3.QueryOrderRequest -// - opts ...grpc.CallOption -func (_e *Query_Expecter) Order(ctx interface{}, in interface{}, opts ...interface{}) *Query_Order_Call { - return &Query_Order_Call{Call: _e.mock.On("Order", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *Query_Order_Call) Run(run func(ctx context.Context, in *marketv1beta3.QueryOrderRequest, opts ...grpc.CallOption)) *Query_Order_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*marketv1beta3.QueryOrderRequest), variadicArgs...) - }) - return _c -} - -func (_c *Query_Order_Call) Return(_a0 *marketv1beta3.QueryOrderResponse, _a1 error) *Query_Order_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Query_Order_Call) RunAndReturn(run func(context.Context, *marketv1beta3.QueryOrderRequest, ...grpc.CallOption) (*marketv1beta3.QueryOrderResponse, error)) *Query_Order_Call { - _c.Call.Return(run) - return _c -} - -// Orders provides a mock function with given fields: ctx, in, opts -func (_m *Query) Orders(ctx context.Context, in *marketv1beta3.QueryOrdersRequest, opts ...grpc.CallOption) (*marketv1beta3.QueryOrdersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Orders") - } - - var r0 *marketv1beta3.QueryOrdersResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *marketv1beta3.QueryOrdersRequest, ...grpc.CallOption) (*marketv1beta3.QueryOrdersResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *marketv1beta3.QueryOrdersRequest, ...grpc.CallOption) *marketv1beta3.QueryOrdersResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*marketv1beta3.QueryOrdersResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *marketv1beta3.QueryOrdersRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query_Orders_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Orders' -type Query_Orders_Call struct { - *mock.Call -} - -// Orders is a helper method to define mock.On call -// - ctx context.Context -// - in *marketv1beta3.QueryOrdersRequest -// - opts ...grpc.CallOption -func (_e *Query_Expecter) Orders(ctx interface{}, in interface{}, opts ...interface{}) *Query_Orders_Call { - return &Query_Orders_Call{Call: _e.mock.On("Orders", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *Query_Orders_Call) Run(run func(ctx context.Context, in *marketv1beta3.QueryOrdersRequest, opts ...grpc.CallOption)) *Query_Orders_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*marketv1beta3.QueryOrdersRequest), variadicArgs...) - }) - return _c -} - -func (_c *Query_Orders_Call) Return(_a0 *marketv1beta3.QueryOrdersResponse, _a1 error) *Query_Orders_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Query_Orders_Call) RunAndReturn(run func(context.Context, *marketv1beta3.QueryOrdersRequest, ...grpc.CallOption) (*marketv1beta3.QueryOrdersResponse, error)) *Query_Orders_Call { - _c.Call.Return(run) - return _c -} - -// Provider provides a mock function with given fields: ctx, in, opts -func (_m *Query) Provider(ctx context.Context, in *providerv1beta3.QueryProviderRequest, opts ...grpc.CallOption) (*providerv1beta3.QueryProviderResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Provider") - } - - var r0 *providerv1beta3.QueryProviderResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta3.QueryProviderRequest, ...grpc.CallOption) (*providerv1beta3.QueryProviderResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta3.QueryProviderRequest, ...grpc.CallOption) *providerv1beta3.QueryProviderResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*providerv1beta3.QueryProviderResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *providerv1beta3.QueryProviderRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query_Provider_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Provider' -type Query_Provider_Call struct { - *mock.Call -} - -// Provider is a helper method to define mock.On call -// - ctx context.Context -// - in *providerv1beta3.QueryProviderRequest -// - opts ...grpc.CallOption -func (_e *Query_Expecter) Provider(ctx interface{}, in interface{}, opts ...interface{}) *Query_Provider_Call { - return &Query_Provider_Call{Call: _e.mock.On("Provider", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *Query_Provider_Call) Run(run func(ctx context.Context, in *providerv1beta3.QueryProviderRequest, opts ...grpc.CallOption)) *Query_Provider_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*providerv1beta3.QueryProviderRequest), variadicArgs...) - }) - return _c -} - -func (_c *Query_Provider_Call) Return(_a0 *providerv1beta3.QueryProviderResponse, _a1 error) *Query_Provider_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Query_Provider_Call) RunAndReturn(run func(context.Context, *providerv1beta3.QueryProviderRequest, ...grpc.CallOption) (*providerv1beta3.QueryProviderResponse, error)) *Query_Provider_Call { - _c.Call.Return(run) - return _c -} - -// ProviderAttributes provides a mock function with given fields: ctx, in, opts -func (_m *Query) ProviderAttributes(ctx context.Context, in *v1beta3.QueryProviderAttributesRequest, opts ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for ProviderAttributes") - } - - var r0 *v1beta3.QueryProvidersResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *v1beta3.QueryProviderAttributesRequest, ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *v1beta3.QueryProviderAttributesRequest, ...grpc.CallOption) *v1beta3.QueryProvidersResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1beta3.QueryProvidersResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *v1beta3.QueryProviderAttributesRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query_ProviderAttributes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProviderAttributes' -type Query_ProviderAttributes_Call struct { - *mock.Call -} - -// ProviderAttributes is a helper method to define mock.On call -// - ctx context.Context -// - in *v1beta3.QueryProviderAttributesRequest -// - opts ...grpc.CallOption -func (_e *Query_Expecter) ProviderAttributes(ctx interface{}, in interface{}, opts ...interface{}) *Query_ProviderAttributes_Call { - return &Query_ProviderAttributes_Call{Call: _e.mock.On("ProviderAttributes", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *Query_ProviderAttributes_Call) Run(run func(ctx context.Context, in *v1beta3.QueryProviderAttributesRequest, opts ...grpc.CallOption)) *Query_ProviderAttributes_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*v1beta3.QueryProviderAttributesRequest), variadicArgs...) - }) - return _c -} - -func (_c *Query_ProviderAttributes_Call) Return(_a0 *v1beta3.QueryProvidersResponse, _a1 error) *Query_ProviderAttributes_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Query_ProviderAttributes_Call) RunAndReturn(run func(context.Context, *v1beta3.QueryProviderAttributesRequest, ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error)) *Query_ProviderAttributes_Call { - _c.Call.Return(run) - return _c -} - -// ProviderAuditorAttributes provides a mock function with given fields: ctx, in, opts -func (_m *Query) ProviderAuditorAttributes(ctx context.Context, in *v1beta3.QueryProviderAuditorRequest, opts ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for ProviderAuditorAttributes") - } - - var r0 *v1beta3.QueryProvidersResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *v1beta3.QueryProviderAuditorRequest, ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *v1beta3.QueryProviderAuditorRequest, ...grpc.CallOption) *v1beta3.QueryProvidersResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1beta3.QueryProvidersResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *v1beta3.QueryProviderAuditorRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query_ProviderAuditorAttributes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProviderAuditorAttributes' -type Query_ProviderAuditorAttributes_Call struct { - *mock.Call -} - -// ProviderAuditorAttributes is a helper method to define mock.On call -// - ctx context.Context -// - in *v1beta3.QueryProviderAuditorRequest -// - opts ...grpc.CallOption -func (_e *Query_Expecter) ProviderAuditorAttributes(ctx interface{}, in interface{}, opts ...interface{}) *Query_ProviderAuditorAttributes_Call { - return &Query_ProviderAuditorAttributes_Call{Call: _e.mock.On("ProviderAuditorAttributes", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *Query_ProviderAuditorAttributes_Call) Run(run func(ctx context.Context, in *v1beta3.QueryProviderAuditorRequest, opts ...grpc.CallOption)) *Query_ProviderAuditorAttributes_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*v1beta3.QueryProviderAuditorRequest), variadicArgs...) - }) - return _c -} - -func (_c *Query_ProviderAuditorAttributes_Call) Return(_a0 *v1beta3.QueryProvidersResponse, _a1 error) *Query_ProviderAuditorAttributes_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Query_ProviderAuditorAttributes_Call) RunAndReturn(run func(context.Context, *v1beta3.QueryProviderAuditorRequest, ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error)) *Query_ProviderAuditorAttributes_Call { - _c.Call.Return(run) - return _c -} - -// Providers provides a mock function with given fields: ctx, in, opts -func (_m *Query) Providers(ctx context.Context, in *providerv1beta3.QueryProvidersRequest, opts ...grpc.CallOption) (*providerv1beta3.QueryProvidersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Providers") - } - - var r0 *providerv1beta3.QueryProvidersResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta3.QueryProvidersRequest, ...grpc.CallOption) (*providerv1beta3.QueryProvidersResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta3.QueryProvidersRequest, ...grpc.CallOption) *providerv1beta3.QueryProvidersResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*providerv1beta3.QueryProvidersResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *providerv1beta3.QueryProvidersRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Query_Providers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Providers' -type Query_Providers_Call struct { - *mock.Call -} - -// Providers is a helper method to define mock.On call -// - ctx context.Context -// - in *providerv1beta3.QueryProvidersRequest -// - opts ...grpc.CallOption -func (_e *Query_Expecter) Providers(ctx interface{}, in interface{}, opts ...interface{}) *Query_Providers_Call { - return &Query_Providers_Call{Call: _e.mock.On("Providers", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *Query_Providers_Call) Run(run func(ctx context.Context, in *providerv1beta3.QueryProvidersRequest, opts ...grpc.CallOption)) *Query_Providers_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*providerv1beta3.QueryProvidersRequest), variadicArgs...) - }) - return _c -} - -func (_c *Query_Providers_Call) Return(_a0 *providerv1beta3.QueryProvidersResponse, _a1 error) *Query_Providers_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *Query_Providers_Call) RunAndReturn(run func(context.Context, *providerv1beta3.QueryProvidersRequest, ...grpc.CallOption) (*providerv1beta3.QueryProvidersResponse, error)) *Query_Providers_Call { - _c.Call.Return(run) - return _c -} - -// NewQuery creates a new instance of Query. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewQuery(t interface { - mock.TestingT - Cleanup(func()) -}) *Query { - mock := &Query{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/go/node/client/v1beta1/mocks/tx.go b/go/node/client/v1beta1/mocks/tx.go deleted file mode 100644 index 6846b8a1..00000000 --- a/go/node/client/v1beta1/mocks/tx.go +++ /dev/null @@ -1,98 +0,0 @@ -// Code generated by mockery v2.42.0. DO NOT EDIT. - -package mocks - -import ( - context "context" - - types "github.com/cosmos/cosmos-sdk/types" - mock "github.com/stretchr/testify/mock" -) - -// Tx is an autogenerated mock type for the Tx type -type Tx struct { - mock.Mock -} - -type Tx_Expecter struct { - mock *mock.Mock -} - -func (_m *Tx) EXPECT() *Tx_Expecter { - return &Tx_Expecter{mock: &_m.Mock} -} - -// Broadcast provides a mock function with given fields: _a0, _a1 -func (_m *Tx) Broadcast(_a0 context.Context, _a1 ...types.Msg) error { - _va := make([]interface{}, len(_a1)) - for _i := range _a1 { - _va[_i] = _a1[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Broadcast") - } - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, ...types.Msg) error); ok { - r0 = rf(_a0, _a1...) 
- } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Tx_Broadcast_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Broadcast' -type Tx_Broadcast_Call struct { - *mock.Call -} - -// Broadcast is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 ...types.Msg -func (_e *Tx_Expecter) Broadcast(_a0 interface{}, _a1 ...interface{}) *Tx_Broadcast_Call { - return &Tx_Broadcast_Call{Call: _e.mock.On("Broadcast", - append([]interface{}{_a0}, _a1...)...)} -} - -func (_c *Tx_Broadcast_Call) Run(run func(_a0 context.Context, _a1 ...types.Msg)) *Tx_Broadcast_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]types.Msg, len(args)-1) - for i, a := range args[1:] { - if a != nil { - variadicArgs[i] = a.(types.Msg) - } - } - run(args[0].(context.Context), variadicArgs...) - }) - return _c -} - -func (_c *Tx_Broadcast_Call) Return(_a0 error) *Tx_Broadcast_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *Tx_Broadcast_Call) RunAndReturn(run func(context.Context, ...types.Msg) error) *Tx_Broadcast_Call { - _c.Call.Return(run) - return _c -} - -// NewTx creates a new instance of Tx. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewTx(t interface { - mock.TestingT - Cleanup(func()) -}) *Tx { - mock := &Tx{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/go/node/client/v1beta2/client.go b/go/node/client/v1beta2/client.go deleted file mode 100644 index 1f3c8e5c..00000000 --- a/go/node/client/v1beta2/client.go +++ /dev/null @@ -1,143 +0,0 @@ -package v1beta2 - -import ( - "context" - "fmt" - - sdkclient "github.com/cosmos/cosmos-sdk/client" - authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" - banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" - disttypes "github.com/cosmos/cosmos-sdk/x/distribution/types" - evdtypes "github.com/cosmos/cosmos-sdk/x/evidence/types" - feegranttypes "github.com/cosmos/cosmos-sdk/x/feegrant" - govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" - minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types/proposal" - slashtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" - staketypes "github.com/cosmos/cosmos-sdk/x/staking/types" - upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" - - "github.com/cosmos/cosmos-sdk/x/authz" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/gogo/protobuf/proto" - tmrpc "github.com/tendermint/tendermint/rpc/core/types" - - atypes "github.com/akash-network/akash-api/go/node/audit/v1beta3" - ctypes "github.com/akash-network/akash-api/go/node/cert/v1beta3" - cltypes "github.com/akash-network/akash-api/go/node/client/types" - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - mtypes "github.com/akash-network/akash-api/go/node/market/v1beta4" - ptypes "github.com/akash-network/akash-api/go/node/provider/v1beta3" -) - -// QueryClient is the interface that exposes query modules. 
-// -//go:generate mockery --name QueryClient --output ./mocks -type QueryClient interface { - dtypes.QueryClient - mtypes.QueryClient - ptypes.QueryClient - atypes.QueryClient - ctypes.QueryClient - Auth() authtypes.QueryClient - Authz() authz.QueryClient - Bank() banktypes.QueryClient - Distribution() disttypes.QueryClient - Evidence() evdtypes.QueryClient - Feegrant() feegranttypes.QueryClient - Gov() govtypes.QueryClient - Mint() minttypes.QueryClient - Params() paramtypes.QueryClient - Slashing() slashtypes.QueryClient - Staking() staketypes.QueryClient - Upgrade() upgradetypes.QueryClient - - ClientContext() sdkclient.Context -} - -// TxClient is the interface that wraps the Broadcast method. -// Broadcast broadcasts a transaction. A transaction is composed of 1 or many messages. This allows several -// operations to be performed in a single transaction. -// A transaction broadcast can be configured with an arbitrary number of BroadcastOption. -// -//go:generate mockery --name TxClient --output ./mocks -type TxClient interface { - Broadcast(context.Context, []sdk.Msg, ...BroadcastOption) (interface{}, error) -} - -//go:generate mockery --name NodeClient --output ./mocks -type NodeClient interface { - SyncInfo(ctx context.Context) (*tmrpc.SyncInfo, error) -} - -// Client is the umbrella interface that exposes every other client's modules. -// -//go:generate mockery --name Client --output ./mocks -type Client interface { - Query() QueryClient - Tx() TxClient - Node() NodeClient - ClientContext() sdkclient.Context - PrintMessage(interface{}) error -} - -type client struct { - qclient *queryClient - tx TxClient - node *node -} - -var _ Client = (*client)(nil) - -// NewClient creates a new client. -func NewClient(ctx context.Context, cctx sdkclient.Context, opts ...cltypes.ClientOption) (Client, error) { - nd := newNode(cctx) - - cl := &client{ - qclient: newQueryClient(cctx), - node: nd, - } - - var err error - cl.tx, err = newSerialTx(ctx, cctx, nd, opts...) - if err != nil { - return nil, err - } - - return cl, nil -} - -// Query implements Client by returning the QueryClient instance of the client. -func (cl *client) Query() QueryClient { - return cl.qclient -} - -// Tx implements Client by returning the TxClient instance of the client. -func (cl *client) Tx() TxClient { - return cl.tx -} - -// Node implements Client by returning the NodeClient instance of the client. -func (cl *client) Node() NodeClient { - return cl.node -} - -// ClientContext implements Client by returning the Cosmos SDK client context instance of the client. -func (cl *client) ClientContext() sdkclient.Context { - return cl.qclient.cctx -} - -// PrintMessage implements Client by printing the raw message passed as parameter. 
-func (cl *client) PrintMessage(msg interface{}) error { - var err error - - switch m := msg.(type) { - case proto.Message: - err = cl.qclient.cctx.PrintProto(m) - case []byte: - err = cl.qclient.cctx.PrintString(fmt.Sprintf("%s\n", string(m))) - } - - return err -} diff --git a/go/node/client/v1beta2/errors.go b/go/node/client/v1beta2/errors.go deleted file mode 100644 index a5af3fcf..00000000 --- a/go/node/client/v1beta2/errors.go +++ /dev/null @@ -1,11 +0,0 @@ -package v1beta2 - -import ( - "errors" -) - -var ( - // ErrClientNotFound is a new error with message "Client not found" - ErrClientNotFound = errors.New("client not found") - ErrNodeNotSynced = errors.New("rpc node is not catching up") -) diff --git a/go/node/client/v1beta2/mocks/client.go b/go/node/client/v1beta2/mocks/client.go deleted file mode 100644 index ee15a525..00000000 --- a/go/node/client/v1beta2/mocks/client.go +++ /dev/null @@ -1,269 +0,0 @@ -// Code generated by mockery v2.42.0. DO NOT EDIT. - -package mocks - -import ( - client "github.com/cosmos/cosmos-sdk/client" - mock "github.com/stretchr/testify/mock" - - v1beta2 "github.com/akash-network/akash-api/go/node/client/v1beta2" -) - -// Client is an autogenerated mock type for the Client type -type Client struct { - mock.Mock -} - -type Client_Expecter struct { - mock *mock.Mock -} - -func (_m *Client) EXPECT() *Client_Expecter { - return &Client_Expecter{mock: &_m.Mock} -} - -// ClientContext provides a mock function with given fields: -func (_m *Client) ClientContext() client.Context { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for ClientContext") - } - - var r0 client.Context - if rf, ok := ret.Get(0).(func() client.Context); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(client.Context) - } - - return r0 -} - -// Client_ClientContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ClientContext' -type Client_ClientContext_Call struct { - *mock.Call -} - -// ClientContext is a helper method to define mock.On call -func (_e *Client_Expecter) ClientContext() *Client_ClientContext_Call { - return &Client_ClientContext_Call{Call: _e.mock.On("ClientContext")} -} - -func (_c *Client_ClientContext_Call) Run(run func()) *Client_ClientContext_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Client_ClientContext_Call) Return(_a0 client.Context) *Client_ClientContext_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *Client_ClientContext_Call) RunAndReturn(run func() client.Context) *Client_ClientContext_Call { - _c.Call.Return(run) - return _c -} - -// Node provides a mock function with given fields: -func (_m *Client) Node() v1beta2.NodeClient { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Node") - } - - var r0 v1beta2.NodeClient - if rf, ok := ret.Get(0).(func() v1beta2.NodeClient); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(v1beta2.NodeClient) - } - } - - return r0 -} - -// Client_Node_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Node' -type Client_Node_Call struct { - *mock.Call -} - -// Node is a helper method to define mock.On call -func (_e *Client_Expecter) Node() *Client_Node_Call { - return &Client_Node_Call{Call: _e.mock.On("Node")} -} - -func (_c *Client_Node_Call) Run(run func()) *Client_Node_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Client_Node_Call) Return(_a0 
v1beta2.NodeClient) *Client_Node_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *Client_Node_Call) RunAndReturn(run func() v1beta2.NodeClient) *Client_Node_Call { - _c.Call.Return(run) - return _c -} - -// PrintMessage provides a mock function with given fields: _a0 -func (_m *Client) PrintMessage(_a0 interface{}) error { - ret := _m.Called(_a0) - - if len(ret) == 0 { - panic("no return value specified for PrintMessage") - } - - var r0 error - if rf, ok := ret.Get(0).(func(interface{}) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Client_PrintMessage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PrintMessage' -type Client_PrintMessage_Call struct { - *mock.Call -} - -// PrintMessage is a helper method to define mock.On call -// - _a0 interface{} -func (_e *Client_Expecter) PrintMessage(_a0 interface{}) *Client_PrintMessage_Call { - return &Client_PrintMessage_Call{Call: _e.mock.On("PrintMessage", _a0)} -} - -func (_c *Client_PrintMessage_Call) Run(run func(_a0 interface{})) *Client_PrintMessage_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(interface{})) - }) - return _c -} - -func (_c *Client_PrintMessage_Call) Return(_a0 error) *Client_PrintMessage_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *Client_PrintMessage_Call) RunAndReturn(run func(interface{}) error) *Client_PrintMessage_Call { - _c.Call.Return(run) - return _c -} - -// Query provides a mock function with given fields: -func (_m *Client) Query() v1beta2.QueryClient { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Query") - } - - var r0 v1beta2.QueryClient - if rf, ok := ret.Get(0).(func() v1beta2.QueryClient); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(v1beta2.QueryClient) - } - } - - return r0 -} - -// Client_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' -type Client_Query_Call struct { - *mock.Call -} - -// Query is a helper method to define mock.On call -func (_e *Client_Expecter) Query() *Client_Query_Call { - return &Client_Query_Call{Call: _e.mock.On("Query")} -} - -func (_c *Client_Query_Call) Run(run func()) *Client_Query_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Client_Query_Call) Return(_a0 v1beta2.QueryClient) *Client_Query_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *Client_Query_Call) RunAndReturn(run func() v1beta2.QueryClient) *Client_Query_Call { - _c.Call.Return(run) - return _c -} - -// Tx provides a mock function with given fields: -func (_m *Client) Tx() v1beta2.TxClient { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Tx") - } - - var r0 v1beta2.TxClient - if rf, ok := ret.Get(0).(func() v1beta2.TxClient); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(v1beta2.TxClient) - } - } - - return r0 -} - -// Client_Tx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Tx' -type Client_Tx_Call struct { - *mock.Call -} - -// Tx is a helper method to define mock.On call -func (_e *Client_Expecter) Tx() *Client_Tx_Call { - return &Client_Tx_Call{Call: _e.mock.On("Tx")} -} - -func (_c *Client_Tx_Call) Run(run func()) *Client_Tx_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *Client_Tx_Call) Return(_a0 v1beta2.TxClient) *Client_Tx_Call { - _c.Call.Return(_a0) - return _c -} - 
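// Editorial note (not part of the patch): the v1beta2 client removed in these hunks exposed a richer
// surface than v1beta1 — Client bundles Query(), Tx(), Node(), ClientContext() and PrintMessage(),
// and TxClient.Broadcast takes a []sdk.Msg plus BroadcastOption values and returns (interface{}, error)
// rather than a bare error. The sketch below is illustrative only, written against the removed import
// paths; fakeTx is a hypothetical stand-in for a TxClient implementation (the package also generated a
// TxClient mock via its go:generate directive, not shown in this hunk).
package example_test

import (
	"context"
	"testing"

	sdk "github.com/cosmos/cosmos-sdk/types"

	v1beta2 "github.com/akash-network/akash-api/go/node/client/v1beta2"
	"github.com/akash-network/akash-api/go/node/client/v1beta2/mocks"
)

type fakeTx struct{}

// Broadcast matches the shape of the removed v1beta2 TxClient interface.
func (fakeTx) Broadcast(_ context.Context, _ []sdk.Msg, _ ...v1beta2.BroadcastOption) (interface{}, error) {
	return nil, nil
}

func TestTxWiringViaRemovedMocks(t *testing.T) {
	cl := mocks.NewClient(t)
	cl.EXPECT().Tx().Return(fakeTx{})

	if _, err := cl.Tx().Broadcast(context.Background(), nil); err != nil {
		t.Fatal(err)
	}
}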
-func (_c *Client_Tx_Call) RunAndReturn(run func() v1beta2.TxClient) *Client_Tx_Call { - _c.Call.Return(run) - return _c -} - -// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewClient(t interface { - mock.TestingT - Cleanup(func()) -}) *Client { - mock := &Client{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/go/node/client/v1beta2/mocks/node_client.go b/go/node/client/v1beta2/mocks/node_client.go deleted file mode 100644 index e567347b..00000000 --- a/go/node/client/v1beta2/mocks/node_client.go +++ /dev/null @@ -1,95 +0,0 @@ -// Code generated by mockery v2.42.0. DO NOT EDIT. - -package mocks - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" - coretypes "github.com/tendermint/tendermint/rpc/core/types" -) - -// NodeClient is an autogenerated mock type for the NodeClient type -type NodeClient struct { - mock.Mock -} - -type NodeClient_Expecter struct { - mock *mock.Mock -} - -func (_m *NodeClient) EXPECT() *NodeClient_Expecter { - return &NodeClient_Expecter{mock: &_m.Mock} -} - -// SyncInfo provides a mock function with given fields: ctx -func (_m *NodeClient) SyncInfo(ctx context.Context) (*coretypes.SyncInfo, error) { - ret := _m.Called(ctx) - - if len(ret) == 0 { - panic("no return value specified for SyncInfo") - } - - var r0 *coretypes.SyncInfo - var r1 error - if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.SyncInfo, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.SyncInfo); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.SyncInfo) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// NodeClient_SyncInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SyncInfo' -type NodeClient_SyncInfo_Call struct { - *mock.Call -} - -// SyncInfo is a helper method to define mock.On call -// - ctx context.Context -func (_e *NodeClient_Expecter) SyncInfo(ctx interface{}) *NodeClient_SyncInfo_Call { - return &NodeClient_SyncInfo_Call{Call: _e.mock.On("SyncInfo", ctx)} -} - -func (_c *NodeClient_SyncInfo_Call) Run(run func(ctx context.Context)) *NodeClient_SyncInfo_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *NodeClient_SyncInfo_Call) Return(_a0 *coretypes.SyncInfo, _a1 error) *NodeClient_SyncInfo_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *NodeClient_SyncInfo_Call) RunAndReturn(run func(context.Context) (*coretypes.SyncInfo, error)) *NodeClient_SyncInfo_Call { - _c.Call.Return(run) - return _c -} - -// NewNodeClient creates a new instance of NodeClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
-func NewNodeClient(t interface { - mock.TestingT - Cleanup(func()) -}) *NodeClient { - mock := &NodeClient{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/go/node/client/v1beta2/mocks/query_client.go b/go/node/client/v1beta2/mocks/query_client.go deleted file mode 100644 index 2d6f4733..00000000 --- a/go/node/client/v1beta2/mocks/query_client.go +++ /dev/null @@ -1,1866 +0,0 @@ -// Code generated by mockery v2.42.0. DO NOT EDIT. - -package mocks - -import ( - authz "github.com/cosmos/cosmos-sdk/x/authz" - banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" - - certv1beta3 "github.com/akash-network/akash-api/go/node/cert/v1beta3" - - client "github.com/cosmos/cosmos-sdk/client" - - context "context" - - deploymentv1beta3 "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - - distributiontypes "github.com/cosmos/cosmos-sdk/x/distribution/types" - - evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types" - - feegrant "github.com/cosmos/cosmos-sdk/x/feegrant" - - govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" - - grpc "google.golang.org/grpc" - - minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" - - mock "github.com/stretchr/testify/mock" - - proposal "github.com/cosmos/cosmos-sdk/x/params/types/proposal" - - providerv1beta3 "github.com/akash-network/akash-api/go/node/provider/v1beta3" - - slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" - - stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" - - types "github.com/cosmos/cosmos-sdk/x/auth/types" - - upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" - - v1beta3 "github.com/akash-network/akash-api/go/node/audit/v1beta3" - - v1beta4 "github.com/akash-network/akash-api/go/node/market/v1beta4" -) - -// QueryClient is an autogenerated mock type for the QueryClient type -type QueryClient struct { - mock.Mock -} - -type QueryClient_Expecter struct { - mock *mock.Mock -} - -func (_m *QueryClient) EXPECT() *QueryClient_Expecter { - return &QueryClient_Expecter{mock: &_m.Mock} -} - -// AllProvidersAttributes provides a mock function with given fields: ctx, in, opts -func (_m *QueryClient) AllProvidersAttributes(ctx context.Context, in *v1beta3.QueryAllProvidersAttributesRequest, opts ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for AllProvidersAttributes") - } - - var r0 *v1beta3.QueryProvidersResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *v1beta3.QueryAllProvidersAttributesRequest, ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *v1beta3.QueryAllProvidersAttributesRequest, ...grpc.CallOption) *v1beta3.QueryProvidersResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1beta3.QueryProvidersResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *v1beta3.QueryAllProvidersAttributesRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryClient_AllProvidersAttributes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AllProvidersAttributes' -type QueryClient_AllProvidersAttributes_Call struct { - *mock.Call -} - -// AllProvidersAttributes is a helper method to define mock.On call -// - ctx context.Context -// - in *v1beta3.QueryAllProvidersAttributesRequest -// - opts ...grpc.CallOption -func (_e *QueryClient_Expecter) AllProvidersAttributes(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_AllProvidersAttributes_Call { - return &QueryClient_AllProvidersAttributes_Call{Call: _e.mock.On("AllProvidersAttributes", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *QueryClient_AllProvidersAttributes_Call) Run(run func(ctx context.Context, in *v1beta3.QueryAllProvidersAttributesRequest, opts ...grpc.CallOption)) *QueryClient_AllProvidersAttributes_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*v1beta3.QueryAllProvidersAttributesRequest), variadicArgs...) - }) - return _c -} - -func (_c *QueryClient_AllProvidersAttributes_Call) Return(_a0 *v1beta3.QueryProvidersResponse, _a1 error) *QueryClient_AllProvidersAttributes_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *QueryClient_AllProvidersAttributes_Call) RunAndReturn(run func(context.Context, *v1beta3.QueryAllProvidersAttributesRequest, ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error)) *QueryClient_AllProvidersAttributes_Call { - _c.Call.Return(run) - return _c -} - -// AuditorAttributes provides a mock function with given fields: ctx, in, opts -func (_m *QueryClient) AuditorAttributes(ctx context.Context, in *v1beta3.QueryAuditorAttributesRequest, opts ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for AuditorAttributes") - } - - var r0 *v1beta3.QueryProvidersResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *v1beta3.QueryAuditorAttributesRequest, ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *v1beta3.QueryAuditorAttributesRequest, ...grpc.CallOption) *v1beta3.QueryProvidersResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1beta3.QueryProvidersResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *v1beta3.QueryAuditorAttributesRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryClient_AuditorAttributes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AuditorAttributes' -type QueryClient_AuditorAttributes_Call struct { - *mock.Call -} - -// AuditorAttributes is a helper method to define mock.On call -// - ctx context.Context -// - in *v1beta3.QueryAuditorAttributesRequest -// - opts ...grpc.CallOption -func (_e *QueryClient_Expecter) AuditorAttributes(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_AuditorAttributes_Call { - return &QueryClient_AuditorAttributes_Call{Call: _e.mock.On("AuditorAttributes", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *QueryClient_AuditorAttributes_Call) Run(run func(ctx context.Context, in *v1beta3.QueryAuditorAttributesRequest, opts ...grpc.CallOption)) *QueryClient_AuditorAttributes_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*v1beta3.QueryAuditorAttributesRequest), variadicArgs...) - }) - return _c -} - -func (_c *QueryClient_AuditorAttributes_Call) Return(_a0 *v1beta3.QueryProvidersResponse, _a1 error) *QueryClient_AuditorAttributes_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *QueryClient_AuditorAttributes_Call) RunAndReturn(run func(context.Context, *v1beta3.QueryAuditorAttributesRequest, ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error)) *QueryClient_AuditorAttributes_Call { - _c.Call.Return(run) - return _c -} - -// Auth provides a mock function with given fields: -func (_m *QueryClient) Auth() types.QueryClient { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Auth") - } - - var r0 types.QueryClient - if rf, ok := ret.Get(0).(func() types.QueryClient); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(types.QueryClient) - } - } - - return r0 -} - -// QueryClient_Auth_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Auth' -type QueryClient_Auth_Call struct { - *mock.Call -} - -// Auth is a helper method to define mock.On call -func (_e *QueryClient_Expecter) Auth() *QueryClient_Auth_Call { - return &QueryClient_Auth_Call{Call: _e.mock.On("Auth")} -} - -func (_c *QueryClient_Auth_Call) Run(run func()) *QueryClient_Auth_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *QueryClient_Auth_Call) Return(_a0 types.QueryClient) *QueryClient_Auth_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *QueryClient_Auth_Call) RunAndReturn(run func() types.QueryClient) *QueryClient_Auth_Call { - _c.Call.Return(run) - return _c -} - -// Authz provides a mock function with given fields: -func (_m *QueryClient) Authz() authz.QueryClient { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Authz") - } - - var r0 authz.QueryClient - if rf, ok := ret.Get(0).(func() authz.QueryClient); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(authz.QueryClient) - } - } - - return r0 -} - -// QueryClient_Authz_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Authz' -type QueryClient_Authz_Call struct { - *mock.Call -} - -// Authz is a helper method to define mock.On call -func (_e *QueryClient_Expecter) Authz() *QueryClient_Authz_Call { - return 
&QueryClient_Authz_Call{Call: _e.mock.On("Authz")} -} - -func (_c *QueryClient_Authz_Call) Run(run func()) *QueryClient_Authz_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *QueryClient_Authz_Call) Return(_a0 authz.QueryClient) *QueryClient_Authz_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *QueryClient_Authz_Call) RunAndReturn(run func() authz.QueryClient) *QueryClient_Authz_Call { - _c.Call.Return(run) - return _c -} - -// Bank provides a mock function with given fields: -func (_m *QueryClient) Bank() banktypes.QueryClient { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Bank") - } - - var r0 banktypes.QueryClient - if rf, ok := ret.Get(0).(func() banktypes.QueryClient); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(banktypes.QueryClient) - } - } - - return r0 -} - -// QueryClient_Bank_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Bank' -type QueryClient_Bank_Call struct { - *mock.Call -} - -// Bank is a helper method to define mock.On call -func (_e *QueryClient_Expecter) Bank() *QueryClient_Bank_Call { - return &QueryClient_Bank_Call{Call: _e.mock.On("Bank")} -} - -func (_c *QueryClient_Bank_Call) Run(run func()) *QueryClient_Bank_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *QueryClient_Bank_Call) Return(_a0 banktypes.QueryClient) *QueryClient_Bank_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *QueryClient_Bank_Call) RunAndReturn(run func() banktypes.QueryClient) *QueryClient_Bank_Call { - _c.Call.Return(run) - return _c -} - -// Bid provides a mock function with given fields: ctx, in, opts -func (_m *QueryClient) Bid(ctx context.Context, in *v1beta4.QueryBidRequest, opts ...grpc.CallOption) (*v1beta4.QueryBidResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Bid") - } - - var r0 *v1beta4.QueryBidResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryBidRequest, ...grpc.CallOption) (*v1beta4.QueryBidResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryBidRequest, ...grpc.CallOption) *v1beta4.QueryBidResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1beta4.QueryBidResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *v1beta4.QueryBidRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryClient_Bid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Bid' -type QueryClient_Bid_Call struct { - *mock.Call -} - -// Bid is a helper method to define mock.On call -// - ctx context.Context -// - in *v1beta4.QueryBidRequest -// - opts ...grpc.CallOption -func (_e *QueryClient_Expecter) Bid(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Bid_Call { - return &QueryClient_Bid_Call{Call: _e.mock.On("Bid", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *QueryClient_Bid_Call) Run(run func(ctx context.Context, in *v1beta4.QueryBidRequest, opts ...grpc.CallOption)) *QueryClient_Bid_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*v1beta4.QueryBidRequest), variadicArgs...) - }) - return _c -} - -func (_c *QueryClient_Bid_Call) Return(_a0 *v1beta4.QueryBidResponse, _a1 error) *QueryClient_Bid_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *QueryClient_Bid_Call) RunAndReturn(run func(context.Context, *v1beta4.QueryBidRequest, ...grpc.CallOption) (*v1beta4.QueryBidResponse, error)) *QueryClient_Bid_Call { - _c.Call.Return(run) - return _c -} - -// Bids provides a mock function with given fields: ctx, in, opts -func (_m *QueryClient) Bids(ctx context.Context, in *v1beta4.QueryBidsRequest, opts ...grpc.CallOption) (*v1beta4.QueryBidsResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Bids") - } - - var r0 *v1beta4.QueryBidsResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryBidsRequest, ...grpc.CallOption) (*v1beta4.QueryBidsResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryBidsRequest, ...grpc.CallOption) *v1beta4.QueryBidsResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1beta4.QueryBidsResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *v1beta4.QueryBidsRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryClient_Bids_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Bids' -type QueryClient_Bids_Call struct { - *mock.Call -} - -// Bids is a helper method to define mock.On call -// - ctx context.Context -// - in *v1beta4.QueryBidsRequest -// - opts ...grpc.CallOption -func (_e *QueryClient_Expecter) Bids(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Bids_Call { - return &QueryClient_Bids_Call{Call: _e.mock.On("Bids", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *QueryClient_Bids_Call) Run(run func(ctx context.Context, in *v1beta4.QueryBidsRequest, opts ...grpc.CallOption)) *QueryClient_Bids_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*v1beta4.QueryBidsRequest), variadicArgs...) 
- }) - return _c -} - -func (_c *QueryClient_Bids_Call) Return(_a0 *v1beta4.QueryBidsResponse, _a1 error) *QueryClient_Bids_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *QueryClient_Bids_Call) RunAndReturn(run func(context.Context, *v1beta4.QueryBidsRequest, ...grpc.CallOption) (*v1beta4.QueryBidsResponse, error)) *QueryClient_Bids_Call { - _c.Call.Return(run) - return _c -} - -// Certificates provides a mock function with given fields: ctx, in, opts -func (_m *QueryClient) Certificates(ctx context.Context, in *certv1beta3.QueryCertificatesRequest, opts ...grpc.CallOption) (*certv1beta3.QueryCertificatesResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Certificates") - } - - var r0 *certv1beta3.QueryCertificatesResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *certv1beta3.QueryCertificatesRequest, ...grpc.CallOption) (*certv1beta3.QueryCertificatesResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *certv1beta3.QueryCertificatesRequest, ...grpc.CallOption) *certv1beta3.QueryCertificatesResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*certv1beta3.QueryCertificatesResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *certv1beta3.QueryCertificatesRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryClient_Certificates_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Certificates' -type QueryClient_Certificates_Call struct { - *mock.Call -} - -// Certificates is a helper method to define mock.On call -// - ctx context.Context -// - in *certv1beta3.QueryCertificatesRequest -// - opts ...grpc.CallOption -func (_e *QueryClient_Expecter) Certificates(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Certificates_Call { - return &QueryClient_Certificates_Call{Call: _e.mock.On("Certificates", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *QueryClient_Certificates_Call) Run(run func(ctx context.Context, in *certv1beta3.QueryCertificatesRequest, opts ...grpc.CallOption)) *QueryClient_Certificates_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*certv1beta3.QueryCertificatesRequest), variadicArgs...) 
- }) - return _c -} - -func (_c *QueryClient_Certificates_Call) Return(_a0 *certv1beta3.QueryCertificatesResponse, _a1 error) *QueryClient_Certificates_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *QueryClient_Certificates_Call) RunAndReturn(run func(context.Context, *certv1beta3.QueryCertificatesRequest, ...grpc.CallOption) (*certv1beta3.QueryCertificatesResponse, error)) *QueryClient_Certificates_Call { - _c.Call.Return(run) - return _c -} - -// ClientContext provides a mock function with given fields: -func (_m *QueryClient) ClientContext() client.Context { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for ClientContext") - } - - var r0 client.Context - if rf, ok := ret.Get(0).(func() client.Context); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(client.Context) - } - - return r0 -} - -// QueryClient_ClientContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ClientContext' -type QueryClient_ClientContext_Call struct { - *mock.Call -} - -// ClientContext is a helper method to define mock.On call -func (_e *QueryClient_Expecter) ClientContext() *QueryClient_ClientContext_Call { - return &QueryClient_ClientContext_Call{Call: _e.mock.On("ClientContext")} -} - -func (_c *QueryClient_ClientContext_Call) Run(run func()) *QueryClient_ClientContext_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *QueryClient_ClientContext_Call) Return(_a0 client.Context) *QueryClient_ClientContext_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *QueryClient_ClientContext_Call) RunAndReturn(run func() client.Context) *QueryClient_ClientContext_Call { - _c.Call.Return(run) - return _c -} - -// Deployment provides a mock function with given fields: ctx, in, opts -func (_m *QueryClient) Deployment(ctx context.Context, in *deploymentv1beta3.QueryDeploymentRequest, opts ...grpc.CallOption) (*deploymentv1beta3.QueryDeploymentResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Deployment") - } - - var r0 *deploymentv1beta3.QueryDeploymentResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta3.QueryDeploymentRequest, ...grpc.CallOption) (*deploymentv1beta3.QueryDeploymentResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta3.QueryDeploymentRequest, ...grpc.CallOption) *deploymentv1beta3.QueryDeploymentResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*deploymentv1beta3.QueryDeploymentResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *deploymentv1beta3.QueryDeploymentRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryClient_Deployment_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Deployment' -type QueryClient_Deployment_Call struct { - *mock.Call -} - -// Deployment is a helper method to define mock.On call -// - ctx context.Context -// - in *deploymentv1beta3.QueryDeploymentRequest -// - opts ...grpc.CallOption -func (_e *QueryClient_Expecter) Deployment(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Deployment_Call { - return &QueryClient_Deployment_Call{Call: _e.mock.On("Deployment", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *QueryClient_Deployment_Call) Run(run func(ctx context.Context, in *deploymentv1beta3.QueryDeploymentRequest, opts ...grpc.CallOption)) *QueryClient_Deployment_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*deploymentv1beta3.QueryDeploymentRequest), variadicArgs...) - }) - return _c -} - -func (_c *QueryClient_Deployment_Call) Return(_a0 *deploymentv1beta3.QueryDeploymentResponse, _a1 error) *QueryClient_Deployment_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *QueryClient_Deployment_Call) RunAndReturn(run func(context.Context, *deploymentv1beta3.QueryDeploymentRequest, ...grpc.CallOption) (*deploymentv1beta3.QueryDeploymentResponse, error)) *QueryClient_Deployment_Call { - _c.Call.Return(run) - return _c -} - -// Deployments provides a mock function with given fields: ctx, in, opts -func (_m *QueryClient) Deployments(ctx context.Context, in *deploymentv1beta3.QueryDeploymentsRequest, opts ...grpc.CallOption) (*deploymentv1beta3.QueryDeploymentsResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Deployments") - } - - var r0 *deploymentv1beta3.QueryDeploymentsResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta3.QueryDeploymentsRequest, ...grpc.CallOption) (*deploymentv1beta3.QueryDeploymentsResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta3.QueryDeploymentsRequest, ...grpc.CallOption) *deploymentv1beta3.QueryDeploymentsResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*deploymentv1beta3.QueryDeploymentsResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *deploymentv1beta3.QueryDeploymentsRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryClient_Deployments_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Deployments' -type QueryClient_Deployments_Call struct { - *mock.Call -} - -// Deployments is a helper method to define mock.On call -// - ctx context.Context -// - in *deploymentv1beta3.QueryDeploymentsRequest -// - opts ...grpc.CallOption -func (_e *QueryClient_Expecter) Deployments(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Deployments_Call { - return &QueryClient_Deployments_Call{Call: _e.mock.On("Deployments", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *QueryClient_Deployments_Call) Run(run func(ctx context.Context, in *deploymentv1beta3.QueryDeploymentsRequest, opts ...grpc.CallOption)) *QueryClient_Deployments_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*deploymentv1beta3.QueryDeploymentsRequest), variadicArgs...) - }) - return _c -} - -func (_c *QueryClient_Deployments_Call) Return(_a0 *deploymentv1beta3.QueryDeploymentsResponse, _a1 error) *QueryClient_Deployments_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *QueryClient_Deployments_Call) RunAndReturn(run func(context.Context, *deploymentv1beta3.QueryDeploymentsRequest, ...grpc.CallOption) (*deploymentv1beta3.QueryDeploymentsResponse, error)) *QueryClient_Deployments_Call { - _c.Call.Return(run) - return _c -} - -// Distribution provides a mock function with given fields: -func (_m *QueryClient) Distribution() distributiontypes.QueryClient { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Distribution") - } - - var r0 distributiontypes.QueryClient - if rf, ok := ret.Get(0).(func() distributiontypes.QueryClient); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(distributiontypes.QueryClient) - } - } - - return r0 -} - -// QueryClient_Distribution_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Distribution' -type QueryClient_Distribution_Call struct { - *mock.Call -} - -// Distribution is a helper method to define mock.On call -func (_e *QueryClient_Expecter) Distribution() *QueryClient_Distribution_Call { - return &QueryClient_Distribution_Call{Call: _e.mock.On("Distribution")} -} - -func (_c *QueryClient_Distribution_Call) Run(run func()) *QueryClient_Distribution_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *QueryClient_Distribution_Call) Return(_a0 distributiontypes.QueryClient) *QueryClient_Distribution_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *QueryClient_Distribution_Call) RunAndReturn(run func() distributiontypes.QueryClient) *QueryClient_Distribution_Call { - _c.Call.Return(run) - return _c -} - -// Evidence provides a mock function with given fields: -func (_m *QueryClient) Evidence() evidencetypes.QueryClient { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Evidence") - } - - var r0 evidencetypes.QueryClient - if rf, ok := ret.Get(0).(func() evidencetypes.QueryClient); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(evidencetypes.QueryClient) - } - } - - return r0 -} - -// QueryClient_Evidence_Call is a *mock.Call that shadows Run/Return methods with type explicit version for 
method 'Evidence' -type QueryClient_Evidence_Call struct { - *mock.Call -} - -// Evidence is a helper method to define mock.On call -func (_e *QueryClient_Expecter) Evidence() *QueryClient_Evidence_Call { - return &QueryClient_Evidence_Call{Call: _e.mock.On("Evidence")} -} - -func (_c *QueryClient_Evidence_Call) Run(run func()) *QueryClient_Evidence_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *QueryClient_Evidence_Call) Return(_a0 evidencetypes.QueryClient) *QueryClient_Evidence_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *QueryClient_Evidence_Call) RunAndReturn(run func() evidencetypes.QueryClient) *QueryClient_Evidence_Call { - _c.Call.Return(run) - return _c -} - -// Feegrant provides a mock function with given fields: -func (_m *QueryClient) Feegrant() feegrant.QueryClient { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Feegrant") - } - - var r0 feegrant.QueryClient - if rf, ok := ret.Get(0).(func() feegrant.QueryClient); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(feegrant.QueryClient) - } - } - - return r0 -} - -// QueryClient_Feegrant_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Feegrant' -type QueryClient_Feegrant_Call struct { - *mock.Call -} - -// Feegrant is a helper method to define mock.On call -func (_e *QueryClient_Expecter) Feegrant() *QueryClient_Feegrant_Call { - return &QueryClient_Feegrant_Call{Call: _e.mock.On("Feegrant")} -} - -func (_c *QueryClient_Feegrant_Call) Run(run func()) *QueryClient_Feegrant_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *QueryClient_Feegrant_Call) Return(_a0 feegrant.QueryClient) *QueryClient_Feegrant_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *QueryClient_Feegrant_Call) RunAndReturn(run func() feegrant.QueryClient) *QueryClient_Feegrant_Call { - _c.Call.Return(run) - return _c -} - -// Gov provides a mock function with given fields: -func (_m *QueryClient) Gov() govtypes.QueryClient { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Gov") - } - - var r0 govtypes.QueryClient - if rf, ok := ret.Get(0).(func() govtypes.QueryClient); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(govtypes.QueryClient) - } - } - - return r0 -} - -// QueryClient_Gov_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Gov' -type QueryClient_Gov_Call struct { - *mock.Call -} - -// Gov is a helper method to define mock.On call -func (_e *QueryClient_Expecter) Gov() *QueryClient_Gov_Call { - return &QueryClient_Gov_Call{Call: _e.mock.On("Gov")} -} - -func (_c *QueryClient_Gov_Call) Run(run func()) *QueryClient_Gov_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *QueryClient_Gov_Call) Return(_a0 govtypes.QueryClient) *QueryClient_Gov_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *QueryClient_Gov_Call) RunAndReturn(run func() govtypes.QueryClient) *QueryClient_Gov_Call { - _c.Call.Return(run) - return _c -} - -// Group provides a mock function with given fields: ctx, in, opts -func (_m *QueryClient) Group(ctx context.Context, in *deploymentv1beta3.QueryGroupRequest, opts ...grpc.CallOption) (*deploymentv1beta3.QueryGroupResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, 
_va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Group") - } - - var r0 *deploymentv1beta3.QueryGroupResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta3.QueryGroupRequest, ...grpc.CallOption) (*deploymentv1beta3.QueryGroupResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta3.QueryGroupRequest, ...grpc.CallOption) *deploymentv1beta3.QueryGroupResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*deploymentv1beta3.QueryGroupResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *deploymentv1beta3.QueryGroupRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryClient_Group_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Group' -type QueryClient_Group_Call struct { - *mock.Call -} - -// Group is a helper method to define mock.On call -// - ctx context.Context -// - in *deploymentv1beta3.QueryGroupRequest -// - opts ...grpc.CallOption -func (_e *QueryClient_Expecter) Group(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Group_Call { - return &QueryClient_Group_Call{Call: _e.mock.On("Group", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *QueryClient_Group_Call) Run(run func(ctx context.Context, in *deploymentv1beta3.QueryGroupRequest, opts ...grpc.CallOption)) *QueryClient_Group_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*deploymentv1beta3.QueryGroupRequest), variadicArgs...) - }) - return _c -} - -func (_c *QueryClient_Group_Call) Return(_a0 *deploymentv1beta3.QueryGroupResponse, _a1 error) *QueryClient_Group_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *QueryClient_Group_Call) RunAndReturn(run func(context.Context, *deploymentv1beta3.QueryGroupRequest, ...grpc.CallOption) (*deploymentv1beta3.QueryGroupResponse, error)) *QueryClient_Group_Call { - _c.Call.Return(run) - return _c -} - -// Lease provides a mock function with given fields: ctx, in, opts -func (_m *QueryClient) Lease(ctx context.Context, in *v1beta4.QueryLeaseRequest, opts ...grpc.CallOption) (*v1beta4.QueryLeaseResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Lease") - } - - var r0 *v1beta4.QueryLeaseResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryLeaseRequest, ...grpc.CallOption) (*v1beta4.QueryLeaseResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryLeaseRequest, ...grpc.CallOption) *v1beta4.QueryLeaseResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1beta4.QueryLeaseResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *v1beta4.QueryLeaseRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryClient_Lease_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Lease' -type QueryClient_Lease_Call struct { - *mock.Call -} - -// Lease is a helper method to define mock.On call -// - ctx context.Context -// - in *v1beta4.QueryLeaseRequest -// - opts ...grpc.CallOption -func (_e *QueryClient_Expecter) Lease(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Lease_Call { - return &QueryClient_Lease_Call{Call: _e.mock.On("Lease", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *QueryClient_Lease_Call) Run(run func(ctx context.Context, in *v1beta4.QueryLeaseRequest, opts ...grpc.CallOption)) *QueryClient_Lease_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*v1beta4.QueryLeaseRequest), variadicArgs...) - }) - return _c -} - -func (_c *QueryClient_Lease_Call) Return(_a0 *v1beta4.QueryLeaseResponse, _a1 error) *QueryClient_Lease_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *QueryClient_Lease_Call) RunAndReturn(run func(context.Context, *v1beta4.QueryLeaseRequest, ...grpc.CallOption) (*v1beta4.QueryLeaseResponse, error)) *QueryClient_Lease_Call { - _c.Call.Return(run) - return _c -} - -// Leases provides a mock function with given fields: ctx, in, opts -func (_m *QueryClient) Leases(ctx context.Context, in *v1beta4.QueryLeasesRequest, opts ...grpc.CallOption) (*v1beta4.QueryLeasesResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Leases") - } - - var r0 *v1beta4.QueryLeasesResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryLeasesRequest, ...grpc.CallOption) (*v1beta4.QueryLeasesResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryLeasesRequest, ...grpc.CallOption) *v1beta4.QueryLeasesResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1beta4.QueryLeasesResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *v1beta4.QueryLeasesRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryClient_Leases_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Leases' -type QueryClient_Leases_Call struct { - *mock.Call -} - -// Leases is a helper method to define mock.On call -// - ctx context.Context -// - in *v1beta4.QueryLeasesRequest -// - opts ...grpc.CallOption -func (_e *QueryClient_Expecter) Leases(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Leases_Call { - return &QueryClient_Leases_Call{Call: _e.mock.On("Leases", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *QueryClient_Leases_Call) Run(run func(ctx context.Context, in *v1beta4.QueryLeasesRequest, opts ...grpc.CallOption)) *QueryClient_Leases_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*v1beta4.QueryLeasesRequest), variadicArgs...) - }) - return _c -} - -func (_c *QueryClient_Leases_Call) Return(_a0 *v1beta4.QueryLeasesResponse, _a1 error) *QueryClient_Leases_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *QueryClient_Leases_Call) RunAndReturn(run func(context.Context, *v1beta4.QueryLeasesRequest, ...grpc.CallOption) (*v1beta4.QueryLeasesResponse, error)) *QueryClient_Leases_Call { - _c.Call.Return(run) - return _c -} - -// Mint provides a mock function with given fields: -func (_m *QueryClient) Mint() minttypes.QueryClient { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Mint") - } - - var r0 minttypes.QueryClient - if rf, ok := ret.Get(0).(func() minttypes.QueryClient); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(minttypes.QueryClient) - } - } - - return r0 -} - -// QueryClient_Mint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Mint' -type QueryClient_Mint_Call struct { - *mock.Call -} - -// Mint is a helper method to define mock.On call -func (_e *QueryClient_Expecter) Mint() *QueryClient_Mint_Call { - return &QueryClient_Mint_Call{Call: _e.mock.On("Mint")} -} - -func (_c *QueryClient_Mint_Call) Run(run func()) *QueryClient_Mint_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *QueryClient_Mint_Call) Return(_a0 minttypes.QueryClient) *QueryClient_Mint_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *QueryClient_Mint_Call) RunAndReturn(run func() minttypes.QueryClient) *QueryClient_Mint_Call { - _c.Call.Return(run) - return _c -} - -// Order provides a mock function with given fields: ctx, in, opts -func (_m *QueryClient) Order(ctx context.Context, in *v1beta4.QueryOrderRequest, opts ...grpc.CallOption) (*v1beta4.QueryOrderResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Order") - } - - var r0 *v1beta4.QueryOrderResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryOrderRequest, ...grpc.CallOption) (*v1beta4.QueryOrderResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryOrderRequest, ...grpc.CallOption) *v1beta4.QueryOrderResponse); ok { - r0 = rf(ctx, in, opts...) 
- } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1beta4.QueryOrderResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *v1beta4.QueryOrderRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryClient_Order_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Order' -type QueryClient_Order_Call struct { - *mock.Call -} - -// Order is a helper method to define mock.On call -// - ctx context.Context -// - in *v1beta4.QueryOrderRequest -// - opts ...grpc.CallOption -func (_e *QueryClient_Expecter) Order(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Order_Call { - return &QueryClient_Order_Call{Call: _e.mock.On("Order", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *QueryClient_Order_Call) Run(run func(ctx context.Context, in *v1beta4.QueryOrderRequest, opts ...grpc.CallOption)) *QueryClient_Order_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*v1beta4.QueryOrderRequest), variadicArgs...) - }) - return _c -} - -func (_c *QueryClient_Order_Call) Return(_a0 *v1beta4.QueryOrderResponse, _a1 error) *QueryClient_Order_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *QueryClient_Order_Call) RunAndReturn(run func(context.Context, *v1beta4.QueryOrderRequest, ...grpc.CallOption) (*v1beta4.QueryOrderResponse, error)) *QueryClient_Order_Call { - _c.Call.Return(run) - return _c -} - -// Orders provides a mock function with given fields: ctx, in, opts -func (_m *QueryClient) Orders(ctx context.Context, in *v1beta4.QueryOrdersRequest, opts ...grpc.CallOption) (*v1beta4.QueryOrdersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Orders") - } - - var r0 *v1beta4.QueryOrdersResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryOrdersRequest, ...grpc.CallOption) (*v1beta4.QueryOrdersResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryOrdersRequest, ...grpc.CallOption) *v1beta4.QueryOrdersResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1beta4.QueryOrdersResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *v1beta4.QueryOrdersRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryClient_Orders_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Orders' -type QueryClient_Orders_Call struct { - *mock.Call -} - -// Orders is a helper method to define mock.On call -// - ctx context.Context -// - in *v1beta4.QueryOrdersRequest -// - opts ...grpc.CallOption -func (_e *QueryClient_Expecter) Orders(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Orders_Call { - return &QueryClient_Orders_Call{Call: _e.mock.On("Orders", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *QueryClient_Orders_Call) Run(run func(ctx context.Context, in *v1beta4.QueryOrdersRequest, opts ...grpc.CallOption)) *QueryClient_Orders_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*v1beta4.QueryOrdersRequest), variadicArgs...) - }) - return _c -} - -func (_c *QueryClient_Orders_Call) Return(_a0 *v1beta4.QueryOrdersResponse, _a1 error) *QueryClient_Orders_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *QueryClient_Orders_Call) RunAndReturn(run func(context.Context, *v1beta4.QueryOrdersRequest, ...grpc.CallOption) (*v1beta4.QueryOrdersResponse, error)) *QueryClient_Orders_Call { - _c.Call.Return(run) - return _c -} - -// Params provides a mock function with given fields: -func (_m *QueryClient) Params() proposal.QueryClient { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Params") - } - - var r0 proposal.QueryClient - if rf, ok := ret.Get(0).(func() proposal.QueryClient); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(proposal.QueryClient) - } - } - - return r0 -} - -// QueryClient_Params_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Params' -type QueryClient_Params_Call struct { - *mock.Call -} - -// Params is a helper method to define mock.On call -func (_e *QueryClient_Expecter) Params() *QueryClient_Params_Call { - return &QueryClient_Params_Call{Call: _e.mock.On("Params")} -} - -func (_c *QueryClient_Params_Call) Run(run func()) *QueryClient_Params_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *QueryClient_Params_Call) Return(_a0 proposal.QueryClient) *QueryClient_Params_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *QueryClient_Params_Call) RunAndReturn(run func() proposal.QueryClient) *QueryClient_Params_Call { - _c.Call.Return(run) - return _c -} - -// Provider provides a mock function with given fields: ctx, in, opts -func (_m *QueryClient) Provider(ctx context.Context, in *providerv1beta3.QueryProviderRequest, opts ...grpc.CallOption) (*providerv1beta3.QueryProviderResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Provider") - } - - var r0 *providerv1beta3.QueryProviderResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta3.QueryProviderRequest, ...grpc.CallOption) (*providerv1beta3.QueryProviderResponse, error)); ok { - return rf(ctx, in, opts...) 
- } - if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta3.QueryProviderRequest, ...grpc.CallOption) *providerv1beta3.QueryProviderResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*providerv1beta3.QueryProviderResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *providerv1beta3.QueryProviderRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryClient_Provider_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Provider' -type QueryClient_Provider_Call struct { - *mock.Call -} - -// Provider is a helper method to define mock.On call -// - ctx context.Context -// - in *providerv1beta3.QueryProviderRequest -// - opts ...grpc.CallOption -func (_e *QueryClient_Expecter) Provider(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Provider_Call { - return &QueryClient_Provider_Call{Call: _e.mock.On("Provider", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *QueryClient_Provider_Call) Run(run func(ctx context.Context, in *providerv1beta3.QueryProviderRequest, opts ...grpc.CallOption)) *QueryClient_Provider_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*providerv1beta3.QueryProviderRequest), variadicArgs...) - }) - return _c -} - -func (_c *QueryClient_Provider_Call) Return(_a0 *providerv1beta3.QueryProviderResponse, _a1 error) *QueryClient_Provider_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *QueryClient_Provider_Call) RunAndReturn(run func(context.Context, *providerv1beta3.QueryProviderRequest, ...grpc.CallOption) (*providerv1beta3.QueryProviderResponse, error)) *QueryClient_Provider_Call { - _c.Call.Return(run) - return _c -} - -// ProviderAttributes provides a mock function with given fields: ctx, in, opts -func (_m *QueryClient) ProviderAttributes(ctx context.Context, in *v1beta3.QueryProviderAttributesRequest, opts ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for ProviderAttributes") - } - - var r0 *v1beta3.QueryProvidersResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *v1beta3.QueryProviderAttributesRequest, ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *v1beta3.QueryProviderAttributesRequest, ...grpc.CallOption) *v1beta3.QueryProvidersResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1beta3.QueryProvidersResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *v1beta3.QueryProviderAttributesRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryClient_ProviderAttributes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProviderAttributes' -type QueryClient_ProviderAttributes_Call struct { - *mock.Call -} - -// ProviderAttributes is a helper method to define mock.On call -// - ctx context.Context -// - in *v1beta3.QueryProviderAttributesRequest -// - opts ...grpc.CallOption -func (_e *QueryClient_Expecter) ProviderAttributes(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_ProviderAttributes_Call { - return &QueryClient_ProviderAttributes_Call{Call: _e.mock.On("ProviderAttributes", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *QueryClient_ProviderAttributes_Call) Run(run func(ctx context.Context, in *v1beta3.QueryProviderAttributesRequest, opts ...grpc.CallOption)) *QueryClient_ProviderAttributes_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*v1beta3.QueryProviderAttributesRequest), variadicArgs...) - }) - return _c -} - -func (_c *QueryClient_ProviderAttributes_Call) Return(_a0 *v1beta3.QueryProvidersResponse, _a1 error) *QueryClient_ProviderAttributes_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *QueryClient_ProviderAttributes_Call) RunAndReturn(run func(context.Context, *v1beta3.QueryProviderAttributesRequest, ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error)) *QueryClient_ProviderAttributes_Call { - _c.Call.Return(run) - return _c -} - -// ProviderAuditorAttributes provides a mock function with given fields: ctx, in, opts -func (_m *QueryClient) ProviderAuditorAttributes(ctx context.Context, in *v1beta3.QueryProviderAuditorRequest, opts ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for ProviderAuditorAttributes") - } - - var r0 *v1beta3.QueryProvidersResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *v1beta3.QueryProviderAuditorRequest, ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *v1beta3.QueryProviderAuditorRequest, ...grpc.CallOption) *v1beta3.QueryProvidersResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*v1beta3.QueryProvidersResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *v1beta3.QueryProviderAuditorRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryClient_ProviderAuditorAttributes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProviderAuditorAttributes' -type QueryClient_ProviderAuditorAttributes_Call struct { - *mock.Call -} - -// ProviderAuditorAttributes is a helper method to define mock.On call -// - ctx context.Context -// - in *v1beta3.QueryProviderAuditorRequest -// - opts ...grpc.CallOption -func (_e *QueryClient_Expecter) ProviderAuditorAttributes(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_ProviderAuditorAttributes_Call { - return &QueryClient_ProviderAuditorAttributes_Call{Call: _e.mock.On("ProviderAuditorAttributes", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *QueryClient_ProviderAuditorAttributes_Call) Run(run func(ctx context.Context, in *v1beta3.QueryProviderAuditorRequest, opts ...grpc.CallOption)) *QueryClient_ProviderAuditorAttributes_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*v1beta3.QueryProviderAuditorRequest), variadicArgs...) - }) - return _c -} - -func (_c *QueryClient_ProviderAuditorAttributes_Call) Return(_a0 *v1beta3.QueryProvidersResponse, _a1 error) *QueryClient_ProviderAuditorAttributes_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *QueryClient_ProviderAuditorAttributes_Call) RunAndReturn(run func(context.Context, *v1beta3.QueryProviderAuditorRequest, ...grpc.CallOption) (*v1beta3.QueryProvidersResponse, error)) *QueryClient_ProviderAuditorAttributes_Call { - _c.Call.Return(run) - return _c -} - -// Providers provides a mock function with given fields: ctx, in, opts -func (_m *QueryClient) Providers(ctx context.Context, in *providerv1beta3.QueryProvidersRequest, opts ...grpc.CallOption) (*providerv1beta3.QueryProvidersResponse, error) { - _va := make([]interface{}, len(opts)) - for _i := range opts { - _va[_i] = opts[_i] - } - var _ca []interface{} - _ca = append(_ca, ctx, in) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - if len(ret) == 0 { - panic("no return value specified for Providers") - } - - var r0 *providerv1beta3.QueryProvidersResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta3.QueryProvidersRequest, ...grpc.CallOption) (*providerv1beta3.QueryProvidersResponse, error)); ok { - return rf(ctx, in, opts...) - } - if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta3.QueryProvidersRequest, ...grpc.CallOption) *providerv1beta3.QueryProvidersResponse); ok { - r0 = rf(ctx, in, opts...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*providerv1beta3.QueryProvidersResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *providerv1beta3.QueryProvidersRequest, ...grpc.CallOption) error); ok { - r1 = rf(ctx, in, opts...) 
- } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// QueryClient_Providers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Providers' -type QueryClient_Providers_Call struct { - *mock.Call -} - -// Providers is a helper method to define mock.On call -// - ctx context.Context -// - in *providerv1beta3.QueryProvidersRequest -// - opts ...grpc.CallOption -func (_e *QueryClient_Expecter) Providers(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Providers_Call { - return &QueryClient_Providers_Call{Call: _e.mock.On("Providers", - append([]interface{}{ctx, in}, opts...)...)} -} - -func (_c *QueryClient_Providers_Call) Run(run func(ctx context.Context, in *providerv1beta3.QueryProvidersRequest, opts ...grpc.CallOption)) *QueryClient_Providers_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]grpc.CallOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(grpc.CallOption) - } - } - run(args[0].(context.Context), args[1].(*providerv1beta3.QueryProvidersRequest), variadicArgs...) - }) - return _c -} - -func (_c *QueryClient_Providers_Call) Return(_a0 *providerv1beta3.QueryProvidersResponse, _a1 error) *QueryClient_Providers_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *QueryClient_Providers_Call) RunAndReturn(run func(context.Context, *providerv1beta3.QueryProvidersRequest, ...grpc.CallOption) (*providerv1beta3.QueryProvidersResponse, error)) *QueryClient_Providers_Call { - _c.Call.Return(run) - return _c -} - -// Slashing provides a mock function with given fields: -func (_m *QueryClient) Slashing() slashingtypes.QueryClient { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Slashing") - } - - var r0 slashingtypes.QueryClient - if rf, ok := ret.Get(0).(func() slashingtypes.QueryClient); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(slashingtypes.QueryClient) - } - } - - return r0 -} - -// QueryClient_Slashing_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Slashing' -type QueryClient_Slashing_Call struct { - *mock.Call -} - -// Slashing is a helper method to define mock.On call -func (_e *QueryClient_Expecter) Slashing() *QueryClient_Slashing_Call { - return &QueryClient_Slashing_Call{Call: _e.mock.On("Slashing")} -} - -func (_c *QueryClient_Slashing_Call) Run(run func()) *QueryClient_Slashing_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *QueryClient_Slashing_Call) Return(_a0 slashingtypes.QueryClient) *QueryClient_Slashing_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *QueryClient_Slashing_Call) RunAndReturn(run func() slashingtypes.QueryClient) *QueryClient_Slashing_Call { - _c.Call.Return(run) - return _c -} - -// Staking provides a mock function with given fields: -func (_m *QueryClient) Staking() stakingtypes.QueryClient { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Staking") - } - - var r0 stakingtypes.QueryClient - if rf, ok := ret.Get(0).(func() stakingtypes.QueryClient); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(stakingtypes.QueryClient) - } - } - - return r0 -} - -// QueryClient_Staking_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Staking' -type QueryClient_Staking_Call struct { - *mock.Call -} - -// Staking is a helper method to define mock.On call -func (_e 
*QueryClient_Expecter) Staking() *QueryClient_Staking_Call { - return &QueryClient_Staking_Call{Call: _e.mock.On("Staking")} -} - -func (_c *QueryClient_Staking_Call) Run(run func()) *QueryClient_Staking_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *QueryClient_Staking_Call) Return(_a0 stakingtypes.QueryClient) *QueryClient_Staking_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *QueryClient_Staking_Call) RunAndReturn(run func() stakingtypes.QueryClient) *QueryClient_Staking_Call { - _c.Call.Return(run) - return _c -} - -// Upgrade provides a mock function with given fields: -func (_m *QueryClient) Upgrade() upgradetypes.QueryClient { - ret := _m.Called() - - if len(ret) == 0 { - panic("no return value specified for Upgrade") - } - - var r0 upgradetypes.QueryClient - if rf, ok := ret.Get(0).(func() upgradetypes.QueryClient); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(upgradetypes.QueryClient) - } - } - - return r0 -} - -// QueryClient_Upgrade_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Upgrade' -type QueryClient_Upgrade_Call struct { - *mock.Call -} - -// Upgrade is a helper method to define mock.On call -func (_e *QueryClient_Expecter) Upgrade() *QueryClient_Upgrade_Call { - return &QueryClient_Upgrade_Call{Call: _e.mock.On("Upgrade")} -} - -func (_c *QueryClient_Upgrade_Call) Run(run func()) *QueryClient_Upgrade_Call { - _c.Call.Run(func(args mock.Arguments) { - run() - }) - return _c -} - -func (_c *QueryClient_Upgrade_Call) Return(_a0 upgradetypes.QueryClient) *QueryClient_Upgrade_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *QueryClient_Upgrade_Call) RunAndReturn(run func() upgradetypes.QueryClient) *QueryClient_Upgrade_Call { - _c.Call.Return(run) - return _c -} - -// NewQueryClient creates a new instance of QueryClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. -func NewQueryClient(t interface { - mock.TestingT - Cleanup(func()) -}) *QueryClient { - mock := &QueryClient{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/go/node/client/v1beta2/mocks/tx_client.go b/go/node/client/v1beta2/mocks/tx_client.go deleted file mode 100644 index f0402d63..00000000 --- a/go/node/client/v1beta2/mocks/tx_client.go +++ /dev/null @@ -1,113 +0,0 @@ -// Code generated by mockery v2.42.0. DO NOT EDIT. - -package mocks - -import ( - context "context" - - types "github.com/cosmos/cosmos-sdk/types" - mock "github.com/stretchr/testify/mock" - - v1beta2 "github.com/akash-network/akash-api/go/node/client/v1beta2" -) - -// TxClient is an autogenerated mock type for the TxClient type -type TxClient struct { - mock.Mock -} - -type TxClient_Expecter struct { - mock *mock.Mock -} - -func (_m *TxClient) EXPECT() *TxClient_Expecter { - return &TxClient_Expecter{mock: &_m.Mock} -} - -// Broadcast provides a mock function with given fields: _a0, _a1, _a2 -func (_m *TxClient) Broadcast(_a0 context.Context, _a1 []types.Msg, _a2 ...v1beta2.BroadcastOption) (interface{}, error) { - _va := make([]interface{}, len(_a2)) - for _i := range _a2 { - _va[_i] = _a2[_i] - } - var _ca []interface{} - _ca = append(_ca, _a0, _a1) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) 
- - if len(ret) == 0 { - panic("no return value specified for Broadcast") - } - - var r0 interface{} - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, []types.Msg, ...v1beta2.BroadcastOption) (interface{}, error)); ok { - return rf(_a0, _a1, _a2...) - } - if rf, ok := ret.Get(0).(func(context.Context, []types.Msg, ...v1beta2.BroadcastOption) interface{}); ok { - r0 = rf(_a0, _a1, _a2...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(interface{}) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, []types.Msg, ...v1beta2.BroadcastOption) error); ok { - r1 = rf(_a0, _a1, _a2...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// TxClient_Broadcast_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Broadcast' -type TxClient_Broadcast_Call struct { - *mock.Call -} - -// Broadcast is a helper method to define mock.On call -// - _a0 context.Context -// - _a1 []types.Msg -// - _a2 ...v1beta2.BroadcastOption -func (_e *TxClient_Expecter) Broadcast(_a0 interface{}, _a1 interface{}, _a2 ...interface{}) *TxClient_Broadcast_Call { - return &TxClient_Broadcast_Call{Call: _e.mock.On("Broadcast", - append([]interface{}{_a0, _a1}, _a2...)...)} -} - -func (_c *TxClient_Broadcast_Call) Run(run func(_a0 context.Context, _a1 []types.Msg, _a2 ...v1beta2.BroadcastOption)) *TxClient_Broadcast_Call { - _c.Call.Run(func(args mock.Arguments) { - variadicArgs := make([]v1beta2.BroadcastOption, len(args)-2) - for i, a := range args[2:] { - if a != nil { - variadicArgs[i] = a.(v1beta2.BroadcastOption) - } - } - run(args[0].(context.Context), args[1].([]types.Msg), variadicArgs...) - }) - return _c -} - -func (_c *TxClient_Broadcast_Call) Return(_a0 interface{}, _a1 error) *TxClient_Broadcast_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *TxClient_Broadcast_Call) RunAndReturn(run func(context.Context, []types.Msg, ...v1beta2.BroadcastOption) (interface{}, error)) *TxClient_Broadcast_Call { - _c.Call.Return(run) - return _c -} - -// NewTxClient creates a new instance of TxClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -// The first argument is typically a *testing.T value. 
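The hunk above removes the mockery-generated TxClient mock together with its typed expecter helpers (EXPECT().Broadcast(...) with Run/Return/RunAndReturn). For orientation only — the block below is not part of the diff — here is a minimal sketch of how that expecter API is typically consumed in a test; the test name, matchers and canned response are assumptions for illustration, not code from this repository.

package mocks_test // hypothetical test package, for illustration only

import (
	"context"
	"testing"

	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/akash-network/akash-api/go/node/client/v1beta2/mocks"
)

func TestBroadcastExpecter(t *testing.T) {
	// NewTxClient wires the mock to t and registers a cleanup that
	// asserts all expectations were met.
	cl := mocks.NewTxClient(t)

	// The typed expecter shadows mock.On("Broadcast", ...): any context
	// and any message slice match, and a canned response is returned.
	cl.EXPECT().
		Broadcast(mock.Anything, mock.Anything).
		Return(&sdk.TxResponse{Code: 0}, nil)

	resp, err := cl.Broadcast(context.Background(), []sdk.Msg{})
	require.NoError(t, err)
	require.NotNil(t, resp)
}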
-func NewTxClient(t interface { - mock.TestingT - Cleanup(func()) -}) *TxClient { - mock := &TxClient{} - mock.Mock.Test(t) - - t.Cleanup(func() { mock.AssertExpectations(t) }) - - return mock -} diff --git a/go/node/client/v1beta2/node.go b/go/node/client/v1beta2/node.go deleted file mode 100644 index 0f764c91..00000000 --- a/go/node/client/v1beta2/node.go +++ /dev/null @@ -1,34 +0,0 @@ -package v1beta2 - -import ( - "context" - - sdkclient "github.com/cosmos/cosmos-sdk/client" - rpcclient "github.com/tendermint/tendermint/rpc/client" - tmrpc "github.com/tendermint/tendermint/rpc/core/types" -) - -var _ NodeClient = (*node)(nil) - -type node struct { - rpc rpcclient.Client -} - -func newNode(cctx sdkclient.Context) *node { - nd := &node{ - rpc: cctx.Client, - } - - return nd -} - -func (nd *node) SyncInfo(ctx context.Context) (*tmrpc.SyncInfo, error) { - status, err := nd.rpc.Status(ctx) - if err != nil { - return nil, err - } - - info := status.SyncInfo - - return &info, nil -} diff --git a/go/node/client/v1beta2/query.go b/go/node/client/v1beta2/query.go deleted file mode 100644 index 47937dc4..00000000 --- a/go/node/client/v1beta2/query.go +++ /dev/null @@ -1,277 +0,0 @@ -package v1beta2 - -import ( - "context" - - "google.golang.org/grpc" - - sdkclient "github.com/cosmos/cosmos-sdk/client" - authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" - "github.com/cosmos/cosmos-sdk/x/authz" - banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" - disttypes "github.com/cosmos/cosmos-sdk/x/distribution/types" - evdtypes "github.com/cosmos/cosmos-sdk/x/evidence/types" - feegranttypes "github.com/cosmos/cosmos-sdk/x/feegrant" - govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" - minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types/proposal" - slashtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" - staketypes "github.com/cosmos/cosmos-sdk/x/staking/types" - upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" - - atypes "github.com/akash-network/akash-api/go/node/audit/v1beta3" - ctypes "github.com/akash-network/akash-api/go/node/cert/v1beta3" - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - mtypes "github.com/akash-network/akash-api/go/node/market/v1beta4" - ptypes "github.com/akash-network/akash-api/go/node/provider/v1beta3" -) - -var _ QueryClient = (*queryClient)(nil) - -type sdkQueryClient struct { - auth authtypes.QueryClient - authz authz.QueryClient - bank banktypes.QueryClient - distr disttypes.QueryClient - evidence evdtypes.QueryClient - feegrant feegranttypes.QueryClient - gov govtypes.QueryClient - mint minttypes.QueryClient - params paramtypes.QueryClient - slashing slashtypes.QueryClient - staking staketypes.QueryClient - upgrade upgradetypes.QueryClient -} - -type queryClient struct { - dclient dtypes.QueryClient - mclient mtypes.QueryClient - pclient ptypes.QueryClient - aclient atypes.QueryClient - cclient ctypes.QueryClient - sdk sdkQueryClient - cctx sdkclient.Context -} - -// NewQueryClient creates new query client instance based on a Cosmos SDK client context. 
-func NewQueryClient(cctx sdkclient.Context) QueryClient { - return newQueryClient(cctx) -} - -func newQueryClient(cctx sdkclient.Context) *queryClient { - return &queryClient{ - dclient: dtypes.NewQueryClient(cctx), - mclient: mtypes.NewQueryClient(cctx), - pclient: ptypes.NewQueryClient(cctx), - aclient: atypes.NewQueryClient(cctx), - cclient: ctypes.NewQueryClient(cctx), - sdk: sdkQueryClient{ - auth: authtypes.NewQueryClient(cctx), - authz: authz.NewQueryClient(cctx), - bank: banktypes.NewQueryClient(cctx), - distr: disttypes.NewQueryClient(cctx), - evidence: evdtypes.NewQueryClient(cctx), - feegrant: feegranttypes.NewQueryClient(cctx), - gov: govtypes.NewQueryClient(cctx), - mint: minttypes.NewQueryClient(cctx), - params: paramtypes.NewQueryClient(cctx), - slashing: slashtypes.NewQueryClient(cctx), - staking: staketypes.NewQueryClient(cctx), - upgrade: upgradetypes.NewQueryClient(cctx), - }, - cctx: cctx, - } -} - -// ClientContext returns the client's Cosmos SDK client context. -func (c *queryClient) ClientContext() sdkclient.Context { - return c.cctx -} - -// Deployments queries deployments. -func (c *queryClient) Deployments(ctx context.Context, in *dtypes.QueryDeploymentsRequest, opts ...grpc.CallOption) (*dtypes.QueryDeploymentsResponse, error) { - if c.dclient == nil { - return &dtypes.QueryDeploymentsResponse{}, ErrClientNotFound - } - return c.dclient.Deployments(ctx, in, opts...) -} - -// Deployment queries a deployment. -func (c *queryClient) Deployment(ctx context.Context, in *dtypes.QueryDeploymentRequest, opts ...grpc.CallOption) (*dtypes.QueryDeploymentResponse, error) { - if c.dclient == nil { - return &dtypes.QueryDeploymentResponse{}, ErrClientNotFound - } - return c.dclient.Deployment(ctx, in, opts...) -} - -// Group queries a group. -func (c *queryClient) Group(ctx context.Context, in *dtypes.QueryGroupRequest, opts ...grpc.CallOption) (*dtypes.QueryGroupResponse, error) { - if c.dclient == nil { - return &dtypes.QueryGroupResponse{}, ErrClientNotFound - } - return c.dclient.Group(ctx, in, opts...) -} - -// Orders queries orders. -func (c *queryClient) Orders(ctx context.Context, in *mtypes.QueryOrdersRequest, opts ...grpc.CallOption) (*mtypes.QueryOrdersResponse, error) { - if c.mclient == nil { - return &mtypes.QueryOrdersResponse{}, ErrClientNotFound - } - return c.mclient.Orders(ctx, in, opts...) -} - -// Order queries an order. -func (c *queryClient) Order(ctx context.Context, in *mtypes.QueryOrderRequest, opts ...grpc.CallOption) (*mtypes.QueryOrderResponse, error) { - if c.mclient == nil { - return &mtypes.QueryOrderResponse{}, ErrClientNotFound - } - return c.mclient.Order(ctx, in, opts...) -} - -// Bids queries bids. -func (c *queryClient) Bids(ctx context.Context, in *mtypes.QueryBidsRequest, opts ...grpc.CallOption) (*mtypes.QueryBidsResponse, error) { - if c.mclient == nil { - return &mtypes.QueryBidsResponse{}, ErrClientNotFound - } - return c.mclient.Bids(ctx, in, opts...) -} - -// Bid queries a specific bid. -func (c *queryClient) Bid(ctx context.Context, in *mtypes.QueryBidRequest, opts ...grpc.CallOption) (*mtypes.QueryBidResponse, error) { - if c.mclient == nil { - return &mtypes.QueryBidResponse{}, ErrClientNotFound - } - return c.mclient.Bid(ctx, in, opts...) -} - -// Leases queries leases. 
-func (c *queryClient) Leases(ctx context.Context, in *mtypes.QueryLeasesRequest, opts ...grpc.CallOption) (*mtypes.QueryLeasesResponse, error) { - if c.mclient == nil { - return &mtypes.QueryLeasesResponse{}, ErrClientNotFound - } - return c.mclient.Leases(ctx, in, opts...) -} - -// Lease queries a lease. -func (c *queryClient) Lease(ctx context.Context, in *mtypes.QueryLeaseRequest, opts ...grpc.CallOption) (*mtypes.QueryLeaseResponse, error) { - if c.mclient == nil { - return &mtypes.QueryLeaseResponse{}, ErrClientNotFound - } - return c.mclient.Lease(ctx, in, opts...) -} - -// Providers queries providers. -func (c *queryClient) Providers(ctx context.Context, in *ptypes.QueryProvidersRequest, opts ...grpc.CallOption) (*ptypes.QueryProvidersResponse, error) { - if c.pclient == nil { - return &ptypes.QueryProvidersResponse{}, ErrClientNotFound - } - return c.pclient.Providers(ctx, in, opts...) -} - -// Provider queries a provider. -func (c *queryClient) Provider(ctx context.Context, in *ptypes.QueryProviderRequest, opts ...grpc.CallOption) (*ptypes.QueryProviderResponse, error) { - if c.pclient == nil { - return &ptypes.QueryProviderResponse{}, ErrClientNotFound - } - return c.pclient.Provider(ctx, in, opts...) -} - -// AllProvidersAttributes queries all providers. -func (c *queryClient) AllProvidersAttributes(ctx context.Context, in *atypes.QueryAllProvidersAttributesRequest, opts ...grpc.CallOption) (*atypes.QueryProvidersResponse, error) { - if c.pclient == nil { - return &atypes.QueryProvidersResponse{}, ErrClientNotFound - } - return c.aclient.AllProvidersAttributes(ctx, in, opts...) -} - -// ProviderAttributes queries all provider signed attributes. -func (c *queryClient) ProviderAttributes(ctx context.Context, in *atypes.QueryProviderAttributesRequest, opts ...grpc.CallOption) (*atypes.QueryProvidersResponse, error) { - if c.pclient == nil { - return &atypes.QueryProvidersResponse{}, ErrClientNotFound - } - return c.aclient.ProviderAttributes(ctx, in, opts...) -} - -// ProviderAuditorAttributes queries provider signed attributes by specific validator. -func (c *queryClient) ProviderAuditorAttributes(ctx context.Context, in *atypes.QueryProviderAuditorRequest, opts ...grpc.CallOption) (*atypes.QueryProvidersResponse, error) { - if c.pclient == nil { - return &atypes.QueryProvidersResponse{}, ErrClientNotFound - } - return c.aclient.ProviderAuditorAttributes(ctx, in, opts...) -} - -// AuditorAttributes queries all providers signed by this validator. -func (c *queryClient) AuditorAttributes(ctx context.Context, in *atypes.QueryAuditorAttributesRequest, opts ...grpc.CallOption) (*atypes.QueryProvidersResponse, error) { - if c.aclient == nil { - return &atypes.QueryProvidersResponse{}, ErrClientNotFound - } - return c.aclient.AuditorAttributes(ctx, in, opts...) -} - -// Certificates queries certificates. -func (c *queryClient) Certificates(ctx context.Context, in *ctypes.QueryCertificatesRequest, opts ...grpc.CallOption) (*ctypes.QueryCertificatesResponse, error) { - if c.cclient == nil { - return &ctypes.QueryCertificatesResponse{}, ErrClientNotFound - } - return c.cclient.Certificates(ctx, in, opts...) -} - -// Auth implements QueryClient by returning the auth Cosmos SDK query client. -func (c *queryClient) Auth() authtypes.QueryClient { - return c.sdk.auth -} - -// Authz implements QueryClient by returning the authz Cosmos SDK query client. 
-func (c *queryClient) Authz() authz.QueryClient { - return c.sdk.authz -} - -// Bank implements QueryClient by returning the bank Cosmos SDK query client. -func (c *queryClient) Bank() banktypes.QueryClient { - return c.sdk.bank -} - -// Distribution implements QueryClient by returning the distribution Cosmos SDK query client. -func (c *queryClient) Distribution() disttypes.QueryClient { - return c.sdk.distr -} - -// Evidence implements QueryClient by returning the evidence Cosmos SDK query client. -func (c *queryClient) Evidence() evdtypes.QueryClient { - return c.sdk.evidence -} - -// Feegrant implements QueryClient by returning the feegrant Cosmos SDK query client. -func (c *queryClient) Feegrant() feegranttypes.QueryClient { - return c.sdk.feegrant -} - -// Gov implements QueryClient by returning the governance Cosmos SDK query client. -func (c *queryClient) Gov() govtypes.QueryClient { - return c.sdk.gov -} - -// Mint implements QueryClient by returning the mint Cosmos SDK query client. -func (c *queryClient) Mint() minttypes.QueryClient { - return c.sdk.mint -} - -// Params implements QueryClient by returning the params Cosmos SDK query client. -func (c *queryClient) Params() paramtypes.QueryClient { - return c.sdk.params -} - -// Slashing implements QueryClient by returning the slashing Cosmos SDK query client. -func (c *queryClient) Slashing() slashtypes.QueryClient { - return c.sdk.slashing -} - -// Staking implements QueryClient by returning the staking Cosmos SDK query client. -func (c *queryClient) Staking() staketypes.QueryClient { - return c.sdk.staking -} - -// Upgrade implements QueryClient by returning the upgrade Cosmos SDK query client. -func (c *queryClient) Upgrade() upgradetypes.QueryClient { - return c.sdk.upgrade -} diff --git a/go/node/client/v1beta2/tx.go b/go/node/client/v1beta2/tx.go deleted file mode 100644 index a0301d94..00000000 --- a/go/node/client/v1beta2/tx.go +++ /dev/null @@ -1,649 +0,0 @@ -package v1beta2 - -import ( - "bufio" - "context" - "encoding/hex" - "errors" - "fmt" - "os" - "strings" - "time" - "unsafe" - - "github.com/boz/go-lifecycle" - "github.com/cosmos/cosmos-sdk/client/flags" - "github.com/cosmos/cosmos-sdk/client/input" - "github.com/edwingeng/deque/v2" - "github.com/gogo/protobuf/proto" - "github.com/tendermint/tendermint/libs/log" - ttypes "github.com/tendermint/tendermint/types" - - sdkclient "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/client/tx" - "github.com/cosmos/cosmos-sdk/crypto/keyring" - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" - - cltypes "github.com/akash-network/akash-api/go/node/client/types" - "github.com/akash-network/akash-api/go/util/ctxlog" -) - -var ( - ErrNotRunning = errors.New("tx client: not running") - ErrSyncTimedOut = errors.New("tx client: timed-out waiting for sequence sync") - ErrNodeCatchingUp = errors.New("tx client: cannot sync from catching up node") - ErrSimulateOffline = errors.New("tx client: cannot simulate tx in offline mode") - ErrBroadcastOffline = errors.New("tx client: cannot broadcast tx in offline mode") - ErrTxCanceledByUser = errors.New("tx client: transaction declined by user input") -) - -const ( - BroadcastDefaultTimeout = 30 * time.Second - BroadcastBlockRetryTimeout = 300 * time.Second - broadcastBlockRetryPeriod = time.Second - sequenceSyncTimeout = 30 * time.Second - - // sadface. - - // Only way to detect the timeout error. 
- // https://github.com/tendermint/tendermint/blob/46e06c97320bc61c4d98d3018f59d47ec69863c9/rpc/core/mempool.go#L124 - timeoutErrorMessage = "timed out waiting for tx to be included in a block" - - // Only way to check for tx not found error. - // https://github.com/tendermint/tendermint/blob/46e06c97320bc61c4d98d3018f59d47ec69863c9/rpc/core/tx.go#L31-L33 - notFoundErrorMessageSuffix = ") not found" -) - -var _ TxClient = (*serialBroadcaster)(nil) - -type ConfirmFn func(string) (bool, error) - -// BroadcastOptions defines the options allowed to configure a transaction broadcast. -type BroadcastOptions struct { - timeoutHeight *uint64 - gasAdjustment *float64 - gas *flags.GasSetting - gasPrices *string - fees *string - note *string - broadcastTimeout *time.Duration - resultAsError bool - skipConfirm *bool - confirmFn ConfirmFn -} - -// BroadcastOption is a function that takes as first argument a pointer to BroadcastOptions and returns an error -// if the option cannot be configured. A number of BroadcastOption functions are available in this package. -type BroadcastOption func(*BroadcastOptions) error - -// WithGasAdjustment returns a BroadcastOption that sets the gas adjustment configuration for the transaction. -func WithGasAdjustment(val float64) BroadcastOption { - return func(options *BroadcastOptions) error { - options.gasAdjustment = new(float64) - *options.gasAdjustment = val - return nil - } -} - -// WithNote returns a BroadcastOption that sets the note configuration for the transaction. -func WithNote(val string) BroadcastOption { - return func(options *BroadcastOptions) error { - options.note = new(string) - *options.note = val - return nil - } -} - -// WithGas returns a BroadcastOption that sets the gas setting configuration for the transaction. -func WithGas(val flags.GasSetting) BroadcastOption { - return func(options *BroadcastOptions) error { - options.gas = new(flags.GasSetting) - *options.gas = val - return nil - } -} - -// WithGasPrices returns a BroadcastOption that sets the gas price configuration for the transaction. -// Gas price is a string of the amount. E.g. "0.25uakt". -func WithGasPrices(val string) BroadcastOption { - return func(options *BroadcastOptions) error { - options.gasPrices = new(string) - *options.gasPrices = val - return nil - } -} - -// WithFees returns a BroadcastOption that sets the fees configuration for the transaction. -func WithFees(val string) BroadcastOption { - return func(options *BroadcastOptions) error { - options.fees = new(string) - *options.fees = val - return nil - } -} - -// WithTimeoutHeight returns a BroadcastOption that sets the timeout height configuration for the transaction. -func WithTimeoutHeight(val uint64) BroadcastOption { - return func(options *BroadcastOptions) error { - options.timeoutHeight = new(uint64) - *options.timeoutHeight = val - return nil - } -} - -// WithResultCodeAsError returns a BroadcastOption that enables the result code as error configuration for the transaction. -func WithResultCodeAsError() BroadcastOption { - return func(opts *BroadcastOptions) error { - opts.resultAsError = true - return nil - } -} - -// WithSkipConfirm returns a BroadcastOption that sets whether to skip or not the confirmation for the transaction. -func WithSkipConfirm(val bool) BroadcastOption { - return func(opts *BroadcastOptions) error { - opts.skipConfirm = new(bool) - *opts.skipConfirm = val - return nil - } -} - -// WithConfirmFn returns a BroadcastOption that sets the ConfirmFn function configuration for the transaction. 
-func WithConfirmFn(val ConfirmFn) BroadcastOption { - return func(opts *BroadcastOptions) error { - opts.confirmFn = val - return nil - } -} - -type broadcastResp struct { - resp interface{} - err error -} - -type broadcastReq struct { - id uintptr - responsech chan<- broadcastResp - msgs []sdk.Msg - opts *BroadcastOptions -} -type broadcastTxs struct { - msgs []sdk.Msg - opts *BroadcastOptions -} - -type seqResp struct { - seq uint64 - err error -} - -type seqReq struct { - curr uint64 - ch chan<- seqResp -} - -type broadcast struct { - donech chan<- error - respch chan<- broadcastResp - msgs []sdk.Msg - opts *BroadcastOptions -} - -type serialBroadcaster struct { - ctx context.Context - cctx sdkclient.Context - info keyring.Info - reqch chan broadcastReq - broadcastch chan broadcast - seqreqch chan seqReq - lc lifecycle.Lifecycle - nd *node - log log.Logger -} - -func newSerialTx(ctx context.Context, cctx sdkclient.Context, nd *node, opts ...cltypes.ClientOption) (*serialBroadcaster, error) { - txf, err := cltypes.NewTxFactory(cctx, opts...) - if err != nil { - return nil, err - } - - keyname := cctx.GetFromName() - info, err := txf.Keybase().Key(keyname) - if err != nil { - info, err = txf.Keybase().KeyByAddress(cctx.GetFromAddress()) - } - - if err != nil { - return nil, err - } - - client := &serialBroadcaster{ - ctx: ctx, - cctx: cctx, - info: info, - lc: lifecycle.New(), - reqch: make(chan broadcastReq, 1), - broadcastch: make(chan broadcast, 1), - seqreqch: make(chan seqReq), - nd: nd, - log: ctxlog.Logger(ctx).With("cmp", "client/broadcaster"), - } - - go client.lc.WatchContext(ctx) - go client.run() - go client.broadcaster(txf) - - if !client.cctx.Offline { - go client.sequenceSync() - } - - return client, nil -} - -// Broadcast broadcasts a transaction. A transaction is composed of 1 or many messages. This allows several -// operations to be performed in a single transaction. -// A transaction broadcast can be configured with an arbitrary number of BroadcastOption. -// This method returns the response as an interface{} instance. If an error occurs when preparing the transaction -// an error is returned. -// A transaction can fail with a given "transaction code" which will not be passed to the error value. -// This needs to be checked by the caller and handled accordingly. 
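For orientation only — the block below is not part of the diff. Given the Broadcast contract described in the comment directly above (several messages may share one transaction, behaviour is tuned via BroadcastOption values, and a failing transaction code is not surfaced through the error return), a hedged usage sketch of the v1beta2 API being removed here might look as follows. The helper name broadcastOne, the option values and the uakt gas price are assumptions for illustration.

package example // hypothetical helper, for illustration only

import (
	"context"
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"

	"github.com/akash-network/akash-api/go/node/client/v1beta2"
)

// broadcastOne submits a single message with a few of the BroadcastOption
// helpers shown above, then inspects the result code itself, since Broadcast
// does not turn a non-zero transaction code into an error.
func broadcastOne(ctx context.Context, cl v1beta2.TxClient, msg sdk.Msg) error {
	resp, err := cl.Broadcast(ctx, []sdk.Msg{msg},
		v1beta2.WithGasAdjustment(1.5),
		v1beta2.WithGasPrices("0.025uakt"),
		v1beta2.WithSkipConfirm(true),
	)
	if err != nil {
		return err // error while preparing or broadcasting the tx
	}

	if txResp, ok := resp.(*sdk.TxResponse); ok && txResp.Code != 0 {
		return fmt.Errorf("tx failed with code %d: %s", txResp.Code, txResp.RawLog)
	}

	return nil
}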
-func (c *serialBroadcaster) Broadcast(ctx context.Context, msgs []sdk.Msg, opts ...BroadcastOption) (interface{}, error) { - bOpts := &BroadcastOptions{ - confirmFn: defaultTxConfirm, - } - - for _, opt := range opts { - if err := opt(bOpts); err != nil { - return nil, err - } - } - - if bOpts.broadcastTimeout == nil { - bOpts.broadcastTimeout = new(time.Duration) - *bOpts.broadcastTimeout = BroadcastDefaultTimeout - } - - responsech := make(chan broadcastResp, 1) - request := broadcastReq{ - responsech: responsech, - msgs: msgs, - opts: bOpts, - } - - request.id = uintptr(unsafe.Pointer(&request)) - - select { - case c.reqch <- request: - case <-ctx.Done(): - return nil, ctx.Err() - case <-c.lc.ShuttingDown(): - return nil, ErrNotRunning - } - - select { - case resp := <-responsech: - // if returned error is sdk error, it is likely to be wrapped response so discard it - // as clients supposed to check Tx code, unless resp is nil, which is error during Tx preparation - if !errors.As(resp.err, &sdkerrors.Error{}) || resp.resp == nil || bOpts.resultAsError { - return resp.resp, resp.err - } - return resp.resp, nil - case <-ctx.Done(): - return nil, ctx.Err() - case <-c.lc.ShuttingDown(): - return nil, ErrNotRunning - } -} - -func (c *serialBroadcaster) run() { - defer c.lc.ShutdownCompleted() - - pending := deque.NewDeque[broadcastReq]() - broadcastCh := c.broadcastch - broadcastDoneCh := make(chan error, 1) - - tryBroadcast := func() { - if pending.Len() == 0 { - return - } - - req := pending.Peek(0) - - select { - case broadcastCh <- broadcast{ - donech: broadcastDoneCh, - respch: req.responsech, - msgs: req.msgs, - opts: req.opts, - }: - broadcastCh = nil - _ = pending.PopFront() - default: - } - } - -loop: - for { - select { - case err := <-c.lc.ShutdownRequest(): - c.lc.ShutdownInitiated(err) - break loop - case req := <-c.reqch: - pending.PushBack(req) - - tryBroadcast() - case err := <-broadcastDoneCh: - broadcastCh = c.broadcastch - - if err != nil { - c.log.Error("unable to broadcast messages", "error", err) - } - tryBroadcast() - } - } -} - -func deriveTxfFromOptions(txf tx.Factory, opts *BroadcastOptions) tx.Factory { - if opt := opts.note; opt != nil { - txf = txf.WithMemo(*opt) - } - - if opt := opts.gas; opt != nil { - txf = txf.WithGas(opt.Gas).WithSimulateAndExecute(opt.Simulate) - } - - if opt := opts.fees; opt != nil { - txf = txf.WithFees(*opt) - } - - if opt := opts.gasPrices; opt != nil { - txf = txf.WithGasPrices(*opt) - } - - if opt := opts.timeoutHeight; opt != nil { - txf = txf.WithTimeoutHeight(*opt) - } - - if opt := opts.gasAdjustment; opt != nil { - txf = txf.WithGasAdjustment(*opt) - } - - return txf -} - -func (c *serialBroadcaster) broadcaster(ptxf tx.Factory) { - syncSequence := func(f tx.Factory, rErr error) (uint64, bool) { - if rErr != nil { - if sdkerrors.ErrWrongSequence.Is(rErr) { - // attempt to sync account sequence - if rSeq, err := c.syncAccountSequence(f.Sequence()); err == nil { - return rSeq, true - } - - return f.Sequence(), true - } - } - - return f.Sequence(), false - } - - for { - select { - case <-c.lc.ShuttingDown(): - return - case req := <-c.broadcastch: - var err error - var resp interface{} - - done: - for i := 0; i < 2; i++ { - txf := deriveTxfFromOptions(ptxf, req.opts) - if c.cctx.GenerateOnly { - resp, err = c.generateTxs(txf, req.msgs...) 
- break done - } - - var rseq uint64 - txs := broadcastTxs{ - msgs: req.msgs, - opts: req.opts, - } - - resp, rseq, err = c.broadcastTxs(txf, txs) - ptxf = ptxf.WithSequence(rseq) - - rSeq, synced := syncSequence(ptxf, err) - ptxf = ptxf.WithSequence(rSeq) - - if !synced { - break done - } - } - - req.respch <- broadcastResp{ - resp: resp, - err: err, - } - - terr := &sdkerrors.Error{} - if !c.cctx.GenerateOnly && errors.Is(err, terr) { - rSeq, _ := syncSequence(ptxf, err) - ptxf = ptxf.WithSequence(rSeq) - } - - select { - case <-c.lc.ShuttingDown(): - return - case req.donech <- err: - } - } - } -} - -func (c *serialBroadcaster) sequenceSync() { - for { - select { - case <-c.lc.ShuttingDown(): - return - case req := <-c.seqreqch: - // reply back with current value if any error to occur - seq := seqResp{ - seq: req.curr, - } - - ndStatus, err := c.nd.SyncInfo(c.ctx) - if err != nil { - c.log.Error("cannot obtain node status to sync account sequence", "err", err) - seq.err = err - } - - if err == nil && ndStatus.CatchingUp { - c.log.Error("cannot sync account sequence from node that is catching up") - err = ErrNodeCatchingUp - } - - if err == nil { - // query sequence number - if _, seq.seq, err = c.cctx.AccountRetriever.GetAccountNumberSequence(c.cctx, c.info.GetAddress()); err != nil { - c.log.Error("error requesting account", "err", err) - seq.err = err - } - } - - select { - case req.ch <- seq: - case <-c.lc.ShuttingDown(): - } - } - } -} - -func (c *serialBroadcaster) generateTxs(txf tx.Factory, msgs ...sdk.Msg) ([]byte, error) { - if txf.SimulateAndExecute() { - if c.cctx.Offline { - return nil, ErrSimulateOffline - } - - _, adjusted, err := tx.CalculateGas(c.cctx, txf, msgs...) - if err != nil { - return nil, err - } - - txf = txf.WithGas(adjusted) - } - - utx, err := tx.BuildUnsignedTx(txf, msgs...) - if err != nil { - return nil, err - } - - data, err := c.cctx.TxConfig.TxJSONEncoder()(utx.GetTx()) - if err != nil { - return nil, err - } - - return data, nil -} - -func defaultTxConfirm(txn string) (bool, error) { - _, _ = fmt.Printf("%s\n\n", txn) - - buf := bufio.NewReader(os.Stdin) - - return input.GetConfirmation("confirm transaction before signing and broadcasting", buf, os.Stdin) -} - -func (c *serialBroadcaster) broadcastTxs(txf tx.Factory, txs broadcastTxs) (interface{}, uint64, error) { - var err error - var resp proto.Message - - if txf.SimulateAndExecute() || c.cctx.Simulate { - var adjusted uint64 - resp, adjusted, err = tx.CalculateGas(c.cctx, txf, txs.msgs...) - if err != nil { - return nil, txf.Sequence(), err - } - - txf = txf.WithGas(adjusted) - } - - if c.cctx.Simulate { - return resp, txf.Sequence(), nil - } - - txn, err := tx.BuildUnsignedTx(txf, txs.msgs...) 
- if err != nil { - return nil, txf.Sequence(), err - } - - if c.cctx.Offline { - return nil, txf.Sequence(), ErrBroadcastOffline - } - - if !c.cctx.SkipConfirm { - out, err := c.cctx.TxConfig.TxJSONEncoder()(txn.GetTx()) - if err != nil { - return nil, txf.Sequence(), err - } - - isYes, err := txs.opts.confirmFn(string(out)) - if err != nil { - return nil, txf.Sequence(), err - } - - if !isYes { - return nil, txf.Sequence(), ErrTxCanceledByUser - } - } - - txn.SetFeeGranter(c.cctx.GetFeeGranterAddress()) - - err = tx.Sign(txf, c.info.GetName(), txn, true) - if err != nil { - return nil, txf.Sequence(), err - } - - bytes, err := c.cctx.TxConfig.TxEncoder()(txn.GetTx()) - if err != nil { - return nil, txf.Sequence(), err - } - - response, err := c.doBroadcast(c.cctx, bytes, *txs.opts.broadcastTimeout) - if err != nil { - return response, txf.Sequence(), err - } - - txf = txf.WithSequence(txf.Sequence() + 1) - - if response.Code != 0 { - return response, txf.Sequence(), sdkerrors.ABCIError(response.Codespace, response.Code, response.RawLog) - } - - return response, txf.Sequence(), nil -} - -func (c *serialBroadcaster) syncAccountSequence(lSeq uint64) (uint64, error) { - ch := make(chan seqResp, 1) - - c.seqreqch <- seqReq{ - curr: lSeq, - ch: ch, - } - - ctx, cancel := context.WithTimeout(c.ctx, sequenceSyncTimeout) - defer cancel() - - select { - case rSeq := <-ch: - return rSeq.seq, rSeq.err - case <-ctx.Done(): - return lSeq, ErrSyncTimedOut - case <-c.lc.ShuttingDown(): - return lSeq, ErrNotRunning - } -} - -func (c *serialBroadcaster) doBroadcast(cctx sdkclient.Context, data []byte, timeout time.Duration) (*sdk.TxResponse, error) { - txb := ttypes.Tx(data) - hash := hex.EncodeToString(txb.Hash()) - - // broadcast-mode=block - // submit with mode commit/block - cres, err := cctx.BroadcastTxCommit(txb) - if err == nil { - // good job - return cres, nil - } else if !strings.HasSuffix(err.Error(), timeoutErrorMessage) { - return cres, err - } - - // timeout error, continue on to retry - // loop - lctx, cancel := context.WithTimeout(c.ctx, timeout) - defer cancel() - - for lctx.Err() == nil { - // wait up to one second - select { - case <-lctx.Done(): - return cres, err - case <-time.After(broadcastBlockRetryPeriod): - } - - // check transaction - // https://github.com/cosmos/cosmos-sdk/pull/8734 - res, err := authtx.QueryTx(cctx, hash) - if err == nil { - return res, nil - } - - // if it's not a "not found" error, return - if !strings.HasSuffix(err.Error(), notFoundErrorMessageSuffix) { - return res, err - } - } - - return cres, lctx.Err() -} diff --git a/go/node/client/v1beta3/client.go b/go/node/client/v1beta3/client.go new file mode 100644 index 00000000..7d4933f6 --- /dev/null +++ b/go/node/client/v1beta3/client.go @@ -0,0 +1,174 @@ +package v1beta3 + +import ( + "context" + "fmt" + + "github.com/cosmos/gogoproto/proto" + + tmrpc "github.com/cometbft/cometbft/rpc/core/types" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/cosmos/cosmos-sdk/x/authz" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + disttypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + evdtypes "github.com/cosmos/cosmos-sdk/x/evidence/types" + feegranttypes "github.com/cosmos/cosmos-sdk/x/feegrant" + v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + paramstypes 
"github.com/cosmos/cosmos-sdk/x/params/types/proposal" + slashtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + staketypes "github.com/cosmos/cosmos-sdk/x/staking/types" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + + atypes "pkg.akt.dev/go/node/audit/v1" + ctypes "pkg.akt.dev/go/node/cert/v1" + cltypes "pkg.akt.dev/go/node/client/types" + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + mtypes "pkg.akt.dev/go/node/market/v1beta5" + ptypes "pkg.akt.dev/go/node/provider/v1beta4" +) + +// QueryClient is the interface that exposes query modules. +// +//go:generate mockery --name QueryClient --output ./mocks +type QueryClient interface { + Deployment() dtypes.QueryClient + Market() mtypes.QueryClient + Provider() ptypes.QueryClient + Audit() atypes.QueryClient + Certs() ctypes.QueryClient + Auth() authtypes.QueryClient + Authz() authz.QueryClient + Bank() banktypes.QueryClient + Distribution() disttypes.QueryClient + Evidence() evdtypes.QueryClient + Feegrant() feegranttypes.QueryClient + GovLegacy() govtypes.QueryClient + Gov() v1.QueryClient + Mint() minttypes.QueryClient + Slashing() slashtypes.QueryClient + Staking() staketypes.QueryClient + Upgrade() upgradetypes.QueryClient + Params() paramstypes.QueryClient + + ClientContext() sdkclient.Context +} + +// TxClient is the interface that wraps the Broadcast method. +// Broadcast broadcasts a transaction. A transaction is composed of 1 or many messages. This allows several +// operations to be performed in a single transaction. +// A transaction broadcast can be configured with an arbitrary number of BroadcastOption. +// +//go:generate mockery --name TxClient --output ./mocks +type TxClient interface { + BroadcastMsgs(context.Context, []sdk.Msg, ...BroadcastOption) (interface{}, error) + BroadcastTx(context.Context, sdk.Tx, ...BroadcastOption) (interface{}, error) +} + +//go:generate mockery --name NodeClient --output ./mocks +type NodeClient interface { + SyncInfo(context.Context) (*tmrpc.SyncInfo, error) + CurrentBlockHeight(context.Context) (int64, error) +} + +// LightClient is the umbrella interface that exposes every other client's modules. +// +//go:generate mockery --name LightClient --output ./mocks +type LightClient interface { + Query() QueryClient + Node() NodeClient + ClientContext() sdkclient.Context + PrintMessage(interface{}) error +} + +// Client is the umbrella interface that exposes every other client's modules. +// +//go:generate mockery --name Client --output ./mocks +type Client interface { + LightClient + Tx() TxClient +} + +type lightClient struct { + qclient *queryClient + node *node +} + +type client struct { + lightClient + tx TxClient +} + +var ( + _ Client = (*client)(nil) + _ LightClient = (*lightClient)(nil) +) + +// NewClient creates a new client. +func NewClient(ctx context.Context, cctx sdkclient.Context, opts ...cltypes.ClientOption) (Client, error) { + nd := newNode(cctx) + tcl, cctx, err := newSerialTx(ctx, cctx, nd, opts...) + if err != nil { + return nil, err + } + + cl := &client{ + lightClient: lightClient{ + qclient: newQueryClient(cctx), + node: nd, + }, + tx: tcl, + } + + return cl, nil +} + +// NewLightClient creates a new client. +func NewLightClient(cctx sdkclient.Context) (LightClient, error) { + cl := &lightClient{ + qclient: newQueryClient(cctx), + node: newNode(cctx), + } + + return cl, nil +} + +// Tx implements Client by returning the TxClient instance of the client. 
+func (cl *client) Tx() TxClient { + return cl.tx +} + +// Query implements Client by returning the QueryClient instance of the client. +func (cl *lightClient) Query() QueryClient { + return cl.qclient +} + +// Node implements Client by returning the NodeClient instance of the client. +func (cl *lightClient) Node() NodeClient { + return cl.node +} + +// ClientContext implements Client by returning the Cosmos SDK client context instance of the client. +func (cl *lightClient) ClientContext() sdkclient.Context { + return cl.qclient.cctx +} + +// PrintMessage implements Client by printing the raw message passed as parameter. +func (cl *lightClient) PrintMessage(msg interface{}) error { + var err error + + switch m := msg.(type) { + case proto.Message: + err = cl.qclient.cctx.PrintProto(m) + case []byte: + err = cl.qclient.cctx.PrintString(fmt.Sprintf("%s\n", string(m))) + default: + err = cl.qclient.cctx.PrintObjectLegacy(m) + } + + return err +} diff --git a/go/node/client/v1beta3/errors.go b/go/node/client/v1beta3/errors.go new file mode 100644 index 00000000..10ebaedb --- /dev/null +++ b/go/node/client/v1beta3/errors.go @@ -0,0 +1,11 @@ +package v1beta3 + +import ( + "errors" +) + +var ( + // ErrClientNotFound is a new error with message "Client not found" + ErrClientNotFound = errors.New("client not found") + ErrNodeNotSynced = errors.New("rpc node is not catching up") +) diff --git a/go/node/client/v1beta3/mocks/client.go b/go/node/client/v1beta3/mocks/client.go new file mode 100644 index 00000000..46b97efd --- /dev/null +++ b/go/node/client/v1beta3/mocks/client.go @@ -0,0 +1,269 @@ +// Code generated by mockery v2.45.0. DO NOT EDIT. + +package mocks + +import ( + client "github.com/cosmos/cosmos-sdk/client" + mock "github.com/stretchr/testify/mock" + + v1beta3 "pkg.akt.dev/go/node/client/v1beta3" +) + +// Client is an autogenerated mock type for the Client type +type Client struct { + mock.Mock +} + +type Client_Expecter struct { + mock *mock.Mock +} + +func (_m *Client) EXPECT() *Client_Expecter { + return &Client_Expecter{mock: &_m.Mock} +} + +// ClientContext provides a mock function with given fields: +func (_m *Client) ClientContext() client.Context { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ClientContext") + } + + var r0 client.Context + if rf, ok := ret.Get(0).(func() client.Context); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(client.Context) + } + + return r0 +} + +// Client_ClientContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ClientContext' +type Client_ClientContext_Call struct { + *mock.Call +} + +// ClientContext is a helper method to define mock.On call +func (_e *Client_Expecter) ClientContext() *Client_ClientContext_Call { + return &Client_ClientContext_Call{Call: _e.mock.On("ClientContext")} +} + +func (_c *Client_ClientContext_Call) Run(run func()) *Client_ClientContext_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Client_ClientContext_Call) Return(_a0 client.Context) *Client_ClientContext_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Client_ClientContext_Call) RunAndReturn(run func() client.Context) *Client_ClientContext_Call { + _c.Call.Return(run) + return _c +} + +// Node provides a mock function with given fields: +func (_m *Client) Node() v1beta3.NodeClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Node") + } + + var r0 v1beta3.NodeClient + if rf, ok := ret.Get(0).(func() 
v1beta3.NodeClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(v1beta3.NodeClient) + } + } + + return r0 +} + +// Client_Node_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Node' +type Client_Node_Call struct { + *mock.Call +} + +// Node is a helper method to define mock.On call +func (_e *Client_Expecter) Node() *Client_Node_Call { + return &Client_Node_Call{Call: _e.mock.On("Node")} +} + +func (_c *Client_Node_Call) Run(run func()) *Client_Node_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Client_Node_Call) Return(_a0 v1beta3.NodeClient) *Client_Node_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Client_Node_Call) RunAndReturn(run func() v1beta3.NodeClient) *Client_Node_Call { + _c.Call.Return(run) + return _c +} + +// PrintMessage provides a mock function with given fields: _a0 +func (_m *Client) PrintMessage(_a0 interface{}) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for PrintMessage") + } + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Client_PrintMessage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PrintMessage' +type Client_PrintMessage_Call struct { + *mock.Call +} + +// PrintMessage is a helper method to define mock.On call +// - _a0 interface{} +func (_e *Client_Expecter) PrintMessage(_a0 interface{}) *Client_PrintMessage_Call { + return &Client_PrintMessage_Call{Call: _e.mock.On("PrintMessage", _a0)} +} + +func (_c *Client_PrintMessage_Call) Run(run func(_a0 interface{})) *Client_PrintMessage_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(interface{})) + }) + return _c +} + +func (_c *Client_PrintMessage_Call) Return(_a0 error) *Client_PrintMessage_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Client_PrintMessage_Call) RunAndReturn(run func(interface{}) error) *Client_PrintMessage_Call { + _c.Call.Return(run) + return _c +} + +// Query provides a mock function with given fields: +func (_m *Client) Query() v1beta3.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Query") + } + + var r0 v1beta3.QueryClient + if rf, ok := ret.Get(0).(func() v1beta3.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(v1beta3.QueryClient) + } + } + + return r0 +} + +// Client_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' +type Client_Query_Call struct { + *mock.Call +} + +// Query is a helper method to define mock.On call +func (_e *Client_Expecter) Query() *Client_Query_Call { + return &Client_Query_Call{Call: _e.mock.On("Query")} +} + +func (_c *Client_Query_Call) Run(run func()) *Client_Query_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Client_Query_Call) Return(_a0 v1beta3.QueryClient) *Client_Query_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Client_Query_Call) RunAndReturn(run func() v1beta3.QueryClient) *Client_Query_Call { + _c.Call.Return(run) + return _c +} + +// Tx provides a mock function with given fields: +func (_m *Client) Tx() v1beta3.TxClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Tx") + } + + var r0 v1beta3.TxClient + if rf, ok := ret.Get(0).(func() v1beta3.TxClient); ok { + r0 = rf() + } else { + if 
ret.Get(0) != nil { + r0 = ret.Get(0).(v1beta3.TxClient) + } + } + + return r0 +} + +// Client_Tx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Tx' +type Client_Tx_Call struct { + *mock.Call +} + +// Tx is a helper method to define mock.On call +func (_e *Client_Expecter) Tx() *Client_Tx_Call { + return &Client_Tx_Call{Call: _e.mock.On("Tx")} +} + +func (_c *Client_Tx_Call) Run(run func()) *Client_Tx_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Client_Tx_Call) Return(_a0 v1beta3.TxClient) *Client_Tx_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Client_Tx_Call) RunAndReturn(run func() v1beta3.TxClient) *Client_Tx_Call { + _c.Call.Return(run) + return _c +} + +// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClient(t interface { + mock.TestingT + Cleanup(func()) +}) *Client { + mock := &Client{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/go/node/client/v1beta3/mocks/light_client.go b/go/node/client/v1beta3/mocks/light_client.go new file mode 100644 index 00000000..4aefbd7e --- /dev/null +++ b/go/node/client/v1beta3/mocks/light_client.go @@ -0,0 +1,222 @@ +// Code generated by mockery v2.45.0. DO NOT EDIT. + +package mocks + +import ( + client "github.com/cosmos/cosmos-sdk/client" + mock "github.com/stretchr/testify/mock" + + v1beta3 "pkg.akt.dev/go/node/client/v1beta3" +) + +// LightClient is an autogenerated mock type for the LightClient type +type LightClient struct { + mock.Mock +} + +type LightClient_Expecter struct { + mock *mock.Mock +} + +func (_m *LightClient) EXPECT() *LightClient_Expecter { + return &LightClient_Expecter{mock: &_m.Mock} +} + +// ClientContext provides a mock function with given fields: +func (_m *LightClient) ClientContext() client.Context { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ClientContext") + } + + var r0 client.Context + if rf, ok := ret.Get(0).(func() client.Context); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(client.Context) + } + + return r0 +} + +// LightClient_ClientContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ClientContext' +type LightClient_ClientContext_Call struct { + *mock.Call +} + +// ClientContext is a helper method to define mock.On call +func (_e *LightClient_Expecter) ClientContext() *LightClient_ClientContext_Call { + return &LightClient_ClientContext_Call{Call: _e.mock.On("ClientContext")} +} + +func (_c *LightClient_ClientContext_Call) Run(run func()) *LightClient_ClientContext_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *LightClient_ClientContext_Call) Return(_a0 client.Context) *LightClient_ClientContext_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *LightClient_ClientContext_Call) RunAndReturn(run func() client.Context) *LightClient_ClientContext_Call { + _c.Call.Return(run) + return _c +} + +// Node provides a mock function with given fields: +func (_m *LightClient) Node() v1beta3.NodeClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Node") + } + + var r0 v1beta3.NodeClient + if rf, ok := ret.Get(0).(func() v1beta3.NodeClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(v1beta3.NodeClient) + } + } + + return r0 +} + +// LightClient_Node_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Node' +type LightClient_Node_Call struct { + *mock.Call +} + +// Node is a helper method to define mock.On call +func (_e *LightClient_Expecter) Node() *LightClient_Node_Call { + return &LightClient_Node_Call{Call: _e.mock.On("Node")} +} + +func (_c *LightClient_Node_Call) Run(run func()) *LightClient_Node_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *LightClient_Node_Call) Return(_a0 v1beta3.NodeClient) *LightClient_Node_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *LightClient_Node_Call) RunAndReturn(run func() v1beta3.NodeClient) *LightClient_Node_Call { + _c.Call.Return(run) + return _c +} + +// PrintMessage provides a mock function with given fields: _a0 +func (_m *LightClient) PrintMessage(_a0 interface{}) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for PrintMessage") + } + + var r0 error + if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// LightClient_PrintMessage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PrintMessage' +type LightClient_PrintMessage_Call struct { + *mock.Call +} + +// PrintMessage is a helper method to define mock.On call +// - _a0 interface{} +func (_e *LightClient_Expecter) PrintMessage(_a0 interface{}) *LightClient_PrintMessage_Call { + return &LightClient_PrintMessage_Call{Call: _e.mock.On("PrintMessage", _a0)} +} + +func (_c *LightClient_PrintMessage_Call) Run(run func(_a0 interface{})) *LightClient_PrintMessage_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(interface{})) + }) + return _c +} + +func (_c *LightClient_PrintMessage_Call) Return(_a0 error) *LightClient_PrintMessage_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *LightClient_PrintMessage_Call) RunAndReturn(run func(interface{}) error) *LightClient_PrintMessage_Call { + _c.Call.Return(run) + return _c +} + +// Query provides a mock function with given fields: +func (_m *LightClient) Query() v1beta3.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Query") + } + + var r0 v1beta3.QueryClient + if rf, ok := ret.Get(0).(func() v1beta3.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(v1beta3.QueryClient) + } + } + + return r0 +} + +// LightClient_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' +type LightClient_Query_Call struct { + *mock.Call +} + +// Query is a helper method to define mock.On call +func (_e *LightClient_Expecter) Query() *LightClient_Query_Call { + return &LightClient_Query_Call{Call: _e.mock.On("Query")} +} + +func (_c *LightClient_Query_Call) Run(run func()) *LightClient_Query_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *LightClient_Query_Call) Return(_a0 v1beta3.QueryClient) *LightClient_Query_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *LightClient_Query_Call) RunAndReturn(run func() v1beta3.QueryClient) *LightClient_Query_Call { + _c.Call.Return(run) + return _c +} + +// NewLightClient creates a new instance of LightClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewLightClient(t interface { + mock.TestingT + Cleanup(func()) +}) *LightClient { + mock := &LightClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/go/node/client/v1beta3/mocks/node_client.go b/go/node/client/v1beta3/mocks/node_client.go new file mode 100644 index 00000000..27f0dd35 --- /dev/null +++ b/go/node/client/v1beta3/mocks/node_client.go @@ -0,0 +1,151 @@ +// Code generated by mockery v2.45.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + coretypes "github.com/cometbft/cometbft/rpc/core/types" + mock "github.com/stretchr/testify/mock" +) + +// NodeClient is an autogenerated mock type for the NodeClient type +type NodeClient struct { + mock.Mock +} + +type NodeClient_Expecter struct { + mock *mock.Mock +} + +func (_m *NodeClient) EXPECT() *NodeClient_Expecter { + return &NodeClient_Expecter{mock: &_m.Mock} +} + +// CurrentBlockHeight provides a mock function with given fields: _a0 +func (_m *NodeClient) CurrentBlockHeight(_a0 context.Context) (int64, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for CurrentBlockHeight") + } + + var r0 int64 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (int64, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) int64); ok { + r0 = rf(_a0) + } else { + r0 = ret.Get(0).(int64) + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NodeClient_CurrentBlockHeight_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CurrentBlockHeight' +type NodeClient_CurrentBlockHeight_Call struct { + *mock.Call +} + +// CurrentBlockHeight is a helper method to define mock.On call +// - _a0 context.Context +func (_e *NodeClient_Expecter) CurrentBlockHeight(_a0 interface{}) *NodeClient_CurrentBlockHeight_Call { + return &NodeClient_CurrentBlockHeight_Call{Call: _e.mock.On("CurrentBlockHeight", _a0)} +} + +func (_c *NodeClient_CurrentBlockHeight_Call) Run(run func(_a0 context.Context)) *NodeClient_CurrentBlockHeight_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *NodeClient_CurrentBlockHeight_Call) Return(_a0 int64, _a1 error) *NodeClient_CurrentBlockHeight_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *NodeClient_CurrentBlockHeight_Call) RunAndReturn(run func(context.Context) (int64, error)) *NodeClient_CurrentBlockHeight_Call { + _c.Call.Return(run) + return _c +} + +// SyncInfo provides a mock function with given fields: _a0 +func (_m *NodeClient) SyncInfo(_a0 context.Context) (*coretypes.SyncInfo, error) { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for SyncInfo") + } + + var r0 *coretypes.SyncInfo + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.SyncInfo, error)); ok { + return rf(_a0) + } + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.SyncInfo); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.SyncInfo) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NodeClient_SyncInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SyncInfo' +type NodeClient_SyncInfo_Call struct { + 
*mock.Call +} + +// SyncInfo is a helper method to define mock.On call +// - _a0 context.Context +func (_e *NodeClient_Expecter) SyncInfo(_a0 interface{}) *NodeClient_SyncInfo_Call { + return &NodeClient_SyncInfo_Call{Call: _e.mock.On("SyncInfo", _a0)} +} + +func (_c *NodeClient_SyncInfo_Call) Run(run func(_a0 context.Context)) *NodeClient_SyncInfo_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *NodeClient_SyncInfo_Call) Return(_a0 *coretypes.SyncInfo, _a1 error) *NodeClient_SyncInfo_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *NodeClient_SyncInfo_Call) RunAndReturn(run func(context.Context) (*coretypes.SyncInfo, error)) *NodeClient_SyncInfo_Call { + _c.Call.Return(run) + return _c +} + +// NewNodeClient creates a new instance of NodeClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNodeClient(t interface { + mock.TestingT + Cleanup(func()) +}) *NodeClient { + mock := &NodeClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/go/node/client/v1beta3/mocks/query_client.go b/go/node/client/v1beta3/mocks/query_client.go new file mode 100644 index 00000000..3c22e5f5 --- /dev/null +++ b/go/node/client/v1beta3/mocks/query_client.go @@ -0,0 +1,913 @@ +// Code generated by mockery v2.45.0. DO NOT EDIT. + +package mocks + +import ( + authz "github.com/cosmos/cosmos-sdk/x/authz" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + + certv1 "pkg.akt.dev/go/node/cert/v1" + + client "github.com/cosmos/cosmos-sdk/client" + + distributiontypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + + evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types" + + feegrant "github.com/cosmos/cosmos-sdk/x/feegrant" + + govv1 "cosmossdk.io/api/cosmos/gov/v1" + + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + + mock "github.com/stretchr/testify/mock" + + providerv1beta4 "pkg.akt.dev/go/node/provider/v1beta4" + + slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + types "github.com/cosmos/cosmos-sdk/x/auth/types" + + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + + v1 "pkg.akt.dev/go/node/audit/v1" + + v1beta1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + + v1beta4 "pkg.akt.dev/go/node/deployment/v1beta4" + + v1beta5 "pkg.akt.dev/go/node/market/v1beta5" +) + +// QueryClient is an autogenerated mock type for the QueryClient type +type QueryClient struct { + mock.Mock +} + +type QueryClient_Expecter struct { + mock *mock.Mock +} + +func (_m *QueryClient) EXPECT() *QueryClient_Expecter { + return &QueryClient_Expecter{mock: &_m.Mock} +} + +// Audit provides a mock function with given fields: +func (_m *QueryClient) Audit() v1.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Audit") + } + + var r0 v1.QueryClient + if rf, ok := ret.Get(0).(func() v1.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(v1.QueryClient) + } + } + + return r0 +} + +// QueryClient_Audit_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Audit' +type QueryClient_Audit_Call struct { + *mock.Call +} + +// Audit is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Audit() *QueryClient_Audit_Call { + return 
&QueryClient_Audit_Call{Call: _e.mock.On("Audit")} +} + +func (_c *QueryClient_Audit_Call) Run(run func()) *QueryClient_Audit_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Audit_Call) Return(_a0 v1.QueryClient) *QueryClient_Audit_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Audit_Call) RunAndReturn(run func() v1.QueryClient) *QueryClient_Audit_Call { + _c.Call.Return(run) + return _c +} + +// Auth provides a mock function with given fields: +func (_m *QueryClient) Auth() types.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Auth") + } + + var r0 types.QueryClient + if rf, ok := ret.Get(0).(func() types.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.QueryClient) + } + } + + return r0 +} + +// QueryClient_Auth_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Auth' +type QueryClient_Auth_Call struct { + *mock.Call +} + +// Auth is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Auth() *QueryClient_Auth_Call { + return &QueryClient_Auth_Call{Call: _e.mock.On("Auth")} +} + +func (_c *QueryClient_Auth_Call) Run(run func()) *QueryClient_Auth_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Auth_Call) Return(_a0 types.QueryClient) *QueryClient_Auth_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Auth_Call) RunAndReturn(run func() types.QueryClient) *QueryClient_Auth_Call { + _c.Call.Return(run) + return _c +} + +// Authz provides a mock function with given fields: +func (_m *QueryClient) Authz() authz.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Authz") + } + + var r0 authz.QueryClient + if rf, ok := ret.Get(0).(func() authz.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(authz.QueryClient) + } + } + + return r0 +} + +// QueryClient_Authz_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Authz' +type QueryClient_Authz_Call struct { + *mock.Call +} + +// Authz is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Authz() *QueryClient_Authz_Call { + return &QueryClient_Authz_Call{Call: _e.mock.On("Authz")} +} + +func (_c *QueryClient_Authz_Call) Run(run func()) *QueryClient_Authz_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Authz_Call) Return(_a0 authz.QueryClient) *QueryClient_Authz_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Authz_Call) RunAndReturn(run func() authz.QueryClient) *QueryClient_Authz_Call { + _c.Call.Return(run) + return _c +} + +// Bank provides a mock function with given fields: +func (_m *QueryClient) Bank() banktypes.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Bank") + } + + var r0 banktypes.QueryClient + if rf, ok := ret.Get(0).(func() banktypes.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(banktypes.QueryClient) + } + } + + return r0 +} + +// QueryClient_Bank_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Bank' +type QueryClient_Bank_Call struct { + *mock.Call +} + +// Bank is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Bank() *QueryClient_Bank_Call { + return 
&QueryClient_Bank_Call{Call: _e.mock.On("Bank")} +} + +func (_c *QueryClient_Bank_Call) Run(run func()) *QueryClient_Bank_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Bank_Call) Return(_a0 banktypes.QueryClient) *QueryClient_Bank_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Bank_Call) RunAndReturn(run func() banktypes.QueryClient) *QueryClient_Bank_Call { + _c.Call.Return(run) + return _c +} + +// Certs provides a mock function with given fields: +func (_m *QueryClient) Certs() certv1.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Certs") + } + + var r0 certv1.QueryClient + if rf, ok := ret.Get(0).(func() certv1.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(certv1.QueryClient) + } + } + + return r0 +} + +// QueryClient_Certs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Certs' +type QueryClient_Certs_Call struct { + *mock.Call +} + +// Certs is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Certs() *QueryClient_Certs_Call { + return &QueryClient_Certs_Call{Call: _e.mock.On("Certs")} +} + +func (_c *QueryClient_Certs_Call) Run(run func()) *QueryClient_Certs_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Certs_Call) Return(_a0 certv1.QueryClient) *QueryClient_Certs_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Certs_Call) RunAndReturn(run func() certv1.QueryClient) *QueryClient_Certs_Call { + _c.Call.Return(run) + return _c +} + +// ClientContext provides a mock function with given fields: +func (_m *QueryClient) ClientContext() client.Context { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ClientContext") + } + + var r0 client.Context + if rf, ok := ret.Get(0).(func() client.Context); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(client.Context) + } + + return r0 +} + +// QueryClient_ClientContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ClientContext' +type QueryClient_ClientContext_Call struct { + *mock.Call +} + +// ClientContext is a helper method to define mock.On call +func (_e *QueryClient_Expecter) ClientContext() *QueryClient_ClientContext_Call { + return &QueryClient_ClientContext_Call{Call: _e.mock.On("ClientContext")} +} + +func (_c *QueryClient_ClientContext_Call) Run(run func()) *QueryClient_ClientContext_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_ClientContext_Call) Return(_a0 client.Context) *QueryClient_ClientContext_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_ClientContext_Call) RunAndReturn(run func() client.Context) *QueryClient_ClientContext_Call { + _c.Call.Return(run) + return _c +} + +// Deployment provides a mock function with given fields: +func (_m *QueryClient) Deployment() v1beta4.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Deployment") + } + + var r0 v1beta4.QueryClient + if rf, ok := ret.Get(0).(func() v1beta4.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(v1beta4.QueryClient) + } + } + + return r0 +} + +// QueryClient_Deployment_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Deployment' +type QueryClient_Deployment_Call struct { + *mock.Call 
+} + +// Deployment is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Deployment() *QueryClient_Deployment_Call { + return &QueryClient_Deployment_Call{Call: _e.mock.On("Deployment")} +} + +func (_c *QueryClient_Deployment_Call) Run(run func()) *QueryClient_Deployment_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Deployment_Call) Return(_a0 v1beta4.QueryClient) *QueryClient_Deployment_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Deployment_Call) RunAndReturn(run func() v1beta4.QueryClient) *QueryClient_Deployment_Call { + _c.Call.Return(run) + return _c +} + +// Distribution provides a mock function with given fields: +func (_m *QueryClient) Distribution() distributiontypes.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Distribution") + } + + var r0 distributiontypes.QueryClient + if rf, ok := ret.Get(0).(func() distributiontypes.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(distributiontypes.QueryClient) + } + } + + return r0 +} + +// QueryClient_Distribution_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Distribution' +type QueryClient_Distribution_Call struct { + *mock.Call +} + +// Distribution is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Distribution() *QueryClient_Distribution_Call { + return &QueryClient_Distribution_Call{Call: _e.mock.On("Distribution")} +} + +func (_c *QueryClient_Distribution_Call) Run(run func()) *QueryClient_Distribution_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Distribution_Call) Return(_a0 distributiontypes.QueryClient) *QueryClient_Distribution_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Distribution_Call) RunAndReturn(run func() distributiontypes.QueryClient) *QueryClient_Distribution_Call { + _c.Call.Return(run) + return _c +} + +// Evidence provides a mock function with given fields: +func (_m *QueryClient) Evidence() evidencetypes.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Evidence") + } + + var r0 evidencetypes.QueryClient + if rf, ok := ret.Get(0).(func() evidencetypes.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(evidencetypes.QueryClient) + } + } + + return r0 +} + +// QueryClient_Evidence_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Evidence' +type QueryClient_Evidence_Call struct { + *mock.Call +} + +// Evidence is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Evidence() *QueryClient_Evidence_Call { + return &QueryClient_Evidence_Call{Call: _e.mock.On("Evidence")} +} + +func (_c *QueryClient_Evidence_Call) Run(run func()) *QueryClient_Evidence_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Evidence_Call) Return(_a0 evidencetypes.QueryClient) *QueryClient_Evidence_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Evidence_Call) RunAndReturn(run func() evidencetypes.QueryClient) *QueryClient_Evidence_Call { + _c.Call.Return(run) + return _c +} + +// Feegrant provides a mock function with given fields: +func (_m *QueryClient) Feegrant() feegrant.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Feegrant") + } + + var r0 
feegrant.QueryClient + if rf, ok := ret.Get(0).(func() feegrant.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(feegrant.QueryClient) + } + } + + return r0 +} + +// QueryClient_Feegrant_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Feegrant' +type QueryClient_Feegrant_Call struct { + *mock.Call +} + +// Feegrant is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Feegrant() *QueryClient_Feegrant_Call { + return &QueryClient_Feegrant_Call{Call: _e.mock.On("Feegrant")} +} + +func (_c *QueryClient_Feegrant_Call) Run(run func()) *QueryClient_Feegrant_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Feegrant_Call) Return(_a0 feegrant.QueryClient) *QueryClient_Feegrant_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Feegrant_Call) RunAndReturn(run func() feegrant.QueryClient) *QueryClient_Feegrant_Call { + _c.Call.Return(run) + return _c +} + +// Gov provides a mock function with given fields: +func (_m *QueryClient) Gov() govv1.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Gov") + } + + var r0 govv1.QueryClient + if rf, ok := ret.Get(0).(func() govv1.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(govv1.QueryClient) + } + } + + return r0 +} + +// QueryClient_Gov_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Gov' +type QueryClient_Gov_Call struct { + *mock.Call +} + +// Gov is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Gov() *QueryClient_Gov_Call { + return &QueryClient_Gov_Call{Call: _e.mock.On("Gov")} +} + +func (_c *QueryClient_Gov_Call) Run(run func()) *QueryClient_Gov_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Gov_Call) Return(_a0 govv1.QueryClient) *QueryClient_Gov_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Gov_Call) RunAndReturn(run func() govv1.QueryClient) *QueryClient_Gov_Call { + _c.Call.Return(run) + return _c +} + +// GovLegacy provides a mock function with given fields: +func (_m *QueryClient) GovLegacy() v1beta1.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for GovLegacy") + } + + var r0 v1beta1.QueryClient + if rf, ok := ret.Get(0).(func() v1beta1.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(v1beta1.QueryClient) + } + } + + return r0 +} + +// QueryClient_GovLegacy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GovLegacy' +type QueryClient_GovLegacy_Call struct { + *mock.Call +} + +// GovLegacy is a helper method to define mock.On call +func (_e *QueryClient_Expecter) GovLegacy() *QueryClient_GovLegacy_Call { + return &QueryClient_GovLegacy_Call{Call: _e.mock.On("GovLegacy")} +} + +func (_c *QueryClient_GovLegacy_Call) Run(run func()) *QueryClient_GovLegacy_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_GovLegacy_Call) Return(_a0 v1beta1.QueryClient) *QueryClient_GovLegacy_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_GovLegacy_Call) RunAndReturn(run func() v1beta1.QueryClient) *QueryClient_GovLegacy_Call { + _c.Call.Return(run) + return _c +} + +// Market provides a mock function with given fields: +func (_m *QueryClient) Market() v1beta5.QueryClient 
{ + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Market") + } + + var r0 v1beta5.QueryClient + if rf, ok := ret.Get(0).(func() v1beta5.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(v1beta5.QueryClient) + } + } + + return r0 +} + +// QueryClient_Market_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Market' +type QueryClient_Market_Call struct { + *mock.Call +} + +// Market is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Market() *QueryClient_Market_Call { + return &QueryClient_Market_Call{Call: _e.mock.On("Market")} +} + +func (_c *QueryClient_Market_Call) Run(run func()) *QueryClient_Market_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Market_Call) Return(_a0 v1beta5.QueryClient) *QueryClient_Market_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Market_Call) RunAndReturn(run func() v1beta5.QueryClient) *QueryClient_Market_Call { + _c.Call.Return(run) + return _c +} + +// Mint provides a mock function with given fields: +func (_m *QueryClient) Mint() minttypes.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Mint") + } + + var r0 minttypes.QueryClient + if rf, ok := ret.Get(0).(func() minttypes.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(minttypes.QueryClient) + } + } + + return r0 +} + +// QueryClient_Mint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Mint' +type QueryClient_Mint_Call struct { + *mock.Call +} + +// Mint is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Mint() *QueryClient_Mint_Call { + return &QueryClient_Mint_Call{Call: _e.mock.On("Mint")} +} + +func (_c *QueryClient_Mint_Call) Run(run func()) *QueryClient_Mint_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Mint_Call) Return(_a0 minttypes.QueryClient) *QueryClient_Mint_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Mint_Call) RunAndReturn(run func() minttypes.QueryClient) *QueryClient_Mint_Call { + _c.Call.Return(run) + return _c +} + +// Provider provides a mock function with given fields: +func (_m *QueryClient) Provider() providerv1beta4.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Provider") + } + + var r0 providerv1beta4.QueryClient + if rf, ok := ret.Get(0).(func() providerv1beta4.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(providerv1beta4.QueryClient) + } + } + + return r0 +} + +// QueryClient_Provider_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Provider' +type QueryClient_Provider_Call struct { + *mock.Call +} + +// Provider is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Provider() *QueryClient_Provider_Call { + return &QueryClient_Provider_Call{Call: _e.mock.On("Provider")} +} + +func (_c *QueryClient_Provider_Call) Run(run func()) *QueryClient_Provider_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Provider_Call) Return(_a0 providerv1beta4.QueryClient) *QueryClient_Provider_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Provider_Call) RunAndReturn(run func() providerv1beta4.QueryClient) *QueryClient_Provider_Call { 
+ _c.Call.Return(run) + return _c +} + +// Slashing provides a mock function with given fields: +func (_m *QueryClient) Slashing() slashingtypes.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Slashing") + } + + var r0 slashingtypes.QueryClient + if rf, ok := ret.Get(0).(func() slashingtypes.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(slashingtypes.QueryClient) + } + } + + return r0 +} + +// QueryClient_Slashing_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Slashing' +type QueryClient_Slashing_Call struct { + *mock.Call +} + +// Slashing is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Slashing() *QueryClient_Slashing_Call { + return &QueryClient_Slashing_Call{Call: _e.mock.On("Slashing")} +} + +func (_c *QueryClient_Slashing_Call) Run(run func()) *QueryClient_Slashing_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Slashing_Call) Return(_a0 slashingtypes.QueryClient) *QueryClient_Slashing_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Slashing_Call) RunAndReturn(run func() slashingtypes.QueryClient) *QueryClient_Slashing_Call { + _c.Call.Return(run) + return _c +} + +// Staking provides a mock function with given fields: +func (_m *QueryClient) Staking() stakingtypes.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Staking") + } + + var r0 stakingtypes.QueryClient + if rf, ok := ret.Get(0).(func() stakingtypes.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(stakingtypes.QueryClient) + } + } + + return r0 +} + +// QueryClient_Staking_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Staking' +type QueryClient_Staking_Call struct { + *mock.Call +} + +// Staking is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Staking() *QueryClient_Staking_Call { + return &QueryClient_Staking_Call{Call: _e.mock.On("Staking")} +} + +func (_c *QueryClient_Staking_Call) Run(run func()) *QueryClient_Staking_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Staking_Call) Return(_a0 stakingtypes.QueryClient) *QueryClient_Staking_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Staking_Call) RunAndReturn(run func() stakingtypes.QueryClient) *QueryClient_Staking_Call { + _c.Call.Return(run) + return _c +} + +// Upgrade provides a mock function with given fields: +func (_m *QueryClient) Upgrade() upgradetypes.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Upgrade") + } + + var r0 upgradetypes.QueryClient + if rf, ok := ret.Get(0).(func() upgradetypes.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(upgradetypes.QueryClient) + } + } + + return r0 +} + +// QueryClient_Upgrade_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Upgrade' +type QueryClient_Upgrade_Call struct { + *mock.Call +} + +// Upgrade is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Upgrade() *QueryClient_Upgrade_Call { + return &QueryClient_Upgrade_Call{Call: _e.mock.On("Upgrade")} +} + +func (_c *QueryClient_Upgrade_Call) Run(run func()) *QueryClient_Upgrade_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + 
+func (_c *QueryClient_Upgrade_Call) Return(_a0 upgradetypes.QueryClient) *QueryClient_Upgrade_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Upgrade_Call) RunAndReturn(run func() upgradetypes.QueryClient) *QueryClient_Upgrade_Call { + _c.Call.Return(run) + return _c +} + +// NewQueryClient creates a new instance of QueryClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewQueryClient(t interface { + mock.TestingT + Cleanup(func()) +}) *QueryClient { + mock := &QueryClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/go/node/client/v1beta3/mocks/tx_client.go b/go/node/client/v1beta3/mocks/tx_client.go new file mode 100644 index 00000000..0b65ca59 --- /dev/null +++ b/go/node/client/v1beta3/mocks/tx_client.go @@ -0,0 +1,187 @@ +// Code generated by mockery v2.45.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + types "github.com/cosmos/cosmos-sdk/types" + mock "github.com/stretchr/testify/mock" + + v1beta3 "pkg.akt.dev/go/node/client/v1beta3" +) + +// TxClient is an autogenerated mock type for the TxClient type +type TxClient struct { + mock.Mock +} + +type TxClient_Expecter struct { + mock *mock.Mock +} + +func (_m *TxClient) EXPECT() *TxClient_Expecter { + return &TxClient_Expecter{mock: &_m.Mock} +} + +// BroadcastMsgs provides a mock function with given fields: _a0, _a1, _a2 +func (_m *TxClient) BroadcastMsgs(_a0 context.Context, _a1 []types.Msg, _a2 ...v1beta3.BroadcastOption) (interface{}, error) { + _va := make([]interface{}, len(_a2)) + for _i := range _a2 { + _va[_i] = _a2[_i] + } + var _ca []interface{} + _ca = append(_ca, _a0, _a1) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for BroadcastMsgs") + } + + var r0 interface{} + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []types.Msg, ...v1beta3.BroadcastOption) (interface{}, error)); ok { + return rf(_a0, _a1, _a2...) + } + if rf, ok := ret.Get(0).(func(context.Context, []types.Msg, ...v1beta3.BroadcastOption) interface{}); ok { + r0 = rf(_a0, _a1, _a2...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []types.Msg, ...v1beta3.BroadcastOption) error); ok { + r1 = rf(_a0, _a1, _a2...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TxClient_BroadcastMsgs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BroadcastMsgs' +type TxClient_BroadcastMsgs_Call struct { + *mock.Call +} + +// BroadcastMsgs is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 []types.Msg +// - _a2 ...v1beta3.BroadcastOption +func (_e *TxClient_Expecter) BroadcastMsgs(_a0 interface{}, _a1 interface{}, _a2 ...interface{}) *TxClient_BroadcastMsgs_Call { + return &TxClient_BroadcastMsgs_Call{Call: _e.mock.On("BroadcastMsgs", + append([]interface{}{_a0, _a1}, _a2...)...)} +} + +func (_c *TxClient_BroadcastMsgs_Call) Run(run func(_a0 context.Context, _a1 []types.Msg, _a2 ...v1beta3.BroadcastOption)) *TxClient_BroadcastMsgs_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]v1beta3.BroadcastOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(v1beta3.BroadcastOption) + } + } + run(args[0].(context.Context), args[1].([]types.Msg), variadicArgs...) + }) + return _c +} + +func (_c *TxClient_BroadcastMsgs_Call) Return(_a0 interface{}, _a1 error) *TxClient_BroadcastMsgs_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *TxClient_BroadcastMsgs_Call) RunAndReturn(run func(context.Context, []types.Msg, ...v1beta3.BroadcastOption) (interface{}, error)) *TxClient_BroadcastMsgs_Call { + _c.Call.Return(run) + return _c +} + +// BroadcastTx provides a mock function with given fields: _a0, _a1, _a2 +func (_m *TxClient) BroadcastTx(_a0 context.Context, _a1 types.Tx, _a2 ...v1beta3.BroadcastOption) (interface{}, error) { + _va := make([]interface{}, len(_a2)) + for _i := range _a2 { + _va[_i] = _a2[_i] + } + var _ca []interface{} + _ca = append(_ca, _a0, _a1) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for BroadcastTx") + } + + var r0 interface{} + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, types.Tx, ...v1beta3.BroadcastOption) (interface{}, error)); ok { + return rf(_a0, _a1, _a2...) + } + if rf, ok := ret.Get(0).(func(context.Context, types.Tx, ...v1beta3.BroadcastOption) interface{}); ok { + r0 = rf(_a0, _a1, _a2...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, types.Tx, ...v1beta3.BroadcastOption) error); ok { + r1 = rf(_a0, _a1, _a2...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TxClient_BroadcastTx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BroadcastTx' +type TxClient_BroadcastTx_Call struct { + *mock.Call +} + +// BroadcastTx is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 types.Tx +// - _a2 ...v1beta3.BroadcastOption +func (_e *TxClient_Expecter) BroadcastTx(_a0 interface{}, _a1 interface{}, _a2 ...interface{}) *TxClient_BroadcastTx_Call { + return &TxClient_BroadcastTx_Call{Call: _e.mock.On("BroadcastTx", + append([]interface{}{_a0, _a1}, _a2...)...)} +} + +func (_c *TxClient_BroadcastTx_Call) Run(run func(_a0 context.Context, _a1 types.Tx, _a2 ...v1beta3.BroadcastOption)) *TxClient_BroadcastTx_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]v1beta3.BroadcastOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(v1beta3.BroadcastOption) + } + } + run(args[0].(context.Context), args[1].(types.Tx), variadicArgs...) 
+ }) + return _c +} + +func (_c *TxClient_BroadcastTx_Call) Return(_a0 interface{}, _a1 error) *TxClient_BroadcastTx_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *TxClient_BroadcastTx_Call) RunAndReturn(run func(context.Context, types.Tx, ...v1beta3.BroadcastOption) (interface{}, error)) *TxClient_BroadcastTx_Call { + _c.Call.Return(run) + return _c +} + +// NewTxClient creates a new instance of TxClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTxClient(t interface { + mock.TestingT + Cleanup(func()) +}) *TxClient { + mock := &TxClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/go/node/client/v1beta3/node.go b/go/node/client/v1beta3/node.go new file mode 100644 index 00000000..44bd52ad --- /dev/null +++ b/go/node/client/v1beta3/node.go @@ -0,0 +1,43 @@ +package v1beta3 + +import ( + "context" + + tmrpc "github.com/cometbft/cometbft/rpc/core/types" + + sdkclient "github.com/cosmos/cosmos-sdk/client" +) + +var _ NodeClient = (*node)(nil) + +type node struct { + rpc sdkclient.TendermintRPC +} + +func newNode(cctx sdkclient.Context) *node { + nd := &node{ + rpc: cctx.Client, + } + + return nd +} + +func (nd *node) SyncInfo(ctx context.Context) (*tmrpc.SyncInfo, error) { + status, err := nd.rpc.Status(ctx) + if err != nil { + return nil, err + } + + info := status.SyncInfo + + return &info, nil +} + +func (nd *node) CurrentBlockHeight(ctx context.Context) (int64, error) { + info, err := nd.SyncInfo(ctx) + if err != nil { + return 0, err + } + + return info.LatestBlockHeight, nil +} diff --git a/go/node/client/v1beta2/options.go b/go/node/client/v1beta3/options.go similarity index 88% rename from go/node/client/v1beta2/options.go rename to go/node/client/v1beta3/options.go index 3cf1802f..3ace8f1e 100644 --- a/go/node/client/v1beta2/options.go +++ b/go/node/client/v1beta3/options.go @@ -1,4 +1,4 @@ -package v1beta2 +package v1beta3 type ClientOptions struct { tclient TxClient // nolint: unused diff --git a/go/node/client/v1beta3/query.go b/go/node/client/v1beta3/query.go new file mode 100644 index 00000000..a627ac6d --- /dev/null +++ b/go/node/client/v1beta3/query.go @@ -0,0 +1,174 @@ +package v1beta3 + +import ( + sdkclient "github.com/cosmos/cosmos-sdk/client" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/cosmos/cosmos-sdk/x/authz" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + disttypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + evdtypes "github.com/cosmos/cosmos-sdk/x/evidence/types" + feegranttypes "github.com/cosmos/cosmos-sdk/x/feegrant" + v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + paramstypes "github.com/cosmos/cosmos-sdk/x/params/types/proposal" + slashtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + staketypes "github.com/cosmos/cosmos-sdk/x/staking/types" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + + atypes "pkg.akt.dev/go/node/audit/v1" + ctypes "pkg.akt.dev/go/node/cert/v1" + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + mtypes "pkg.akt.dev/go/node/market/v1beta5" + ptypes "pkg.akt.dev/go/node/provider/v1beta4" +) + +var _ QueryClient = (*queryClient)(nil) + +type sdkQueryClient struct { + auth authtypes.QueryClient + authz authz.QueryClient + bank banktypes.QueryClient + 
distr disttypes.QueryClient + evidence evdtypes.QueryClient + feegrant feegranttypes.QueryClient + govLegacy govtypes.QueryClient + gov v1.QueryClient + mint minttypes.QueryClient + slashing slashtypes.QueryClient + staking staketypes.QueryClient + upgrade upgradetypes.QueryClient + params paramstypes.QueryClient +} + +type queryClient struct { + dclient dtypes.QueryClient + mclient mtypes.QueryClient + pclient ptypes.QueryClient + aclient atypes.QueryClient + cclient ctypes.QueryClient + sdk sdkQueryClient + cctx sdkclient.Context +} + +// NewQueryClient creates new query client instance based on a Cosmos SDK client context. +func NewQueryClient(cctx sdkclient.Context) QueryClient { + return newQueryClient(cctx) +} + +func newQueryClient(cctx sdkclient.Context) *queryClient { + return &queryClient{ + dclient: dtypes.NewQueryClient(cctx), + mclient: mtypes.NewQueryClient(cctx), + pclient: ptypes.NewQueryClient(cctx), + aclient: atypes.NewQueryClient(cctx), + cclient: ctypes.NewQueryClient(cctx), + sdk: sdkQueryClient{ + auth: authtypes.NewQueryClient(cctx), + authz: authz.NewQueryClient(cctx), + bank: banktypes.NewQueryClient(cctx), + distr: disttypes.NewQueryClient(cctx), + evidence: evdtypes.NewQueryClient(cctx), + feegrant: feegranttypes.NewQueryClient(cctx), + govLegacy: govtypes.NewQueryClient(cctx), + gov: v1.NewQueryClient(cctx), + mint: minttypes.NewQueryClient(cctx), + slashing: slashtypes.NewQueryClient(cctx), + staking: staketypes.NewQueryClient(cctx), + upgrade: upgradetypes.NewQueryClient(cctx), + params: paramstypes.NewQueryClient(cctx), + }, + cctx: cctx, + } +} + +// ClientContext returns the client's Cosmos SDK client context. +func (c *queryClient) ClientContext() sdkclient.Context { + return c.cctx +} + +func (c *queryClient) Deployment() dtypes.QueryClient { + return c.dclient +} + +func (c *queryClient) Market() mtypes.QueryClient { + return c.mclient +} + +func (c *queryClient) Provider() ptypes.QueryClient { + return c.pclient +} + +func (c *queryClient) Audit() atypes.QueryClient { + return c.aclient +} + +// Certs implements QueryClient by returning the certs Akash SDK query client. +func (c *queryClient) Certs() ctypes.QueryClient { + return c.cclient +} + +// Auth implements QueryClient by returning the auth Cosmos SDK query client. +func (c *queryClient) Auth() authtypes.QueryClient { + return c.sdk.auth +} + +// Authz implements QueryClient by returning the authz Cosmos SDK query client. +func (c *queryClient) Authz() authz.QueryClient { + return c.sdk.authz +} + +// Bank implements QueryClient by returning the bank Cosmos SDK query client. +func (c *queryClient) Bank() banktypes.QueryClient { + return c.sdk.bank +} + +// Distribution implements QueryClient by returning the distribution Cosmos SDK query client. +func (c *queryClient) Distribution() disttypes.QueryClient { + return c.sdk.distr +} + +// Evidence implements QueryClient by returning the evidence Cosmos SDK query client. +func (c *queryClient) Evidence() evdtypes.QueryClient { + return c.sdk.evidence +} + +// Feegrant implements QueryClient by returning the feegrant Cosmos SDK query client. +func (c *queryClient) Feegrant() feegranttypes.QueryClient { + return c.sdk.feegrant +} + +// Gov implements QueryClient by returning the governance Cosmos SDK query client. +func (c *queryClient) Gov() v1.QueryClient { + return c.sdk.gov +} + +// GovLegacy implements QueryClient by returning the governance Cosmos SDK query client. 
+func (c *queryClient) GovLegacy() govtypes.QueryClient { + return c.sdk.govLegacy +} + +// Mint implements QueryClient by returning the mint Cosmos SDK query client. +func (c *queryClient) Mint() minttypes.QueryClient { + return c.sdk.mint +} + +// Slashing implements QueryClient by returning the slashing Cosmos SDK query client. +func (c *queryClient) Slashing() slashtypes.QueryClient { + return c.sdk.slashing +} + +// Staking implements QueryClient by returning the staking Cosmos SDK query client. +func (c *queryClient) Staking() staketypes.QueryClient { + return c.sdk.staking +} + +// Upgrade implements QueryClient by returning the upgrade Cosmos SDK query client. +func (c *queryClient) Upgrade() upgradetypes.QueryClient { + return c.sdk.upgrade +} + +// Params implements QueryClient by returning the params Cosmos SDK query client. +func (c *queryClient) Params() paramstypes.QueryClient { + return c.sdk.params +} diff --git a/go/node/client/v1beta3/result.go b/go/node/client/v1beta3/result.go new file mode 100644 index 00000000..94794e43 --- /dev/null +++ b/go/node/client/v1beta3/result.go @@ -0,0 +1,85 @@ +package v1beta3 + +import ( + "encoding/hex" + "encoding/json" + "strings" + + cbcoretypes "github.com/cometbft/cometbft/rpc/core/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// NewResponseFormatBroadcastTxCommit returns a TxResponse given a +// ResultBroadcastTxCommit from tendermint. +func NewResponseFormatBroadcastTxCommit(res *cbcoretypes.ResultBroadcastTxCommit) *sdk.TxResponse { + if res == nil { + return nil + } + + if !res.CheckTx.IsOK() { + return newTxResponseCheckTx(res) + } + + return newTxResponseDeliverTx(res) +} + +func newTxResponseCheckTx(res *cbcoretypes.ResultBroadcastTxCommit) *sdk.TxResponse { + if res == nil { + return nil + } + + var txHash string + if res.Hash != nil { + txHash = res.Hash.String() + } + + parsedLogs, _ := ParseABCILogs(res.CheckTx.Log) + + return &sdk.TxResponse{ + Height: res.Height, + TxHash: txHash, + Codespace: res.CheckTx.Codespace, + Code: res.CheckTx.Code, + Data: strings.ToUpper(hex.EncodeToString(res.CheckTx.Data)), + RawLog: res.CheckTx.Log, + Logs: parsedLogs, + Info: res.CheckTx.Info, + GasWanted: res.CheckTx.GasWanted, + GasUsed: res.CheckTx.GasUsed, + Events: res.CheckTx.Events, + } +} + +func newTxResponseDeliverTx(res *cbcoretypes.ResultBroadcastTxCommit) *sdk.TxResponse { + if res == nil { + return nil + } + + var txHash string + if res.Hash != nil { + txHash = res.Hash.String() + } + + parsedLogs, _ := ParseABCILogs(res.DeliverTx.Log) + + return &sdk.TxResponse{ + Height: res.Height, + TxHash: txHash, + Codespace: res.DeliverTx.Codespace, + Code: res.DeliverTx.Code, + Data: strings.ToUpper(hex.EncodeToString(res.DeliverTx.Data)), + RawLog: res.DeliverTx.Log, + Logs: parsedLogs, + Info: res.DeliverTx.Info, + GasWanted: res.DeliverTx.GasWanted, + GasUsed: res.DeliverTx.GasUsed, + Events: res.DeliverTx.Events, + } +} + +// ParseABCILogs attempts to parse a stringified ABCI tx log into a slice of +// ABCIMessageLog types. It returns an error upon JSON decoding failure. 
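+// Raw (non-JSON) log strings therefore yield an error and a nil result; callers in this file ignore that error and keep the raw string in the RawLog field.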
+func ParseABCILogs(logs string) (res sdk.ABCIMessageLogs, err error) { + err = json.Unmarshal([]byte(logs), &res) + return res, err +} diff --git a/go/node/client/v1beta3/testutil/base.go b/go/node/client/v1beta3/testutil/base.go new file mode 100644 index 00000000..dc80fa97 --- /dev/null +++ b/go/node/client/v1beta3/testutil/base.go @@ -0,0 +1,120 @@ +package testutil + +import ( + "fmt" + "testing" + + "github.com/cometbft/cometbft/libs/rand" + sdk "github.com/cosmos/cosmos-sdk/types" + + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + attrv1 "pkg.akt.dev/go/node/types/attributes/v1" + types "pkg.akt.dev/go/node/types/resources/v1beta4" + + // ensure sdkutil.init() to seal SDK config for the tests + _ "pkg.akt.dev/go/sdkutil" +) + +// CoinDenom provides ability to create coins in test functions and +// pass them into testutil functionality. +const ( + CoinDenom = "uakt" +) + +// Name generates a random name with the given prefix +func Name(_ testing.TB, prefix string) string { + return fmt.Sprintf("%s-%v", prefix, rand.Uint64()) +} + +// Hostname generates a random hostname with a "test.com" domain +func Hostname(t testing.TB) string { + return Name(t, "hostname") + ".test.com" +} + +func ProviderHostname(t testing.TB) string { + return "https://" + Hostname(t) +} + +// Attribute generates a random sdk.Attribute +func Attribute(t testing.TB) attrv1.Attribute { + t.Helper() + return attrv1.NewStringAttribute(Name(t, "attr-key"), Name(t, "attr-value")) +} + +// Attributes generates a set of sdk.Attribute +func Attributes(t testing.TB) attrv1.Attributes { + t.Helper() + count := rand.Intn(10) + 1 + + vals := make(attrv1.Attributes, 0, count) + for i := 0; i < count; i++ { + vals = append(vals, Attribute(t)) + } + return vals +} + +// PlacementRequirements generates placement requirements +func PlacementRequirements(t testing.TB) attrv1.PlacementRequirements { + return attrv1.PlacementRequirements{ + Attributes: Attributes(t), + } +} + +func RandCPUUnits() uint { + return RandRangeUint( + dtypes.GetValidationConfig().Unit.Min.CPU, + dtypes.GetValidationConfig().Unit.Max.CPU) +} + +func RandGPUUnits() uint { + return RandRangeUint( + dtypes.GetValidationConfig().Unit.Min.GPU, + dtypes.GetValidationConfig().Unit.Max.GPU) +} + +func RandMemoryQuantity() uint64 { + return RandRangeUint64( + dtypes.GetValidationConfig().Unit.Min.Memory, + dtypes.GetValidationConfig().Unit.Max.Memory) +} + +func RandStorageQuantity() uint64 { + return RandRangeUint64( + dtypes.GetValidationConfig().Unit.Min.Storage, + dtypes.GetValidationConfig().Unit.Max.Storage) +} + +// Resources produces an attribute list for populating a Group's +// 'Resources' fields. 
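+// It returns between 1 and 10 resource units; each unit uses the validation config's minimum CPU/GPU/memory/storage values, a count of 1, and a random uakt decimal-coin price.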
+func Resources(t testing.TB) dtypes.ResourceUnits { + t.Helper() + count := rand.Intn(10) + 1 + + vals := make(dtypes.ResourceUnits, 0, count) + for i := 0; i < count; i++ { + coin := sdk.NewDecCoin(CoinDenom, sdk.NewInt(rand.Int63n(9999)+1)) + res := dtypes.ResourceUnit{ + Resources: types.Resources{ + ID: uint32(i) + 1, // nolint: gosec + CPU: &types.CPU{ + Units: types.NewResourceValue(uint64(dtypes.GetValidationConfig().Unit.Min.CPU)), + }, + GPU: &types.GPU{ + Units: types.NewResourceValue(uint64(dtypes.GetValidationConfig().Unit.Min.GPU)), + }, + Memory: &types.Memory{ + Quantity: types.NewResourceValue(dtypes.GetValidationConfig().Unit.Min.Memory), + }, + Storage: types.Volumes{ + types.Storage{ + Quantity: types.NewResourceValue(dtypes.GetValidationConfig().Unit.Min.Storage), + }, + }, + }, + Count: 1, + Price: coin, + } + vals = append(vals, res) + } + return vals +} diff --git a/go/node/client/v1beta3/testutil/ids.go b/go/node/client/v1beta3/testutil/ids.go new file mode 100644 index 00000000..0f4d9bba --- /dev/null +++ b/go/node/client/v1beta3/testutil/ids.go @@ -0,0 +1,103 @@ +package testutil + +import ( + cryptorand "crypto/rand" + "crypto/sha256" + "math/rand" + "testing" + + "github.com/cometbft/cometbft/crypto/ed25519" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" + + dtypes "pkg.akt.dev/go/node/deployment/v1" + mtypes "pkg.akt.dev/go/node/market/v1" +) + +func Keyring(t testing.TB, cdc codec.Codec) keyring.Keyring { + t.Helper() + obj := keyring.NewInMemory(cdc) + return obj +} + +// AccAddress provides an Account's Address bytes from a ed25519 generated +// private key. +func AccAddress(t testing.TB) sdk.AccAddress { + t.Helper() + privKey := ed25519.GenPrivKey() + return sdk.AccAddress(privKey.PubKey().Address()) +} + +func Key(t testing.TB) ed25519.PrivKey { + t.Helper() + return ed25519.GenPrivKey() +} + +func DeploymentID(t testing.TB) dtypes.DeploymentID { + t.Helper() + return dtypes.DeploymentID{ + Owner: AccAddress(t).String(), + DSeq: uint64(rand.Uint32()), // nolint: gosec + } +} + +func DeploymentIDForAccount(t testing.TB, addr sdk.Address) dtypes.DeploymentID { + t.Helper() + return dtypes.DeploymentID{ + Owner: addr.String(), + DSeq: uint64(rand.Uint32()), // nolint: gosec + } +} + +// DeploymentVersion provides a random sha256 sum for simulating Deployments. 
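+// It reads 128 bytes from crypto/rand and returns the 32-byte SHA-256 digest of that data, failing the test if the read errors.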
+func DeploymentVersion(t testing.TB) []byte { + t.Helper() + src := make([]byte, 128) + _, err := cryptorand.Read(src) + if err != nil { + t.Fatal(err) + } + sum := sha256.Sum256(src) + return sum[:] +} + +func GroupID(t testing.TB) dtypes.GroupID { + t.Helper() + return dtypes.MakeGroupID(DeploymentID(t), rand.Uint32()) // nolint: gosec +} + +func GroupIDForAccount(t testing.TB, addr sdk.Address) dtypes.GroupID { + t.Helper() + return dtypes.MakeGroupID(DeploymentIDForAccount(t, addr), rand.Uint32()) // nolint: gosec +} + +func OrderID(t testing.TB) mtypes.OrderID { + t.Helper() + return mtypes.MakeOrderID(GroupID(t), rand.Uint32()) // nolint: gosec +} + +func OrderIDForAccount(t testing.TB, addr sdk.Address) mtypes.OrderID { + t.Helper() + return mtypes.MakeOrderID(GroupIDForAccount(t, addr), rand.Uint32()) // nolint: gosec +} + +func BidID(t testing.TB) mtypes.BidID { + t.Helper() + return mtypes.MakeBidID(OrderID(t), AccAddress(t)) +} + +func BidIDForAccount(t testing.TB, owner, provider sdk.Address) mtypes.BidID { + t.Helper() + return mtypes.MakeBidID(OrderIDForAccount(t, owner), provider.Bytes()) +} + +func LeaseID(t testing.TB) mtypes.LeaseID { + t.Helper() + return mtypes.MakeLeaseID(BidID(t)) +} + +func LeaseIDForAccount(t testing.TB, owner, provider sdk.Address) mtypes.LeaseID { + t.Helper() + return mtypes.MakeLeaseID(BidIDForAccount(t, owner, provider)) +} diff --git a/go/node/client/v1beta3/testutil/provider.go b/go/node/client/v1beta3/testutil/provider.go new file mode 100644 index 00000000..685d71b7 --- /dev/null +++ b/go/node/client/v1beta3/testutil/provider.go @@ -0,0 +1,21 @@ +package testutil + +import ( + "testing" + + ptypes "pkg.akt.dev/go/node/provider/v1beta4" +) + +func Provider(t testing.TB) ptypes.Provider { + t.Helper() + + return ptypes.Provider{ + Owner: AccAddress(t).String(), + HostURI: Hostname(t), + Attributes: Attributes(t), + Info: ptypes.Info{ + EMail: "test@example.com", + Website: ProviderHostname(t), + }, + } +} diff --git a/go/node/client/v1beta3/testutil/types.go b/go/node/client/v1beta3/testutil/types.go new file mode 100644 index 00000000..97e98531 --- /dev/null +++ b/go/node/client/v1beta3/testutil/types.go @@ -0,0 +1,149 @@ +package testutil + +import ( + "encoding/json" + "math/rand" + "testing" + + "github.com/cosmos/cosmos-sdk/codec" + + rtypes "pkg.akt.dev/go/node/types/resources/v1beta4" +) + +type InterceptState func(codec.Codec, string, json.RawMessage) json.RawMessage + +type networkConfigOptions struct { + interceptState InterceptState +} + +type ConfigOption func(*networkConfigOptions) + +// WithInterceptState set custom name of the log object +func WithInterceptState(val InterceptState) ConfigOption { + return func(t *networkConfigOptions) { + t.interceptState = val + } +} + +func RandRangeInt(min, max int) int { + return rand.Intn(max-min) + min // nolint: gosec +} + +func RandRangeUint(min, max uint) uint { + val := rand.Uint64() // nolint: gosec + val %= uint64(max - min) + val += uint64(min) + return uint(val) +} + +func RandRangeUint64(min, max uint64) uint64 { + val := rand.Uint64() // nolint: gosec + val %= max - min + val += min + return val +} + +func ResourceUnits(_ testing.TB) rtypes.Resources { + return rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(uint64(RandCPUUnits())), + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(RandMemoryQuantity()), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(uint64(RandGPUUnits())), + }, + Storage: rtypes.Volumes{ + 
rtypes.Storage{ + Quantity: rtypes.NewResourceValue(RandStorageQuantity()), + }, + }, + } +} + +// func NewApp(val network.Validator) servertypes.Application { +// return app.NewApp( +// val.Ctx.Logger, dbm.NewMemDB(), nil, true, 0, make(map[int64]bool), val.Ctx.Config.RootDir, +// // simapp.EmptyAppOptions{}, +// baseapp.SetPruning(storetypes.NewPruningOptionsFromString(val.AppConfig.Pruning)), +// baseapp.SetMinGasPrices(val.AppConfig.MinGasPrices), +// ) +// } +// +// // DefaultConfig returns a default configuration suitable for nearly all +// // testing requirements. +// func DefaultConfig(opts ...ConfigOption) network.Config { +// cfg := &networkConfigOptions{} +// for _, opt := range opts { +// opt(cfg) +// } +// +// encCfg := app.MakeEncodingConfig() +// origGenesisState := app.ModuleBasics().DefaultGenesis(encCfg.Marshaler) +// +// genesisState := make(map[string]json.RawMessage) +// for k, v := range origGenesisState { +// data, err := v.MarshalJSON() +// if err != nil { +// panic(err) +// } +// +// buf := &bytes.Buffer{} +// _, err = buf.Write(data) +// if err != nil { +// panic(err) +// } +// +// stringData := buf.String() +// stringDataAfter := strings.ReplaceAll(stringData, `"stake"`, `"uakt"`) +// if stringData == stringDataAfter { +// genesisState[k] = v +// continue +// } +// +// var val map[string]interface{} +// err = json.Unmarshal(buf.Bytes(), &val) +// if err != nil { +// panic(err) +// } +// +// replacementV := json.RawMessage(stringDataAfter) +// genesisState[k] = replacementV +// } +// +// if cfg.interceptState != nil { +// for k, v := range genesisState { +// res := cfg.interceptState(encCfg.Marshaler, k, v) +// if res != nil { +// genesisState[k] = res +// } +// } +// } +// +// return network.Config{ +// Codec: encCfg.Marshaler, +// TxConfig: encCfg.TxConfig, +// LegacyAmino: encCfg.Amino, +// InterfaceRegistry: encCfg.InterfaceRegistry, +// AccountRetriever: authtypes.AccountRetriever{}, +// AppConstructor: NewApp, +// GenesisState: genesisState, +// TimeoutCommit: 2 * time.Second, +// ChainID: "chain-" + tmrand.NewRand().Str(6), +// NumValidators: 4, +// BondDenom: CoinDenom, +// Denoms: []string{ +// "ibc/12C6A0C374171B595A0A9E18B83FA09D295FB1F2D8C6DAA3AC28683471752D84", +// }, +// MinGasPrices: fmt.Sprintf("0.000006%s", CoinDenom), +// AccountTokens: sdk.TokensFromConsensusPower(1000000000000, sdk.DefaultPowerReduction), +// StakingTokens: sdk.TokensFromConsensusPower(100000, sdk.DefaultPowerReduction), +// BondedTokens: sdk.TokensFromConsensusPower(100, sdk.DefaultPowerReduction), +// PruningStrategy: storetypes.PruningOptionNothing, +// CleanupDir: true, +// SigningAlgo: string(hd.Secp256k1Type), +// KeyringOptions: []keyring.Option{}, +// } +// } diff --git a/go/node/client/v1beta3/tx.go b/go/node/client/v1beta3/tx.go new file mode 100644 index 00000000..f6201d58 --- /dev/null +++ b/go/node/client/v1beta3/tx.go @@ -0,0 +1,858 @@ +package v1beta3 + +import ( + "bufio" + "context" + "errors" + "fmt" + "os" + "strings" + "time" + "unsafe" + + cerrors "cosmossdk.io/errors" + "github.com/boz/go-lifecycle" + "github.com/edwingeng/deque/v2" + + "github.com/cometbft/cometbft/libs/log" + "github.com/cometbft/cometbft/mempool" + cbcoretypes "github.com/cometbft/cometbft/rpc/core/types" + cbtypes "github.com/cometbft/cometbft/types" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/input" + clienttx "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk 
"github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + ttx "github.com/cosmos/cosmos-sdk/types/tx" + gogogrpc "github.com/cosmos/gogoproto/grpc" + + nutils "pkg.akt.dev/go/node/utils" + "pkg.akt.dev/go/util/ctxlog" + + cltypes "pkg.akt.dev/go/node/client/types" +) + +var ( + ErrNotRunning = errors.New("tx client: not running") + ErrSyncTimedOut = errors.New("tx client: timed-out waiting for sequence sync") + ErrNodeCatchingUp = errors.New("tx client: cannot sync from catching up node") + ErrSimulateOffline = errors.New("tx client: cannot simulate tx in offline mode") + ErrBroadcastOffline = errors.New("tx client: cannot broadcast tx in offline mode") + ErrTxCanceledByUser = errors.New("tx client: transaction declined by user input") +) + +const ( + // BroadcastSync defines a tx broadcasting mode where the client waits for + // a CheckTx execution response only. + BroadcastSync = "sync" + // BroadcastAsync defines a tx broadcasting mode where the client returns + // immediately. + BroadcastAsync = "async" + + BroadcastBlock = "block" + + BroadcastDefaultTimeout = 30 * time.Second + BroadcastBlockRetryTimeout = 300 * time.Second + broadcastBlockRetryPeriod = time.Second + sequenceSyncTimeout = 30 * time.Second + + // sadface. + + // Only way to detect the timeout error. + // https://github.com/tendermint/tendermint/blob/46e06c97320bc61c4d98d3018f59d47ec69863c9/rpc/core/mempool.go#L124 + timeoutErrorMessage = "timed out waiting for tx to be included in a block" + + // Only way to check for tx not found error. + // https://github.com/tendermint/tendermint/blob/46e06c97320bc61c4d98d3018f59d47ec69863c9/rpc/core/tx.go#L31-L33 + notFoundErrorMessageSuffix = ") not found" +) + +var _ TxClient = (*serialBroadcaster)(nil) + +type ConfirmFn func(string) (bool, error) + +// BroadcastOptions defines the options allowed to configure a transaction broadcast. +type BroadcastOptions struct { + timeoutHeight *uint64 + gasAdjustment *float64 + gas *cltypes.GasSetting + gasPrices *string + fees *string + note *string + broadcastTimeout time.Duration + resultAsError bool + skipConfirm *bool + confirmFn ConfirmFn + broadcastMode *string +} + +// BroadcastOption is a function that takes as first argument a pointer to BroadcastOptions and returns an error +// if the option cannot be configured. A number of BroadcastOption functions are available in this package. +type BroadcastOption func(*BroadcastOptions) error + +// WithGasAdjustment returns a BroadcastOption that sets the gas adjustment configuration for the transaction. +func WithGasAdjustment(val float64) BroadcastOption { + return func(options *BroadcastOptions) error { + options.gasAdjustment = new(float64) + *options.gasAdjustment = val + return nil + } +} + +// WithNote returns a BroadcastOption that sets the note configuration for the transaction. +func WithNote(val string) BroadcastOption { + return func(options *BroadcastOptions) error { + options.note = new(string) + *options.note = val + return nil + } +} + +// WithGas returns a BroadcastOption that sets the gas setting configuration for the transaction. +func WithGas(val cltypes.GasSetting) BroadcastOption { + return func(options *BroadcastOptions) error { + options.gas = new(cltypes.GasSetting) + *options.gas = val + return nil + } +} + +// WithGasPrices returns a BroadcastOption that sets the gas price configuration for the transaction. +// Gas price is a string of the amount. E.g. "0.25uakt". 
+func WithGasPrices(val string) BroadcastOption { + return func(options *BroadcastOptions) error { + options.gasPrices = new(string) + *options.gasPrices = val + return nil + } +} + +// WithFees returns a BroadcastOption that sets the fees configuration for the transaction. +func WithFees(val string) BroadcastOption { + return func(options *BroadcastOptions) error { + options.fees = new(string) + *options.fees = val + return nil + } +} + +// WithTimeoutHeight returns a BroadcastOption that sets the timeout height configuration for the transaction. +func WithTimeoutHeight(val uint64) BroadcastOption { + return func(options *BroadcastOptions) error { + options.timeoutHeight = new(uint64) + *options.timeoutHeight = val + return nil + } +} + +// WithBroadcastTimeout returns a BroadcastOption that sets the timeout configuration for the transaction. +func WithBroadcastTimeout(val time.Duration) BroadcastOption { + return func(options *BroadcastOptions) error { + options.broadcastTimeout = val + return nil + } +} + +// WithResultCodeAsError returns a BroadcastOption that enables the result code as error configuration for the transaction. +func WithResultCodeAsError() BroadcastOption { + return func(opts *BroadcastOptions) error { + opts.resultAsError = true + return nil + } +} + +// WithSkipConfirm returns a BroadcastOption that sets whether to skip or not the confirmation for the transaction. +func WithSkipConfirm(val bool) BroadcastOption { + return func(opts *BroadcastOptions) error { + opts.skipConfirm = new(bool) + *opts.skipConfirm = val + return nil + } +} + +// WithConfirmFn returns a BroadcastOption that sets the ConfirmFn function configuration for the transaction. +func WithConfirmFn(val ConfirmFn) BroadcastOption { + return func(opts *BroadcastOptions) error { + opts.confirmFn = val + return nil + } +} + +// WithBroadcastMode returns a BroadcastOption that sets the broadcast for particular tx +func WithBroadcastMode(val string) BroadcastOption { + return func(opts *BroadcastOptions) error { + + opts.broadcastMode = new(string) + *opts.broadcastMode = val + return nil + } +} + +type broadcastResp struct { + resp interface{} + err error +} + +type broadcastReq struct { + ctx context.Context + id uintptr + responsech chan<- broadcastResp + data interface{} + opts *BroadcastOptions +} + +type seqResp struct { + seq uint64 + err error +} + +type seqReq struct { + curr uint64 + ch chan<- seqResp +} + +type broadcast struct { + donech chan<- error + respch chan<- broadcastResp + ctx context.Context + data interface{} + opts *BroadcastOptions +} + +type serialBroadcaster struct { + ctx context.Context + cctx sdkclient.Context + info *keyring.Record + reqch chan broadcastReq + broadcastch chan broadcast + seqreqch chan seqReq + lc lifecycle.Lifecycle + nd *node + log log.Logger +} + +func newSerialTx(ctx context.Context, cctx sdkclient.Context, nd *node, opts ...cltypes.ClientOption) (*serialBroadcaster, sdkclient.Context, error) { + if !cctx.GenerateOnly { + if err := validateBroadcastMode(cctx.BroadcastMode); err != nil { + return nil, cctx, err + } + } + + key := cctx.From + if key == "" { + key = cctx.FromName + } + + info, err := cctx.Keyring.Key(key) + if err != nil { + info, err = cctx.Keyring.KeyByAddress(cctx.GetFromAddress()) + } + + if err != nil { + return nil, cctx, err + } + + if cctx.FromAddress == nil { + addr, err := info.GetAddress() + if err != nil { + return nil, cctx, err + } + + cctx = cctx.WithFromAddress(addr) + } + + if cctx.From == "" { + cctx = 
cctx.WithFrom(info.Name) + } + + if cctx.FromName == "" { + cctx = cctx.WithFromName(info.Name) + } + + txf, err := cltypes.NewTxFactory(cctx, opts...) + if err != nil { + return nil, cctx, err + } + + client := &serialBroadcaster{ + ctx: ctx, + cctx: cctx, + info: info, + lc: lifecycle.New(), + reqch: make(chan broadcastReq, 1), + broadcastch: make(chan broadcast, 1), + seqreqch: make(chan seqReq), + nd: nd, + log: ctxlog.Logger(ctx).With("cmp", "client/broadcaster"), + } + + go client.lc.WatchContext(ctx) + go client.run() + go client.broadcaster(txf) + + if !client.cctx.Offline { + go client.sequenceSync() + } + + return client, cctx, nil +} + +// BroadcastMsgs builds and broadcasts transaction. Thi transaction is composed of 1 or many messages. This allows several +// operations to be performed in a single transaction. +// A transaction broadcast can be configured with an arbitrary number of BroadcastOption. +// This method returns the response as an interface{} instance. If an error occurs when preparing the transaction +// an error is returned. +// A transaction can fail with a given "transaction code" which will not be passed to the error value. +// This needs to be checked by the caller and handled accordingly. +func (c *serialBroadcaster) BroadcastMsgs(ctx context.Context, msgs []sdk.Msg, opts ...BroadcastOption) (interface{}, error) { + bOpts := &BroadcastOptions{ + confirmFn: defaultTxConfirm, + broadcastTimeout: BroadcastDefaultTimeout, + } + + for _, opt := range opts { + if err := opt(bOpts); err != nil { + return nil, err + } + } + + responsech := make(chan broadcastResp, 1) + request := broadcastReq{ + ctx: ctx, + responsech: responsech, + data: msgs, + opts: bOpts, + } + + request.id = uintptr(unsafe.Pointer(&request)) + + select { + case c.reqch <- request: + case <-ctx.Done(): + return nil, ctx.Err() + case <-c.lc.ShuttingDown(): + return nil, ErrNotRunning + } + + select { + case resp := <-responsech: + // if returned error is sdk error, it is likely to be wrapped response so discard it + // as clients supposed to check Tx code, unless resp is nil, which is error during Tx preparation + if !errors.As(resp.err, &cerrors.Error{}) || resp.resp == nil || bOpts.resultAsError { + return resp.resp, resp.err + } + return resp.resp, nil + case <-ctx.Done(): + return nil, ctx.Err() + case <-c.lc.ShuttingDown(): + return nil, ErrNotRunning + } +} + +func (c *serialBroadcaster) BroadcastTx(ctx context.Context, tx sdk.Tx, opts ...BroadcastOption) (interface{}, error) { + bOpts := &BroadcastOptions{ + confirmFn: defaultTxConfirm, + broadcastTimeout: BroadcastDefaultTimeout, + } + + for _, opt := range opts { + if err := opt(bOpts); err != nil { + return nil, err + } + } + + responsech := make(chan broadcastResp, 1) + request := broadcastReq{ + responsech: responsech, + data: tx, + opts: bOpts, + } + + request.id = uintptr(unsafe.Pointer(&request)) + + select { + case c.reqch <- request: + case <-ctx.Done(): + return nil, ctx.Err() + case <-c.lc.ShuttingDown(): + return nil, ErrNotRunning + } + + select { + case resp := <-responsech: + // if returned error is sdk error, it is likely to be wrapped response so discard it + // as clients supposed to check Tx code, unless resp is nil, which is error during Tx preparation + if !errors.As(resp.err, &cerrors.Error{}) || resp.resp == nil || bOpts.resultAsError { + return resp.resp, resp.err + } + return resp.resp, nil + case <-ctx.Done(): + return nil, ctx.Err() + case <-c.lc.ShuttingDown(): + return nil, ErrNotRunning + } +} + +func (c 
*serialBroadcaster) run() { + defer c.lc.ShutdownCompleted() + + pending := deque.NewDeque[broadcastReq]() + broadcastCh := c.broadcastch + broadcastDoneCh := make(chan error, 1) + + tryBroadcast := func() { + if pending.Len() == 0 { + return + } + + req := pending.Peek(0) + + select { + case broadcastCh <- broadcast{ + donech: broadcastDoneCh, + respch: req.responsech, + ctx: req.ctx, + data: req.data, + opts: req.opts, + }: + broadcastCh = nil + _ = pending.PopFront() + default: + } + } + +loop: + for { + select { + case err := <-c.lc.ShutdownRequest(): + c.lc.ShutdownInitiated(err) + break loop + case req := <-c.reqch: + pending.PushBack(req) + + tryBroadcast() + case err := <-broadcastDoneCh: + broadcastCh = c.broadcastch + + if err != nil { + c.log.Error("unable to broadcast messages", "error", err) + } + tryBroadcast() + } + } +} + +func deriveTxfFromOptions(txf clienttx.Factory, opts *BroadcastOptions) clienttx.Factory { + if opt := opts.note; opt != nil { + txf = txf.WithMemo(*opt) + } + + if opt := opts.gas; opt != nil { + txf = txf.WithGas(opt.Gas).WithSimulateAndExecute(opt.Simulate) + } + + if opt := opts.fees; opt != nil { + txf = txf.WithFees(*opt) + } + + if opt := opts.gasPrices; opt != nil { + txf = txf.WithGasPrices(*opt) + } + + if opt := opts.timeoutHeight; opt != nil { + txf = txf.WithTimeoutHeight(*opt) + } + + if opt := opts.gasAdjustment; opt != nil { + txf = txf.WithGasAdjustment(*opt) + } + + return txf +} + +func deriveCctxFromOptions(cctx sdkclient.Context, opts *BroadcastOptions) sdkclient.Context { + if opt := opts.broadcastMode; opt != nil { + cctx = cctx.WithBroadcastMode(*opt) + } + + return cctx +} + +func (c *serialBroadcaster) syncSequence(f clienttx.Factory, rErr error) (uint64, bool) { + if rErr != nil && sdkerrors.ErrWrongSequence.Is(rErr) { + // attempt to sync account sequence + if rSeq, err := c.syncAccountSequence(f.Sequence()); err == nil { + return rSeq, true + } + + return f.Sequence(), true + } + + return f.Sequence(), false +} + +func (c *serialBroadcaster) broadcaster(ptxf clienttx.Factory) { + for { + select { + case <-c.lc.ShuttingDown(): + return + case req := <-c.broadcastch: + var err error + var resp interface{} + + switch mType := req.data.(type) { + case []sdk.Msg: + var seq uint64 + resp, seq, err = c.buildAndBroadcastTx(req.ctx, ptxf, mType, req.opts) + ptxf = ptxf.WithSequence(seq) + + case sdk.Tx: + cctx := deriveCctxFromOptions(c.cctx, req.opts) + resp, err = c.broadcastTx(req.ctx, cctx, mType, req.opts.broadcastTimeout) + } + + req.respch <- broadcastResp{ + resp: resp, + err: err, + } + + terr := &cerrors.Error{} + if !c.cctx.GenerateOnly && errors.Is(err, terr) { + rSeq, _ := c.syncSequence(ptxf, err) + ptxf = ptxf.WithSequence(rSeq) + } + + select { + case <-c.lc.ShuttingDown(): + return + case req.donech <- err: + } + } + } +} + +func (c *serialBroadcaster) buildAndBroadcastTx(ctx context.Context, ptxf clienttx.Factory, msgs []sdk.Msg, opts *BroadcastOptions) (interface{}, uint64, error) { + var err error + var res *sdk.TxResponse + + for i := 0; i < 2; i++ { + txf := deriveTxfFromOptions(ptxf, opts) + cctx := deriveCctxFromOptions(c.cctx, opts) + + if txf.SimulateAndExecute() || cctx.Simulate { + if cctx.Offline { + return nil, txf.Sequence(), ErrSimulateOffline + } + + simResp, adjusted, err := CalculateGas(ctx, cctx, txf, msgs...) + if err != nil { + return nil, txf.Sequence(), err + } + + // context Simulate differs from tx.Factory.simulate! 
+ // later is to calculate gas if one set to auto + // if context has Simulate flag set, just bail out here with simulation result + if cctx.Simulate { + return simResp, txf.Sequence(), nil + } + + txf = txf.WithGas(adjusted) + } + + utx, err := txf.BuildUnsignedTx(msgs...) + if err != nil { + return nil, txf.Sequence(), err + } + + if gAddr := cctx.GetFeeGranterAddress(); gAddr != nil { + utx.SetFeeGranter(gAddr) + } + + if cctx.GenerateOnly { + txb, err := cctx.TxConfig.TxJSONEncoder()(utx.GetTx()) + if err != nil { + return nil, ptxf.Sequence(), err + } + + return txb, ptxf.Sequence(), nil + } + + if !cctx.SkipConfirm { + var out []byte + if out, err = cctx.TxConfig.TxJSONEncoder()(utx.GetTx()); err != nil { + return nil, txf.Sequence(), err + } + + var shipIt bool + if shipIt, err = opts.confirmFn(string(out)); err != nil { + return nil, txf.Sequence(), err + } + + if !shipIt { + return nil, txf.Sequence(), ErrTxCanceledByUser + } + } + + if err = clienttx.Sign(txf, c.info.Name, utx, true); err != nil { + return nil, txf.Sequence(), err + } + + res, err = c.broadcastTx(ctx, cctx, utx.GetTx(), opts.broadcastTimeout) + if err != nil { + return res, txf.Sequence(), err + } + + ptxf = ptxf.WithSequence(txf.Sequence() + 1) + + if res.Code != 0 { + return res, ptxf.Sequence(), cerrors.ABCIError(res.Codespace, res.Code, res.RawLog) + } + + rSeq, synced := c.syncSequence(ptxf, err) + ptxf = ptxf.WithSequence(rSeq) + + if !synced { + break + } + } + + return res, ptxf.Sequence(), err +} + +func (c *serialBroadcaster) sequenceSync() { + for { + select { + case <-c.lc.ShuttingDown(): + return + case req := <-c.seqreqch: + // reply back with current value if any error to occur + seq := seqResp{ + seq: req.curr, + } + + ndStatus, err := c.nd.SyncInfo(c.ctx) + if err != nil { + c.log.Error("cannot obtain node status to sync account sequence", "err", err) + seq.err = err + } + + if err == nil && ndStatus.CatchingUp { + c.log.Error("cannot sync account sequence from node that is catching up") + err = ErrNodeCatchingUp + } + + if err == nil { + addr, _ := c.info.GetAddress() + // query sequence number + if _, seq.seq, err = c.cctx.AccountRetriever.GetAccountNumberSequence(c.cctx, addr); err != nil { + c.log.Error("error requesting account", "err", err) + seq.err = err + } + } + + select { + case req.ch <- seq: + case <-c.lc.ShuttingDown(): + } + } + } +} + +func (c *serialBroadcaster) syncAccountSequence(lSeq uint64) (uint64, error) { + ch := make(chan seqResp, 1) + + c.seqreqch <- seqReq{ + curr: lSeq, + ch: ch, + } + + ctx, cancel := context.WithTimeout(c.ctx, sequenceSyncTimeout) + defer cancel() + + select { + case rSeq := <-ch: + return rSeq.seq, rSeq.err + case <-ctx.Done(): + return lSeq, ErrSyncTimedOut + case <-c.lc.ShuttingDown(): + return lSeq, ErrNotRunning + } +} + +// broadcastTxb broadcasts fully built transaction in sync/async or block modes +// based on the context parameters. The result of the broadcast is parsed into +// an intermediate structure which is logged if the context has a logger +// defined. 
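In practice the broadcast mode comes from the client context, but it can be overridden for a single transaction through the options defined earlier in this file. A minimal caller-side sketch, assuming an already constructed TxClient named cl and a prepared slice of sdk.Msg (the function name and the concrete values are illustrative only, not part of this patch):

func submitAndWait(ctx context.Context, cl TxClient, msgs []sdk.Msg) (interface{}, error) {
    // BroadcastMsgs seeds the options with defaultTxConfirm and
    // BroadcastDefaultTimeout, then applies whatever the caller passes.
    return cl.BroadcastMsgs(ctx, msgs,
        WithGasAdjustment(1.5),                           // scale the simulated gas estimate
        WithGasPrices("0.025uakt"),                       // gas price expressed as an amount string
        WithBroadcastMode(BroadcastBlock),                // BroadcastTxCommit: wait for the tx to land in a block
        WithBroadcastTimeout(BroadcastBlockRetryTimeout), // keep polling for inclusion for up to 300s on an RPC timeout
    )
}

Whichever mode is used, a non-zero result code is not folded into the returned error unless WithResultCodeAsError is set, so callers are expected to inspect the TxResponse code themselves.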
+func (c *serialBroadcaster) broadcastTx(ctx context.Context, cctx sdkclient.Context, tx sdk.Tx, timeout time.Duration) (*sdk.TxResponse, error) { + txb, err := cctx.TxConfig.TxEncoder()(tx) + if err != nil { + return nil, err + } + + hash := cbtypes.Tx(txb).Hash() + + node, err := cctx.GetNode() + if err != nil { + return nil, err + } + + var resp *sdk.TxResponse + + // broadcast mode has been validated + switch cctx.BroadcastMode { + case BroadcastBlock: + var res *cbcoretypes.ResultBroadcastTxCommit + res, err = node.BroadcastTxCommit(context.Background(), txb) + if errRes := CheckTendermintError(err, txb); errRes != nil { + return errRes, nil + } + + resp = NewResponseFormatBroadcastTxCommit(res) + case BroadcastSync: + var res *cbcoretypes.ResultBroadcastTx + res, err = node.BroadcastTxSync(context.Background(), txb) + if errRes := CheckTendermintError(err, txb); errRes != nil { + return errRes, nil + } + + resp = sdk.NewResponseFormatBroadcastTx(res) + case BroadcastAsync: + var res *cbcoretypes.ResultBroadcastTx + res, err = node.BroadcastTxAsync(context.Background(), txb) + if errRes := CheckTendermintError(err, txb); errRes != nil { + return errRes, nil + } + + resp = sdk.NewResponseFormatBroadcastTx(res) + } + + if err == nil { + // good job + return resp, nil + } else if !strings.HasSuffix(err.Error(), timeoutErrorMessage) { + return resp, err + } + + // timeout error, continue on to retry + // loop + lctx, cancel := context.WithTimeout(c.ctx, timeout) + defer cancel() + + for lctx.Err() == nil { + // wait up to one second + select { + case <-lctx.Done(): + return resp, err + case <-time.After(broadcastBlockRetryPeriod): + } + + // check transaction + // https://github.com/cosmos/cosmos-sdk/pull/8734 + res, err := nutils.QueryTx(ctx, cctx, hash) + if err == nil { + return res, nil + } + + // if it's not a "not found" error, return + if !strings.HasSuffix(err.Error(), notFoundErrorMessageSuffix) { + return res, err + } + } + + return resp, lctx.Err() +} + +// CalculateGas simulates the execution of a transaction and returns the +// simulation response obtained by the query and the adjusted gas amount. +func CalculateGas( + ctx context.Context, + clientCtx gogogrpc.ClientConn, + txf clienttx.Factory, + msgs ...sdk.Msg, +) (*ttx.SimulateResponse, uint64, error) { + txBytes, err := txf.BuildSimTx(msgs...) + if err != nil { + return nil, 0, err + } + + txSvcClient := ttx.NewServiceClient(clientCtx) + simRes, err := txSvcClient.Simulate(ctx, &ttx.SimulateRequest{ + TxBytes: txBytes, + }) + if err != nil { + return nil, 0, err + } + + return simRes, uint64(txf.GasAdjustment() * float64(simRes.GasInfo.GasUsed)), nil +} + +func defaultTxConfirm(txn string) (bool, error) { + _, _ = fmt.Printf("%s\n\n", txn) + + buf := bufio.NewReader(os.Stdin) + + return input.GetConfirmation("confirm transaction before signing and broadcasting", buf, os.Stdin) +} + +func validateBroadcastMode(val string) error { + switch val { + case BroadcastAsync: + fallthrough + case BroadcastSync: + fallthrough + case BroadcastBlock: + return nil + } + + return fmt.Errorf("invalid broadcast mode \"%s\". expected %s|%s|%s", + val, + BroadcastAsync, + BroadcastSync, + BroadcastBlock, + ) +} + +// CheckTendermintError checks if the error returned from BroadcastTx is a +// Tendermint error that is returned before the tx is submitted due to +// precondition checks that failed. If an Tendermint error is detected, this +// function returns the correct code back in TxResponse. 
+// +// TODO: Avoid brittle string matching in favor of error matching. This requires +// a change to Tendermint's RPCError type to allow retrieval or matching against +// a concrete error type. +func CheckTendermintError(err error, tx cbtypes.Tx) *sdk.TxResponse { + if err == nil { + return nil + } + + errStr := strings.ToLower(err.Error()) + txHash := fmt.Sprintf("%X", tx.Hash()) + + switch { + case strings.Contains(errStr, strings.ToLower(mempool.ErrTxInCache.Error())): + return &sdk.TxResponse{ + Code: sdkerrors.ErrTxInMempoolCache.ABCICode(), + Codespace: sdkerrors.ErrTxInMempoolCache.Codespace(), + TxHash: txHash, + } + + case strings.Contains(errStr, "mempool is full"): + return &sdk.TxResponse{ + Code: sdkerrors.ErrMempoolIsFull.ABCICode(), + Codespace: sdkerrors.ErrMempoolIsFull.Codespace(), + TxHash: txHash, + } + + case strings.Contains(errStr, "tx too large"): + return &sdk.TxResponse{ + Code: sdkerrors.ErrTxTooLarge.ABCICode(), + Codespace: sdkerrors.ErrTxTooLarge.Codespace(), + TxHash: txHash, + } + + default: + return nil + } +} diff --git a/go/node/deployment/v1/auth.go b/go/node/deployment/v1/auth.go new file mode 100644 index 00000000..0716cb33 --- /dev/null +++ b/go/node/deployment/v1/auth.go @@ -0,0 +1,107 @@ +package v1 + +import ( + "reflect" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/x/authz" +) + +var ( + MsgTypeDepositDeployment = "" +) + +var ( + _ authz.Authorization = &DepositAuthorization{} +) + +func init() { + MsgTypeDepositDeployment = reflect.TypeOf(&DepositAuthorization{}).Name() +} + +// NewDepositAuthorization creates a new DepositAuthorization object. +func NewDepositAuthorization(spendLimit sdk.Coin) *DepositAuthorization { + return &DepositAuthorization{ + SpendLimit: spendLimit, + } +} + +// MsgTypeURL implements Authorization.MsgTypeURL. +func (m *DepositAuthorization) MsgTypeURL() string { + return sdk.MsgTypeURL(&MsgDepositDeployment{}) +} + +// Accept implements Authorization.Accept. +func (m *DepositAuthorization) Accept(_ sdk.Context, msg sdk.Msg) (authz.AcceptResponse, error) { + mDepositDeployment, ok := msg.(*MsgDepositDeployment) + if !ok { + return authz.AcceptResponse{}, sdkerrors.ErrInvalidType.Wrap("type mismatch") + } + if m.SpendLimit.IsLT(mDepositDeployment.Amount) { + return authz.AcceptResponse{}, sdkerrors.ErrInsufficientFunds.Wrapf("requested amount is more than spend limit") + } + limitLeft := m.SpendLimit.Sub(mDepositDeployment.Amount) + + return authz.AcceptResponse{Accept: true, Delete: false, Updated: &DepositAuthorization{SpendLimit: limitLeft}}, nil +} + +// ValidateBasic implements Authorization.ValidateBasic. 
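Accept, above, shaves the deposited amount off the remaining spend limit each time a grantee deposit is authorized. Roughly, as a fragment with illustrative amounts (the zero sdk.Context is enough here because Accept ignores its first argument):

auth := NewDepositAuthorization(sdk.NewInt64Coin("uakt", 1000))
resp, err := auth.Accept(sdk.Context{}, &MsgDepositDeployment{
    Amount: sdk.NewInt64Coin("uakt", 400),
})
// err == nil, resp.Accept == true, resp.Delete == false, and resp.Updated is a
// DepositAuthorization whose remaining spend limit is 600uakt; a deposit larger
// than the remaining limit fails with sdkerrors.ErrInsufficientFunds instead.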
+func (m *DepositAuthorization) ValidateBasic() error { + if !m.SpendLimit.IsPositive() { + return sdkerrors.ErrInvalidCoins.Wrapf("spend limit cannot be negative") + } + return nil +} + +// NewMsgDepositDeployment creates a new MsgDepositDeployment instance +func NewMsgDepositDeployment(id DeploymentID, amount sdk.Coin, depositor string) *MsgDepositDeployment { + return &MsgDepositDeployment{ + ID: id, + Amount: amount, + Depositor: depositor, + } +} + +// Route implements the sdk.Msg interface +func (msg *MsgDepositDeployment) Route() string { + return RouterKey +} + +// Type implements the sdk.Msg interface +func (msg *MsgDepositDeployment) Type() string { + return MsgTypeDepositDeployment +} + +// GetSignBytes encodes the message for signing +// func (msg MsgDepositDeployment) GetSignBytes() []byte { +// return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +// } + +// GetSigners defines whose signature is required +func (msg *MsgDepositDeployment) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// ValidateBasic does basic validation like check owner and groups length +func (msg *MsgDepositDeployment) ValidateBasic() error { + if err := msg.ID.Validate(); err != nil { + return err + } + + if msg.Amount.IsZero() { + return ErrInvalidDeposit + } + + _, err := sdk.AccAddressFromBech32(msg.Depositor) + if err != nil { + return sdkerrors.ErrInvalidAddress.Wrap("MsgDepositDeployment: Invalid Depositor Address") + } + + return nil +} diff --git a/go/node/deployment/v1/authz.pb.go b/go/node/deployment/v1/authz.pb.go new file mode 100644 index 00000000..e0e42038 --- /dev/null +++ b/go/node/deployment/v1/authz.pb.go @@ -0,0 +1,330 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1/authz.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from +// the granter's account for a deployment. +type DepositAuthorization struct { + // SpendLimit is the amount the grantee is authorized to spend from the granter's account for + // the purpose of deployment. 
+ SpendLimit types.Coin `protobuf:"bytes,1,opt,name=spend_limit,json=spendLimit,proto3" json:"spend_limit"` +} + +func (m *DepositAuthorization) Reset() { *m = DepositAuthorization{} } +func (m *DepositAuthorization) String() string { return proto.CompactTextString(m) } +func (*DepositAuthorization) ProtoMessage() {} +func (*DepositAuthorization) Descriptor() ([]byte, []int) { + return fileDescriptor_c0c17e4c56f70008, []int{0} +} +func (m *DepositAuthorization) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DepositAuthorization) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DepositAuthorization.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DepositAuthorization) XXX_Merge(src proto.Message) { + xxx_messageInfo_DepositAuthorization.Merge(m, src) +} +func (m *DepositAuthorization) XXX_Size() int { + return m.Size() +} +func (m *DepositAuthorization) XXX_DiscardUnknown() { + xxx_messageInfo_DepositAuthorization.DiscardUnknown(m) +} + +var xxx_messageInfo_DepositAuthorization proto.InternalMessageInfo + +func (m *DepositAuthorization) GetSpendLimit() types.Coin { + if m != nil { + return m.SpendLimit + } + return types.Coin{} +} + +func init() { + proto.RegisterType((*DepositAuthorization)(nil), "akash.deployment.v1.DepositAuthorization") +} + +func init() { proto.RegisterFile("akash/deployment/v1/authz.proto", fileDescriptor_c0c17e4c56f70008) } + +var fileDescriptor_c0c17e4c56f70008 = []byte{ + // 260 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0xd4, + 0x4f, 0x2c, 0x2d, 0xc9, 0xa8, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x06, 0x2b, 0xd0, + 0x43, 0x28, 0xd0, 0x2b, 0x33, 0x94, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xcb, 0xeb, 0x83, 0x58, + 0x10, 0xa5, 0x52, 0x92, 0xc9, 0xf9, 0xc5, 0xb9, 0xf9, 0xc5, 0xf1, 0x10, 0x09, 0x08, 0x07, 0x2a, + 0x25, 0x07, 0xe1, 0xe9, 0x27, 0x25, 0x16, 0xa7, 0xea, 0x97, 0x19, 0x26, 0xa5, 0x96, 0x24, 0x1a, + 0xea, 0x27, 0xe7, 0x67, 0xe6, 0x41, 0xe4, 0x95, 0xaa, 0xb8, 0x44, 0x5c, 0x52, 0x0b, 0xf2, 0x8b, + 0x33, 0x4b, 0x1c, 0x4b, 0x4b, 0x32, 0xf2, 0x8b, 0x32, 0xab, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x84, + 0xfc, 0xb9, 0xb8, 0x8b, 0x0b, 0x52, 0xf3, 0x52, 0xe2, 0x73, 0x32, 0x73, 0x33, 0x4b, 0x24, 0x18, + 0x15, 0x18, 0x35, 0xb8, 0x8d, 0x24, 0xf5, 0xa0, 0x66, 0x83, 0x4c, 0xd3, 0x83, 0x9a, 0xa6, 0xe7, + 0x9c, 0x9f, 0x99, 0xe7, 0x24, 0x7c, 0xe2, 0x9e, 0x3c, 0xc3, 0xab, 0x7b, 0xf2, 0xc8, 0xba, 0x82, + 0xb8, 0xc0, 0x1c, 0x1f, 0x10, 0xdb, 0x4a, 0xf0, 0xd4, 0x16, 0x5d, 0x5e, 0x14, 0x3b, 0x9c, 0xac, + 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, + 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0x4a, 0xb1, 0x20, 0x3b, 0x5d, 0x2f, + 0x31, 0xbb, 0x44, 0x2f, 0x25, 0xb5, 0x4c, 0x3f, 0x3d, 0x5f, 0x3f, 0x2f, 0x3f, 0x25, 0x15, 0x35, + 0xa8, 0x92, 0xd8, 0xc0, 0xee, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x46, 0x58, 0x6f, 0xf8, + 0x48, 0x01, 0x00, 0x00, +} + +func (m *DepositAuthorization) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DepositAuthorization) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DepositAuthorization) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.SpendLimit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAuthz(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintAuthz(dAtA []byte, offset int, v uint64) int { + offset -= sovAuthz(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DepositAuthorization) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.SpendLimit.Size() + n += 1 + l + sovAuthz(uint64(l)) + return n +} + +func sovAuthz(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAuthz(x uint64) (n int) { + return sovAuthz(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *DepositAuthorization) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuthz + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DepositAuthorization: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DepositAuthorization: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpendLimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuthz + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuthz + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAuthz + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SpendLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuthz(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAuthz + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAuthz(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuthz + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuthz + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuthz + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAuthz + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupAuthz + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthAuthz + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthAuthz = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAuthz = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAuthz = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1/codec.go b/go/node/deployment/v1/codec.go new file mode 100644 index 00000000..0cad24fd --- /dev/null +++ b/go/node/deployment/v1/codec.go @@ -0,0 +1,19 @@ +package v1 + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" +) + +var ( + // ModuleCdc references the global x/deployment module codec. Note, the codec should + // ONLY be used in certain instances of tests and for JSON encoding as Amino is + // still used for that purpose. + // + // The actual codec used for serialization should be provided to x/deployment and + // defined at the application level. + // + // Deprecated: ModuleCdc use is deprecated + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) + diff --git a/go/node/deployment/v1/deployment.go b/go/node/deployment/v1/deployment.go new file mode 100644 index 00000000..fdba8029 --- /dev/null +++ b/go/node/deployment/v1/deployment.go @@ -0,0 +1,69 @@ +package v1 + +import ( + "fmt" + "strconv" + "strings" + + sdkerrors "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + dsdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const ( + HashLength = 32 +) + +// Equals method compares specific deployment with provided deployment +func (id DeploymentID) Equals(other DeploymentID) bool { + return id.Owner == other.Owner && id.DSeq == other.DSeq +} + +// Validate method for DeploymentID and returns nil +func (id DeploymentID) Validate() error { + _, err := sdk.AccAddressFromBech32(id.Owner) + switch { + case err != nil: + return sdkerrors.Wrap(dsdkerrors.ErrInvalidAddress, "DeploymentID: Invalid Owner Address") + case id.DSeq == 0: + return sdkerrors.Wrap(dsdkerrors.ErrInvalidSequence, "DeploymentID: Invalid Deployment Sequence") + } + return nil +} + +// String method for deployment IDs +func (id DeploymentID) String() string { + return fmt.Sprintf("%s/%d", id.Owner, id.DSeq) +} + +func (id DeploymentID) GetOwnerAddress() (sdk.Address, error) { + return sdk.AccAddressFromBech32(id.Owner) +} + +func ParseDeploymentID(val string) (DeploymentID, error) { + parts := strings.Split(val, "/") + return ParseDeploymentPath(parts) +} + +// ParseDeploymentPath returns DeploymentID details with provided queries, and return +// error if occurred due to wrong query +func ParseDeploymentPath(parts []string) (DeploymentID, error) { + if len(parts) != 2 { + return DeploymentID{}, ErrInvalidIDPath + } + + owner, err := sdk.AccAddressFromBech32(parts[0]) + if err != nil { + return DeploymentID{}, err + } + + dseq, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return DeploymentID{}, err + } + + return DeploymentID{ + Owner: owner.String(), + DSeq: dseq, + }, nil +} diff --git 
a/go/node/deployment/v1/deployment.pb.go b/go/node/deployment/v1/deployment.pb.go new file mode 100644 index 00000000..b668ac26 --- /dev/null +++ b/go/node/deployment/v1/deployment.pb.go @@ -0,0 +1,702 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1/deployment.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// State is an enum which refers to state of deployment +type Deployment_State int32 + +const ( + // Prefix should start with 0 in enum. So declaring dummy state + DeploymentStateInvalid Deployment_State = 0 + // DeploymentActive denotes state for deployment active + DeploymentActive Deployment_State = 1 + // DeploymentClosed denotes state for deployment closed + DeploymentClosed Deployment_State = 2 +) + +var Deployment_State_name = map[int32]string{ + 0: "invalid", + 1: "active", + 2: "closed", +} + +var Deployment_State_value = map[string]int32{ + "invalid": 0, + "active": 1, + "closed": 2, +} + +func (x Deployment_State) String() string { + return proto.EnumName(Deployment_State_name, int32(x)) +} + +func (Deployment_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_289f09354ec3dad5, []int{1, 0} +} + +// DeploymentID stores owner and sequence number +type DeploymentID struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` +} + +func (m *DeploymentID) Reset() { *m = DeploymentID{} } +func (*DeploymentID) ProtoMessage() {} +func (*DeploymentID) Descriptor() ([]byte, []int) { + return fileDescriptor_289f09354ec3dad5, []int{0} +} +func (m *DeploymentID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeploymentID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeploymentID) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentID.Merge(m, src) +} +func (m *DeploymentID) XXX_Size() int { + return m.Size() +} +func (m *DeploymentID) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentID.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentID proto.InternalMessageInfo + +func (m *DeploymentID) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *DeploymentID) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +// Deployment stores deploymentID, state and checksum details +type Deployment struct { + ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` + State Deployment_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.deployment.v1.Deployment_State" json:"state" yaml:"state"` + Hash []byte `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash" 
yaml:"hash"` + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` +} + +func (m *Deployment) Reset() { *m = Deployment{} } +func (m *Deployment) String() string { return proto.CompactTextString(m) } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_289f09354ec3dad5, []int{1} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Deployment.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} + +var xxx_messageInfo_Deployment proto.InternalMessageInfo + +func (m *Deployment) GetID() DeploymentID { + if m != nil { + return m.ID + } + return DeploymentID{} +} + +func (m *Deployment) GetState() Deployment_State { + if m != nil { + return m.State + } + return DeploymentStateInvalid +} + +func (m *Deployment) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *Deployment) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func init() { + proto.RegisterEnum("akash.deployment.v1.Deployment_State", Deployment_State_name, Deployment_State_value) + proto.RegisterType((*DeploymentID)(nil), "akash.deployment.v1.DeploymentID") + proto.RegisterType((*Deployment)(nil), "akash.deployment.v1.Deployment") +} + +func init() { + proto.RegisterFile("akash/deployment/v1/deployment.proto", fileDescriptor_289f09354ec3dad5) +} + +var fileDescriptor_289f09354ec3dad5 = []byte{ + // 480 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x33, 0x69, 0xba, 0xda, 0xd9, 0x22, 0x25, 0x16, 0xed, 0x06, 0x36, 0x93, 0x0d, 0x8a, + 0x05, 0x31, 0xa1, 0xbb, 0xb7, 0x7a, 0x6a, 0x2c, 0x48, 0x6f, 0x92, 0x82, 0x07, 0x2f, 0xcb, 0xd8, + 0x19, 0xd2, 0xd0, 0x36, 0xd3, 0x4d, 0x86, 0xc8, 0x7a, 0xf6, 0x20, 0x7b, 0xf2, 0x24, 0x5e, 0x16, + 0x16, 0xfc, 0x0a, 0x7e, 0x88, 0x3d, 0x2e, 0x9e, 0x3c, 0x05, 0x49, 0x2f, 0xd2, 0x63, 0x3f, 0x81, + 0xcc, 0x4c, 0x31, 0x55, 0x84, 0xbd, 0xe5, 0xff, 0x7b, 0xff, 0xbc, 0x79, 0xff, 0x99, 0x07, 0x1f, + 0xe1, 0x19, 0xce, 0xa6, 0x3e, 0xa1, 0xcb, 0x39, 0x3b, 0x5f, 0xd0, 0x84, 0xfb, 0x79, 0x6f, 0x47, + 0x79, 0xcb, 0x94, 0x71, 0x66, 0xde, 0x97, 0x2e, 0x6f, 0x87, 0xe7, 0x3d, 0xab, 0x1d, 0xb1, 0x88, + 0xc9, 0xba, 0x2f, 0xbe, 0x94, 0xd5, 0x3a, 0x98, 0xb0, 0x6c, 0xc1, 0xb2, 0x53, 0x55, 0x50, 0x42, + 0x95, 0xdc, 0xcf, 0x00, 0x36, 0x87, 0x7f, 0x5a, 0x8c, 0x86, 0xe6, 0x4b, 0x58, 0x67, 0xef, 0x12, + 0x9a, 0x76, 0x80, 0x03, 0xba, 0x8d, 0xa0, 0xb7, 0x2e, 0x90, 0x02, 0x9b, 0x02, 0x35, 0xcf, 0xf1, + 0x62, 0xde, 0x77, 0xa5, 0x74, 0xbf, 0x7f, 0x7b, 0xd6, 0xde, 0xb6, 0x1a, 0x10, 0x92, 0xd2, 0x2c, + 0x1b, 0xf3, 0x34, 0x4e, 0xa2, 0x50, 0xd9, 0xcd, 0x13, 0x68, 0x90, 0x8c, 0x9e, 0x75, 0x74, 0x07, + 0x74, 0x8d, 0x00, 0x95, 0x05, 0x32, 0x86, 0x63, 0x7a, 0xb6, 0x2e, 0x90, 0xe4, 0x9b, 0x02, 0xed, + 0xab, 0x76, 0x42, 0xb9, 0xa1, 0x84, 0xfd, 0xbb, 0x5f, 0xae, 0x90, 0xf6, 0xeb, 0x0a, 0x69, 0xee, + 0x87, 0x1a, 0x84, 0xd5, 0x60, 0xe6, 0x2b, 0xa8, 0xc7, 0x44, 
0xce, 0xb4, 0x7f, 0x7c, 0xe4, 0xfd, + 0x27, 0xba, 0xb7, 0x9b, 0x22, 0x38, 0xbc, 0x2e, 0x90, 0x56, 0x16, 0x48, 0x1f, 0x0d, 0xd7, 0x05, + 0xd2, 0x63, 0xb2, 0x29, 0x50, 0x43, 0x1d, 0x17, 0x13, 0x37, 0xd4, 0x63, 0x62, 0xbe, 0x86, 0xf5, + 0x8c, 0x63, 0x4e, 0xe5, 0x80, 0xf7, 0x8e, 0x1f, 0xdf, 0xd2, 0xd4, 0x1b, 0x0b, 0x73, 0x70, 0x20, + 0xee, 0x43, 0xfe, 0x57, 0xdd, 0x87, 0x94, 0x6e, 0xa8, 0xb0, 0xf9, 0x14, 0x1a, 0x53, 0x9c, 0x4d, + 0x3b, 0x35, 0x07, 0x74, 0x9b, 0xc1, 0x43, 0x91, 0x57, 0xe8, 0x2a, 0xaf, 0x50, 0x6e, 0x28, 0xa1, + 0x79, 0x08, 0xe1, 0x24, 0xa5, 0x98, 0x53, 0x72, 0x8a, 0x79, 0xc7, 0x70, 0x40, 0xb7, 0x16, 0x36, + 0xb6, 0x64, 0xc0, 0xdd, 0xf7, 0xb0, 0x2e, 0x8f, 0x35, 0x9f, 0xc0, 0x3b, 0x71, 0x92, 0xe3, 0x79, + 0x4c, 0x5a, 0x9a, 0x65, 0x5d, 0x5c, 0x3a, 0x0f, 0xaa, 0xc9, 0xa4, 0x63, 0xa4, 0xaa, 0xa6, 0x03, + 0xf7, 0xf0, 0x84, 0xc7, 0x39, 0x6d, 0x01, 0xab, 0x7d, 0x71, 0xe9, 0xb4, 0x2a, 0xdf, 0x40, 0x72, + 0xe1, 0x98, 0xcc, 0x59, 0x46, 0x49, 0x4b, 0xff, 0xd7, 0xf1, 0x42, 0x72, 0xcb, 0xf8, 0xf8, 0xd5, + 0xd6, 0xfa, 0x86, 0x78, 0x86, 0xe0, 0xf9, 0x75, 0x69, 0x83, 0x9b, 0xd2, 0x06, 0x3f, 0x4b, 0x1b, + 0x7c, 0x5a, 0xd9, 0xda, 0xcd, 0xca, 0xd6, 0x7e, 0xac, 0x6c, 0xed, 0xcd, 0xd1, 0x72, 0x16, 0x79, + 0x78, 0xc6, 0x3d, 0x42, 0x73, 0x3f, 0x62, 0x7e, 0xc2, 0x08, 0xfd, 0x7b, 0x67, 0xdf, 0xee, 0xc9, + 0x1d, 0x3b, 0xf9, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x3a, 0x14, 0x28, 0x0c, 0xd1, 0x02, 0x00, 0x00, +} + +func (m *DeploymentID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DSeq != 0 { + i = encodeVarintDeployment(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintDeployment(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Deployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CreatedAt != 0 { + i = encodeVarintDeployment(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x20 + } + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintDeployment(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x1a + } + if m.State != 0 { + i = encodeVarintDeployment(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDeployment(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintDeployment(dAtA []byte, offset int, v uint64) int { + offset -= sovDeployment(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DeploymentID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l 
+ l = len(m.Owner) + if l > 0 { + n += 1 + l + sovDeployment(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovDeployment(uint64(m.DSeq)) + } + return n +} + +func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovDeployment(uint64(l)) + if m.State != 0 { + n += 1 + sovDeployment(uint64(m.State)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovDeployment(uint64(l)) + } + if m.CreatedAt != 0 { + n += 1 + sovDeployment(uint64(m.CreatedAt)) + } + return n +} + +func sovDeployment(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDeployment(x uint64) (n int) { + return sovDeployment(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *DeploymentID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDeployment + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDeployment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDeployment(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeployment + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Deployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDeployment + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDeployment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= Deployment_State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDeployment + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthDeployment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDeployment(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeployment + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDeployment(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDeployment + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDeployment + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDeployment + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDeployment + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDeployment + } + 
depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDeployment + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDeployment = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDeployment = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDeployment = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1/errors.go b/go/node/deployment/v1/errors.go new file mode 100644 index 00000000..ff33ad39 --- /dev/null +++ b/go/node/deployment/v1/errors.go @@ -0,0 +1,80 @@ +package v1 + +import ( + cerrors "cosmossdk.io/errors" +) + +const ( + errNameDoesNotExist uint32 = iota + 1 + errInvalidRequest + errDeploymentExists + errDeploymentNotFound + errDeploymentClosed + errOwnerAcctMissing + errInvalidGroups + errInvalidDeploymentID + errEmptyHash + errInvalidHash + errInternal + errInvalidDeployment + errInvalidGroupID + errGroupNotFound + errGroupClosed + errGroupOpen + errGroupPaused + errGroupNotOpen + errGroupSpec + errInvalidDeposit + errInvalidIDPath + errInvalidParam + errInvalidDeploymentDepositor +) + +var ( + // ErrNameDoesNotExist is the error when name does not exist + ErrNameDoesNotExist = cerrors.Register(ModuleName, errNameDoesNotExist, "Name does not exist") + // ErrInvalidRequest is the error for invalid request + ErrInvalidRequest = cerrors.Register(ModuleName, errInvalidRequest, "Invalid request") + // ErrDeploymentExists is the error when already deployment exists + ErrDeploymentExists = cerrors.Register(ModuleName, errDeploymentExists, "Deployment exists") + // ErrDeploymentNotFound is the error when deployment not found + ErrDeploymentNotFound = cerrors.Register(ModuleName, errDeploymentNotFound, "Deployment not found") + // ErrDeploymentClosed is the error when deployment is closed + ErrDeploymentClosed = cerrors.Register(ModuleName, errDeploymentClosed, "Deployment closed") + // ErrOwnerAcctMissing is the error for owner account missing + ErrOwnerAcctMissing = cerrors.Register(ModuleName, errOwnerAcctMissing, "Owner account missing") + // ErrInvalidGroups is the error when groups are empty + ErrInvalidGroups = cerrors.Register(ModuleName, errInvalidGroups, "Invalid groups") + // ErrInvalidDeploymentID is the error for invalid deployment id + ErrInvalidDeploymentID = cerrors.Register(ModuleName, errInvalidDeploymentID, "Invalid: deployment id") + // ErrEmptyHash is the error when version is empty + ErrEmptyHash = cerrors.Register(ModuleName, errEmptyHash, "Invalid: empty hash") + // ErrInvalidHash is the error when version is invalid + ErrInvalidHash = cerrors.Register(ModuleName, errInvalidHash, "Invalid: deployment hash") + // ErrInternal is the error for internal error + ErrInternal = cerrors.Register(ModuleName, errInternal, "internal error") + // ErrInvalidDeployment = is the error when deployment does not pass validation + ErrInvalidDeployment = cerrors.Register(ModuleName, errInvalidDeployment, "Invalid deployment") + // ErrInvalidGroupID is the error when already deployment exists + ErrInvalidGroupID = cerrors.Register(ModuleName, errInvalidGroupID, "Deployment exists") + // ErrGroupNotFound is the keeper's error for not finding a group + ErrGroupNotFound = cerrors.Register(ModuleName, errGroupNotFound, "Group not found") + // ErrGroupClosed is the error when deployment is closed + ErrGroupClosed = cerrors.Register(ModuleName, 
errGroupClosed, "Group already closed") + // ErrGroupOpen is the error when deployment is closed + ErrGroupOpen = cerrors.Register(ModuleName, errGroupOpen, "Group open") + // ErrGroupPaused is the error when deployment is closed + ErrGroupPaused = cerrors.Register(ModuleName, errGroupPaused, "Group paused") + // ErrGroupNotOpen indicates the Group state has progressed beyond initial Open. + ErrGroupNotOpen = cerrors.Register(ModuleName, errGroupNotOpen, "Group not open") + // ErrGroupSpecInvalid indicates a GroupSpec has invalid configuration + ErrGroupSpecInvalid = cerrors.Register(ModuleName, errGroupSpec, "GroupSpec invalid") + // ErrInvalidDeposit indicates an invalid deposit + ErrInvalidDeposit = cerrors.Register(ModuleName, errInvalidDeposit, "Deposit invalid") + // ErrInvalidIDPath indicates an invalid ID path + ErrInvalidIDPath = cerrors.Register(ModuleName, errInvalidIDPath, "ID path invalid") + // ErrInvalidParam indicates an invalid chain parameter + ErrInvalidParam = cerrors.Register(ModuleName, errInvalidParam, "parameter invalid") + // ErrInvalidDeploymentDepositor indicates an invalid chain parameter + ErrInvalidDeploymentDepositor = cerrors.Register(ModuleName, errInvalidDeploymentDepositor, "invalid deployment depositor") +) diff --git a/go/node/deployment/v1/event.pb.go b/go/node/deployment/v1/event.pb.go new file mode 100644 index 00000000..afd3c505 --- /dev/null +++ b/go/node/deployment/v1/event.pb.go @@ -0,0 +1,1300 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1/event.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// EventDeploymentCreated event is triggered when deployment is created on chain +type EventDeploymentCreated struct { + ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash" yaml:"hash"` +} + +func (m *EventDeploymentCreated) Reset() { *m = EventDeploymentCreated{} } +func (m *EventDeploymentCreated) String() string { return proto.CompactTextString(m) } +func (*EventDeploymentCreated) ProtoMessage() {} +func (*EventDeploymentCreated) Descriptor() ([]byte, []int) { + return fileDescriptor_8af18b536d600791, []int{0} +} +func (m *EventDeploymentCreated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventDeploymentCreated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventDeploymentCreated.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventDeploymentCreated) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventDeploymentCreated.Merge(m, src) +} +func (m *EventDeploymentCreated) XXX_Size() int { + return m.Size() +} +func (m *EventDeploymentCreated) XXX_DiscardUnknown() { + xxx_messageInfo_EventDeploymentCreated.DiscardUnknown(m) +} + +var xxx_messageInfo_EventDeploymentCreated proto.InternalMessageInfo + +func (m *EventDeploymentCreated) GetID() DeploymentID { + if m != nil { + return m.ID + } + return DeploymentID{} +} + +func (m *EventDeploymentCreated) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +// EventDeploymentUpdated is triggered when deployment is updated on chain +type EventDeploymentUpdated struct { + ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash" yaml:"hash"` +} + +func (m *EventDeploymentUpdated) Reset() { *m = EventDeploymentUpdated{} } +func (m *EventDeploymentUpdated) String() string { return proto.CompactTextString(m) } +func (*EventDeploymentUpdated) ProtoMessage() {} +func (*EventDeploymentUpdated) Descriptor() ([]byte, []int) { + return fileDescriptor_8af18b536d600791, []int{1} +} +func (m *EventDeploymentUpdated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventDeploymentUpdated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventDeploymentUpdated.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventDeploymentUpdated) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventDeploymentUpdated.Merge(m, src) +} +func (m *EventDeploymentUpdated) XXX_Size() int { + return m.Size() +} +func (m *EventDeploymentUpdated) XXX_DiscardUnknown() { + xxx_messageInfo_EventDeploymentUpdated.DiscardUnknown(m) +} + +var xxx_messageInfo_EventDeploymentUpdated proto.InternalMessageInfo + +func (m *EventDeploymentUpdated) GetID() DeploymentID { + if m != nil { + return m.ID + } + return DeploymentID{} +} + +func (m *EventDeploymentUpdated) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +// EventDeploymentClosed is triggered when deployment is closed on chain +type EventDeploymentClosed struct { + ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" 
yaml:"id"` +} + +func (m *EventDeploymentClosed) Reset() { *m = EventDeploymentClosed{} } +func (m *EventDeploymentClosed) String() string { return proto.CompactTextString(m) } +func (*EventDeploymentClosed) ProtoMessage() {} +func (*EventDeploymentClosed) Descriptor() ([]byte, []int) { + return fileDescriptor_8af18b536d600791, []int{2} +} +func (m *EventDeploymentClosed) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventDeploymentClosed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventDeploymentClosed.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventDeploymentClosed) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventDeploymentClosed.Merge(m, src) +} +func (m *EventDeploymentClosed) XXX_Size() int { + return m.Size() +} +func (m *EventDeploymentClosed) XXX_DiscardUnknown() { + xxx_messageInfo_EventDeploymentClosed.DiscardUnknown(m) +} + +var xxx_messageInfo_EventDeploymentClosed proto.InternalMessageInfo + +func (m *EventDeploymentClosed) GetID() DeploymentID { + if m != nil { + return m.ID + } + return DeploymentID{} +} + +// EventGroupStarted is triggered when deployment group is started +type EventGroupStarted struct { + ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *EventGroupStarted) Reset() { *m = EventGroupStarted{} } +func (m *EventGroupStarted) String() string { return proto.CompactTextString(m) } +func (*EventGroupStarted) ProtoMessage() {} +func (*EventGroupStarted) Descriptor() ([]byte, []int) { + return fileDescriptor_8af18b536d600791, []int{3} +} +func (m *EventGroupStarted) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventGroupStarted) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventGroupStarted.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventGroupStarted) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventGroupStarted.Merge(m, src) +} +func (m *EventGroupStarted) XXX_Size() int { + return m.Size() +} +func (m *EventGroupStarted) XXX_DiscardUnknown() { + xxx_messageInfo_EventGroupStarted.DiscardUnknown(m) +} + +var xxx_messageInfo_EventGroupStarted proto.InternalMessageInfo + +func (m *EventGroupStarted) GetID() GroupID { + if m != nil { + return m.ID + } + return GroupID{} +} + +// EventGroupPaused is triggered when deployment group is paused +type EventGroupPaused struct { + ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *EventGroupPaused) Reset() { *m = EventGroupPaused{} } +func (m *EventGroupPaused) String() string { return proto.CompactTextString(m) } +func (*EventGroupPaused) ProtoMessage() {} +func (*EventGroupPaused) Descriptor() ([]byte, []int) { + return fileDescriptor_8af18b536d600791, []int{4} +} +func (m *EventGroupPaused) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventGroupPaused) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventGroupPaused.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventGroupPaused) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_EventGroupPaused.Merge(m, src) +} +func (m *EventGroupPaused) XXX_Size() int { + return m.Size() +} +func (m *EventGroupPaused) XXX_DiscardUnknown() { + xxx_messageInfo_EventGroupPaused.DiscardUnknown(m) +} + +var xxx_messageInfo_EventGroupPaused proto.InternalMessageInfo + +func (m *EventGroupPaused) GetID() GroupID { + if m != nil { + return m.ID + } + return GroupID{} +} + +// EventGroupClosed is triggered when deployment group is closed +type EventGroupClosed struct { + ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *EventGroupClosed) Reset() { *m = EventGroupClosed{} } +func (m *EventGroupClosed) String() string { return proto.CompactTextString(m) } +func (*EventGroupClosed) ProtoMessage() {} +func (*EventGroupClosed) Descriptor() ([]byte, []int) { + return fileDescriptor_8af18b536d600791, []int{5} +} +func (m *EventGroupClosed) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventGroupClosed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventGroupClosed.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventGroupClosed) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventGroupClosed.Merge(m, src) +} +func (m *EventGroupClosed) XXX_Size() int { + return m.Size() +} +func (m *EventGroupClosed) XXX_DiscardUnknown() { + xxx_messageInfo_EventGroupClosed.DiscardUnknown(m) +} + +var xxx_messageInfo_EventGroupClosed proto.InternalMessageInfo + +func (m *EventGroupClosed) GetID() GroupID { + if m != nil { + return m.ID + } + return GroupID{} +} + +func init() { + proto.RegisterType((*EventDeploymentCreated)(nil), "akash.deployment.v1.EventDeploymentCreated") + proto.RegisterType((*EventDeploymentUpdated)(nil), "akash.deployment.v1.EventDeploymentUpdated") + proto.RegisterType((*EventDeploymentClosed)(nil), "akash.deployment.v1.EventDeploymentClosed") + proto.RegisterType((*EventGroupStarted)(nil), "akash.deployment.v1.EventGroupStarted") + proto.RegisterType((*EventGroupPaused)(nil), "akash.deployment.v1.EventGroupPaused") + proto.RegisterType((*EventGroupClosed)(nil), "akash.deployment.v1.EventGroupClosed") +} + +func init() { proto.RegisterFile("akash/deployment/v1/event.proto", fileDescriptor_8af18b536d600791) } + +var fileDescriptor_8af18b536d600791 = []byte{ + // 335 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0xd4, + 0x4f, 0x2d, 0x4b, 0xcd, 0x2b, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x06, 0x2b, 0xd0, + 0x43, 0x28, 0xd0, 0x2b, 0x33, 0x94, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xcb, 0xeb, 0x83, 0x58, + 0x10, 0xa5, 0x52, 0x2a, 0xd8, 0xcc, 0x42, 0xd2, 0x08, 0x51, 0x85, 0xd5, 0xc6, 0xf4, 0xa2, 0xfc, + 0xd2, 0x02, 0x88, 0x02, 0xa5, 0xb9, 0x8c, 0x5c, 0x62, 0xae, 0x20, 0x17, 0xb8, 0xc0, 0x95, 0x38, + 0x17, 0xa5, 0x26, 0x96, 0xa4, 0xa6, 0x08, 0x05, 0x70, 0x31, 0x65, 0xa6, 0x48, 0x30, 0x2a, 0x30, + 0x6a, 0x70, 0x1b, 0x29, 0xea, 0x61, 0x71, 0x99, 0x1e, 0x42, 0x8f, 0xa7, 0x8b, 0x93, 0xec, 0x89, + 0x7b, 0xf2, 0x0c, 0x8f, 0xee, 0xc9, 0x33, 0x79, 0xba, 0xbc, 0xba, 0x27, 0xcf, 0x94, 0x99, 0xf2, + 0xe9, 0x9e, 0x3c, 0x67, 0x65, 0x62, 0x6e, 0x8e, 0x95, 0x52, 0x66, 0x8a, 0x52, 0x10, 0x53, 0x66, + 0x8a, 0x90, 0x36, 0x17, 0x4b, 0x46, 0x62, 0x71, 0x86, 0x04, 0x93, 
0x02, 0xa3, 0x06, 0x8f, 0x93, + 0xf8, 0xab, 0x7b, 0xf2, 0x60, 0xfe, 0xa7, 0x7b, 0xf2, 0xdc, 0x10, 0x85, 0x20, 0x9e, 0x52, 0x10, + 0x58, 0xd0, 0x8a, 0xe5, 0xc5, 0x02, 0x79, 0x06, 0x6c, 0xee, 0x0b, 0x2d, 0x48, 0x19, 0x3c, 0xee, + 0xcb, 0xe7, 0x12, 0x45, 0x0f, 0xbe, 0x9c, 0xfc, 0x62, 0x5a, 0xb8, 0x0e, 0x6a, 0x61, 0x1a, 0x97, + 0x20, 0xd8, 0x42, 0x77, 0x50, 0x24, 0x06, 0x97, 0x24, 0x16, 0x81, 0x82, 0xc2, 0x1b, 0xc9, 0x32, + 0x19, 0xac, 0x96, 0x81, 0x95, 0x93, 0x62, 0x4f, 0x2a, 0x97, 0x00, 0xc2, 0x9e, 0x80, 0xc4, 0xd2, + 0x62, 0x3a, 0x58, 0x03, 0x0d, 0x3a, 0xea, 0x5b, 0xe3, 0x64, 0x7d, 0xe2, 0x91, 0x1c, 0xe3, 0x85, + 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, + 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0x8a, 0x05, 0xd9, 0xe9, 0x7a, 0x89, 0xd9, 0x25, 0x7a, 0x29, 0xa9, + 0x65, 0xfa, 0xe9, 0xf9, 0xfa, 0x79, 0xf9, 0x29, 0xa9, 0xa8, 0xf9, 0x25, 0x89, 0x0d, 0x9c, 0x55, + 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x89, 0x40, 0x49, 0x5e, 0xbf, 0x03, 0x00, 0x00, +} + +func (m *EventDeploymentCreated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventDeploymentCreated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventDeploymentCreated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintEvent(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventDeploymentUpdated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventDeploymentUpdated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventDeploymentUpdated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintEvent(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventDeploymentClosed) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventDeploymentClosed) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventDeploymentClosed) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventGroupStarted) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventGroupStarted) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventGroupStarted) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventGroupPaused) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventGroupPaused) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventGroupPaused) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventGroupClosed) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventGroupClosed) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventGroupClosed) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintEvent(dAtA []byte, offset int, v uint64) int { + offset -= sovEvent(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EventDeploymentCreated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovEvent(uint64(l)) + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovEvent(uint64(l)) + } + return n +} + +func (m *EventDeploymentUpdated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovEvent(uint64(l)) + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovEvent(uint64(l)) + } + return n +} + +func (m *EventDeploymentClosed) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovEvent(uint64(l)) + return n +} + +func (m *EventGroupStarted) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovEvent(uint64(l)) + return n +} + +func (m *EventGroupPaused) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovEvent(uint64(l)) + return n +} + +func (m *EventGroupClosed) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovEvent(uint64(l)) + return n +} + +func sovEvent(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvent(x uint64) (n int) { + return sovEvent(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m 
*EventDeploymentCreated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventDeploymentCreated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventDeploymentCreated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventDeploymentUpdated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventDeploymentUpdated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventDeploymentUpdated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventDeploymentClosed) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventDeploymentClosed: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventDeploymentClosed: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventGroupStarted) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventGroupStarted: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventGroupStarted: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + 
if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventGroupPaused) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventGroupPaused: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventGroupPaused: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventGroupClosed) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventGroupClosed: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventGroupClosed: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func skipEvent(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvent + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvent + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvent + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvent = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvent = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvent = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1/group.go b/go/node/deployment/v1/group.go new file mode 100644 index 00000000..b7562379 --- /dev/null +++ b/go/node/deployment/v1/group.go @@ -0,0 +1,47 @@ +package v1 + +import ( + "fmt" + + sdkerrors "cosmossdk.io/errors" + dsdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// MakeGroupID returns GroupID instance with provided deployment details +// and group sequence number. +func MakeGroupID(id DeploymentID, gseq uint32) GroupID { + return GroupID{ + Owner: id.Owner, + DSeq: id.DSeq, + GSeq: gseq, + } +} + +// DeploymentID method returns DeploymentID details with specific group details +func (id GroupID) DeploymentID() DeploymentID { + return DeploymentID{ + Owner: id.Owner, + DSeq: id.DSeq, + } +} + +// Equals method compares specific group with provided group +func (id GroupID) Equals(other GroupID) bool { + return id.DeploymentID().Equals(other.DeploymentID()) && id.GSeq == other.GSeq +} + +// Validate method for GroupID and returns nil +func (id GroupID) Validate() error { + if err := id.DeploymentID().Validate(); err != nil { + return sdkerrors.Wrap(err, "GroupID: Invalid DeploymentID") + } + if id.GSeq == 0 { + return sdkerrors.Wrap(dsdkerrors.ErrInvalidSequence, "GroupID: Invalid Group Sequence") + } + return nil +} + +// String method provides human-readable representation of GroupID. +func (id GroupID) String() string { + return fmt.Sprintf("%s/%d", id.DeploymentID(), id.GSeq) +} diff --git a/go/node/deployment/v1/group.pb.go b/go/node/deployment/v1/group.pb.go new file mode 100644 index 00000000..7080e52c --- /dev/null +++ b/go/node/deployment/v1/group.pb.go @@ -0,0 +1,395 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: akash/deployment/v1/group.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GroupID stores owner, deployment sequence number and group sequence number +type GroupID struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` + GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` +} + +func (m *GroupID) Reset() { *m = GroupID{} } +func (*GroupID) ProtoMessage() {} +func (*GroupID) Descriptor() ([]byte, []int) { + return fileDescriptor_68e5d2321fa62ce8, []int{0} +} +func (m *GroupID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GroupID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GroupID) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupID.Merge(m, src) +} +func (m *GroupID) XXX_Size() int { + return m.Size() +} +func (m *GroupID) XXX_DiscardUnknown() { + xxx_messageInfo_GroupID.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupID proto.InternalMessageInfo + +func (m *GroupID) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *GroupID) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *GroupID) GetGSeq() uint32 { + if m != nil { + return m.GSeq + } + return 0 +} + +func init() { + proto.RegisterType((*GroupID)(nil), "akash.deployment.v1.GroupID") +} + +func init() { proto.RegisterFile("akash/deployment/v1/group.proto", fileDescriptor_68e5d2321fa62ce8) } + +var fileDescriptor_68e5d2321fa62ce8 = []byte{ + // 293 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0xd4, + 0x4f, 0x2f, 0xca, 0x2f, 0x2d, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x06, 0x2b, 0xd0, + 0x43, 0x28, 0xd0, 0x2b, 0x33, 0x94, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xcb, 0xeb, 0x83, 0x58, + 0x10, 0xa5, 0x52, 0x92, 0xc9, 0xf9, 0xc5, 0xb9, 0xf9, 0xc5, 0xf1, 0x10, 0x09, 0x08, 0x07, 0x22, + 0xa5, 0x74, 0x8c, 0x91, 0x8b, 0xdd, 0x1d, 0x64, 0xaa, 0xa7, 0x8b, 0x90, 0x3b, 0x17, 0x6b, 0x7e, + 0x79, 0x5e, 0x6a, 0x91, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x93, 0xe1, 0xab, 0x7b, 0xf2, 0x10, + 0x81, 0x4f, 0xf7, 0xe4, 0x79, 0x2a, 0x13, 0x73, 0x73, 0xac, 0x94, 0xc0, 0x5c, 0xa5, 0x4b, 0x5b, + 0x74, 0x45, 0xa0, 0xa6, 0x38, 0xa6, 0xa4, 0x14, 0xa5, 0x16, 0x17, 0x07, 0x97, 0x14, 0x65, 0xe6, + 0xa5, 0x07, 0x41, 0x94, 0x0b, 0x19, 0x73, 0xb1, 0xa4, 0x14, 0xa7, 0x16, 0x4a, 0x30, 0x29, 0x30, + 0x6a, 0xb0, 0x38, 0xc9, 0x3f, 0xba, 0x27, 0xcf, 0xe2, 0x12, 0x9c, 0x5a, 
0xf8, 0xea, 0x9e, 0x3c, + 0x58, 0xfc, 0xd3, 0x3d, 0x79, 0x6e, 0x88, 0x71, 0x20, 0x9e, 0x52, 0x10, 0x58, 0x10, 0xa4, 0x29, + 0x1d, 0xa4, 0x89, 0x59, 0x81, 0x51, 0x83, 0x17, 0xa2, 0xc9, 0x1d, 0xaa, 0x29, 0x1d, 0x45, 0x53, + 0x3a, 0x44, 0x13, 0x88, 0xb2, 0xe2, 0x98, 0xb1, 0x40, 0x9e, 0xe1, 0xc5, 0x02, 0x79, 0x06, 0x27, + 0xeb, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, + 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x52, 0x2c, 0xc8, 0x4e, 0xd7, + 0x4b, 0xcc, 0x2e, 0xd1, 0x4b, 0x49, 0x2d, 0xd3, 0x4f, 0xcf, 0xd7, 0xcf, 0xcb, 0x4f, 0x49, 0x45, + 0x0d, 0xd7, 0x24, 0x36, 0x70, 0x60, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd2, 0xaf, 0xac, + 0x4f, 0x75, 0x01, 0x00, 0x00, +} + +func (m *GroupID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.GSeq != 0 { + i = encodeVarintGroup(dAtA, i, uint64(m.GSeq)) + i-- + dAtA[i] = 0x18 + } + if m.DSeq != 0 { + i = encodeVarintGroup(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintGroup(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGroup(dAtA []byte, offset int, v uint64) int { + offset -= sovGroup(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GroupID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovGroup(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovGroup(uint64(m.DSeq)) + } + if m.GSeq != 0 { + n += 1 + sovGroup(uint64(m.GSeq)) + } + return n +} + +func sovGroup(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGroup(x uint64) (n int) { + return sovGroup(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GroupID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGroup + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } 
+ m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) + } + m.GSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGroup(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroup + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGroup(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGroup + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGroup + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGroup = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGroup = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1/key.go b/go/node/deployment/v1/key.go new file mode 100644 index 00000000..d7bdbd19 --- /dev/null +++ b/go/node/deployment/v1/key.go @@ -0,0 +1,24 @@ +package v1 + +const ( + // ModuleName is the module name constant used in many places + ModuleName = "deployment" + + // StoreKey is the store key string for deployment + StoreKey = ModuleName + + // RouterKey is the message route for deployment + RouterKey = ModuleName +) + +func DeploymentPrefix() []byte { + return []byte{0x01} +} + +func GroupPrefix() []byte { + return []byte{0x02} +} + +func ParamsPrefix() []byte { + return []byte{0x03} +} diff --git a/go/node/deployment/v1/msg.pb.go b/go/node/deployment/v1/msg.pb.go new file mode 100644 index 00000000..8c29710b --- /dev/null +++ b/go/node/deployment/v1/msg.pb.go @@ -0,0 +1,559 @@ +// Code 
generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1/msg.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgDepositDeployment deposits more funds into the deposit account +type MsgDepositDeployment struct { + ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` + Amount types.Coin `protobuf:"bytes,2,opt,name=amount,proto3" json:"amount" yaml:"amount"` + // Depositor pays for the deposit + Depositor string `protobuf:"bytes,3,opt,name=depositor,proto3" json:"depositor" yaml:"depositor"` +} + +func (m *MsgDepositDeployment) Reset() { *m = MsgDepositDeployment{} } +func (m *MsgDepositDeployment) String() string { return proto.CompactTextString(m) } +func (*MsgDepositDeployment) ProtoMessage() {} +func (*MsgDepositDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_d5db5b4dd1de0f0c, []int{0} +} +func (m *MsgDepositDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDepositDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDepositDeployment.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDepositDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDepositDeployment.Merge(m, src) +} +func (m *MsgDepositDeployment) XXX_Size() int { + return m.Size() +} +func (m *MsgDepositDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDepositDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDepositDeployment proto.InternalMessageInfo + +func (m *MsgDepositDeployment) GetID() DeploymentID { + if m != nil { + return m.ID + } + return DeploymentID{} +} + +func (m *MsgDepositDeployment) GetAmount() types.Coin { + if m != nil { + return m.Amount + } + return types.Coin{} +} + +func (m *MsgDepositDeployment) GetDepositor() string { + if m != nil { + return m.Depositor + } + return "" +} + +// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. 
+type MsgDepositDeploymentResponse struct { +} + +func (m *MsgDepositDeploymentResponse) Reset() { *m = MsgDepositDeploymentResponse{} } +func (m *MsgDepositDeploymentResponse) String() string { return proto.CompactTextString(m) } +func (*MsgDepositDeploymentResponse) ProtoMessage() {} +func (*MsgDepositDeploymentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_d5db5b4dd1de0f0c, []int{1} +} +func (m *MsgDepositDeploymentResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDepositDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDepositDeploymentResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDepositDeploymentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDepositDeploymentResponse.Merge(m, src) +} +func (m *MsgDepositDeploymentResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgDepositDeploymentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDepositDeploymentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDepositDeploymentResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgDepositDeployment)(nil), "akash.deployment.v1.MsgDepositDeployment") + proto.RegisterType((*MsgDepositDeploymentResponse)(nil), "akash.deployment.v1.MsgDepositDeploymentResponse") +} + +func init() { proto.RegisterFile("akash/deployment/v1/msg.proto", fileDescriptor_d5db5b4dd1de0f0c) } + +var fileDescriptor_d5db5b4dd1de0f0c = []byte{ + // 375 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x3f, 0x6b, 0xe3, 0x30, + 0x18, 0xc6, 0x6d, 0xdf, 0x11, 0x88, 0x8f, 0x83, 0xc3, 0x97, 0x21, 0x09, 0x17, 0x29, 0x31, 0x37, + 0x64, 0xa9, 0x84, 0xdb, 0xa9, 0xe9, 0x54, 0xd7, 0x4b, 0x86, 0x42, 0x71, 0x87, 0x42, 0x97, 0xe2, + 0x44, 0xc2, 0x15, 0x89, 0x2d, 0x63, 0xb9, 0x86, 0x7c, 0x8b, 0xee, 0x5d, 0xfa, 0x21, 0xfa, 0x21, + 0x32, 0x86, 0x4e, 0x9d, 0x44, 0x71, 0x96, 0x92, 0x31, 0x9f, 0xa0, 0xc4, 0x32, 0x71, 0x0b, 0xd9, + 0xf4, 0xbe, 0xbf, 0xf7, 0xcf, 0xf3, 0xe8, 0x35, 0x7b, 0xc1, 0x2c, 0x10, 0xf7, 0x98, 0xd0, 0x64, + 0xce, 0x17, 0x11, 0x8d, 0x33, 0x9c, 0x3b, 0x38, 0x12, 0x21, 0x4a, 0x52, 0x9e, 0x71, 0xeb, 0x6f, + 0x89, 0x51, 0x8d, 0x51, 0xee, 0x74, 0x5b, 0x21, 0x0f, 0x79, 0xc9, 0xf1, 0xee, 0xa5, 0x4a, 0xbb, + 0x9d, 0x29, 0x17, 0x11, 0x17, 0x77, 0x0a, 0xa8, 0xa0, 0x42, 0x40, 0x45, 0x78, 0x12, 0x08, 0x8a, + 0x73, 0x67, 0x42, 0xb3, 0xc0, 0xc1, 0x53, 0xce, 0xe2, 0x8a, 0xff, 0x3f, 0x24, 0xe2, 0xcb, 0xce, + 0xb2, 0xca, 0x7e, 0x32, 0xcc, 0xd6, 0xa5, 0x08, 0x3d, 0x9a, 0x70, 0xc1, 0x32, 0x6f, 0x8f, 0xad, + 0x2b, 0xd3, 0x60, 0xa4, 0xad, 0xf7, 0xf5, 0xe1, 0xaf, 0xe3, 0x01, 0x3a, 0xa0, 0x18, 0xd5, 0xc5, + 0x63, 0xcf, 0xed, 0x2d, 0x25, 0xd4, 0x0a, 0x09, 0x8d, 0xb1, 0xb7, 0x91, 0xd0, 0x60, 0x64, 0x2b, + 0x61, 0x73, 0x11, 0x44, 0xf3, 0x91, 0xcd, 0x88, 0xed, 0x1b, 0x8c, 0x58, 0xbe, 0xd9, 0x08, 0x22, + 0xfe, 0x10, 0x67, 0x6d, 0xa3, 0x9c, 0xda, 0x41, 0x95, 0x9f, 0x9d, 0x03, 0x54, 0x39, 0x40, 0x17, + 0x9c, 0xc5, 0x2e, 0xdc, 0x4d, 0xdb, 0x48, 0x58, 0x35, 0x6c, 0x25, 0xfc, 0xad, 0x66, 0xa9, 0xd8, + 0xf6, 0x2b, 0x60, 0xdd, 0x98, 0x4d, 0xa2, 0xa4, 0xf3, 0xb4, 0xfd, 0xa3, 0xaf, 0x0f, 0x9b, 0xee, + 0xe9, 0x46, 0xc2, 0x3a, 0xb9, 0x95, 0xf0, 0x8f, 0x6a, 0xdd, 0xa7, 0xec, 0xd7, 0x97, 0xa3, 0x56, + 0xb5, 0xfa, 0x9c, 0x90, 0x94, 0x0a, 0x71, 0x9d, 0xa5, 0x2c, 0x0e, 0xfd, 0xba, 0x6d, 0xf4, 
0xf3, + 0xe3, 0x19, 0x6a, 0x36, 0x30, 0xff, 0x1d, 0xfa, 0x1c, 0x9f, 0x8a, 0x84, 0xc7, 0x82, 0xba, 0x67, + 0xcb, 0x02, 0xe8, 0xab, 0x02, 0xe8, 0xef, 0x05, 0xd0, 0x1f, 0xd7, 0x40, 0x5b, 0xad, 0x81, 0xf6, + 0xb6, 0x06, 0xda, 0xed, 0x20, 0x99, 0x85, 0x28, 0x98, 0x65, 0x88, 0xd0, 0x1c, 0x87, 0x1c, 0xc7, + 0x9c, 0xd0, 0xef, 0xb7, 0x98, 0x34, 0xca, 0x0b, 0x9c, 0x7c, 0x06, 0x00, 0x00, 0xff, 0xff, 0x5b, + 0xe8, 0xf7, 0xae, 0x2e, 0x02, 0x00, 0x00, +} + +func (m *MsgDepositDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDepositDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDepositDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Depositor) > 0 { + i -= len(m.Depositor) + copy(dAtA[i:], m.Depositor) + i = encodeVarintMsg(dAtA, i, uint64(len(m.Depositor))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Amount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgDepositDeploymentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDepositDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDepositDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintMsg(dAtA []byte, offset int, v uint64) int { + offset -= sovMsg(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgDepositDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovMsg(uint64(l)) + l = m.Amount.Size() + n += 1 + l + sovMsg(uint64(l)) + l = len(m.Depositor) + if l > 0 { + n += 1 + l + sovMsg(uint64(l)) + } + return n +} + +func (m *MsgDepositDeploymentResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovMsg(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMsg(x uint64) (n int) { + return sovMsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgDepositDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDepositDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDepositDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Depositor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Depositor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDepositDeploymentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDepositDeploymentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDepositDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMsg(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return 0, ErrIntOverflowMsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMsg + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMsg + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMsg + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMsg = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMsg = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMsg = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1/msgs.go b/go/node/deployment/v1/msgs.go new file mode 100644 index 00000000..7bbe13ec --- /dev/null +++ b/go/node/deployment/v1/msgs.go @@ -0,0 +1,13 @@ +package v1 + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// GetSignBytes encodes the message for signing +// +// Deprecated: GetSignBytes is deprecated +func (msg *MsgDepositDeployment) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg)) +} + diff --git a/go/node/deployment/v1beta1/authz.pb.go b/go/node/deployment/v1beta1/authz.pb.go deleted file mode 100644 index 4753440d..00000000 --- a/go/node/deployment/v1beta1/authz.pb.go +++ /dev/null @@ -1,333 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta1/authz.proto - -package v1beta1 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - _ "github.com/regen-network/cosmos-proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from -// the granter's account for a deployment. -type DepositDeploymentAuthorization struct { - // SpendLimit is the amount the grantee is authorized to spend from the granter's account for - // the purpose of deployment. 
- SpendLimit types.Coin `protobuf:"bytes,1,opt,name=spend_limit,json=spendLimit,proto3" json:"spend_limit"` -} - -func (m *DepositDeploymentAuthorization) Reset() { *m = DepositDeploymentAuthorization{} } -func (m *DepositDeploymentAuthorization) String() string { return proto.CompactTextString(m) } -func (*DepositDeploymentAuthorization) ProtoMessage() {} -func (*DepositDeploymentAuthorization) Descriptor() ([]byte, []int) { - return fileDescriptor_a27e07d58aa17308, []int{0} -} -func (m *DepositDeploymentAuthorization) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DepositDeploymentAuthorization) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DepositDeploymentAuthorization.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DepositDeploymentAuthorization) XXX_Merge(src proto.Message) { - xxx_messageInfo_DepositDeploymentAuthorization.Merge(m, src) -} -func (m *DepositDeploymentAuthorization) XXX_Size() int { - return m.Size() -} -func (m *DepositDeploymentAuthorization) XXX_DiscardUnknown() { - xxx_messageInfo_DepositDeploymentAuthorization.DiscardUnknown(m) -} - -var xxx_messageInfo_DepositDeploymentAuthorization proto.InternalMessageInfo - -func (m *DepositDeploymentAuthorization) GetSpendLimit() types.Coin { - if m != nil { - return m.SpendLimit - } - return types.Coin{} -} - -func init() { - proto.RegisterType((*DepositDeploymentAuthorization)(nil), "akash.deployment.v1beta1.DepositDeploymentAuthorization") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta1/authz.proto", fileDescriptor_a27e07d58aa17308) -} - -var fileDescriptor_a27e07d58aa17308 = []byte{ - // 277 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, - 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0x4f, 0x2c, 0x2d, 0xc9, 0xa8, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, - 0x17, 0x92, 0x00, 0xab, 0xd2, 0x43, 0xa8, 0xd2, 0x83, 0xaa, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, - 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 0xa5, 0x24, 0x93, 0xf3, 0x8b, 0x73, 0xf3, 0x8b, 0xe3, - 0x21, 0x12, 0x10, 0x0e, 0x54, 0x4a, 0x0e, 0xc2, 0xd3, 0x4f, 0x4a, 0x2c, 0x4e, 0x85, 0xdb, 0x95, - 0x9c, 0x9f, 0x99, 0x07, 0x91, 0x57, 0x6a, 0x61, 0xe4, 0x92, 0x73, 0x49, 0x2d, 0xc8, 0x2f, 0xce, - 0x2c, 0x71, 0x81, 0x5b, 0xe7, 0x58, 0x5a, 0x92, 0x91, 0x5f, 0x94, 0x59, 0x95, 0x58, 0x92, 0x99, - 0x9f, 0x27, 0xe4, 0xcf, 0xc5, 0x5d, 0x5c, 0x90, 0x9a, 0x97, 0x12, 0x9f, 0x93, 0x99, 0x9b, 0x59, - 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x6d, 0x24, 0xa9, 0x07, 0xb5, 0x06, 0x64, 0x30, 0xcc, 0x79, - 0x7a, 0xce, 0xf9, 0x99, 0x79, 0x4e, 0xc2, 0x27, 0xee, 0xc9, 0x33, 0xbc, 0xba, 0x27, 0x8f, 0xac, - 0x2b, 0x88, 0x0b, 0xcc, 0xf1, 0x01, 0xb1, 0xad, 0x04, 0x2f, 0x6d, 0xd1, 0xe5, 0x45, 0xb1, 0xc3, - 0x29, 0xfc, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, - 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x6c, 0xd3, 0x33, 0x4b, - 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xc1, 0xc1, 0xa2, 0x9b, 0x97, 0x5a, 0x52, 0x9e, - 0x5f, 0x94, 0x0d, 0xe5, 0x25, 0x16, 0x64, 0xea, 0xa7, 0xe7, 0xeb, 0xe7, 0xe5, 0xa7, 0xa4, 0x62, - 0x09, 0xd6, 0x24, 0x36, 0xb0, 0x37, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0x1e, 0xa4, - 0x62, 0x79, 0x01, 0x00, 0x00, -} - 
-func (m *DepositDeploymentAuthorization) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DepositDeploymentAuthorization) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DepositDeploymentAuthorization) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.SpendLimit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAuthz(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintAuthz(dAtA []byte, offset int, v uint64) int { - offset -= sovAuthz(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *DepositDeploymentAuthorization) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.SpendLimit.Size() - n += 1 + l + sovAuthz(uint64(l)) - return n -} - -func sovAuthz(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozAuthz(x uint64) (n int) { - return sovAuthz(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *DepositDeploymentAuthorization) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuthz - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DepositDeploymentAuthorization: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DepositDeploymentAuthorization: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpendLimit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuthz - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAuthz - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAuthz - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SpendLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuthz(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAuthz - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAuthz(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuthz - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 
{ - if shift >= 64 { - return 0, ErrIntOverflowAuthz - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuthz - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthAuthz - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupAuthz - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthAuthz - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthAuthz = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAuthz = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupAuthz = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta1/codec.go b/go/node/deployment/v1beta1/codec.go deleted file mode 100644 index 7b5f7b99..00000000 --- a/go/node/deployment/v1beta1/codec.go +++ /dev/null @@ -1,58 +0,0 @@ -package v1beta1 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/msgservice" - "github.com/cosmos/cosmos-sdk/x/authz" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/deployment module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/deployment and - // defined at the application level. 
- ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterLegacyAminoCodec register concrete types on codec -func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { - cdc.RegisterConcrete(&MsgCreateDeployment{}, ModuleName+"/"+MsgTypeCreateDeployment, nil) - cdc.RegisterConcrete(&MsgUpdateDeployment{}, ModuleName+"/"+MsgTypeUpdateDeployment, nil) - cdc.RegisterConcrete(&MsgDepositDeployment{}, ModuleName+"/"+MsgTypeDepositDeployment, nil) - cdc.RegisterConcrete(&MsgCloseDeployment{}, ModuleName+"/"+MsgTypeCloseDeployment, nil) - cdc.RegisterConcrete(&MsgCloseGroup{}, ModuleName+"/"+MsgTypeCloseGroup, nil) - cdc.RegisterConcrete(&MsgPauseGroup{}, ModuleName+"/"+MsgTypePauseGroup, nil) - cdc.RegisterConcrete(&MsgStartGroup{}, ModuleName+"/"+MsgTypeStartGroup, nil) -} - -// RegisterInterfaces registers the x/deployment interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil), - &MsgCreateDeployment{}, - &MsgUpdateDeployment{}, - &MsgDepositDeployment{}, - &MsgCloseDeployment{}, - &MsgCloseGroup{}, - &MsgPauseGroup{}, - &MsgStartGroup{}, - ) - registry.RegisterImplementations( - (*authz.Authorization)(nil), - &DepositDeploymentAuthorization{}, - ) - - msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) -} diff --git a/go/node/deployment/v1beta1/deployment.pb.go b/go/node/deployment/v1beta1/deployment.pb.go deleted file mode 100644 index e6d49e57..00000000 --- a/go/node/deployment/v1beta1/deployment.pb.go +++ /dev/null @@ -1,2806 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta1/deployment.proto - -package v1beta1 - -import ( - context "context" - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of deployment -type Deployment_State int32 - -const ( - // Prefix should start with 0 in enum. 
So declaring dummy state - DeploymentStateInvalid Deployment_State = 0 - // DeploymentActive denotes state for deployment active - DeploymentActive Deployment_State = 1 - // DeploymentClosed denotes state for deployment closed - DeploymentClosed Deployment_State = 2 -) - -var Deployment_State_name = map[int32]string{ - 0: "invalid", - 1: "active", - 2: "closed", -} - -var Deployment_State_value = map[string]int32{ - "invalid": 0, - "active": 1, - "closed": 2, -} - -func (x Deployment_State) String() string { - return proto.EnumName(Deployment_State_name, int32(x)) -} - -func (Deployment_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_bfe50ba12f1404bf, []int{9, 0} -} - -// MsgCreateDeployment defines an SDK message for creating deployment -type MsgCreateDeployment struct { - ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` - Groups []GroupSpec `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups" yaml:"groups"` - Version []byte `protobuf:"bytes,3,opt,name=version,proto3" json:"version" yaml:"version"` - Deposit types.Coin `protobuf:"bytes,4,opt,name=deposit,proto3" json:"deposit" yaml:"deposit"` -} - -func (m *MsgCreateDeployment) Reset() { *m = MsgCreateDeployment{} } -func (m *MsgCreateDeployment) String() string { return proto.CompactTextString(m) } -func (*MsgCreateDeployment) ProtoMessage() {} -func (*MsgCreateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe50ba12f1404bf, []int{0} -} -func (m *MsgCreateDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateDeployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateDeployment.Merge(m, src) -} -func (m *MsgCreateDeployment) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateDeployment proto.InternalMessageInfo - -func (m *MsgCreateDeployment) GetID() DeploymentID { - if m != nil { - return m.ID - } - return DeploymentID{} -} - -func (m *MsgCreateDeployment) GetGroups() []GroupSpec { - if m != nil { - return m.Groups - } - return nil -} - -func (m *MsgCreateDeployment) GetVersion() []byte { - if m != nil { - return m.Version - } - return nil -} - -func (m *MsgCreateDeployment) GetDeposit() types.Coin { - if m != nil { - return m.Deposit - } - return types.Coin{} -} - -// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. 
-type MsgCreateDeploymentResponse struct { -} - -func (m *MsgCreateDeploymentResponse) Reset() { *m = MsgCreateDeploymentResponse{} } -func (m *MsgCreateDeploymentResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateDeploymentResponse) ProtoMessage() {} -func (*MsgCreateDeploymentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe50ba12f1404bf, []int{1} -} -func (m *MsgCreateDeploymentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateDeploymentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateDeploymentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateDeploymentResponse.Merge(m, src) -} -func (m *MsgCreateDeploymentResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateDeploymentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateDeploymentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateDeploymentResponse proto.InternalMessageInfo - -// MsgDepositDeployment deposits more funds into the deposit account -type MsgDepositDeployment struct { - ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` - Amount types.Coin `protobuf:"bytes,2,opt,name=amount,proto3" json:"amount" yaml:"amount"` -} - -func (m *MsgDepositDeployment) Reset() { *m = MsgDepositDeployment{} } -func (m *MsgDepositDeployment) String() string { return proto.CompactTextString(m) } -func (*MsgDepositDeployment) ProtoMessage() {} -func (*MsgDepositDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe50ba12f1404bf, []int{2} -} -func (m *MsgDepositDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDepositDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDepositDeployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDepositDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDepositDeployment.Merge(m, src) -} -func (m *MsgDepositDeployment) XXX_Size() int { - return m.Size() -} -func (m *MsgDepositDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDepositDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDepositDeployment proto.InternalMessageInfo - -func (m *MsgDepositDeployment) GetID() DeploymentID { - if m != nil { - return m.ID - } - return DeploymentID{} -} - -func (m *MsgDepositDeployment) GetAmount() types.Coin { - if m != nil { - return m.Amount - } - return types.Coin{} -} - -// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. 
-type MsgDepositDeploymentResponse struct { -} - -func (m *MsgDepositDeploymentResponse) Reset() { *m = MsgDepositDeploymentResponse{} } -func (m *MsgDepositDeploymentResponse) String() string { return proto.CompactTextString(m) } -func (*MsgDepositDeploymentResponse) ProtoMessage() {} -func (*MsgDepositDeploymentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe50ba12f1404bf, []int{3} -} -func (m *MsgDepositDeploymentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDepositDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDepositDeploymentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDepositDeploymentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDepositDeploymentResponse.Merge(m, src) -} -func (m *MsgDepositDeploymentResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgDepositDeploymentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDepositDeploymentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDepositDeploymentResponse proto.InternalMessageInfo - -// MsgUpdateDeployment defines an SDK message for updating deployment -type MsgUpdateDeployment struct { - ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` - Groups []GroupSpec `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups" yaml:"groups"` - Version []byte `protobuf:"bytes,3,opt,name=version,proto3" json:"version" yaml:"version"` -} - -func (m *MsgUpdateDeployment) Reset() { *m = MsgUpdateDeployment{} } -func (m *MsgUpdateDeployment) String() string { return proto.CompactTextString(m) } -func (*MsgUpdateDeployment) ProtoMessage() {} -func (*MsgUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe50ba12f1404bf, []int{4} -} -func (m *MsgUpdateDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgUpdateDeployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgUpdateDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgUpdateDeployment.Merge(m, src) -} -func (m *MsgUpdateDeployment) XXX_Size() int { - return m.Size() -} -func (m *MsgUpdateDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_MsgUpdateDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgUpdateDeployment proto.InternalMessageInfo - -func (m *MsgUpdateDeployment) GetID() DeploymentID { - if m != nil { - return m.ID - } - return DeploymentID{} -} - -func (m *MsgUpdateDeployment) GetGroups() []GroupSpec { - if m != nil { - return m.Groups - } - return nil -} - -func (m *MsgUpdateDeployment) GetVersion() []byte { - if m != nil { - return m.Version - } - return nil -} - -// MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. 
-type MsgUpdateDeploymentResponse struct { -} - -func (m *MsgUpdateDeploymentResponse) Reset() { *m = MsgUpdateDeploymentResponse{} } -func (m *MsgUpdateDeploymentResponse) String() string { return proto.CompactTextString(m) } -func (*MsgUpdateDeploymentResponse) ProtoMessage() {} -func (*MsgUpdateDeploymentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe50ba12f1404bf, []int{5} -} -func (m *MsgUpdateDeploymentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgUpdateDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgUpdateDeploymentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgUpdateDeploymentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgUpdateDeploymentResponse.Merge(m, src) -} -func (m *MsgUpdateDeploymentResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgUpdateDeploymentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgUpdateDeploymentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgUpdateDeploymentResponse proto.InternalMessageInfo - -// MsgCloseDeployment defines an SDK message for closing deployment -type MsgCloseDeployment struct { - ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCloseDeployment) Reset() { *m = MsgCloseDeployment{} } -func (m *MsgCloseDeployment) String() string { return proto.CompactTextString(m) } -func (*MsgCloseDeployment) ProtoMessage() {} -func (*MsgCloseDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe50ba12f1404bf, []int{6} -} -func (m *MsgCloseDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseDeployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseDeployment.Merge(m, src) -} -func (m *MsgCloseDeployment) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseDeployment proto.InternalMessageInfo - -func (m *MsgCloseDeployment) GetID() DeploymentID { - if m != nil { - return m.ID - } - return DeploymentID{} -} - -// MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. 
-type MsgCloseDeploymentResponse struct { -} - -func (m *MsgCloseDeploymentResponse) Reset() { *m = MsgCloseDeploymentResponse{} } -func (m *MsgCloseDeploymentResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCloseDeploymentResponse) ProtoMessage() {} -func (*MsgCloseDeploymentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe50ba12f1404bf, []int{7} -} -func (m *MsgCloseDeploymentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseDeploymentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseDeploymentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseDeploymentResponse.Merge(m, src) -} -func (m *MsgCloseDeploymentResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseDeploymentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseDeploymentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseDeploymentResponse proto.InternalMessageInfo - -// DeploymentID stores owner and sequence number -type DeploymentID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` -} - -func (m *DeploymentID) Reset() { *m = DeploymentID{} } -func (*DeploymentID) ProtoMessage() {} -func (*DeploymentID) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe50ba12f1404bf, []int{8} -} -func (m *DeploymentID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeploymentID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeploymentID) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentID.Merge(m, src) -} -func (m *DeploymentID) XXX_Size() int { - return m.Size() -} -func (m *DeploymentID) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentID.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentID proto.InternalMessageInfo - -func (m *DeploymentID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *DeploymentID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -// Deployment stores deploymentID, state and version details -type Deployment struct { - DeploymentID DeploymentID `protobuf:"bytes,1,opt,name=deployment_id,json=deploymentId,proto3" json:"id" yaml:"id"` - State Deployment_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.deployment.v1beta1.Deployment_State" json:"state" yaml:"state"` - Version []byte `protobuf:"bytes,3,opt,name=version,proto3" json:"version" yaml:"version"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` -} - -func (m *Deployment) Reset() { *m = Deployment{} } -func (m *Deployment) String() string { return proto.CompactTextString(m) } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe50ba12f1404bf, []int{9} -} -func (m *Deployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Deployment) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Deployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Deployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_Deployment.Merge(m, src) -} -func (m *Deployment) XXX_Size() int { - return m.Size() -} -func (m *Deployment) XXX_DiscardUnknown() { - xxx_messageInfo_Deployment.DiscardUnknown(m) -} - -var xxx_messageInfo_Deployment proto.InternalMessageInfo - -func (m *Deployment) GetDeploymentID() DeploymentID { - if m != nil { - return m.DeploymentID - } - return DeploymentID{} -} - -func (m *Deployment) GetState() Deployment_State { - if m != nil { - return m.State - } - return DeploymentStateInvalid -} - -func (m *Deployment) GetVersion() []byte { - if m != nil { - return m.Version - } - return nil -} - -func (m *Deployment) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -// DeploymentFilters defines filters used to filter deployments -type DeploymentFilters struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *DeploymentFilters) Reset() { *m = DeploymentFilters{} } -func (m *DeploymentFilters) String() string { return proto.CompactTextString(m) } -func (*DeploymentFilters) ProtoMessage() {} -func (*DeploymentFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_bfe50ba12f1404bf, []int{10} -} -func (m *DeploymentFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeploymentFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeploymentFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentFilters.Merge(m, src) -} -func (m *DeploymentFilters) XXX_Size() int { - return m.Size() -} -func (m *DeploymentFilters) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentFilters proto.InternalMessageInfo - -func (m *DeploymentFilters) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *DeploymentFilters) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *DeploymentFilters) GetState() string { - if m != nil { - return m.State - } - return "" -} - -func init() { - proto.RegisterEnum("akash.deployment.v1beta1.Deployment_State", Deployment_State_name, Deployment_State_value) - proto.RegisterType((*MsgCreateDeployment)(nil), "akash.deployment.v1beta1.MsgCreateDeployment") - proto.RegisterType((*MsgCreateDeploymentResponse)(nil), "akash.deployment.v1beta1.MsgCreateDeploymentResponse") - proto.RegisterType((*MsgDepositDeployment)(nil), "akash.deployment.v1beta1.MsgDepositDeployment") - proto.RegisterType((*MsgDepositDeploymentResponse)(nil), "akash.deployment.v1beta1.MsgDepositDeploymentResponse") - proto.RegisterType((*MsgUpdateDeployment)(nil), "akash.deployment.v1beta1.MsgUpdateDeployment") - proto.RegisterType((*MsgUpdateDeploymentResponse)(nil), "akash.deployment.v1beta1.MsgUpdateDeploymentResponse") - 
proto.RegisterType((*MsgCloseDeployment)(nil), "akash.deployment.v1beta1.MsgCloseDeployment") - proto.RegisterType((*MsgCloseDeploymentResponse)(nil), "akash.deployment.v1beta1.MsgCloseDeploymentResponse") - proto.RegisterType((*DeploymentID)(nil), "akash.deployment.v1beta1.DeploymentID") - proto.RegisterType((*Deployment)(nil), "akash.deployment.v1beta1.Deployment") - proto.RegisterType((*DeploymentFilters)(nil), "akash.deployment.v1beta1.DeploymentFilters") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta1/deployment.proto", fileDescriptor_bfe50ba12f1404bf) -} - -var fileDescriptor_bfe50ba12f1404bf = []byte{ - // 863 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x56, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xb6, 0x9d, 0x34, 0xa5, 0xd3, 0xec, 0x12, 0x4c, 0x85, 0xb2, 0x66, 0xe3, 0x09, 0x06, 0xd1, - 0x82, 0x58, 0x5b, 0x9b, 0xe5, 0x87, 0x54, 0x89, 0xc3, 0x66, 0x23, 0x50, 0x0e, 0x95, 0x90, 0xa3, - 0x05, 0x09, 0x90, 0x56, 0x4e, 0x3c, 0x78, 0xad, 0x4d, 0x3c, 0x8e, 0x67, 0x92, 0x6d, 0x41, 0xe2, - 0x0c, 0x3d, 0x71, 0xe4, 0x52, 0xa9, 0x12, 0xe2, 0xce, 0x95, 0xbf, 0x80, 0x1e, 0x7b, 0xe4, 0x64, - 0x41, 0x7a, 0x41, 0x39, 0xe6, 0x2f, 0x40, 0x9e, 0x71, 0xec, 0x34, 0xbf, 0x83, 0x54, 0x2e, 0x7b, - 0xcb, 0xbc, 0xf7, 0xbd, 0xf7, 0xbd, 0xf7, 0xcd, 0xcb, 0x1b, 0x83, 0x77, 0xac, 0x67, 0x16, 0x79, - 0x6a, 0xd8, 0xc8, 0x6f, 0xe3, 0x93, 0x0e, 0xf2, 0xa8, 0xd1, 0xbf, 0xdf, 0x44, 0xd4, 0xba, 0x3f, - 0x61, 0xd2, 0xfd, 0x00, 0x53, 0x2c, 0x17, 0x19, 0x54, 0x9f, 0xb0, 0xc7, 0x50, 0x65, 0xcf, 0xc1, - 0x0e, 0x66, 0x20, 0x23, 0xfa, 0xc5, 0xf1, 0xca, 0x5b, 0x0b, 0x53, 0x3b, 0x01, 0xee, 0xf9, 0x31, - 0x4a, 0x6d, 0x61, 0xd2, 0xc1, 0xc4, 0x68, 0x5a, 0x04, 0x25, 0x80, 0x16, 0x76, 0x3d, 0xee, 0xd7, - 0xfe, 0x96, 0xc0, 0xab, 0x47, 0xc4, 0x79, 0x14, 0x20, 0x8b, 0xa2, 0x5a, 0x92, 0x4b, 0x7e, 0x0c, - 0x24, 0xd7, 0x2e, 0x8a, 0x65, 0xf1, 0x60, 0xb7, 0xf2, 0xb6, 0xbe, 0xa8, 0x34, 0x3d, 0x8d, 0xa8, - 0xd7, 0xaa, 0xa5, 0x8b, 0x10, 0x0a, 0x83, 0x10, 0x4a, 0xf5, 0xda, 0x30, 0x84, 0x92, 0x6b, 0x8f, - 0x42, 0xb8, 0x73, 0x62, 0x75, 0xda, 0x87, 0x9a, 0x6b, 0x6b, 0xa6, 0xe4, 0xda, 0xf2, 0xd7, 0x20, - 0xc7, 0xaa, 0x23, 0x45, 0xa9, 0x9c, 0x39, 0xd8, 0xad, 0xbc, 0xb9, 0x38, 0xf5, 0xa7, 0x11, 0xae, - 0xe1, 0xa3, 0x56, 0x15, 0x46, 0x79, 0x87, 0x21, 0x8c, 0x43, 0x47, 0x21, 0xbc, 0xc5, 0xb3, 0xf2, - 0xb3, 0x66, 0xc6, 0x0e, 0xf9, 0x23, 0xb0, 0xdd, 0x47, 0x01, 0x71, 0xb1, 0x57, 0xcc, 0x94, 0xc5, - 0x83, 0x7c, 0xb5, 0x34, 0x0c, 0xe1, 0xd8, 0x34, 0x0a, 0xe1, 0x6d, 0x1e, 0x16, 0x1b, 0x34, 0x73, - 0xec, 0x92, 0x3f, 0x07, 0xdb, 0x36, 0xf2, 0x31, 0x71, 0x69, 0x31, 0xcb, 0x5a, 0xbe, 0xa3, 0x73, - 0xdd, 0xf4, 0x48, 0xb7, 0xa4, 0xa4, 0x47, 0xd8, 0xf5, 0xaa, 0x6f, 0xc4, 0xd5, 0x8c, 0x23, 0xd2, - 0xbc, 0xb1, 0x41, 0x33, 0xc7, 0xae, 0xc3, 0xec, 0x3f, 0xe7, 0x50, 0xd0, 0x4a, 0xe0, 0xf5, 0x39, - 0x12, 0x9b, 0x88, 0xf8, 0xd8, 0x23, 0x48, 0xfb, 0x43, 0x04, 0x7b, 0x47, 0xc4, 0xa9, 0xf1, 0x98, - 0x9b, 0xbf, 0x03, 0x13, 0xe4, 0xac, 0x0e, 0xee, 0x79, 0xb4, 0x28, 0xad, 0xea, 0x35, 0x51, 0x9e, - 0x07, 0xa4, 0xca, 0xf3, 0xb3, 0x66, 0xc6, 0x8e, 0xb8, 0x51, 0x15, 0xdc, 0x9d, 0xd7, 0x48, 0xd2, - 0xe9, 0x8f, 0x7c, 0xd8, 0x1e, 0xfb, 0xf6, 0x0b, 0x3c, 0x6c, 0xd7, 0x86, 0x62, 0x5a, 0x8a, 0x44, - 0xaa, 0x2e, 0x90, 0xa3, 0x99, 0x69, 0x63, 0x72, 0xf3, 0x42, 0xc5, 0x15, 0xdd, 0x05, 0xca, 0x2c, - 0x65, 0x52, 0xd0, 0xf7, 0x20, 0x3f, 0x99, 0x56, 0x36, 0xc0, 0x16, 0x7e, 0xee, 0xa1, 0x80, 0x55, - 0xb3, 0x53, 0xbd, 0x33, 0x0c, 0x21, 0x37, 0x8c, 0x42, 0x98, 0xe7, 0xe9, 0xd9, 0x51, 0x33, 0xb9, - 0x59, 0x7e, 0x00, 0xb2, 
0x36, 0x41, 0x5d, 0x36, 0x74, 0xd9, 0x2a, 0x1c, 0x84, 0x30, 0x5b, 0x6b, - 0xa0, 0xee, 0x30, 0x84, 0xcc, 0x3e, 0x0a, 0xe1, 0x6e, 0xfc, 0x37, 0x22, 0xa8, 0xab, 0x99, 0xcc, - 0x78, 0xf8, 0xd2, 0xcf, 0xe7, 0x50, 0x60, 0xd5, 0xfd, 0x9e, 0x01, 0x60, 0x42, 0x09, 0x0a, 0x6e, - 0xa5, 0x8d, 0x3f, 0xd9, 0x58, 0x94, 0xfd, 0x58, 0x94, 0x6b, 0x3d, 0xcd, 0x93, 0x27, 0x9f, 0x66, - 0xaa, 0xdb, 0xf2, 0x57, 0x60, 0x8b, 0x50, 0x8b, 0x22, 0xd6, 0xc4, 0xed, 0xca, 0xbb, 0xeb, 0xb0, - 0xe9, 0x8d, 0x28, 0x82, 0x0b, 0xc4, 0x82, 0x53, 0x81, 0xd8, 0x51, 0x33, 0xb9, 0xf9, 0xbf, 0x6f, - 0xaf, 0x12, 0x00, 0x2d, 0xb6, 0x5c, 0xec, 0x27, 0x16, 0x5f, 0x60, 0x19, 0x73, 0x27, 0xb6, 0x3c, - 0xa4, 0xda, 0xb7, 0x60, 0x8b, 0x95, 0x20, 0xef, 0x83, 0x6d, 0xd7, 0xeb, 0x5b, 0x6d, 0xd7, 0x2e, - 0x08, 0x8a, 0x72, 0x7a, 0x56, 0x7e, 0x2d, 0xad, 0x92, 0x21, 0xea, 0xdc, 0x2b, 0x97, 0x41, 0xce, - 0x6a, 0x51, 0xb7, 0x8f, 0x0a, 0xa2, 0xb2, 0x77, 0x7a, 0x56, 0x2e, 0xa4, 0xb8, 0x87, 0xcc, 0x1e, - 0x21, 0x5a, 0xd1, 0xa0, 0xd8, 0x05, 0x69, 0x1a, 0xc1, 0x06, 0xc8, 0x56, 0xb2, 0x3f, 0xfc, 0xa2, - 0x0a, 0xf1, 0x64, 0xfd, 0x26, 0x82, 0x57, 0x52, 0xc0, 0x27, 0x6e, 0x9b, 0xa2, 0x80, 0xfc, 0x3f, - 0x13, 0x14, 0xb1, 0xf0, 0x2b, 0xcb, 0xa4, 0x2c, 0xcb, 0xae, 0x81, 0x97, 0x5c, 0xf9, 0x35, 0x07, - 0x32, 0x47, 0xc4, 0x91, 0x8f, 0x41, 0x61, 0xe6, 0x6d, 0xbc, 0xb7, 0xf8, 0xda, 0xe7, 0xec, 0x79, - 0xe5, 0x83, 0x8d, 0xe0, 0xe3, 0x3f, 0x9c, 0xfc, 0x1d, 0xd3, 0x6c, 0xea, 0x49, 0xd0, 0x97, 0xe6, - 0x9a, 0xc1, 0x2b, 0x1f, 0x6e, 0x86, 0x4f, 0xc8, 0x8f, 0x41, 0x61, 0x66, 0x4b, 0x2f, 0x6f, 0x7b, - 0x1a, 0xbe, 0xa2, 0xed, 0x45, 0x8b, 0x4f, 0xee, 0x81, 0x97, 0xa7, 0xb7, 0xde, 0x7b, 0xcb, 0x05, - 0xbc, 0x8e, 0x56, 0xde, 0xdf, 0x04, 0x9d, 0xd0, 0x7e, 0x03, 0x00, 0x73, 0xb1, 0x87, 0x40, 0xde, - 0x5f, 0x9d, 0x83, 0x01, 0x15, 0x63, 0x4d, 0xe0, 0x24, 0xcf, 0x67, 0x56, 0x6f, 0x3d, 0x9e, 0x14, - 0xb8, 0x82, 0x27, 0x05, 0x4e, 0xf2, 0x34, 0xa8, 0x15, 0xd0, 0x75, 0x78, 0x52, 0xe0, 0x0a, 0x9e, - 0x14, 0x38, 0xe6, 0xa9, 0x7e, 0x71, 0x31, 0x50, 0xc5, 0xcb, 0x81, 0x2a, 0xfe, 0x35, 0x50, 0xc5, - 0x9f, 0xae, 0x54, 0xe1, 0xf2, 0x4a, 0x15, 0xfe, 0xbc, 0x52, 0x85, 0x2f, 0x3f, 0x76, 0x5c, 0xfa, - 0xb4, 0xd7, 0xd4, 0x5b, 0xb8, 0x63, 0xb0, 0xa4, 0xf7, 0x3c, 0x44, 0x9f, 0xe3, 0xe0, 0x59, 0x7c, - 0xb2, 0x7c, 0xd7, 0x70, 0xb0, 0xe1, 0x61, 0x1b, 0xcd, 0xf9, 0x88, 0x6d, 0xe6, 0xd8, 0xf7, 0xe9, - 0x83, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x2f, 0xe4, 0x9d, 0x81, 0x42, 0x0b, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MsgClient interface { - // CreateDeployment defines a method to create new deployment given proper inputs. - CreateDeployment(ctx context.Context, in *MsgCreateDeployment, opts ...grpc.CallOption) (*MsgCreateDeploymentResponse, error) - // DepositDeployment deposits more funds into the deployment account - DepositDeployment(ctx context.Context, in *MsgDepositDeployment, opts ...grpc.CallOption) (*MsgDepositDeploymentResponse, error) - // UpdateDeployment defines a method to update a deployment given proper inputs. 
- UpdateDeployment(ctx context.Context, in *MsgUpdateDeployment, opts ...grpc.CallOption) (*MsgUpdateDeploymentResponse, error) - // CloseDeployment defines a method to close a deployment given proper inputs. - CloseDeployment(ctx context.Context, in *MsgCloseDeployment, opts ...grpc.CallOption) (*MsgCloseDeploymentResponse, error) - // CloseGroup defines a method to close a group of a deployment given proper inputs. - CloseGroup(ctx context.Context, in *MsgCloseGroup, opts ...grpc.CallOption) (*MsgCloseGroupResponse, error) - // PauseGroup defines a method to close a group of a deployment given proper inputs. - PauseGroup(ctx context.Context, in *MsgPauseGroup, opts ...grpc.CallOption) (*MsgPauseGroupResponse, error) - // StartGroup defines a method to close a group of a deployment given proper inputs. - StartGroup(ctx context.Context, in *MsgStartGroup, opts ...grpc.CallOption) (*MsgStartGroupResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) CreateDeployment(ctx context.Context, in *MsgCreateDeployment, opts ...grpc.CallOption) (*MsgCreateDeploymentResponse, error) { - out := new(MsgCreateDeploymentResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta1.Msg/CreateDeployment", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) DepositDeployment(ctx context.Context, in *MsgDepositDeployment, opts ...grpc.CallOption) (*MsgDepositDeploymentResponse, error) { - out := new(MsgDepositDeploymentResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta1.Msg/DepositDeployment", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) UpdateDeployment(ctx context.Context, in *MsgUpdateDeployment, opts ...grpc.CallOption) (*MsgUpdateDeploymentResponse, error) { - out := new(MsgUpdateDeploymentResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta1.Msg/UpdateDeployment", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CloseDeployment(ctx context.Context, in *MsgCloseDeployment, opts ...grpc.CallOption) (*MsgCloseDeploymentResponse, error) { - out := new(MsgCloseDeploymentResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta1.Msg/CloseDeployment", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CloseGroup(ctx context.Context, in *MsgCloseGroup, opts ...grpc.CallOption) (*MsgCloseGroupResponse, error) { - out := new(MsgCloseGroupResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta1.Msg/CloseGroup", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) PauseGroup(ctx context.Context, in *MsgPauseGroup, opts ...grpc.CallOption) (*MsgPauseGroupResponse, error) { - out := new(MsgPauseGroupResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta1.Msg/PauseGroup", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) StartGroup(ctx context.Context, in *MsgStartGroup, opts ...grpc.CallOption) (*MsgStartGroupResponse, error) { - out := new(MsgStartGroupResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta1.Msg/StartGroup", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. 
-type MsgServer interface { - // CreateDeployment defines a method to create new deployment given proper inputs. - CreateDeployment(context.Context, *MsgCreateDeployment) (*MsgCreateDeploymentResponse, error) - // DepositDeployment deposits more funds into the deployment account - DepositDeployment(context.Context, *MsgDepositDeployment) (*MsgDepositDeploymentResponse, error) - // UpdateDeployment defines a method to update a deployment given proper inputs. - UpdateDeployment(context.Context, *MsgUpdateDeployment) (*MsgUpdateDeploymentResponse, error) - // CloseDeployment defines a method to close a deployment given proper inputs. - CloseDeployment(context.Context, *MsgCloseDeployment) (*MsgCloseDeploymentResponse, error) - // CloseGroup defines a method to close a group of a deployment given proper inputs. - CloseGroup(context.Context, *MsgCloseGroup) (*MsgCloseGroupResponse, error) - // PauseGroup defines a method to close a group of a deployment given proper inputs. - PauseGroup(context.Context, *MsgPauseGroup) (*MsgPauseGroupResponse, error) - // StartGroup defines a method to close a group of a deployment given proper inputs. - StartGroup(context.Context, *MsgStartGroup) (*MsgStartGroupResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. -type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) CreateDeployment(ctx context.Context, req *MsgCreateDeployment) (*MsgCreateDeploymentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateDeployment not implemented") -} -func (*UnimplementedMsgServer) DepositDeployment(ctx context.Context, req *MsgDepositDeployment) (*MsgDepositDeploymentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DepositDeployment not implemented") -} -func (*UnimplementedMsgServer) UpdateDeployment(ctx context.Context, req *MsgUpdateDeployment) (*MsgUpdateDeploymentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateDeployment not implemented") -} -func (*UnimplementedMsgServer) CloseDeployment(ctx context.Context, req *MsgCloseDeployment) (*MsgCloseDeploymentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseDeployment not implemented") -} -func (*UnimplementedMsgServer) CloseGroup(ctx context.Context, req *MsgCloseGroup) (*MsgCloseGroupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseGroup not implemented") -} -func (*UnimplementedMsgServer) PauseGroup(ctx context.Context, req *MsgPauseGroup) (*MsgPauseGroupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method PauseGroup not implemented") -} -func (*UnimplementedMsgServer) StartGroup(ctx context.Context, req *MsgStartGroup) (*MsgStartGroupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method StartGroup not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_CreateDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateDeployment) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateDeployment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta1.Msg/CreateDeployment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - 
return srv.(MsgServer).CreateDeployment(ctx, req.(*MsgCreateDeployment)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_DepositDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgDepositDeployment) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).DepositDeployment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta1.Msg/DepositDeployment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).DepositDeployment(ctx, req.(*MsgDepositDeployment)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_UpdateDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgUpdateDeployment) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).UpdateDeployment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta1.Msg/UpdateDeployment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).UpdateDeployment(ctx, req.(*MsgUpdateDeployment)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CloseDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCloseDeployment) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CloseDeployment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta1.Msg/CloseDeployment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CloseDeployment(ctx, req.(*MsgCloseDeployment)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CloseGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCloseGroup) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CloseGroup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta1.Msg/CloseGroup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CloseGroup(ctx, req.(*MsgCloseGroup)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_PauseGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgPauseGroup) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).PauseGroup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta1.Msg/PauseGroup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).PauseGroup(ctx, req.(*MsgPauseGroup)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_StartGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgStartGroup) - if 
err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).StartGroup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta1.Msg/StartGroup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).StartGroup(ctx, req.(*MsgStartGroup)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.deployment.v1beta1.Msg", - HandlerType: (*MsgServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateDeployment", - Handler: _Msg_CreateDeployment_Handler, - }, - { - MethodName: "DepositDeployment", - Handler: _Msg_DepositDeployment_Handler, - }, - { - MethodName: "UpdateDeployment", - Handler: _Msg_UpdateDeployment_Handler, - }, - { - MethodName: "CloseDeployment", - Handler: _Msg_CloseDeployment_Handler, - }, - { - MethodName: "CloseGroup", - Handler: _Msg_CloseGroup_Handler, - }, - { - MethodName: "PauseGroup", - Handler: _Msg_PauseGroup_Handler, - }, - { - MethodName: "StartGroup", - Handler: _Msg_StartGroup_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/deployment/v1beta1/deployment.proto", -} - -func (m *MsgCreateDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Deposit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeployment(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintDeployment(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x1a - } - if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeployment(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeployment(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCreateDeploymentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgDepositDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDepositDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDepositDeployment) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Amount.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeployment(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeployment(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgDepositDeploymentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDepositDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDepositDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgUpdateDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintDeployment(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x1a - } - if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeployment(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeployment(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgUpdateDeploymentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgUpdateDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgUpdateDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgCloseDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeployment(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCloseDeploymentResponse) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *DeploymentID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DSeq != 0 { - i = encodeVarintDeployment(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintDeployment(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Deployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CreatedAt != 0 { - i = encodeVarintDeployment(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintDeployment(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x1a - } - if m.State != 0 { - i = encodeVarintDeployment(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.DeploymentID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeployment(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *DeploymentFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintDeployment(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x1a - } - if m.DSeq != 0 { - i = encodeVarintDeployment(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintDeployment(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintDeployment(dAtA []byte, offset int, v uint64) int { - offset -= sovDeployment(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *MsgCreateDeployment) Size() (n int) { - if m == nil { - return 0 - } - var 
l int - _ = l - l = m.ID.Size() - n += 1 + l + sovDeployment(uint64(l)) - if len(m.Groups) > 0 { - for _, e := range m.Groups { - l = e.Size() - n += 1 + l + sovDeployment(uint64(l)) - } - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovDeployment(uint64(l)) - } - l = m.Deposit.Size() - n += 1 + l + sovDeployment(uint64(l)) - return n -} - -func (m *MsgCreateDeploymentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgDepositDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovDeployment(uint64(l)) - l = m.Amount.Size() - n += 1 + l + sovDeployment(uint64(l)) - return n -} - -func (m *MsgDepositDeploymentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgUpdateDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovDeployment(uint64(l)) - if len(m.Groups) > 0 { - for _, e := range m.Groups { - l = e.Size() - n += 1 + l + sovDeployment(uint64(l)) - } - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovDeployment(uint64(l)) - } - return n -} - -func (m *MsgUpdateDeploymentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgCloseDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovDeployment(uint64(l)) - return n -} - -func (m *MsgCloseDeploymentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *DeploymentID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovDeployment(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovDeployment(uint64(m.DSeq)) - } - return n -} - -func (m *Deployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.DeploymentID.Size() - n += 1 + l + sovDeployment(uint64(l)) - if m.State != 0 { - n += 1 + sovDeployment(uint64(m.State)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovDeployment(uint64(l)) - } - if m.CreatedAt != 0 { - n += 1 + sovDeployment(uint64(m.CreatedAt)) - } - return n -} - -func (m *DeploymentFilters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovDeployment(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovDeployment(uint64(m.DSeq)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovDeployment(uint64(l)) - } - return n -} - -func sovDeployment(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozDeployment(x uint64) (n int) { - return sovDeployment(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *MsgCreateDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, GroupSpec{}) - if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = append(m.Version[:0], dAtA[iNdEx:postIndex]...) 
- if m.Version == nil { - m.Version = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deposit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Deposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateDeploymentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateDeploymentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDepositDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDepositDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDepositDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 
2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDepositDeploymentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDepositDeploymentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDepositDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgUpdateDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgUpdateDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field Groups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, GroupSpec{}) - if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = append(m.Version[:0], dAtA[iNdEx:postIndex]...) - if m.Version == nil { - m.Version = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgUpdateDeploymentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgUpdateDeploymentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgUpdateDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseDeploymentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseDeploymentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 
7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Deployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Deployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeploymentID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.DeploymentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Deployment_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = append(m.Version[:0], dAtA[iNdEx:postIndex]...) 
- if m.Version == nil { - m.Version = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func skipDeployment(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeployment - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeployment - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeployment - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthDeployment - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupDeployment - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthDeployment - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthDeployment = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowDeployment = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupDeployment = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta1/deployment_validation_test.go b/go/node/deployment/v1beta1/deployment_validation_test.go deleted file mode 100644 index 359e3872..00000000 --- a/go/node/deployment/v1beta1/deployment_validation_test.go +++ /dev/null @@ -1,185 +0,0 @@ -package v1beta1_test - -import ( - "testing" - - "github.com/stretchr/testify/require" - - sdk "github.com/cosmos/cosmos-sdk/types" - - types "github.com/akash-network/akash-api/go/node/deployment/v1beta1" - akashtypes "github.com/akash-network/akash-api/go/node/types/v1beta1" - tutil "github.com/akash-network/akash-api/go/testutil" - testutil "github.com/akash-network/akash-api/go/testutil/v1beta1" -) - -func TestZeroValueGroupSpec(t *testing.T) { - did := testutil.DeploymentID(t) - - dgroup := testutil.DeploymentGroup(t, did, uint32(6)) - gspec := dgroup.GroupSpec - - t.Run("assert nominal test success", func(t *testing.T) { - err := gspec.ValidateBasic() - require.NoError(t, err) - }) -} - -/* -func TestZeroValueGroupSpecs(t *testing.T) { - did := testutil.DeploymentID(t) - dgroups := testutil.DeploymentGroups(t, did, uint32(6)) - gspecs := make([]types.GroupSpec, 0) - for _, d := range dgroups { - gspecs = append(gspecs, d.GroupSpec) - } - - t.Run("assert nominal test success", func(t *testing.T) { - err := types.ValidateDeploymentGroups(gspecs) - require.NoError(t, err) - }) - - gspecZeroed := make([]types.GroupSpec, len(gspecs)) - gspecZeroed = append(gspecZeroed, gspecs...) 
- t.Run("assert error for zero value bid duration", func(t *testing.T) { - err := types.ValidateDeploymentGroups(gspecZeroed) - require.Error(t, err) - }) -}*/ - -func TestEmptyGroupSpecIsInvalid(t *testing.T) { - err := types.ValidateDeploymentGroups(make([]types.GroupSpec, 0)) - require.Equal(t, types.ErrInvalidGroups, err) -} - -func validSimpleGroupSpec() types.GroupSpec { - resources := make([]types.Resource, 1) - resources[0] = types.Resource{ - Resources: akashtypes.ResourceUnits{ - CPU: &akashtypes.CPU{ - Units: akashtypes.ResourceValue{ - Val: sdk.NewInt(10), - }, - Attributes: nil, - }, - Memory: &akashtypes.Memory{ - Quantity: akashtypes.ResourceValue{ - Val: sdk.NewIntFromUint64(types.GetValidationConfig().MinUnitMemory), - }, - Attributes: nil, - }, - Storage: &akashtypes.Storage{ - Quantity: akashtypes.ResourceValue{ - Val: sdk.NewIntFromUint64(types.GetValidationConfig().MinUnitStorage), - }, - Attributes: nil, - }, - Endpoints: nil, - }, - Count: 1, - Price: sdk.Coin{ - Denom: tutil.CoinDenom, - Amount: sdk.NewInt(1), - }, - } - return types.GroupSpec{ - Name: "testGroup", - Requirements: akashtypes.PlacementRequirements{}, - Resources: resources, - } -} - -func validSimpleGroupSpecs() []types.GroupSpec { - result := make([]types.GroupSpec, 1) - result[0] = validSimpleGroupSpec() - - return result -} - -func TestSimpleGroupSpecIsValid(t *testing.T) { - groups := validSimpleGroupSpecs() - err := types.ValidateDeploymentGroups(groups) - require.NoError(t, err) -} - -func TestDuplicateSimpleGroupSpecIsInvalid(t *testing.T) { - groups := validSimpleGroupSpecs() - groupsDuplicate := make([]types.GroupSpec, 2) - groupsDuplicate[0] = groups[0] - groupsDuplicate[1] = groups[0] - err := types.ValidateDeploymentGroups(groupsDuplicate) - require.Error(t, err) // TODO - specific error - require.Regexp(t, "^.*duplicate.*$", err) -} - -func TestGroupWithZeroCount(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].Count = 0 - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, "^.*invalid unit count.*$", err) -} - -func TestGroupWithZeroCPU(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].Resources.CPU.Units.Val = sdk.NewInt(0) - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, "^.*invalid unit CPU.*$", err) -} - -func TestGroupWithZeroMemory(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].Resources.Memory.Quantity.Val = sdk.NewInt(0) - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, "^.*invalid unit memory.*$", err) -} - -func TestGroupWithZeroStorage(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].Resources.Storage.Quantity.Val = sdk.NewInt(0) - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, "^.*invalid unit storage.*$", err) -} - -func TestGroupWithNilCPU(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].Resources.CPU = nil - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, "^.*invalid unit CPU.*$", err) -} - -func TestGroupWithNilMemory(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].Resources.Memory = nil - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, "^.*invalid unit memory.*$", err) -} - -func TestGroupWithNilStorage(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].Resources.Storage = nil - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, "^.*invalid unit 
storage.*$", err) -} - -func TestGroupWithInvalidPrice(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].Price = sdk.Coin{} - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, "^.*invalid price object.*$", err) -} - -func TestGroupWithNegativePrice(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].Price.Amount = sdk.NewInt(-1) - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, "^.*invalid price object.*$", err) -} diff --git a/go/node/deployment/v1beta1/deposit_deployment_authorization.go b/go/node/deployment/v1beta1/deposit_deployment_authorization.go deleted file mode 100644 index 812c0863..00000000 --- a/go/node/deployment/v1beta1/deposit_deployment_authorization.go +++ /dev/null @@ -1,48 +0,0 @@ -package v1beta1 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - "github.com/cosmos/cosmos-sdk/x/authz" -) - -var ( - _ authz.Authorization = &DepositDeploymentAuthorization{} -) - -// NewDepositDeploymentAuthorization creates a new DepositDeploymentAuthorization object. -func NewDepositDeploymentAuthorization(spendLimit sdk.Coin) *DepositDeploymentAuthorization { - return &DepositDeploymentAuthorization{ - SpendLimit: spendLimit, - } -} - -// MsgTypeURL implements Authorization.MsgTypeURL. -func (m DepositDeploymentAuthorization) MsgTypeURL() string { - return sdk.MsgTypeURL(&MsgDepositDeployment{}) -} - -// Accept implements Authorization.Accept. -func (m DepositDeploymentAuthorization) Accept(_ sdk.Context, msg sdk.Msg) (authz.AcceptResponse, error) { - mDepositDeployment, ok := msg.(*MsgDepositDeployment) - if !ok { - return authz.AcceptResponse{}, sdkerrors.ErrInvalidType.Wrap("type mismatch") - } - limitLeft := m.SpendLimit.Sub(mDepositDeployment.Amount) - if limitLeft.IsNegative() { - return authz.AcceptResponse{}, sdkerrors.ErrInsufficientFunds.Wrapf("requested amount is more than spend limit") - } - if limitLeft.IsZero() { - return authz.AcceptResponse{Accept: true, Delete: true}, nil - } - - return authz.AcceptResponse{Accept: true, Delete: false, Updated: &DepositDeploymentAuthorization{SpendLimit: limitLeft}}, nil -} - -// ValidateBasic implements Authorization.ValidateBasic. 
-func (m DepositDeploymentAuthorization) ValidateBasic() error { - if !m.SpendLimit.IsPositive() { - return sdkerrors.ErrInvalidCoins.Wrapf("spend limit cannot be negative") - } - return nil -} diff --git a/go/node/deployment/v1beta1/errors.go b/go/node/deployment/v1beta1/errors.go deleted file mode 100644 index d574e6cb..00000000 --- a/go/node/deployment/v1beta1/errors.go +++ /dev/null @@ -1,35 +0,0 @@ -package v1beta1 - -import ( - "errors" -) - -var ( - - // ErrInvalidGroups is the error when groups are empty - ErrInvalidGroups = errors.New("Invalid groups") - // ErrInvalidDeploymentID is the error for invalid deployment id - - // ErrEmptyVersion is the error when version is empty - ErrEmptyVersion = errors.New("Invalid: empty version") - // ErrInvalidVersion is the error when version is invalid - ErrInvalidVersion = errors.New("Invalid: deployment version") - // ErrInternal is the error for internal error - - // ErrInvalidDeployment = is the error when deployment does not pass validation - ErrInvalidDeployment = errors.New("Invalid deployment") - - // ErrGroupClosed is the error when deployment is closed - ErrGroupClosed = errors.New("Group already closed") - // ErrGroupOpen is the error when deployment is closed - ErrGroupOpen = errors.New("Group open") - // ErrGroupPaused is the error when deployment is closed - ErrGroupPaused = errors.New("Group paused") - - // ErrInvalidDeposit indicates an invalid deposit - ErrInvalidDeposit = errors.New("Deposit invalid") - // ErrInvalidIDPath indicates an invalid ID path - ErrInvalidIDPath = errors.New("ID path invalid") - // ErrInvalidParam indicates an invalid chain parameter - ErrInvalidParam = errors.New("parameter invalid") -) diff --git a/go/node/deployment/v1beta1/escrow.go b/go/node/deployment/v1beta1/escrow.go deleted file mode 100644 index 3a73ec00..00000000 --- a/go/node/deployment/v1beta1/escrow.go +++ /dev/null @@ -1,25 +0,0 @@ -package v1beta1 - -import ( - etypes "github.com/akash-network/akash-api/go/node/escrow/v1beta1" -) - -const ( - EscrowScope = "deployment" -) - -func EscrowAccountForDeployment(id DeploymentID) etypes.AccountID { - return etypes.AccountID{ - Scope: EscrowScope, - XID: id.String(), - } -} - -func DeploymentIDFromEscrowAccount(id etypes.AccountID) (DeploymentID, bool) { - if id.Scope != EscrowScope { - return DeploymentID{}, false - } - - did, err := ParseDeploymentID(id.XID) - return did, err == nil -} diff --git a/go/node/deployment/v1beta1/event.go b/go/node/deployment/v1beta1/event.go deleted file mode 100644 index 859fa22d..00000000 --- a/go/node/deployment/v1beta1/event.go +++ /dev/null @@ -1,309 +0,0 @@ -package v1beta1 - -import ( - "encoding/hex" - "strconv" - - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/akash-network/akash-api/go/sdkutil" -) - -const ( - evActionDeploymentCreated = "deployment-created" - evActionDeploymentUpdated = "deployment-updated" - evActionDeploymentClosed = "deployment-closed" - evActionGroupClosed = "group-closed" - evActionGroupPaused = "group-paused" - evActionGroupStarted = "group-started" - evOwnerKey = "owner" - evDSeqKey = "dseq" - evGSeqKey = "gseq" - evVersionKey = "version" - encodedVersionHexLen = 64 -) - -// EventDeploymentCreated struct -type EventDeploymentCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID DeploymentID `json:"id"` - Version []byte `json:"version"` -} - -// NewEventDeploymentCreated initializes creation event. 
-func NewEventDeploymentCreated(id DeploymentID, version []byte) EventDeploymentCreated { - return EventDeploymentCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionDeploymentCreated, - }, - ID: id, - Version: version, - } -} - -// ToSDKEvent method creates new sdk event for EventDeploymentCreated struct -func (ev EventDeploymentCreated) ToSDKEvent() sdk.Event { - version := encodeHex(ev.Version) - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionDeploymentCreated), - sdk.NewAttribute(evVersionKey, string(version)), - }, DeploymentIDEVAttributes(ev.ID)...)..., - ) -} - -// EventDeploymentUpdated struct -type EventDeploymentUpdated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID DeploymentID `json:"id"` - Version []byte `json:"version"` -} - -// NewEventDeploymentUpdated initializes SDK type -func NewEventDeploymentUpdated(id DeploymentID, version []byte) EventDeploymentUpdated { - return EventDeploymentUpdated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionDeploymentUpdated, - }, - ID: id, - Version: version, - } -} - -// ToSDKEvent method creates new sdk event for EventDeploymentUpdated struct -func (ev EventDeploymentUpdated) ToSDKEvent() sdk.Event { - version := encodeHex(ev.Version) - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionDeploymentUpdated), - sdk.NewAttribute(evVersionKey, string(version)), - }, DeploymentIDEVAttributes(ev.ID)...)..., - ) -} - -// EventDeploymentClosed struct -type EventDeploymentClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID DeploymentID `json:"id"` -} - -func NewEventDeploymentClosed(id DeploymentID) EventDeploymentClosed { - return EventDeploymentClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionDeploymentClosed, - }, - ID: id, - } -} - -// ToSDKEvent method creates new sdk event for EventDeploymentClosed struct -func (ev EventDeploymentClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionDeploymentClosed), - }, DeploymentIDEVAttributes(ev.ID)...)..., - ) -} - -// DeploymentIDEVAttributes returns event attribues for given DeploymentID -func DeploymentIDEVAttributes(id DeploymentID) []sdk.Attribute { - return []sdk.Attribute{ - sdk.NewAttribute(evOwnerKey, id.Owner), - sdk.NewAttribute(evDSeqKey, strconv.FormatUint(id.DSeq, 10)), - } -} - -// ParseEVDeploymentID returns deploymentID details for given event attributes -func ParseEVDeploymentID(attrs []sdk.Attribute) (DeploymentID, error) { - owner, err := sdkutil.GetAccAddress(attrs, evOwnerKey) - if err != nil { - return DeploymentID{}, err - } - dseq, err := sdkutil.GetUint64(attrs, evDSeqKey) - if err != nil { - return DeploymentID{}, err - } - - return DeploymentID{ - Owner: owner.String(), - DSeq: dseq, - }, nil -} - -// ParseEVDeploymentVersion returns the Deployment's SDL sha256 sum -func ParseEVDeploymentVersion(attrs []sdk.Attribute) ([]byte, error) { - v, err := sdkutil.GetString(attrs, evVersionKey) - if err != nil { - return nil, err - } - return decodeHex([]byte(v)) -} - -func encodeHex(src []byte) []byte { - dst := make([]byte, 
hex.EncodedLen(len(src))) - hex.Encode(dst, src) - return dst -} - -func decodeHex(src []byte) ([]byte, error) { - dst := make([]byte, hex.DecodedLen(len(src))) - if _, err := hex.Decode(dst, src); err != nil { - return []byte{}, err - } - return dst, nil -} - -// EventGroupClosed provides SDK event to signal group termination -type EventGroupClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID GroupID `json:"id"` -} - -func NewEventGroupClosed(id GroupID) EventGroupClosed { - return EventGroupClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionGroupClosed, - }, - ID: id, - } -} - -// ToSDKEvent produces the SDK notification for Event -func (ev EventGroupClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionGroupClosed), - }, GroupIDEVAttributes(ev.ID)...)..., - ) -} - -// EventGroupPaused provides SDK event to signal group termination -type EventGroupPaused struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID GroupID `json:"id"` -} - -func NewEventGroupPaused(id GroupID) EventGroupPaused { - return EventGroupPaused{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionGroupPaused, - }, - ID: id, - } -} - -// ToSDKEvent produces the SDK notification for Event -func (ev EventGroupPaused) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionGroupPaused), - }, GroupIDEVAttributes(ev.ID)...)..., - ) -} - -// EventGroupStarted provides SDK event to signal group termination -type EventGroupStarted struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID GroupID `json:"id"` -} - -func NewEventGroupStarted(id GroupID) EventGroupStarted { - return EventGroupStarted{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionGroupStarted, - }, - ID: id, - } -} - -// ToSDKEvent produces the SDK notification for Event -func (ev EventGroupStarted) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionGroupStarted), - }, GroupIDEVAttributes(ev.ID)...)..., - ) -} - -// GroupIDEVAttributes returns event attribues for given GroupID -func GroupIDEVAttributes(id GroupID) []sdk.Attribute { - return append(DeploymentIDEVAttributes(id.DeploymentID()), - sdk.NewAttribute(evGSeqKey, strconv.FormatUint(uint64(id.GSeq), 10))) -} - -// ParseEVGroupID returns GroupID details for given event attributes -func ParseEVGroupID(attrs []sdk.Attribute) (GroupID, error) { - did, err := ParseEVDeploymentID(attrs) - if err != nil { - return GroupID{}, err - } - - gseq, err := sdkutil.GetUint64(attrs, evGSeqKey) - if err != nil { - return GroupID{}, err - } - - return GroupID{ - Owner: did.Owner, - DSeq: did.DSeq, - GSeq: uint32(gseq), - }, nil -} - -// ParseEvent parses event and returns details of event and error if occurred -func ParseEvent(ev sdkutil.Event) (sdkutil.ModuleEvent, error) { - if ev.Type != sdkutil.EventTypeMessage { - return nil, sdkutil.ErrUnknownType - } - if ev.Module != ModuleName { - return nil, sdkutil.ErrUnknownModule - } - switch ev.Action { - case evActionDeploymentCreated: - did, err := ParseEVDeploymentID(ev.Attributes) - if err != nil 
{ - return nil, err - } - ver, err := ParseEVDeploymentVersion(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventDeploymentCreated(did, ver), nil - case evActionDeploymentUpdated: - did, err := ParseEVDeploymentID(ev.Attributes) - if err != nil { - return nil, err - } - ver, err := ParseEVDeploymentVersion(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventDeploymentUpdated(did, ver), nil - case evActionDeploymentClosed: - did, err := ParseEVDeploymentID(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventDeploymentClosed(did), nil - case evActionGroupClosed: - gid, err := ParseEVGroupID(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventGroupClosed(gid), nil - default: - return nil, sdkutil.ErrUnknownAction - } -} diff --git a/go/node/deployment/v1beta1/genesis.pb.go b/go/node/deployment/v1beta1/genesis.pb.go deleted file mode 100644 index f3873b88..00000000 --- a/go/node/deployment/v1beta1/genesis.pb.go +++ /dev/null @@ -1,630 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta1/genesis.proto - -package v1beta1 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisDeployment defines the basic genesis state used by deployment module -type GenesisDeployment struct { - Deployment Deployment `protobuf:"bytes,1,opt,name=deployment,proto3" json:"deployment" yaml:"deployment"` - Groups []Group `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups" yaml:"groups"` -} - -func (m *GenesisDeployment) Reset() { *m = GenesisDeployment{} } -func (m *GenesisDeployment) String() string { return proto.CompactTextString(m) } -func (*GenesisDeployment) ProtoMessage() {} -func (*GenesisDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea837e5a570e958, []int{0} -} -func (m *GenesisDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisDeployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisDeployment.Merge(m, src) -} -func (m *GenesisDeployment) XXX_Size() int { - return m.Size() -} -func (m *GenesisDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisDeployment proto.InternalMessageInfo - -func (m *GenesisDeployment) GetDeployment() Deployment { - if m != nil { - return m.Deployment - } - return Deployment{} -} - -func (m *GenesisDeployment) GetGroups() []Group { - if m != nil { - return m.Groups - } - return nil -} - -// GenesisState stores slice of genesis deployment instance -type GenesisState struct { - Deployments []GenesisDeployment 
`protobuf:"bytes,1,rep,name=deployments,proto3" json:"deployments" yaml:"deployments"` - Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params" yaml:"params"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_8ea837e5a570e958, []int{1} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetDeployments() []GenesisDeployment { - if m != nil { - return m.Deployments - } - return nil -} - -func (m *GenesisState) GetParams() Params { - if m != nil { - return m.Params - } - return Params{} -} - -func init() { - proto.RegisterType((*GenesisDeployment)(nil), "akash.deployment.v1beta1.GenesisDeployment") - proto.RegisterType((*GenesisState)(nil), "akash.deployment.v1beta1.GenesisState") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta1/genesis.proto", fileDescriptor_8ea837e5a570e958) -} - -var fileDescriptor_8ea837e5a570e958 = []byte{ - // 357 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xbf, 0x4e, 0xc3, 0x30, - 0x10, 0xc6, 0xe3, 0x22, 0x75, 0x70, 0x61, 0x68, 0xc4, 0x10, 0x75, 0x88, 0x2b, 0xab, 0x40, 0x2b, - 0x44, 0xac, 0x96, 0x0d, 0x89, 0x25, 0x42, 0xea, 0x8a, 0xc2, 0xc0, 0x9f, 0xcd, 0xa5, 0x56, 0x5a, - 0xb5, 0x89, 0xa3, 0xc4, 0x05, 0xfa, 0x16, 0x3c, 0x56, 0xc7, 0x8e, 0x0c, 0x28, 0x42, 0xcd, 0xc6, - 0xd8, 0x27, 0x40, 0xb5, 0x2d, 0x1c, 0x55, 0x84, 0x2d, 0x17, 0xff, 0xbe, 0xfb, 0xee, 0x3b, 0x1d, - 0x3c, 0xa5, 0x33, 0x9a, 0x4d, 0xc8, 0x98, 0x25, 0x73, 0xbe, 0x8c, 0x58, 0x2c, 0xc8, 0x4b, 0x7f, - 0xc4, 0x04, 0xed, 0x93, 0x90, 0xc5, 0x2c, 0x9b, 0x66, 0x5e, 0x92, 0x72, 0xc1, 0x6d, 0x47, 0x72, - 0x9e, 0xe1, 0x3c, 0xcd, 0xb5, 0x8e, 0x43, 0x1e, 0x72, 0x09, 0x91, 0xdd, 0x97, 0xe2, 0x5b, 0xbd, - 0xca, 0xbe, 0xa5, 0x16, 0x0a, 0xed, 0x54, 0x8f, 0x90, 0xf2, 0x45, 0xa2, 0xa9, 0x93, 0x4a, 0x2a, - 0xa1, 0x29, 0x8d, 0xf4, 0x9c, 0xf8, 0x13, 0xc0, 0xe6, 0x50, 0x4d, 0x7e, 0xf3, 0x8b, 0xda, 0x11, - 0x84, 0x46, 0xe8, 0x80, 0x36, 0xe8, 0x36, 0x06, 0x1d, 0xaf, 0x2a, 0x92, 0x67, 0x94, 0xfe, 0xd9, - 0x2a, 0x47, 0xd6, 0x77, 0x8e, 0x4a, 0xfa, 0x6d, 0x8e, 0x9a, 0x4b, 0x1a, 0xcd, 0xaf, 0xb0, 0xf9, - 0x87, 0x83, 0x12, 0x60, 0x3f, 0xc0, 0xba, 0x1c, 0x3d, 0x73, 0x6a, 0xed, 0x83, 0x6e, 0x63, 0x80, - 0xaa, 0xad, 0x86, 0x3b, 0xce, 0x47, 0xda, 0x45, 0xcb, 0xb6, 0x39, 0x3a, 0x52, 0x0e, 0xaa, 0xc6, - 0x81, 0x7e, 0xc0, 0x05, 0x80, 0x87, 0x3a, 0xde, 0x9d, 0xa0, 0x82, 0xd9, 0x6f, 0xb0, 0x61, 0xba, - 0x66, 0x0e, 0x90, 0x7e, 0xe7, 0xff, 0xf8, 0xed, 0xef, 0xc6, 0xef, 0x69, 0xef, 0x72, 0x9f, 0x6d, - 0x8e, 0xec, 0xfd, 0x88, 0x19, 0x0e, 0xca, 0x88, 0xfd, 0x08, 0xeb, 0x6a, 0xf3, 0x4e, 0x4d, 0xee, - 0xb3, 0x5d, 0x6d, 0x7a, 0x2b, 0x39, 0x93, 0x52, 0xe9, 0x4c, 0x4a, 
0x55, 0xe3, 0x40, 0x3f, 0xf8, - 0xf7, 0xab, 0x8d, 0x0b, 0xd6, 0x1b, 0x17, 0x7c, 0x6d, 0x5c, 0xf0, 0x5e, 0xb8, 0xd6, 0xba, 0x70, - 0xad, 0x8f, 0xc2, 0xb5, 0x9e, 0xae, 0xc3, 0xa9, 0x98, 0x2c, 0x46, 0xde, 0x33, 0x8f, 0x88, 0xb4, - 0xbb, 0x88, 0x99, 0x78, 0xe5, 0xe9, 0x4c, 0x57, 0x34, 0x99, 0x92, 0x90, 0x93, 0x98, 0x8f, 0xd9, - 0x1f, 0xa7, 0x32, 0xaa, 0xcb, 0x23, 0xb9, 0xfc, 0x09, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x93, 0x31, - 0x9c, 0xf6, 0x02, 0x00, 0x00, -} - -func (m *GenesisDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Deployment.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Deployments) > 0 { - for iNdEx := len(m.Deployments) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Deployments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Deployment.Size() - n += 1 + l + sovGenesis(uint64(l)) - if len(m.Groups) > 0 { - for _, e := range m.Groups { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - return n -} - -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Deployments) > 0 { - for _, e := range m.Deployments { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - l = m.Params.Size() - n += 1 + l + sovGenesis(uint64(l)) - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - 
preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deployment", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Deployment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, Group{}) - if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deployments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Deployments = append(m.Deployments, GenesisDeployment{}) - if err := m.Deployments[len(m.Deployments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta1/group.pb.go b/go/node/deployment/v1beta1/group.pb.go deleted file mode 100644 index eac07231..00000000 --- a/go/node/deployment/v1beta1/group.pb.go +++ /dev/null @@ -1,2176 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta1/group.proto - -package v1beta1 - -import ( - fmt "fmt" - v1beta1 "github.com/akash-network/akash-api/go/node/types/v1beta1" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of group -type Group_State int32 - -const ( - // Prefix should start with 0 in enum. So declaring dummy state - GroupStateInvalid Group_State = 0 - // GroupOpen denotes state for group open - GroupOpen Group_State = 1 - // GroupOrdered denotes state for group ordered - GroupPaused Group_State = 2 - // GroupInsufficientFunds denotes state for group insufficient_funds - GroupInsufficientFunds Group_State = 3 - // GroupClosed denotes state for group closed - GroupClosed Group_State = 4 -) - -var Group_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "paused", - 3: "insufficient_funds", - 4: "closed", -} - -var Group_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "paused": 2, - "insufficient_funds": 3, - "closed": 4, -} - -func (x Group_State) String() string { - return proto.EnumName(Group_State_name, int32(x)) -} - -func (Group_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_92581ef27257da99, []int{8, 0} -} - -// MsgCloseGroup defines SDK message to close a single Group within a Deployment. -type MsgCloseGroup struct { - ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCloseGroup) Reset() { *m = MsgCloseGroup{} } -func (m *MsgCloseGroup) String() string { return proto.CompactTextString(m) } -func (*MsgCloseGroup) ProtoMessage() {} -func (*MsgCloseGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_92581ef27257da99, []int{0} -} -func (m *MsgCloseGroup) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseGroup.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseGroup.Merge(m, src) -} -func (m *MsgCloseGroup) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseGroup) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseGroup proto.InternalMessageInfo - -func (m *MsgCloseGroup) GetID() GroupID { - if m != nil { - return m.ID - } - return GroupID{} -} - -// MsgCloseGroupResponse defines the Msg/CloseGroup response type. 
-type MsgCloseGroupResponse struct { -} - -func (m *MsgCloseGroupResponse) Reset() { *m = MsgCloseGroupResponse{} } -func (m *MsgCloseGroupResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCloseGroupResponse) ProtoMessage() {} -func (*MsgCloseGroupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_92581ef27257da99, []int{1} -} -func (m *MsgCloseGroupResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseGroupResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseGroupResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseGroupResponse.Merge(m, src) -} -func (m *MsgCloseGroupResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseGroupResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseGroupResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseGroupResponse proto.InternalMessageInfo - -// MsgPauseGroup defines SDK message to close a single Group within a Deployment. -type MsgPauseGroup struct { - ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` -} - -func (m *MsgPauseGroup) Reset() { *m = MsgPauseGroup{} } -func (m *MsgPauseGroup) String() string { return proto.CompactTextString(m) } -func (*MsgPauseGroup) ProtoMessage() {} -func (*MsgPauseGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_92581ef27257da99, []int{2} -} -func (m *MsgPauseGroup) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgPauseGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgPauseGroup.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgPauseGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgPauseGroup.Merge(m, src) -} -func (m *MsgPauseGroup) XXX_Size() int { - return m.Size() -} -func (m *MsgPauseGroup) XXX_DiscardUnknown() { - xxx_messageInfo_MsgPauseGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgPauseGroup proto.InternalMessageInfo - -func (m *MsgPauseGroup) GetID() GroupID { - if m != nil { - return m.ID - } - return GroupID{} -} - -// MsgPauseGroupResponse defines the Msg/PauseGroup response type. 
-type MsgPauseGroupResponse struct { -} - -func (m *MsgPauseGroupResponse) Reset() { *m = MsgPauseGroupResponse{} } -func (m *MsgPauseGroupResponse) String() string { return proto.CompactTextString(m) } -func (*MsgPauseGroupResponse) ProtoMessage() {} -func (*MsgPauseGroupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_92581ef27257da99, []int{3} -} -func (m *MsgPauseGroupResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgPauseGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgPauseGroupResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgPauseGroupResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgPauseGroupResponse.Merge(m, src) -} -func (m *MsgPauseGroupResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgPauseGroupResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgPauseGroupResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgPauseGroupResponse proto.InternalMessageInfo - -// MsgStartGroup defines SDK message to close a single Group within a Deployment. -type MsgStartGroup struct { - ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` -} - -func (m *MsgStartGroup) Reset() { *m = MsgStartGroup{} } -func (m *MsgStartGroup) String() string { return proto.CompactTextString(m) } -func (*MsgStartGroup) ProtoMessage() {} -func (*MsgStartGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_92581ef27257da99, []int{4} -} -func (m *MsgStartGroup) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgStartGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgStartGroup.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgStartGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgStartGroup.Merge(m, src) -} -func (m *MsgStartGroup) XXX_Size() int { - return m.Size() -} -func (m *MsgStartGroup) XXX_DiscardUnknown() { - xxx_messageInfo_MsgStartGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgStartGroup proto.InternalMessageInfo - -func (m *MsgStartGroup) GetID() GroupID { - if m != nil { - return m.ID - } - return GroupID{} -} - -// MsgStartGroupResponse defines the Msg/StartGroup response type. 
-type MsgStartGroupResponse struct { -} - -func (m *MsgStartGroupResponse) Reset() { *m = MsgStartGroupResponse{} } -func (m *MsgStartGroupResponse) String() string { return proto.CompactTextString(m) } -func (*MsgStartGroupResponse) ProtoMessage() {} -func (*MsgStartGroupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_92581ef27257da99, []int{5} -} -func (m *MsgStartGroupResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgStartGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgStartGroupResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgStartGroupResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgStartGroupResponse.Merge(m, src) -} -func (m *MsgStartGroupResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgStartGroupResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgStartGroupResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgStartGroupResponse proto.InternalMessageInfo - -// GroupID stores owner, deployment sequence number and group sequence number -type GroupID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` -} - -func (m *GroupID) Reset() { *m = GroupID{} } -func (*GroupID) ProtoMessage() {} -func (*GroupID) Descriptor() ([]byte, []int) { - return fileDescriptor_92581ef27257da99, []int{6} -} -func (m *GroupID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GroupID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GroupID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GroupID) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupID.Merge(m, src) -} -func (m *GroupID) XXX_Size() int { - return m.Size() -} -func (m *GroupID) XXX_DiscardUnknown() { - xxx_messageInfo_GroupID.DiscardUnknown(m) -} - -var xxx_messageInfo_GroupID proto.InternalMessageInfo - -func (m *GroupID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *GroupID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *GroupID) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -// GroupSpec stores group specifications -type GroupSpec struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` - Requirements v1beta1.PlacementRequirements `protobuf:"bytes,2,opt,name=requirements,proto3" json:"requirements" yaml:"requirements"` - Resources []Resource `protobuf:"bytes,3,rep,name=resources,proto3" json:"resources" yaml:"resources"` -} - -func (m *GroupSpec) Reset() { *m = GroupSpec{} } -func (m *GroupSpec) String() string { return proto.CompactTextString(m) } -func (*GroupSpec) ProtoMessage() {} -func (*GroupSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_92581ef27257da99, []int{7} -} -func (m *GroupSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GroupSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GroupSpec.Marshal(b, m, deterministic) - } 
else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GroupSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupSpec.Merge(m, src) -} -func (m *GroupSpec) XXX_Size() int { - return m.Size() -} -func (m *GroupSpec) XXX_DiscardUnknown() { - xxx_messageInfo_GroupSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_GroupSpec proto.InternalMessageInfo - -// Group stores group id, state and specifications of group -type Group struct { - GroupID GroupID `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"id" yaml:"id"` - State Group_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.deployment.v1beta1.Group_State" json:"state" yaml:"state"` - GroupSpec GroupSpec `protobuf:"bytes,3,opt,name=group_spec,json=groupSpec,proto3" json:"spec" yaml:"spec"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` -} - -func (m *Group) Reset() { *m = Group{} } -func (m *Group) String() string { return proto.CompactTextString(m) } -func (*Group) ProtoMessage() {} -func (*Group) Descriptor() ([]byte, []int) { - return fileDescriptor_92581ef27257da99, []int{8} -} -func (m *Group) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Group.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Group) XXX_Merge(src proto.Message) { - xxx_messageInfo_Group.Merge(m, src) -} -func (m *Group) XXX_Size() int { - return m.Size() -} -func (m *Group) XXX_DiscardUnknown() { - xxx_messageInfo_Group.DiscardUnknown(m) -} - -var xxx_messageInfo_Group proto.InternalMessageInfo - -func (m *Group) GetGroupID() GroupID { - if m != nil { - return m.GroupID - } - return GroupID{} -} - -func (m *Group) GetState() Group_State { - if m != nil { - return m.State - } - return GroupStateInvalid -} - -func (m *Group) GetGroupSpec() GroupSpec { - if m != nil { - return m.GroupSpec - } - return GroupSpec{} -} - -func (m *Group) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -// Resource stores unit, total count and price of resource -type Resource struct { - Resources v1beta1.ResourceUnits `protobuf:"bytes,1,opt,name=resources,proto3" json:"unit" yaml:"unit"` - Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count" yaml:"count"` - Price types.Coin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` -} - -func (m *Resource) Reset() { *m = Resource{} } -func (m *Resource) String() string { return proto.CompactTextString(m) } -func (*Resource) ProtoMessage() {} -func (*Resource) Descriptor() ([]byte, []int) { - return fileDescriptor_92581ef27257da99, []int{9} -} -func (m *Resource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Resource.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Resource) XXX_Merge(src proto.Message) { - xxx_messageInfo_Resource.Merge(m, src) -} -func (m *Resource) XXX_Size() int { - return m.Size() -} -func (m *Resource) XXX_DiscardUnknown() { - xxx_messageInfo_Resource.DiscardUnknown(m) -} - -var 
xxx_messageInfo_Resource proto.InternalMessageInfo - -func (m *Resource) GetResources() v1beta1.ResourceUnits { - if m != nil { - return m.Resources - } - return v1beta1.ResourceUnits{} -} - -func (m *Resource) GetCount() uint32 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *Resource) GetPrice() types.Coin { - if m != nil { - return m.Price - } - return types.Coin{} -} - -func init() { - proto.RegisterEnum("akash.deployment.v1beta1.Group_State", Group_State_name, Group_State_value) - proto.RegisterType((*MsgCloseGroup)(nil), "akash.deployment.v1beta1.MsgCloseGroup") - proto.RegisterType((*MsgCloseGroupResponse)(nil), "akash.deployment.v1beta1.MsgCloseGroupResponse") - proto.RegisterType((*MsgPauseGroup)(nil), "akash.deployment.v1beta1.MsgPauseGroup") - proto.RegisterType((*MsgPauseGroupResponse)(nil), "akash.deployment.v1beta1.MsgPauseGroupResponse") - proto.RegisterType((*MsgStartGroup)(nil), "akash.deployment.v1beta1.MsgStartGroup") - proto.RegisterType((*MsgStartGroupResponse)(nil), "akash.deployment.v1beta1.MsgStartGroupResponse") - proto.RegisterType((*GroupID)(nil), "akash.deployment.v1beta1.GroupID") - proto.RegisterType((*GroupSpec)(nil), "akash.deployment.v1beta1.GroupSpec") - proto.RegisterType((*Group)(nil), "akash.deployment.v1beta1.Group") - proto.RegisterType((*Resource)(nil), "akash.deployment.v1beta1.Resource") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta1/group.proto", fileDescriptor_92581ef27257da99) -} - -var fileDescriptor_92581ef27257da99 = []byte{ - // 869 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x8f, 0xdb, 0x44, - 0x14, 0x8e, 0x13, 0xa7, 0xbb, 0x99, 0x74, 0x21, 0x0c, 0x94, 0x4d, 0x5d, 0xd5, 0x76, 0x07, 0x2a, - 0x05, 0x55, 0xd8, 0x6a, 0x7a, 0x5b, 0x89, 0x03, 0xe9, 0x8a, 0x6a, 0x0f, 0xc0, 0xca, 0x2b, 0x40, - 0x42, 0x48, 0x8b, 0x63, 0xcf, 0xba, 0xa3, 0x26, 0x33, 0x5e, 0xcf, 0xb8, 0x55, 0x39, 0x72, 0xaa, - 0xf6, 0xc4, 0x91, 0xcb, 0x4a, 0x95, 0x38, 0x70, 0xe3, 0xcc, 0x9f, 0xd0, 0x63, 0x8f, 0x9c, 0x2c, - 0x94, 0xbd, 0xa0, 0xbd, 0x20, 0xe5, 0x2f, 0x40, 0xf3, 0x23, 0xeb, 0x18, 0x6d, 0x55, 0xf5, 0xd2, - 0x53, 0xf2, 0xde, 0xfb, 0xde, 0xf7, 0x3e, 0xbf, 0xf9, 0x3c, 0x06, 0x1f, 0xc7, 0x8f, 0x62, 0xfe, - 0x30, 0x4c, 0x71, 0x3e, 0x63, 0x4f, 0xe7, 0x98, 0x8a, 0xf0, 0xf1, 0xdd, 0x29, 0x16, 0xf1, 0xdd, - 0x30, 0x2b, 0x58, 0x99, 0x07, 0x79, 0xc1, 0x04, 0x83, 0x43, 0x85, 0x0a, 0x6a, 0x54, 0x60, 0x50, - 0xce, 0x07, 0x19, 0xcb, 0x98, 0x02, 0x85, 0xf2, 0x9f, 0xc6, 0x3b, 0xb7, 0x34, 0xeb, 0x34, 0xe6, - 0xf8, 0x82, 0xaf, 0xc0, 0x9c, 0x95, 0x45, 0x82, 0x0d, 0x04, 0x5d, 0x02, 0x89, 0x85, 0x28, 0xc8, - 0xb4, 0x14, 0x2b, 0x8c, 0x9b, 0x30, 0x3e, 0x67, 0xbc, 0x09, 0x4a, 0x18, 0xa1, 0xba, 0x8e, 0x32, - 0xb0, 0xf5, 0x25, 0xcf, 0xee, 0xcf, 0x18, 0xc7, 0x0f, 0xa4, 0x5a, 0xb8, 0x0f, 0xda, 0x24, 0x1d, - 0x5a, 0xbe, 0x35, 0xea, 0x8f, 0x6f, 0x05, 0xaf, 0x12, 0x1d, 0x28, 0xf0, 0xde, 0xee, 0xe4, 0xe6, - 0x8b, 0xca, 0x6b, 0x2d, 0x2a, 0xaf, 0xbd, 0xb7, 0x7b, 0x5e, 0x79, 0x6d, 0x92, 0x2e, 0x2b, 0xaf, - 0xf7, 0x34, 0x9e, 0xcf, 0x76, 0x10, 0x49, 0x51, 0xd4, 0x26, 0xe9, 0x8e, 0xfd, 0xcf, 0x73, 0xaf, - 0x85, 0xb6, 0xc1, 0xb5, 0xc6, 0xa0, 0x08, 0xf3, 0x9c, 0x51, 0x8e, 0x8d, 0x82, 0xfd, 0xb8, 0x7c, - 0x3b, 0x0a, 0xea, 0x41, 0xff, 0x53, 0x70, 0x20, 0xe2, 0x42, 0xbc, 0x0d, 0x05, 0xf5, 0xa0, 0x0b, - 0x05, 0x7f, 0x58, 0x60, 0xc3, 0xb0, 0xc1, 0x10, 0x74, 0xd9, 0x13, 0x8a, 0x0b, 0x35, 0xbf, 0x37, - 0xb9, 0x7e, 0x5e, 0x79, 0x3a, 0xb1, 0xac, 0xbc, 0xab, 0x9a, 0x55, 0x85, 0x28, 0xd2, 0x69, 0x78, - 0x0f, 0xd8, 
0x29, 0xc7, 0xc7, 0xc3, 0xb6, 0x6f, 0x8d, 0xec, 0x89, 0xb7, 0xa8, 0x3c, 0x7b, 0xf7, - 0x00, 0x1f, 0x9f, 0x57, 0x9e, 0xca, 0x2f, 0x2b, 0xaf, 0xaf, 0xdb, 0x64, 0x84, 0x22, 0x95, 0x94, - 0x4d, 0x99, 0x6c, 0xea, 0xf8, 0xd6, 0x68, 0x4b, 0x37, 0x3d, 0x30, 0x4d, 0x59, 0xa3, 0x29, 0xd3, - 0x4d, 0xf2, 0x67, 0x67, 0xf3, 0xd7, 0xe7, 0x5e, 0x4b, 0x3d, 0xc9, 0xef, 0x6d, 0xd0, 0x53, 0x82, - 0x0f, 0x72, 0x9c, 0xc0, 0x3b, 0xc0, 0xa6, 0xf1, 0x1c, 0x1b, 0xc5, 0xdb, 0x92, 0x44, 0xc6, 0x35, - 0x89, 0x8c, 0x50, 0xa4, 0x92, 0xf0, 0x27, 0x70, 0xb5, 0xc0, 0xc7, 0x25, 0x29, 0xb0, 0x5c, 0x26, - 0x57, 0xb2, 0xfb, 0xe3, 0x4f, 0xcc, 0x9a, 0xa5, 0x4f, 0x2f, 0x16, 0xbc, 0x3f, 0x8b, 0x13, 0x85, - 0x8a, 0xd6, 0x1a, 0x26, 0x77, 0xe4, 0xba, 0xcf, 0x2b, 0xaf, 0x41, 0xb3, 0xac, 0xbc, 0xf7, 0xf5, - 0xac, 0xf5, 0x2c, 0x8a, 0x1a, 0x20, 0x98, 0x81, 0xde, 0xea, 0x1d, 0xe2, 0xc3, 0x8e, 0xdf, 0x19, - 0xf5, 0xc7, 0xe8, 0xd5, 0xe7, 0x1b, 0x19, 0xe8, 0xe4, 0xb6, 0x99, 0x58, 0x37, 0x2f, 0x2b, 0x6f, - 0xb0, 0x1a, 0x67, 0x52, 0x28, 0xaa, 0xcb, 0x3b, 0x9b, 0xcf, 0x56, 0x9b, 0xfa, 0xd9, 0x06, 0x5d, - 0xed, 0xaa, 0x1f, 0xc1, 0xa6, 0xba, 0x10, 0x0e, 0xdf, 0xc4, 0x5b, 0xc8, 0x78, 0x6b, 0x65, 0x8f, - 0xcb, 0x0c, 0xb6, 0xa1, 0x68, 0xf7, 0x52, 0xf8, 0x2d, 0xe8, 0x72, 0x11, 0x0b, 0xac, 0x76, 0xfa, - 0xce, 0xf8, 0xf6, 0x6b, 0xe8, 0x83, 0x03, 0x09, 0xd6, 0x0e, 0x53, 0x7d, 0xb5, 0xc3, 0x54, 0x88, - 0x22, 0x9d, 0x86, 0x87, 0x00, 0x68, 0xe5, 0x3c, 0xc7, 0x89, 0xb2, 0x4c, 0x7f, 0xfc, 0xd1, 0x6b, - 0xc8, 0xa5, 0x31, 0x26, 0x37, 0xcc, 0xe2, 0x6c, 0xd9, 0x58, 0xdb, 0x41, 0x46, 0x28, 0xea, 0x65, - 0x17, 0x06, 0xba, 0x09, 0x40, 0x52, 0xe0, 0x58, 0xe0, 0xf4, 0x30, 0x16, 0x43, 0xdb, 0xb7, 0x46, - 0x9d, 0xa8, 0x67, 0x32, 0x9f, 0x0b, 0xf4, 0xa7, 0x05, 0xba, 0x4a, 0x2b, 0x44, 0x60, 0x83, 0xd0, - 0xc7, 0xf1, 0x8c, 0xa4, 0x83, 0x96, 0x73, 0xed, 0xe4, 0xd4, 0x7f, 0x4f, 0x0f, 0x93, 0xc5, 0x3d, - 0x5d, 0x80, 0xdb, 0xc0, 0x66, 0x39, 0xa6, 0x03, 0xcb, 0xd9, 0x3a, 0x39, 0xf5, 0xb5, 0x4d, 0xbf, - 0xce, 0x31, 0x85, 0x37, 0xc0, 0x95, 0x5c, 0xbe, 0xfd, 0xe9, 0xa0, 0xed, 0xbc, 0x7b, 0x72, 0xea, - 0xf7, 0x55, 0x49, 0x5d, 0x08, 0x29, 0x1c, 0x03, 0x48, 0x28, 0x2f, 0x8f, 0x8e, 0x48, 0x42, 0x30, - 0x15, 0x87, 0x47, 0x25, 0x4d, 0xf9, 0xa0, 0xe3, 0x38, 0x27, 0xa7, 0xfe, 0x87, 0x7a, 0xf9, 0x6b, - 0xe5, 0x2f, 0x64, 0x55, 0x12, 0x26, 0xf2, 0x42, 0x4b, 0x07, 0xf6, 0x1a, 0xa1, 0xba, 0xe3, 0x52, - 0xc7, 0x7e, 0xf6, 0x9b, 0xdb, 0x32, 0x2f, 0xfe, 0xbf, 0x16, 0xd8, 0x5c, 0xb9, 0x09, 0xfe, 0xb0, - 0x6e, 0xc2, 0xa6, 0x11, 0x1a, 0xee, 0x5f, 0x35, 0x7c, 0x43, 0x89, 0xe0, 0xf5, 0x2a, 0x4b, 0x4a, - 0x44, 0xbd, 0x4a, 0x19, 0xad, 0x3b, 0x4f, 0x5e, 0x1f, 0x09, 0x2b, 0xa9, 0x50, 0x1e, 0xd8, 0xd2, - 0x87, 0xab, 0x12, 0xf5, 0xe1, 0xaa, 0x10, 0x45, 0x3a, 0x0d, 0xbf, 0x02, 0xdd, 0xbc, 0x20, 0x09, - 0x36, 0xe7, 0x7a, 0x3d, 0xd0, 0x5f, 0x8c, 0xa6, 0x96, 0xfb, 0x8c, 0x50, 0x7d, 0xcf, 0x49, 0x3e, - 0x85, 0xaf, 0xf9, 0x54, 0x88, 0x22, 0x9d, 0xd6, 0x4f, 0x3c, 0xf9, 0xee, 0xc5, 0xc2, 0xb5, 0x5e, - 0x2e, 0x5c, 0xeb, 0xef, 0x85, 0x6b, 0xfd, 0x72, 0xe6, 0xb6, 0x5e, 0x9e, 0xb9, 0xad, 0xbf, 0xce, - 0xdc, 0xd6, 0xf7, 0x9f, 0x65, 0x44, 0x3c, 0x2c, 0xa7, 0x41, 0xc2, 0xe6, 0xa1, 0x7a, 0xea, 0x4f, - 0x29, 0x16, 0x4f, 0x58, 0xf1, 0xc8, 0x44, 0x71, 0x4e, 0xc2, 0x8c, 0x85, 0x94, 0xa5, 0xf8, 0x92, - 0x6f, 0xea, 0xf4, 0x8a, 0xfa, 0x6e, 0xdd, 0xfb, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xf9, 0xb6, 0xc2, - 0x82, 0x76, 0x07, 0x00, 0x00, -} - -func (m *MsgCloseGroup) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - 
return dAtA[:n], nil -} - -func (m *MsgCloseGroup) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroup(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCloseGroupResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseGroupResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgPauseGroup) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgPauseGroup) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgPauseGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroup(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgPauseGroupResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgPauseGroupResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgPauseGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgStartGroup) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgStartGroup) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgStartGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroup(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgStartGroupResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgStartGroupResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgStartGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *GroupID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GroupID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.GSeq != 0 { - i = encodeVarintGroup(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintGroup(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintGroup(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GroupSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GroupSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Resources) > 0 { - for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroup(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - { - size, err := m.Requirements.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroup(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGroup(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Group) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Group) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Group) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CreatedAt != 0 { - i = encodeVarintGroup(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - { - size, err := m.GroupSpec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroup(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.State != 0 { - i = encodeVarintGroup(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.GroupID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroup(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Resource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Resource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Resource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroup(dAtA, i, uint64(size)) - } - i-- - 
dAtA[i] = 0x1a - if m.Count != 0 { - i = encodeVarintGroup(dAtA, i, uint64(m.Count)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroup(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGroup(dAtA []byte, offset int, v uint64) int { - offset -= sovGroup(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *MsgCloseGroup) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovGroup(uint64(l)) - return n -} - -func (m *MsgCloseGroupResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgPauseGroup) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovGroup(uint64(l)) - return n -} - -func (m *MsgPauseGroupResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgStartGroup) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovGroup(uint64(l)) - return n -} - -func (m *MsgStartGroupResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *GroupID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovGroup(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovGroup(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovGroup(uint64(m.GSeq)) - } - return n -} - -func (m *GroupSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovGroup(uint64(l)) - } - l = m.Requirements.Size() - n += 1 + l + sovGroup(uint64(l)) - if len(m.Resources) > 0 { - for _, e := range m.Resources { - l = e.Size() - n += 1 + l + sovGroup(uint64(l)) - } - } - return n -} - -func (m *Group) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.GroupID.Size() - n += 1 + l + sovGroup(uint64(l)) - if m.State != 0 { - n += 1 + sovGroup(uint64(m.State)) - } - l = m.GroupSpec.Size() - n += 1 + l + sovGroup(uint64(l)) - if m.CreatedAt != 0 { - n += 1 + sovGroup(uint64(m.CreatedAt)) - } - return n -} - -func (m *Resource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Resources.Size() - n += 1 + l + sovGroup(uint64(l)) - if m.Count != 0 { - n += 1 + sovGroup(uint64(m.Count)) - } - l = m.Price.Size() - n += 1 + l + sovGroup(uint64(l)) - return n -} - -func sovGroup(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGroup(x uint64) (n int) { - return sovGroup(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *MsgCloseGroup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseGroup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseGroup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGroup(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroup - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseGroupResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseGroupResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGroup(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroup - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgPauseGroup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgPauseGroup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgPauseGroup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGroup(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroup - } - if (iNdEx + 
skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgPauseGroupResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgPauseGroupResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgPauseGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGroup(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroup - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgStartGroup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgStartGroup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgStartGroup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGroup(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroup - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgStartGroupResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgStartGroupResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgStartGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum 
{ - default: - iNdEx = preIndex - skippy, err := skipGroup(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroup - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGroup(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroup - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Requirements.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resources = append(m.Resources, Resource{}) - if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGroup(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroup - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Group) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Group: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Group: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.GroupID.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Group_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupSpec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.GroupSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGroup(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroup - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Resource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Resource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGroup(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroup - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGroup(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroup - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroup - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroup - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGroup - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGroup - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGroup - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGroup = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGroup = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGroup = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta1/group_pricing_validation.go b/go/node/deployment/v1beta1/group_pricing_validation.go deleted file mode 100644 index 4d3ecbb2..00000000 --- a/go/node/deployment/v1beta1/group_pricing_validation.go +++ /dev/null @@ -1,66 +0,0 @@ -package v1beta1 - -import ( - "fmt" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" -) - -func validateGroupPricing(gspec GroupSpec) error { - var price sdk.Coin - - mem := sdk.NewInt(0) - denom := "" - - for idx, resource := range gspec.Resources { - if err := validateUnitPricing(resource); err != nil { - return fmt.Errorf("group %v: %w", gspec.GetName(), err) - } - - // all must be same denomination - if denom == "" { - denom = resource.FullPrice().Denom - } else if resource.FullPrice().Denom != denom { - return fmt.Errorf("%w: denomination must be %q", ErrInvalidDeployment, denom) - } - - if idx == 0 { - price = resource.FullPrice() - } else { - rprice := resource.FullPrice() - if rprice.Denom != price.Denom { - return 
errors.Errorf("multi-denonimation group: (%v == %v fails)", rprice.Denom, price.Denom) - } - price = price.Add(rprice) - } - - memCount := sdk.NewInt(0) - if u := resource.Resources.Memory; u != nil { - memCount.Add(sdk.NewIntFromUint64(u.Quantity.Value())) - } - - mem = mem.Add(memCount.Mul(sdk.NewIntFromUint64(uint64(resource.Count)))) - } - - return nil -} - -func validateUnitPricing(rg Resource) error { - if !rg.GetPrice().IsValid() { - return errors.Errorf("error: invalid price object") - } - - if rg.Price.Amount.GT(sdk.NewIntFromUint64(validationConfig.MaxUnitPrice)) { - return errors.Errorf("error: invalid unit price (%v > %v fails)", validationConfig.MaxUnitPrice, rg.Price) - } - - if rg.Price.Amount.LT(sdk.NewIntFromUint64(validationConfig.MinUnitPrice)) { - return errors.Errorf("error: invalid unit price (%v < %v fails)", validationConfig.MinUnitPrice, rg.Price) - } - return nil -} - -func validateOrderBidDuration(_ GroupSpec) error { - return nil -} diff --git a/go/node/deployment/v1beta1/group_validation.go b/go/node/deployment/v1beta1/group_validation.go deleted file mode 100644 index e97f12aa..00000000 --- a/go/node/deployment/v1beta1/group_validation.go +++ /dev/null @@ -1,35 +0,0 @@ -package v1beta1 - -import "github.com/pkg/errors" - -// ValidateDeploymentGroup does validation for provided deployment group -func validateDeploymentGroup(gspec GroupSpec) error { - if err := ValidateResourceList(gspec); err != nil { - return err - } - if err := validateGroupPricing(gspec); err != nil { - return err - } - return validateOrderBidDuration(gspec) -} - -// ValidateDeploymentGroups does validation for all deployment groups -func ValidateDeploymentGroups(gspecs []GroupSpec) error { - if len(gspecs) == 0 { - return ErrInvalidGroups - } - - names := make(map[string]int, len(gspecs)) // Used as set - for _, group := range gspecs { - if err := group.ValidateBasic(); err != nil { - return err - } - - if _, exists := names[group.GetName()]; exists { - return errors.Errorf("duplicate deployment group name %q", group.GetName()) - } - names[group.GetName()] = 0 // Value stored does not matter - } - - return nil -} diff --git a/go/node/deployment/v1beta1/id.go b/go/node/deployment/v1beta1/id.go deleted file mode 100644 index a9876bd2..00000000 --- a/go/node/deployment/v1beta1/id.go +++ /dev/null @@ -1,103 +0,0 @@ -package v1beta1 - -import ( - fmt "fmt" - "strconv" - "strings" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -// Equals method compares specific deployment with provided deployment -func (id DeploymentID) Equals(other DeploymentID) bool { - return id.Owner == other.Owner && id.DSeq == other.DSeq -} - -// Validate method for DeploymentID and returns nil -func (id DeploymentID) Validate() error { - _, err := sdk.AccAddressFromBech32(id.Owner) - switch { - case err != nil: - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "DeploymentID: Invalid Owner Address") - case id.DSeq == 0: - return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "DeploymentID: Invalid Deployment Sequence") - } - return nil -} - -// String method for deployment IDs -func (id DeploymentID) String() string { - return fmt.Sprintf("%s/%d", id.Owner, id.DSeq) -} - -func (id DeploymentID) GetOwnerAddress() (sdk.Address, error) { - return sdk.AccAddressFromBech32(id.Owner) -} - -func ParseDeploymentID(val string) (DeploymentID, error) { - parts := strings.Split(val, "/") - return ParseDeploymentPath(parts) -} - -// ParseDeploymentPath returns DeploymentID 
details with provided queries, and return -// error if occurred due to wrong query -func ParseDeploymentPath(parts []string) (DeploymentID, error) { - if len(parts) != 2 { - return DeploymentID{}, ErrInvalidIDPath - } - - owner, err := sdk.AccAddressFromBech32(parts[0]) - if err != nil { - return DeploymentID{}, err - } - - dseq, err := strconv.ParseUint(parts[1], 10, 64) - if err != nil { - return DeploymentID{}, err - } - - return DeploymentID{ - Owner: owner.String(), - DSeq: dseq, - }, nil -} - -// MakeGroupID returns GroupID instance with provided deployment details -// and group sequence number. -func MakeGroupID(id DeploymentID, gseq uint32) GroupID { - return GroupID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: gseq, - } -} - -// DeploymentID method returns DeploymentID details with specific group details -func (id GroupID) DeploymentID() DeploymentID { - return DeploymentID{ - Owner: id.Owner, - DSeq: id.DSeq, - } -} - -// Equals method compares specific group with provided group -func (id GroupID) Equals(other GroupID) bool { - return id.DeploymentID().Equals(other.DeploymentID()) && id.GSeq == other.GSeq -} - -// Validate method for GroupID and returns nil -func (id GroupID) Validate() error { - if err := id.DeploymentID().Validate(); err != nil { - return sdkerrors.Wrap(err, "GroupID: Invalid DeploymentID") - } - if id.GSeq == 0 { - return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "GroupID: Invalid Group Sequence") - } - return nil -} - -// String method provides human readable representation of GroupID. -func (id GroupID) String() string { - return fmt.Sprintf("%s/%d", id.DeploymentID(), id.GSeq) -} diff --git a/go/node/deployment/v1beta1/key.go b/go/node/deployment/v1beta1/key.go deleted file mode 100644 index 507c2654..00000000 --- a/go/node/deployment/v1beta1/key.go +++ /dev/null @@ -1,20 +0,0 @@ -package v1beta1 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "deployment" - - // StoreKey is the store key string for deployment - StoreKey = ModuleName - - // RouterKey is the message route for deployment - RouterKey = ModuleName -) - -func DeploymentPrefix() []byte { - return []byte{0x01} -} - -func GroupPrefix() []byte { - return []byte{0x02} -} diff --git a/go/node/deployment/v1beta1/msgs.go b/go/node/deployment/v1beta1/msgs.go deleted file mode 100644 index 632e2b1c..00000000 --- a/go/node/deployment/v1beta1/msgs.go +++ /dev/null @@ -1,311 +0,0 @@ -package v1beta1 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" -) - -const ( - MsgTypeCreateDeployment = "create-deployment" - MsgTypeDepositDeployment = "deposit-deployment" - MsgTypeUpdateDeployment = "update-deployment" - MsgTypeCloseDeployment = "close-deployment" - MsgTypeCloseGroup = "close-group" - MsgTypePauseGroup = "pause-group" - MsgTypeStartGroup = "start-group" -) - -var ( - _, _, _, _ sdk.Msg = &MsgCreateDeployment{}, &MsgUpdateDeployment{}, &MsgCloseDeployment{}, &MsgCloseGroup{} -) - -// NewMsgCreateDeployment creates a new MsgCreateDeployment instance -func NewMsgCreateDeployment(id DeploymentID, groups []GroupSpec, version []byte, - deposit sdk.Coin, _ sdk.AccAddress) *MsgCreateDeployment { - return &MsgCreateDeployment{ - ID: id, - Groups: groups, - Version: version, - Deposit: deposit, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCreateDeployment) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCreateDeployment) Type() string { return MsgTypeCreateDeployment } - -// GetSignBytes encodes the 
message for signing -func (msg MsgCreateDeployment) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCreateDeployment) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// ValidateBasic does basic validation like check owner and groups length -func (msg MsgCreateDeployment) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - if len(msg.Groups) == 0 { - return ErrInvalidGroups - } - - if len(msg.Version) == 0 { - return ErrEmptyVersion - } - - if len(msg.Version) != ManifestVersionLength { - return ErrInvalidVersion - } - - for _, gs := range msg.Groups { - err := gs.ValidateBasic() - if err != nil { - return err - } - } - - return nil -} - -// NewMsgDepositDeployment creates a new MsgDepositDeployment instance -func NewMsgDepositDeployment(id DeploymentID, amount sdk.Coin, _ string) *MsgDepositDeployment { - return &MsgDepositDeployment{ - ID: id, - Amount: amount, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgDepositDeployment) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgDepositDeployment) Type() string { return MsgTypeDepositDeployment } - -// GetSignBytes encodes the message for signing -func (msg MsgDepositDeployment) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgDepositDeployment) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// ValidateBasic does basic validation like check owner and groups length -func (msg MsgDepositDeployment) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - - if msg.Amount.IsZero() { - return ErrInvalidDeposit - } - - return nil -} - -// NewMsgUpdateDeployment creates a new MsgUpdateDeployment instance -func NewMsgUpdateDeployment(id DeploymentID, groups []GroupSpec, version []byte) *MsgUpdateDeployment { - return &MsgUpdateDeployment{ - ID: id, - Groups: groups, - Version: version, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgUpdateDeployment) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgUpdateDeployment) Type() string { return MsgTypeUpdateDeployment } - -// ValidateBasic does basic validation -func (msg MsgUpdateDeployment) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - - if len(msg.Version) == 0 { - return ErrEmptyVersion - } - - if len(msg.Version) != ManifestVersionLength { - return ErrInvalidVersion - } - - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgUpdateDeployment) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgUpdateDeployment) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgCloseDeployment creates a new MsgCloseDeployment instance -func NewMsgCloseDeployment(id DeploymentID) *MsgCloseDeployment { - return &MsgCloseDeployment{ - ID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg 
MsgCloseDeployment) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCloseDeployment) Type() string { return MsgTypeCloseDeployment } - -// ValidateBasic does basic validation with deployment details -func (msg MsgCloseDeployment) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgCloseDeployment) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCloseDeployment) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgCloseGroup creates a new MsgCloseGroup instance -func NewMsgCloseGroup(id GroupID) *MsgCloseGroup { - return &MsgCloseGroup{ - ID: id, - } -} - -// Route implements the sdk.Msg interface for routing -func (msg MsgCloseGroup) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface exposing message type -func (msg MsgCloseGroup) Type() string { return MsgTypeCloseGroup } - -// ValidateBasic calls underlying GroupID.Validate() check and returns result -func (msg MsgCloseGroup) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgCloseGroup) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCloseGroup) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgPauseGroup creates a new MsgPauseGroup instance -func NewMsgPauseGroup(id GroupID) *MsgPauseGroup { - return &MsgPauseGroup{ - ID: id, - } -} - -// Route implements the sdk.Msg interface for routing -func (msg MsgPauseGroup) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface exposing message type -func (msg MsgPauseGroup) Type() string { return MsgTypePauseGroup } - -// ValidateBasic calls underlying GroupID.Validate() check and returns result -func (msg MsgPauseGroup) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgPauseGroup) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgPauseGroup) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgStartGroup creates a new MsgStartGroup instance -func NewMsgStartGroup(id GroupID) *MsgStartGroup { - return &MsgStartGroup{ - ID: id, - } -} - -// Route implements the sdk.Msg interface for routing -func (msg MsgStartGroup) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface exposing message type -func (msg MsgStartGroup) Type() string { return MsgTypeStartGroup } - -// ValidateBasic calls underlying GroupID.Validate() check and returns result -func (msg MsgStartGroup) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgStartGroup) GetSignBytes() []byte { - return 
sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgStartGroup) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} diff --git a/go/node/deployment/v1beta1/msgs_test.go b/go/node/deployment/v1beta1/msgs_test.go deleted file mode 100644 index c695a574..00000000 --- a/go/node/deployment/v1beta1/msgs_test.go +++ /dev/null @@ -1 +0,0 @@ -package v1beta1_test diff --git a/go/node/deployment/v1beta1/params.go b/go/node/deployment/v1beta1/params.go deleted file mode 100644 index 58a66c7a..00000000 --- a/go/node/deployment/v1beta1/params.go +++ /dev/null @@ -1,50 +0,0 @@ -package v1beta1 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" - "github.com/pkg/errors" -) - -var _ paramtypes.ParamSet = (*Params)(nil) - -var ( - DefaultDeploymentMinDeposit = sdk.NewCoin("uakt", sdk.NewInt(5000000)) -) - -const ( - keyDeploymentMinDeposit = "DeploymentMinDeposit" -) - -func ParamKeyTable() paramtypes.KeyTable { - return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) -} - -func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { - return paramtypes.ParamSetPairs{ - paramtypes.NewParamSetPair([]byte(keyDeploymentMinDeposit), &p.DeploymentMinDeposit, validateCoin), - } -} - -func DefaultParams() Params { - return Params{ - DeploymentMinDeposit: DefaultDeploymentMinDeposit, - } -} - -func (p Params) Validate() error { - if err := validateCoin(p.DeploymentMinDeposit); err != nil { - return err - } - - return nil -} - -func validateCoin(i interface{}) error { - _, ok := i.(sdk.Coin) - if !ok { - return errors.Wrapf(ErrInvalidParam, "%T", i) - } - - return nil -} diff --git a/go/node/deployment/v1beta1/params.pb.go b/go/node/deployment/v1beta1/params.pb.go deleted file mode 100644 index a1c5b02a..00000000 --- a/go/node/deployment/v1beta1/params.pb.go +++ /dev/null @@ -1,328 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta1/params.proto - -package v1beta1 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Params defines the parameters for the x/deployment package -type Params struct { - DeploymentMinDeposit types.Coin `protobuf:"bytes,1,opt,name=deployment_min_deposit,json=deploymentMinDeposit,proto3" json:"deployment_min_deposit" yaml:"deployment_min_deposit"` -} - -func (m *Params) Reset() { *m = Params{} } -func (m *Params) String() string { return proto.CompactTextString(m) } -func (*Params) ProtoMessage() {} -func (*Params) Descriptor() ([]byte, []int) { - return fileDescriptor_fe50954b0fed1b39, []int{0} -} -func (m *Params) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Params.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Params) XXX_Merge(src proto.Message) { - xxx_messageInfo_Params.Merge(m, src) -} -func (m *Params) XXX_Size() int { - return m.Size() -} -func (m *Params) XXX_DiscardUnknown() { - xxx_messageInfo_Params.DiscardUnknown(m) -} - -var xxx_messageInfo_Params proto.InternalMessageInfo - -func (m *Params) GetDeploymentMinDeposit() types.Coin { - if m != nil { - return m.DeploymentMinDeposit - } - return types.Coin{} -} - -func init() { - proto.RegisterType((*Params)(nil), "akash.deployment.v1beta1.Params") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta1/params.proto", fileDescriptor_fe50954b0fed1b39) -} - -var fileDescriptor_fe50954b0fed1b39 = []byte{ - // 271 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, - 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, - 0xc9, 0x17, 0x92, 0x00, 0x2b, 0xd3, 0x43, 0x28, 0xd3, 0x83, 0x2a, 0x93, 0x12, 0x49, 0xcf, 0x4f, - 0xcf, 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 0xa5, 0xe4, 0x92, 0xf3, 0x8b, 0x73, 0xf3, 0x8b, - 0xf5, 0x93, 0x12, 0x8b, 0x53, 0xe1, 0x26, 0x26, 0xe7, 0x67, 0xe6, 0x41, 0xe4, 0x95, 0x36, 0x33, - 0x72, 0xb1, 0x05, 0x80, 0x2d, 0x10, 0x5a, 0xc1, 0xc8, 0x25, 0x86, 0x30, 0x37, 0x3e, 0x37, 0x33, - 0x2f, 0x3e, 0x25, 0xb5, 0x20, 0xbf, 0x38, 0xb3, 0x44, 0x82, 0x51, 0x81, 0x51, 0x83, 0xdb, 0x48, - 0x52, 0x0f, 0x62, 0x98, 0x1e, 0xc8, 0x30, 0x98, 0xbd, 0x7a, 0xce, 0xf9, 0x99, 0x79, 0x4e, 0xe1, - 0x27, 0xee, 0xc9, 0x33, 0x3c, 0xba, 0x27, 0x2f, 0xe2, 0x02, 0x37, 0xc0, 0x37, 0x33, 0xcf, 0x05, - 0xa2, 0xfd, 0xd5, 0x3d, 0x79, 0x1c, 0x06, 0x7f, 0xba, 0x27, 0x2f, 0x5b, 0x99, 0x98, 0x9b, 0x63, - 0xa5, 0x84, 0x5d, 0x5e, 0x29, 0x48, 0x24, 0x05, 0x8b, 0x81, 0x4e, 0xe1, 0x27, 0x1e, 0xc9, 0x31, - 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, - 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x65, 0x9b, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, - 0x9f, 0xab, 0x0f, 0x0e, 0x2a, 0xdd, 0xbc, 0xd4, 0x92, 0xf2, 0xfc, 0xa2, 0x6c, 0x28, 0x2f, 0xb1, - 0x20, 0x53, 0x3f, 0x3d, 0x5f, 0x3f, 0x2f, 0x3f, 0x25, 0x15, 0x4b, 0x58, 0x27, 0xb1, 0x81, 0x43, - 0xc5, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x10, 0x3c, 0x27, 0x8e, 0x01, 0x00, 0x00, -} - -func (m *Params) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err 
- } - return dAtA[:n], nil -} - -func (m *Params) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.DeploymentMinDeposit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintParams(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintParams(dAtA []byte, offset int, v uint64) int { - offset -= sovParams(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Params) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.DeploymentMinDeposit.Size() - n += 1 + l + sovParams(uint64(l)) - return n -} - -func sovParams(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozParams(x uint64) (n int) { - return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Params) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Params: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeploymentMinDeposit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthParams - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthParams - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.DeploymentMinDeposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipParams(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipParams(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, 
ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthParams - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupParams - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthParams - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta1/query.pb.go b/go/node/deployment/v1beta1/query.pb.go deleted file mode 100644 index 72817db1..00000000 --- a/go/node/deployment/v1beta1/query.pb.go +++ /dev/null @@ -1,1627 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta1/query.proto - -package v1beta1 - -import ( - context "context" - fmt "fmt" - v1beta1 "github.com/akash-network/akash-api/go/node/escrow/v1beta1" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryDeploymentsRequest is request type for the Query/Deployments RPC method -type QueryDeploymentsRequest struct { - Filters DeploymentFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryDeploymentsRequest) Reset() { *m = QueryDeploymentsRequest{} } -func (m *QueryDeploymentsRequest) String() string { return proto.CompactTextString(m) } -func (*QueryDeploymentsRequest) ProtoMessage() {} -func (*QueryDeploymentsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e9d5676377f9641, []int{0} -} -func (m *QueryDeploymentsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryDeploymentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryDeploymentsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryDeploymentsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryDeploymentsRequest.Merge(m, src) -} -func (m *QueryDeploymentsRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryDeploymentsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryDeploymentsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryDeploymentsRequest proto.InternalMessageInfo - -func (m *QueryDeploymentsRequest) GetFilters() DeploymentFilters { - if m != nil { - return m.Filters - } - return DeploymentFilters{} -} - -func (m *QueryDeploymentsRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryDeploymentsResponse is response type for the Query/Deployments RPC method -type QueryDeploymentsResponse struct { - Deployments DeploymentResponses `protobuf:"bytes,1,rep,name=deployments,proto3,castrepeated=DeploymentResponses" json:"deployments"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryDeploymentsResponse) Reset() { *m = QueryDeploymentsResponse{} } -func (m *QueryDeploymentsResponse) String() string { return proto.CompactTextString(m) } -func (*QueryDeploymentsResponse) ProtoMessage() {} -func (*QueryDeploymentsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e9d5676377f9641, []int{1} -} -func (m *QueryDeploymentsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryDeploymentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryDeploymentsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryDeploymentsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryDeploymentsResponse.Merge(m, src) -} -func (m *QueryDeploymentsResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryDeploymentsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryDeploymentsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryDeploymentsResponse proto.InternalMessageInfo - -func (m *QueryDeploymentsResponse) GetDeployments() DeploymentResponses { - if m != nil { - return m.Deployments - } - return nil -} - -func (m *QueryDeploymentsResponse) GetPagination() 
*query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryDeploymentRequest is request type for the Query/Deployment RPC method -type QueryDeploymentRequest struct { - ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m *QueryDeploymentRequest) Reset() { *m = QueryDeploymentRequest{} } -func (m *QueryDeploymentRequest) String() string { return proto.CompactTextString(m) } -func (*QueryDeploymentRequest) ProtoMessage() {} -func (*QueryDeploymentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e9d5676377f9641, []int{2} -} -func (m *QueryDeploymentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryDeploymentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryDeploymentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryDeploymentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryDeploymentRequest.Merge(m, src) -} -func (m *QueryDeploymentRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryDeploymentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryDeploymentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryDeploymentRequest proto.InternalMessageInfo - -func (m *QueryDeploymentRequest) GetID() DeploymentID { - if m != nil { - return m.ID - } - return DeploymentID{} -} - -// QueryDeploymentResponse is response type for the Query/Deployment RPC method -type QueryDeploymentResponse struct { - Deployment Deployment `protobuf:"bytes,1,opt,name=deployment,proto3" json:"deployment" yaml:"deployment"` - Groups []Group `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups" yaml:"groups"` - EscrowAccount v1beta1.Account `protobuf:"bytes,3,opt,name=escrow_account,json=escrowAccount,proto3" json:"escrow_account"` -} - -func (m *QueryDeploymentResponse) Reset() { *m = QueryDeploymentResponse{} } -func (m *QueryDeploymentResponse) String() string { return proto.CompactTextString(m) } -func (*QueryDeploymentResponse) ProtoMessage() {} -func (*QueryDeploymentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e9d5676377f9641, []int{3} -} -func (m *QueryDeploymentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryDeploymentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryDeploymentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryDeploymentResponse.Merge(m, src) -} -func (m *QueryDeploymentResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryDeploymentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryDeploymentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryDeploymentResponse proto.InternalMessageInfo - -func (m *QueryDeploymentResponse) GetDeployment() Deployment { - if m != nil { - return m.Deployment - } - return Deployment{} -} - -func (m *QueryDeploymentResponse) GetGroups() []Group { - if m != nil { - return m.Groups - } - return nil -} - -func (m *QueryDeploymentResponse) GetEscrowAccount() v1beta1.Account { - if m != nil { - return m.EscrowAccount - } - return v1beta1.Account{} -} - -// QueryGroupRequest is request 
type for the Query/Group RPC method -type QueryGroupRequest struct { - ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m *QueryGroupRequest) Reset() { *m = QueryGroupRequest{} } -func (m *QueryGroupRequest) String() string { return proto.CompactTextString(m) } -func (*QueryGroupRequest) ProtoMessage() {} -func (*QueryGroupRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_5e9d5676377f9641, []int{4} -} -func (m *QueryGroupRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryGroupRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryGroupRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryGroupRequest.Merge(m, src) -} -func (m *QueryGroupRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryGroupRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryGroupRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryGroupRequest proto.InternalMessageInfo - -func (m *QueryGroupRequest) GetID() GroupID { - if m != nil { - return m.ID - } - return GroupID{} -} - -// QueryGroupResponse is response type for the Query/Group RPC method -type QueryGroupResponse struct { - Group Group `protobuf:"bytes,1,opt,name=group,proto3" json:"group"` -} - -func (m *QueryGroupResponse) Reset() { *m = QueryGroupResponse{} } -func (m *QueryGroupResponse) String() string { return proto.CompactTextString(m) } -func (*QueryGroupResponse) ProtoMessage() {} -func (*QueryGroupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e9d5676377f9641, []int{5} -} -func (m *QueryGroupResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryGroupResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryGroupResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryGroupResponse.Merge(m, src) -} -func (m *QueryGroupResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryGroupResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryGroupResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryGroupResponse proto.InternalMessageInfo - -func (m *QueryGroupResponse) GetGroup() Group { - if m != nil { - return m.Group - } - return Group{} -} - -func init() { - proto.RegisterType((*QueryDeploymentsRequest)(nil), "akash.deployment.v1beta1.QueryDeploymentsRequest") - proto.RegisterType((*QueryDeploymentsResponse)(nil), "akash.deployment.v1beta1.QueryDeploymentsResponse") - proto.RegisterType((*QueryDeploymentRequest)(nil), "akash.deployment.v1beta1.QueryDeploymentRequest") - proto.RegisterType((*QueryDeploymentResponse)(nil), "akash.deployment.v1beta1.QueryDeploymentResponse") - proto.RegisterType((*QueryGroupRequest)(nil), "akash.deployment.v1beta1.QueryGroupRequest") - proto.RegisterType((*QueryGroupResponse)(nil), "akash.deployment.v1beta1.QueryGroupResponse") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta1/query.proto", fileDescriptor_5e9d5676377f9641) -} - -var fileDescriptor_5e9d5676377f9641 = []byte{ - // 671 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x3f, 0x6f, 0xd3, 0x40, - 0x1c, 0x8d, 0x9d, 0xa6, 0x48, 0x17, 0x15, 0xa9, 0x07, 0x02, 0x2b, 0x80, 0x5d, 0xac, 0xd2, 0x94, - 0xfe, 0xb1, 0x49, 0xd8, 0x8a, 0x3a, 0x60, 0x45, 0xad, 0x0a, 0x0b, 0xf5, 0x02, 0x42, 0x48, 0xe8, - 0x92, 0x5c, 0x5d, 0xab, 0x89, 0xcf, 0xf5, 0x5d, 0xa8, 0xb2, 0xf2, 0x09, 0x40, 0x7c, 0x01, 0x16, - 0x24, 0xc4, 0xc0, 0xc4, 0x87, 0xe8, 0x58, 0x09, 0x21, 0x31, 0x05, 0x94, 0x30, 0x20, 0x06, 0x86, - 0x7e, 0x02, 0xe4, 0xbb, 0x4b, 0x6c, 0x48, 0x42, 0x92, 0x2d, 0xf1, 0xbd, 0xdf, 0x7b, 0xef, 0xf7, - 0x7b, 0xbf, 0xb3, 0xc1, 0x32, 0x3a, 0x42, 0xf4, 0xd0, 0xae, 0xe3, 0xb0, 0x41, 0xda, 0x4d, 0x1c, - 0x30, 0xfb, 0x45, 0xa9, 0x8a, 0x19, 0x2a, 0xd9, 0xc7, 0x2d, 0x1c, 0xb5, 0xad, 0x30, 0x22, 0x8c, - 0x40, 0x8d, 0xa3, 0xac, 0x04, 0x65, 0x49, 0x54, 0xe1, 0xb2, 0x47, 0x3c, 0xc2, 0x41, 0x76, 0xfc, - 0x4b, 0xe0, 0x0b, 0xd7, 0x3d, 0x42, 0xbc, 0x06, 0xb6, 0x51, 0xe8, 0xdb, 0x28, 0x08, 0x08, 0x43, - 0xcc, 0x27, 0x01, 0x95, 0xa7, 0x6b, 0x35, 0x42, 0x9b, 0x84, 0xda, 0x55, 0x44, 0xb1, 0x90, 0x19, - 0x88, 0x86, 0xc8, 0xf3, 0x03, 0x0e, 0x96, 0xd8, 0xdb, 0x63, 0xfd, 0xa5, 0xcc, 0x08, 0xe8, 0xf8, - 0x56, 0xbc, 0x88, 0xb4, 0x42, 0x89, 0x5a, 0x12, 0x28, 0x4c, 0x6b, 0x11, 0x39, 0x19, 0x20, 0x58, - 0x3b, 0xc4, 0xd2, 0x9e, 0xf9, 0x51, 0x01, 0x57, 0xf7, 0x63, 0x57, 0x95, 0x01, 0x13, 0x75, 0xf1, - 0x71, 0x0b, 0x53, 0x06, 0x1f, 0x82, 0x0b, 0x07, 0x7e, 0x83, 0xe1, 0x88, 0x6a, 0xca, 0x92, 0xb2, - 0x9a, 0x2f, 0xaf, 0x5b, 0xe3, 0x46, 0x63, 0x25, 0xe5, 0x3b, 0xa2, 0xc4, 0x99, 0x3b, 0xed, 0x18, - 0x19, 0xb7, 0xcf, 0x00, 0x77, 0x00, 0x48, 0xfa, 0xd5, 0x54, 0xce, 0xb7, 0x62, 0x89, 0xe1, 0x58, - 0xf1, 0x70, 0x2c, 0x91, 0x41, 0x9f, 0xf0, 0x11, 0xf2, 0xb0, 0x34, 0xe2, 0xa6, 0x2a, 0xcd, 0x2f, - 0x0a, 0xd0, 0x86, 0x0d, 0xd3, 0x90, 0x04, 0x14, 0xc3, 0x10, 0xe4, 0x13, 0x6f, 0xb1, 0xeb, 0xec, - 0x6a, 0xbe, 0x5c, 0x1a, 0xef, 0xfa, 0x1f, 0xa2, 0x3e, 0x8f, 0x73, 0x2d, 0xf6, 0xfe, 0xe1, 0x9b, - 0x71, 0x69, 0xf8, 0x8c, 0xba, 0x69, 0x09, 0xb8, 0x3b, 0xa2, 0xad, 0xe2, 0xc4, 0xb6, 0x04, 0xd5, - 0x5f, 0x7d, 0x3d, 0x03, 0x57, 0x86, 0xdc, 0x88, 0x18, 0x1c, 0xa0, 0xfa, 0x75, 0x99, 0xc0, 0xca, - 0x34, 0x09, 0xec, 0x55, 0x1c, 0x10, 0x37, 0xd0, 0xed, 0x18, 0xea, 0x5e, 0xc5, 0x55, 0xfd, 0xba, - 0xf9, 0x49, 0x1d, 0x8a, 0x79, 0x30, 0xb4, 0x26, 0x00, 0x09, 0x9d, 0xd4, 0x59, 0x9e, 0x46, 0xc7, - 0x29, 0xc6, 0x2a, 0xbf, 0x3a, 0x46, 0xaa, 0xfe, 0xbc, 0x63, 0x2c, 0xb6, 0x51, 0xb3, 0xb1, 0x65, - 0x26, 0xcf, 0x4c, 0x37, 0x05, 0x80, 0x4f, 0xc0, 0x3c, 0x5f, 0x51, 0xaa, 0xa9, 0x3c, 0x1e, 0x63, - 0xbc, 0xd4, 0x6e, 0x8c, 0x73, 0x0c, 0xa9, 0x22, 0xcb, 0xce, 0x3b, 0xc6, 0x82, 0x50, 0x10, 0xff, - 0x4d, 0x57, 0x1e, 0xc0, 0x07, 0xe0, 0xa2, 0xd8, 0xf4, 0xe7, 0xa8, 0x56, 0x23, 0xad, 0x80, 0x69, - 0x59, 0xde, 0xcc, 0x0d, 0xa9, 0x20, 0x0e, 0x07, 0xec, 0xf7, 0x05, 0x48, 0x2e, 0xea, 0x82, 0x38, - 0x95, 0x0f, 0xb7, 0xe6, 0x7e, 0xbe, 0x35, 0x32, 0xa6, 0x0b, 0x16, 0xf9, 0xd4, 0xb8, 0x91, 0x7e, - 0x1e, 0xdb, 0xa9, 0x3c, 0x6e, 0x4e, 0x30, 0x3f, 0x22, 0x8a, 0x7d, 0x00, 0xd3, 0x9c, 0x32, 0x84, - 0x7b, 0x20, 0xc7, 0xbb, 0x90, 0xbc, 0x13, 0x87, 0x22, 0x4c, 0x8b, 0x9a, 0xf2, 0xef, 0x2c, 0xc8, - 0x71, 0x4e, 0xf8, 0x5e, 0x01, 0xf9, 0xd4, 0xc5, 0x80, 0xd3, 0xef, 0x7e, 0xff, 0xd6, 0x17, 0xca, - 0xb3, 0x94, 0x08, 0xf7, 0x66, 0xf9, 0xe5, 0xe7, 0x1f, 0x6f, 0xd4, 0x0d, 0xb8, 0x66, 0x4f, 0xf1, - 0x06, 0xa3, 0x76, 0xc3, 0xa7, 0x0c, 0xbe, 0x53, 0x00, 0x48, 0xb8, 0xe0, 0x9d, 0x19, 0x6e, 0xa9, - 0x30, 0x3a, 0xfb, 0xbd, 0x9e, 0xd5, 0xa7, 0x1f, 0x1c, 0x10, 0xf8, 0x5a, 0x01, 0x39, 0x3e, 0x73, - 0xb8, 0x3e, 0x41, 0x30, 0xbd, 0x25, 0x85, 0x8d, 0xe9, 
0xc0, 0xd2, 0xd8, 0x26, 0x37, 0x56, 0x84, - 0xb7, 0xec, 0xff, 0xbf, 0xd7, 0x85, 0x27, 0xe7, 0xf1, 0x69, 0x57, 0x57, 0xce, 0xba, 0xba, 0xf2, - 0xbd, 0xab, 0x2b, 0xaf, 0x7a, 0x7a, 0xe6, 0xac, 0xa7, 0x67, 0xbe, 0xf6, 0xf4, 0xcc, 0xd3, 0x6d, - 0xcf, 0x67, 0x87, 0xad, 0xaa, 0x55, 0x23, 0x4d, 0x41, 0xb5, 0x19, 0x60, 0x76, 0x42, 0xa2, 0x23, - 0xf9, 0x2f, 0xfe, 0x48, 0x79, 0xc4, 0x0e, 0x48, 0x1d, 0x8f, 0x10, 0xa9, 0xce, 0xf3, 0xaf, 0xc2, - 0xdd, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9a, 0xb5, 0x31, 0x38, 0x2a, 0x07, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // Deployments queries deployments - Deployments(ctx context.Context, in *QueryDeploymentsRequest, opts ...grpc.CallOption) (*QueryDeploymentsResponse, error) - // Deployment queries deployment details - Deployment(ctx context.Context, in *QueryDeploymentRequest, opts ...grpc.CallOption) (*QueryDeploymentResponse, error) - // Group queries group details - Group(ctx context.Context, in *QueryGroupRequest, opts ...grpc.CallOption) (*QueryGroupResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Deployments(ctx context.Context, in *QueryDeploymentsRequest, opts ...grpc.CallOption) (*QueryDeploymentsResponse, error) { - out := new(QueryDeploymentsResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta1.Query/Deployments", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Deployment(ctx context.Context, in *QueryDeploymentRequest, opts ...grpc.CallOption) (*QueryDeploymentResponse, error) { - out := new(QueryDeploymentResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta1.Query/Deployment", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Group(ctx context.Context, in *QueryGroupRequest, opts ...grpc.CallOption) (*QueryGroupResponse, error) { - out := new(QueryGroupResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta1.Query/Group", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // Deployments queries deployments - Deployments(context.Context, *QueryDeploymentsRequest) (*QueryDeploymentsResponse, error) - // Deployment queries deployment details - Deployment(context.Context, *QueryDeploymentRequest) (*QueryDeploymentResponse, error) - // Group queries group details - Group(context.Context, *QueryGroupRequest) (*QueryGroupResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
-type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Deployments(ctx context.Context, req *QueryDeploymentsRequest) (*QueryDeploymentsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Deployments not implemented") -} -func (*UnimplementedQueryServer) Deployment(ctx context.Context, req *QueryDeploymentRequest) (*QueryDeploymentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Deployment not implemented") -} -func (*UnimplementedQueryServer) Group(ctx context.Context, req *QueryGroupRequest) (*QueryGroupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Group not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Deployments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryDeploymentsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Deployments(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta1.Query/Deployments", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Deployments(ctx, req.(*QueryDeploymentsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Deployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryDeploymentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Deployment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta1.Query/Deployment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Deployment(ctx, req.(*QueryDeploymentRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Group_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryGroupRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Group(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta1.Query/Group", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Group(ctx, req.(*QueryGroupRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.deployment.v1beta1.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Deployments", - Handler: _Query_Deployments_Handler, - }, - { - MethodName: "Deployment", - Handler: _Query_Deployment_Handler, - }, - { - MethodName: "Group", - Handler: _Query_Group_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/deployment/v1beta1/query.proto", -} - -func (m *QueryDeploymentsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryDeploymentsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryDeploymentsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryDeploymentsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryDeploymentsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryDeploymentsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Deployments) > 0 { - for iNdEx := len(m.Deployments) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Deployments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryDeploymentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryDeploymentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryDeploymentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryDeploymentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.EscrowAccount.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Deployment.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - 
i, nil -} - -func (m *QueryGroupRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryGroupRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryGroupRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryGroupResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryGroupResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryDeploymentsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filters.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryDeploymentsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Deployments) > 0 { - for _, e := range m.Deployments { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryDeploymentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryDeploymentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Deployment.Size() - n += 1 + l + sovQuery(uint64(l)) - if len(m.Groups) > 0 { - for _, e := range m.Groups { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - l = m.EscrowAccount.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryGroupRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryGroupResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Group.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryDeploymentsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l 
{ - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryDeploymentsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryDeploymentsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryDeploymentsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryDeploymentsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryDeploymentsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deployments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Deployments = append(m.Deployments, QueryDeploymentResponse{}) - if err := 
m.Deployments[len(m.Deployments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryDeploymentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryDeploymentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryDeploymentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryDeploymentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryDeploymentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryDeploymentResponse: illegal tag %d 
(wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deployment", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Deployment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, Group{}) - if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EscrowAccount", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EscrowAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryGroupRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryGroupRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryGroupRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryGroupResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryGroupResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, 
ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta1/query.pb.gw.go b/go/node/deployment/v1beta1/query.pb.gw.go deleted file mode 100644 index 68399167..00000000 --- a/go/node/deployment/v1beta1/query.pb.gw.go +++ /dev/null @@ -1,337 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/deployment/v1beta1/query.proto - -/* -Package v1beta1 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package v1beta1 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_Deployments_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Deployments_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryDeploymentsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployments_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Deployments(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Deployments_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryDeploymentsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployments_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Deployments(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Deployment_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Deployment_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryDeploymentRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_Query_Deployment_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Deployment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Deployment_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryDeploymentRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployment_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Deployment(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Group_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Group_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryGroupRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Group_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Group(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Group_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryGroupRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Group_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Group(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
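The comment above points callers at RegisterQueryHandlerFromEndpoint rather than this in-process registration. A minimal sketch of that wiring for the (now removed) v1beta1 package follows; the listen address, gRPC endpoint, and insecure dial option are illustrative assumptions, not part of this diff.

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"

	deploymentv1beta1 "github.com/akash-network/akash-api/go/node/deployment/v1beta1"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Proxy REST requests on :8080 to a gRPC query server on :9090.
	// Both addresses and the insecure transport are placeholders.
	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := deploymentv1beta1.RegisterQueryHandlerFromEndpoint(ctx, mux, "localhost:9090", opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", mux))
}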
-func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_Deployments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Deployments_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Deployments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Deployment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Deployment_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Deployment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Group_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Group_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Group_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_Deployments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Deployments_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Deployments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Deployment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Deployment_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Deployment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_Group_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Group_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Group_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Query_Deployments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "deployment", "v1beta1", "deployments", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Deployment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "deployment", "v1beta1", "deployments", "info"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Group_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "deployment", "v1beta1", "groups", "info"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_Deployments_0 = runtime.ForwardResponseMessage - - forward_Query_Deployment_0 = runtime.ForwardResponseMessage - - forward_Query_Group_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/deployment/v1beta1/resource_list_validation.go b/go/node/deployment/v1beta1/resource_list_validation.go deleted file mode 100644 index f07b9198..00000000 --- a/go/node/deployment/v1beta1/resource_list_validation.go +++ /dev/null @@ -1,157 +0,0 @@ -package v1beta1 - -import ( - "fmt" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" - - types "github.com/akash-network/akash-api/go/node/types/v1beta1" -) - -var ( - ErrNoGroupsPresent = errors.New("validation: no groups present") - ErrGroupEmptyName = errors.New("validation: group has empty name") -) - -func ValidateResourceList(rlist types.ResourceGroup) error { - if rlist.GetName() == "" { - return ErrGroupEmptyName - } - - units := rlist.GetResources() - - if count := len(units); count > validationConfig.MaxGroupUnits { - return errors.Errorf("group %v: too many units (%v > %v)", rlist.GetName(), count, validationConfig.MaxGroupUnits) - } - - limits := newLimits() - - for _, resource := range units { - gLimits, err := validateResourceGroup(resource) - if err != nil { - return fmt.Errorf("group %v: %w", rlist.GetName(), err) - } - - gLimits.mul(resource.Count) - - limits.add(gLimits) - } - - if limits.cpu.GT(sdk.NewIntFromUint64(validationConfig.MaxGroupCPU)) || limits.cpu.LTE(sdk.ZeroInt()) { - return errors.Errorf("group %v: invalid total CPU (%v > %v > %v fails)", - rlist.GetName(), validationConfig.MaxGroupCPU, limits.cpu, 0) - } - - if limits.memory.GT(sdk.NewIntFromUint64(validationConfig.MaxGroupMemory)) || limits.memory.LTE(sdk.ZeroInt()) { - return errors.Errorf("group %v: invalid total memory (%v > %v > %v fails)", - rlist.GetName(), validationConfig.MaxGroupMemory, limits.memory, 0) - } - - if limits.storage.GT(sdk.NewIntFromUint64(validationConfig.MaxGroupStorage)) || limits.storage.LTE(sdk.ZeroInt()) { - return errors.Errorf("group %v: invalid total storage (%v > %v > %v 
fails)", - rlist.GetName(), validationConfig.MaxGroupStorage, limits.storage, 0) - } - - return nil -} - -func validateResourceGroup(rg types.Resources) (resourceLimits, error) { - limits, err := validateResourceUnit(rg.Resources) - if err != nil { - return resourceLimits{}, err - } - - if rg.Count > uint32(validationConfig.MaxUnitCount) || rg.Count < uint32(validationConfig.MinUnitCount) { - return resourceLimits{}, errors.Errorf("error: invalid unit count (%v > %v > %v fails)", - validationConfig.MaxUnitCount, rg.Count, validationConfig.MinUnitCount) - } - - return limits, nil -} - -func validateResourceUnit(units types.ResourceUnits) (resourceLimits, error) { - limits := newLimits() - - val, err := validateCPU(units.CPU) - if err != nil { - return resourceLimits{}, err - } - limits.cpu = limits.cpu.Add(val) - - val, err = validateMemory(units.Memory) - if err != nil { - return resourceLimits{}, err - } - limits.memory = limits.memory.Add(val) - - val, err = validateStorage(units.Storage) - if err != nil { - return resourceLimits{}, err - } - limits.storage = limits.storage.Add(val) - - return limits, nil -} - -func validateCPU(u *types.CPU) (sdk.Int, error) { - if u == nil { - return sdk.Int{}, errors.Errorf("error: invalid unit CPU, cannot be nil") - } - if (u.Units.Value() > uint64(validationConfig.MaxUnitCPU)) || (u.Units.Value() < uint64(validationConfig.MinUnitCPU)) { - return sdk.Int{}, errors.Errorf("error: invalid unit CPU (%v > %v > %v fails)", - validationConfig.MaxUnitCPU, u.Units.Value(), validationConfig.MinUnitCPU) - } - - return u.Units.Val, nil -} - -func validateMemory(u *types.Memory) (sdk.Int, error) { - if u == nil { - return sdk.Int{}, errors.Errorf("error: invalid unit memory, cannot be nil") - } - if (u.Quantity.Value() > uint64(validationConfig.MaxUnitMemory)) || (u.Quantity.Value() < uint64(validationConfig.MinUnitMemory)) { - return sdk.Int{}, errors.Errorf("error: invalid unit memory (%v > %v > %v fails)", - validationConfig.MaxUnitMemory, u.Quantity.Value(), validationConfig.MinUnitMemory) - } - - return u.Quantity.Val, nil -} - -func validateStorage(u *types.Storage) (sdk.Int, error) { - if u == nil { - return sdk.Int{}, errors.Errorf("error: invalid unit storage, cannot be nil") - } - if (u.Quantity.Value() > uint64(validationConfig.MaxUnitStorage)) || (u.Quantity.Value() < uint64(validationConfig.MinUnitStorage)) { - return sdk.Int{}, errors.Errorf("error: invalid unit storage (%v > %v > %v fails)", - validationConfig.MaxUnitStorage, u.Quantity.Value(), validationConfig.MinUnitStorage) - } - - return u.Quantity.Val, nil -} - -type resourceLimits struct { - cpu sdk.Int - memory sdk.Int - storage sdk.Int -} - -func newLimits() resourceLimits { - return resourceLimits{ - cpu: sdk.ZeroInt(), - memory: sdk.ZeroInt(), - storage: sdk.ZeroInt(), - } -} - -func (u *resourceLimits) add(rhs resourceLimits) { - u.cpu = u.cpu.Add(rhs.cpu) - u.memory = u.memory.Add(rhs.memory) - u.storage = u.storage.Add(rhs.storage) -} - -func (u *resourceLimits) mul(count uint32) { - u.cpu = u.cpu.MulRaw(int64(count)) - u.memory = u.memory.MulRaw(int64(count)) - u.storage = u.storage.MulRaw(int64(count)) -} diff --git a/go/node/deployment/v1beta1/types.go b/go/node/deployment/v1beta1/types.go deleted file mode 100644 index 401e20e3..00000000 --- a/go/node/deployment/v1beta1/types.go +++ /dev/null @@ -1,211 +0,0 @@ -package v1beta1 - -import ( - "bytes" - - sdk "github.com/cosmos/cosmos-sdk/types" - - atypes "github.com/akash-network/akash-api/go/node/audit/v1beta1" - types 
"github.com/akash-network/akash-api/go/node/types/v1beta1" -) - -type attributesMatching map[string]types.Attributes - -const ( - // ManifestVersionLength is the length of manifest version - ManifestVersionLength = 32 - - // DefaultOrderBiddingDuration is the default time limit for an Order being active. - // After the duration, the Order is automatically closed. - // ( 24(hr) * 3600(seconds per hour) ) / 7s-Block - DefaultOrderBiddingDuration = int64(12342) - - // MaxBiddingDuration is roughly 30 days of block height - MaxBiddingDuration = DefaultOrderBiddingDuration * int64(30) -) - -// ID method returns DeploymentID details of specific deployment -func (obj Deployment) ID() DeploymentID { - return obj.DeploymentID -} - -// ValidateBasic asserts non-zero values -// TODO: This is causing an import cycle. I think there is some pattern here I'm missing tho.. -func (g GroupSpec) ValidateBasic() error { - return validateDeploymentGroup(g) -} - -// GetResources method returns resources list in group -func (g GroupSpec) GetResources() []types.Resources { - resources := make([]types.Resources, 0, len(g.Resources)) - for _, r := range g.Resources { - resources = append(resources, types.Resources{ - Resources: r.Resources, - Count: r.Count, - }) - } - - return resources -} - -// GetName method returns group name -func (g GroupSpec) GetName() string { - return g.Name -} - -// Price method returns price of group -func (g GroupSpec) Price() sdk.Coin { - var price sdk.Coin - for idx, resource := range g.Resources { - if idx == 0 { - price = resource.FullPrice() - continue - } - price = price.Add(resource.FullPrice()) - } - return price -} - -// MatchRequirements method compares provided attributes with specific group attributes. -// Argument provider is a bit cumbersome. 
First element is attributes from x/provider store -// in case tenant does not need signed attributes at all -// rest of elements (if any) are attributes signed by various auditors -func (g GroupSpec) MatchRequirements(provider []atypes.Provider) bool { - if (len(g.Requirements.SignedBy.AnyOf) != 0) || (len(g.Requirements.SignedBy.AllOf) != 0) { - // we cannot match if there is no signed attributes - if len(provider) < 2 { - return false - } - - existingRequirements := make(attributesMatching) - - for _, existing := range provider[1:] { - existingRequirements[existing.Auditor] = existing.Attributes - } - - if len(g.Requirements.SignedBy.AllOf) != 0 { - for _, validator := range g.Requirements.SignedBy.AllOf { - // if at least one signature does not exist or no match on attributes - requirements cannot match - if existingAttr, exists := existingRequirements[validator]; !exists || - !types.AttributesSubsetOf(g.Requirements.Attributes, existingAttr) { - return false - } - } - } - - if len(g.Requirements.SignedBy.AnyOf) != 0 { - for _, validator := range g.Requirements.SignedBy.AnyOf { - if existingAttr, exists := existingRequirements[validator]; exists && - types.AttributesSubsetOf(g.Requirements.Attributes, existingAttr) { - return true - } - } - - return false - } - - return true - } - - return types.AttributesSubsetOf(g.Requirements.Attributes, provider[0].Attributes) -} - -// MatchAttributes method compares provided attributes with specific group attributes -func (g GroupSpec) MatchAttributes(attr types.Attributes) bool { - return types.AttributesSubsetOf(g.Requirements.Attributes, attr) -} - -// ID method returns GroupID details of specific group -func (g Group) ID() GroupID { - return g.GroupID -} - -// ValidateClosable provides error response if group is already closed, -// and thus should not be closed again, else nil. 
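MatchRequirements above distinguishes three cases: no SignedBy requirements (check the provider's own attributes), AllOf (every listed auditor must have signed a superset of the required attributes), and AnyOf (a single matching signature suffices). A simplified standalone sketch of that rule, using illustrative names that are independent of the generated types, is:

package main

import "fmt"

// subset reports whether every required key/value pair appears in attrs.
func subset(required, attrs map[string]string) bool {
	for k, v := range required {
		if attrs[k] != v {
			return false
		}
	}
	return true
}

// matchRequirements mirrors the rules described above: with allOf, every listed
// auditor must have signed a superset of the required attributes; with anyOf, one
// matching signature is enough; with neither, the provider's own (unsigned)
// attributes are checked directly. signed maps auditor -> signed attributes.
func matchRequirements(required map[string]string, allOf, anyOf []string, providerAttrs map[string]string, signed map[string]map[string]string) bool {
	if len(allOf) == 0 && len(anyOf) == 0 {
		return subset(required, providerAttrs)
	}
	for _, auditor := range allOf {
		attrs, ok := signed[auditor]
		if !ok || !subset(required, attrs) {
			return false
		}
	}
	if len(anyOf) != 0 {
		for _, auditor := range anyOf {
			if attrs, ok := signed[auditor]; ok && subset(required, attrs) {
				return true
			}
		}
		return false
	}
	return true
}

func main() {
	required := map[string]string{"region": "us-west"}
	signed := map[string]map[string]string{
		"auditor1": {"region": "us-west", "tier": "community"},
	}
	fmt.Println(matchRequirements(required, nil, []string{"auditor1"}, nil, signed)) // true
}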
-func (g Group) ValidateClosable() error { - switch g.State { - case GroupClosed: - return ErrGroupClosed - default: - return nil - } -} - -// ValidatePausable provides error response if group is not pausable -func (g Group) ValidatePausable() error { - switch g.State { - case GroupClosed: - return ErrGroupClosed - case GroupPaused: - return ErrGroupPaused - default: - return nil - } -} - -// ValidatePausable provides error response if group is not pausable -func (g Group) ValidateStartable() error { - switch g.State { - case GroupClosed: - return ErrGroupClosed - case GroupOpen: - return ErrGroupOpen - default: - return nil - } -} - -// GetName method returns group name -func (g Group) GetName() string { - return g.GroupSpec.Name -} - -// GetResources method returns resources list in group -func (g Group) GetResources() []types.Resources { - return g.GroupSpec.GetResources() -} - -// FullPrice method returns full price of resource -func (r Resource) FullPrice() sdk.Coin { - return sdk.NewCoin(r.Price.Denom, r.Price.Amount.MulRaw(int64(r.Count))) -} - -// DeploymentResponses is a collection of DeploymentResponse -type DeploymentResponses []QueryDeploymentResponse - -func (ds DeploymentResponses) String() string { - var buf bytes.Buffer - - const sep = "\n\n" - - for _, d := range ds { - buf.WriteString(d.String()) - buf.WriteString(sep) - } - - if len(ds) > 0 { - buf.Truncate(buf.Len() - len(sep)) - } - - return buf.String() -} - -// Accept returns whether deployment filters valid or not -func (filters DeploymentFilters) Accept(obj Deployment, stateVal Deployment_State) bool { - // Checking owner filter - if filters.Owner != "" && filters.Owner != obj.DeploymentID.Owner { - return false - } - - // Checking dseq filter - if filters.DSeq != 0 && filters.DSeq != obj.DeploymentID.DSeq { - return false - } - - // Checking state filter - if stateVal != 0 && stateVal != obj.State { - return false - } - - return true -} diff --git a/go/node/deployment/v1beta1/types_test.go b/go/node/deployment/v1beta1/types_test.go deleted file mode 100644 index c695a574..00000000 --- a/go/node/deployment/v1beta1/types_test.go +++ /dev/null @@ -1 +0,0 @@ -package v1beta1_test diff --git a/go/node/deployment/v1beta1/validation_config.go b/go/node/deployment/v1beta1/validation_config.go deleted file mode 100644 index 6e73ee8d..00000000 --- a/go/node/deployment/v1beta1/validation_config.go +++ /dev/null @@ -1,66 +0,0 @@ -package v1beta1 - -import ( - "github.com/akash-network/akash-api/go/node/types/unit" -) - -// This is the validation configuration that acts as a hard limit -// on what the network accepts for deployments. This is never changed -// and is the same across all members of the network - -type ValidationConfig struct { - - // MaxUnitCPU is the maximum number of milli (1/1000) cpu units a unit can consume. 
- MaxUnitCPU uint - // MaxUnitMemory is the maximum number of bytes of memory that a unit can consume - MaxUnitMemory uint64 - // MaxUnitStorage is the maximum number of bytes of storage that a unit can consume - MaxUnitStorage uint64 - // MaxUnitCount is the maximum number of replias of a service - MaxUnitCount uint - // MaxUnitPrice is the maximum price that a unit can have - MaxUnitPrice uint64 - - MinUnitCPU uint - MinUnitMemory uint64 - MinUnitStorage uint64 - MinUnitCount uint - MinUnitPrice uint64 - - // MaxGroupCount is the maximum number of groups allowed per deployment - MaxGroupCount int - // MaxGroupUnits is the maximum number services per group - MaxGroupUnits int - - // MaxGroupCPU is the maximum total amount of CPU requested per group - MaxGroupCPU uint64 - // MaxGroupMemory is the maximum total amount of memory requested per group - MaxGroupMemory uint64 - // MaxGroupStorage is the maximum total amount of storage requested per group - MaxGroupStorage uint64 -} - -var validationConfig = ValidationConfig{ - MaxUnitCPU: 10 * 1000, // 10 CPUs - MaxUnitMemory: 16 * unit.Gi, // 16 Gi - MaxUnitStorage: unit.Ti, // 1 Ti - MaxUnitCount: 50, - MaxUnitPrice: 10000000, // 10akt - - MinUnitCPU: 10, - MinUnitMemory: unit.Mi, - MinUnitStorage: 5 * unit.Mi, - MinUnitCount: 1, - MinUnitPrice: 1, - - MaxGroupCount: 20, - MaxGroupUnits: 20, - - MaxGroupCPU: 20 * 1000, - MaxGroupMemory: 32 * unit.Gi, - MaxGroupStorage: unit.Ti, -} - -func GetValidationConfig() ValidationConfig { - return validationConfig -} diff --git a/go/node/deployment/v1beta2/authz.pb.go b/go/node/deployment/v1beta2/authz.pb.go deleted file mode 100644 index cde3c179..00000000 --- a/go/node/deployment/v1beta2/authz.pb.go +++ /dev/null @@ -1,333 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta2/authz.proto - -package v1beta2 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - _ "github.com/regen-network/cosmos-proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from -// the granter's account for a deployment. -type DepositDeploymentAuthorization struct { - // SpendLimit is the amount the grantee is authorized to spend from the granter's account for - // the purpose of deployment. 
- SpendLimit types.Coin `protobuf:"bytes,1,opt,name=spend_limit,json=spendLimit,proto3" json:"spend_limit"` -} - -func (m *DepositDeploymentAuthorization) Reset() { *m = DepositDeploymentAuthorization{} } -func (m *DepositDeploymentAuthorization) String() string { return proto.CompactTextString(m) } -func (*DepositDeploymentAuthorization) ProtoMessage() {} -func (*DepositDeploymentAuthorization) Descriptor() ([]byte, []int) { - return fileDescriptor_7ce8076728f4c483, []int{0} -} -func (m *DepositDeploymentAuthorization) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DepositDeploymentAuthorization) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DepositDeploymentAuthorization.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DepositDeploymentAuthorization) XXX_Merge(src proto.Message) { - xxx_messageInfo_DepositDeploymentAuthorization.Merge(m, src) -} -func (m *DepositDeploymentAuthorization) XXX_Size() int { - return m.Size() -} -func (m *DepositDeploymentAuthorization) XXX_DiscardUnknown() { - xxx_messageInfo_DepositDeploymentAuthorization.DiscardUnknown(m) -} - -var xxx_messageInfo_DepositDeploymentAuthorization proto.InternalMessageInfo - -func (m *DepositDeploymentAuthorization) GetSpendLimit() types.Coin { - if m != nil { - return m.SpendLimit - } - return types.Coin{} -} - -func init() { - proto.RegisterType((*DepositDeploymentAuthorization)(nil), "akash.deployment.v1beta2.DepositDeploymentAuthorization") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta2/authz.proto", fileDescriptor_7ce8076728f4c483) -} - -var fileDescriptor_7ce8076728f4c483 = []byte{ - // 280 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, - 0x4a, 0x2d, 0x49, 0x34, 0xd2, 0x4f, 0x2c, 0x2d, 0xc9, 0xa8, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, - 0x17, 0x92, 0x00, 0xab, 0xd2, 0x43, 0xa8, 0xd2, 0x83, 0xaa, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, - 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 0xa5, 0x24, 0x93, 0xf3, 0x8b, 0x73, 0xf3, 0x8b, 0xe3, - 0x21, 0x12, 0x10, 0x0e, 0x54, 0x4a, 0x0e, 0xc2, 0xd3, 0x4f, 0x4a, 0x2c, 0x4e, 0x85, 0xda, 0x65, - 0xa8, 0x9f, 0x9c, 0x9f, 0x99, 0x07, 0x91, 0x57, 0x6a, 0x61, 0xe4, 0x92, 0x73, 0x49, 0x2d, 0xc8, - 0x2f, 0xce, 0x2c, 0x71, 0x81, 0x5b, 0xe7, 0x58, 0x5a, 0x92, 0x91, 0x5f, 0x94, 0x59, 0x95, 0x58, - 0x92, 0x99, 0x9f, 0x27, 0xe4, 0xcf, 0xc5, 0x5d, 0x5c, 0x90, 0x9a, 0x97, 0x12, 0x9f, 0x93, 0x99, - 0x9b, 0x59, 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x6d, 0x24, 0xa9, 0x07, 0xb5, 0x06, 0x64, 0x30, - 0xd4, 0x79, 0x86, 0x7a, 0xce, 0xf9, 0x99, 0x79, 0x4e, 0xc2, 0x27, 0xee, 0xc9, 0x33, 0xbc, 0xba, - 0x27, 0x8f, 0xac, 0x2b, 0x88, 0x0b, 0xcc, 0xf1, 0x01, 0xb1, 0xad, 0x04, 0x2f, 0x6d, 0xd1, 0xe5, - 0x45, 0xb1, 0xc3, 0x29, 0xfc, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, - 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x6c, - 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xc1, 0xc1, 0xa2, 0x9b, 0x97, - 0x5a, 0x52, 0x9e, 0x5f, 0x94, 0x0d, 0xe5, 0x25, 0x16, 0x64, 0xea, 0xa7, 0xe7, 0xeb, 0xe7, 0xe5, - 0xa7, 0xa4, 0x62, 0x09, 0xd6, 0x24, 0x36, 0xb0, 0x37, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, - 0x4d, 0x91, 0x48, 0x38, 0x79, 0x01, 
0x00, 0x00, -} - -func (m *DepositDeploymentAuthorization) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DepositDeploymentAuthorization) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DepositDeploymentAuthorization) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.SpendLimit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAuthz(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintAuthz(dAtA []byte, offset int, v uint64) int { - offset -= sovAuthz(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *DepositDeploymentAuthorization) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.SpendLimit.Size() - n += 1 + l + sovAuthz(uint64(l)) - return n -} - -func sovAuthz(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozAuthz(x uint64) (n int) { - return sovAuthz(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *DepositDeploymentAuthorization) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuthz - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DepositDeploymentAuthorization: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DepositDeploymentAuthorization: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpendLimit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuthz - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAuthz - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAuthz - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SpendLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuthz(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAuthz - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAuthz(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuthz - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuthz - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuthz - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthAuthz - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupAuthz - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthAuthz - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthAuthz = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAuthz = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupAuthz = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta2/codec.go b/go/node/deployment/v1beta2/codec.go deleted file mode 100644 index cd1e21d3..00000000 --- a/go/node/deployment/v1beta2/codec.go +++ /dev/null @@ -1,58 +0,0 @@ -package v1beta2 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/msgservice" - "github.com/cosmos/cosmos-sdk/x/authz" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/deployment module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/deployment and - // defined at the application level. 
- ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterLegacyAminoCodec register concrete types on codec -func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { - cdc.RegisterConcrete(&MsgCreateDeployment{}, ModuleName+"/"+MsgTypeCreateDeployment, nil) - cdc.RegisterConcrete(&MsgUpdateDeployment{}, ModuleName+"/"+MsgTypeUpdateDeployment, nil) - cdc.RegisterConcrete(&MsgDepositDeployment{}, ModuleName+"/"+MsgTypeDepositDeployment, nil) - cdc.RegisterConcrete(&MsgCloseDeployment{}, ModuleName+"/"+MsgTypeCloseDeployment, nil) - cdc.RegisterConcrete(&MsgCloseGroup{}, ModuleName+"/"+MsgTypeCloseGroup, nil) - cdc.RegisterConcrete(&MsgPauseGroup{}, ModuleName+"/"+MsgTypePauseGroup, nil) - cdc.RegisterConcrete(&MsgStartGroup{}, ModuleName+"/"+MsgTypeStartGroup, nil) -} - -// RegisterInterfaces registers the x/deployment interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil), - &MsgCreateDeployment{}, - &MsgUpdateDeployment{}, - &MsgDepositDeployment{}, - &MsgCloseDeployment{}, - &MsgCloseGroup{}, - &MsgPauseGroup{}, - &MsgStartGroup{}, - ) - registry.RegisterImplementations( - (*authz.Authorization)(nil), - &DepositDeploymentAuthorization{}, - ) - - msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) -} diff --git a/go/node/deployment/v1beta2/deployment.pb.go b/go/node/deployment/v1beta2/deployment.pb.go deleted file mode 100644 index 2116025c..00000000 --- a/go/node/deployment/v1beta2/deployment.pb.go +++ /dev/null @@ -1,960 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta2/deployment.proto - -package v1beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of deployment -type Deployment_State int32 - -const ( - // Prefix should start with 0 in enum. 
So declaring dummy state - DeploymentStateInvalid Deployment_State = 0 - // DeploymentActive denotes state for deployment active - DeploymentActive Deployment_State = 1 - // DeploymentClosed denotes state for deployment closed - DeploymentClosed Deployment_State = 2 -) - -var Deployment_State_name = map[int32]string{ - 0: "invalid", - 1: "active", - 2: "closed", -} - -var Deployment_State_value = map[string]int32{ - "invalid": 0, - "active": 1, - "closed": 2, -} - -func (x Deployment_State) String() string { - return proto.EnumName(Deployment_State_name, int32(x)) -} - -func (Deployment_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_897ec42830b2cbac, []int{1, 0} -} - -// DeploymentID stores owner and sequence number -type DeploymentID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` -} - -func (m *DeploymentID) Reset() { *m = DeploymentID{} } -func (*DeploymentID) ProtoMessage() {} -func (*DeploymentID) Descriptor() ([]byte, []int) { - return fileDescriptor_897ec42830b2cbac, []int{0} -} -func (m *DeploymentID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeploymentID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeploymentID) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentID.Merge(m, src) -} -func (m *DeploymentID) XXX_Size() int { - return m.Size() -} -func (m *DeploymentID) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentID.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentID proto.InternalMessageInfo - -func (m *DeploymentID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *DeploymentID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -// Deployment stores deploymentID, state and version details -type Deployment struct { - DeploymentID DeploymentID `protobuf:"bytes,1,opt,name=deployment_id,json=deploymentId,proto3" json:"id" yaml:"id"` - State Deployment_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.deployment.v1beta2.Deployment_State" json:"state" yaml:"state"` - Version []byte `protobuf:"bytes,3,opt,name=version,proto3" json:"version" yaml:"version"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` -} - -func (m *Deployment) Reset() { *m = Deployment{} } -func (m *Deployment) String() string { return proto.CompactTextString(m) } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_897ec42830b2cbac, []int{1} -} -func (m *Deployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Deployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Deployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_Deployment.Merge(m, src) -} -func (m *Deployment) XXX_Size() int { - return m.Size() -} -func (m *Deployment) XXX_DiscardUnknown() { - xxx_messageInfo_Deployment.DiscardUnknown(m) -} - -var xxx_messageInfo_Deployment 
proto.InternalMessageInfo - -func (m *Deployment) GetDeploymentID() DeploymentID { - if m != nil { - return m.DeploymentID - } - return DeploymentID{} -} - -func (m *Deployment) GetState() Deployment_State { - if m != nil { - return m.State - } - return DeploymentStateInvalid -} - -func (m *Deployment) GetVersion() []byte { - if m != nil { - return m.Version - } - return nil -} - -func (m *Deployment) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -// DeploymentFilters defines filters used to filter deployments -type DeploymentFilters struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *DeploymentFilters) Reset() { *m = DeploymentFilters{} } -func (m *DeploymentFilters) String() string { return proto.CompactTextString(m) } -func (*DeploymentFilters) ProtoMessage() {} -func (*DeploymentFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_897ec42830b2cbac, []int{2} -} -func (m *DeploymentFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeploymentFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeploymentFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentFilters.Merge(m, src) -} -func (m *DeploymentFilters) XXX_Size() int { - return m.Size() -} -func (m *DeploymentFilters) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentFilters proto.InternalMessageInfo - -func (m *DeploymentFilters) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *DeploymentFilters) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *DeploymentFilters) GetState() string { - if m != nil { - return m.State - } - return "" -} - -func init() { - proto.RegisterEnum("akash.deployment.v1beta2.Deployment_State", Deployment_State_name, Deployment_State_value) - proto.RegisterType((*DeploymentID)(nil), "akash.deployment.v1beta2.DeploymentID") - proto.RegisterType((*Deployment)(nil), "akash.deployment.v1beta2.Deployment") - proto.RegisterType((*DeploymentFilters)(nil), "akash.deployment.v1beta2.DeploymentFilters") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta2/deployment.proto", fileDescriptor_897ec42830b2cbac) -} - -var fileDescriptor_897ec42830b2cbac = []byte{ - // 506 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x93, 0x31, 0x6f, 0xd3, 0x40, - 0x14, 0xc7, 0x7d, 0x8d, 0xd3, 0x92, 0x6b, 0xa8, 0x82, 0x55, 0x21, 0x63, 0xa9, 0x3e, 0xcb, 0x03, - 0x0d, 0x48, 0xd8, 0x22, 0x1d, 0x90, 0x22, 0x31, 0xd4, 0x44, 0x48, 0x59, 0xdd, 0x01, 0x09, 0x86, - 0xea, 0x92, 0x3b, 0xa5, 0xa7, 0x3a, 0xbe, 0xd4, 0x3e, 0x52, 0x95, 0x81, 0x19, 0x75, 0x62, 0x64, - 0xa9, 0x54, 0x89, 0x2f, 0xc0, 0xca, 0x37, 0xe8, 0xd8, 0x91, 0xe9, 0x84, 0x9c, 0x05, 0x65, 0xcc, - 0x27, 0x40, 0xbe, 0x4b, 0x71, 0x40, 0x80, 0x98, 0xd8, 0xfc, 0x7e, 0xf7, 0x7f, 0x7e, 0xff, 0x77, - 0xef, 0x1e, 0x7c, 0x80, 0x8f, 0x71, 0x7e, 0x14, 0x12, 0x3a, 0x49, 0xf8, 0xd9, 0x98, 0xa6, 0x22, - 0x9c, 0x3e, 0x1e, 0x50, 0x81, 0x3b, 0x2b, 0x28, 
0x98, 0x64, 0x5c, 0x70, 0xcb, 0x56, 0xd2, 0x60, - 0x85, 0x2f, 0xa5, 0xce, 0xf6, 0x88, 0x8f, 0xb8, 0x12, 0x85, 0xe5, 0x97, 0xd6, 0xfb, 0x6f, 0x61, - 0xb3, 0xf7, 0x43, 0xdb, 0xef, 0x59, 0x21, 0xac, 0xf3, 0xd3, 0x94, 0x66, 0x36, 0xf0, 0x40, 0xbb, - 0x11, 0xdd, 0x9b, 0x4b, 0xa4, 0xc1, 0x42, 0xa2, 0xe6, 0x19, 0x1e, 0x27, 0x5d, 0x5f, 0x85, 0x7e, - 0xac, 0xb1, 0xb5, 0x07, 0x4d, 0x92, 0xd3, 0x13, 0x7b, 0xcd, 0x03, 0x6d, 0x33, 0x42, 0x85, 0x44, - 0x66, 0xef, 0x80, 0x9e, 0xcc, 0x25, 0x52, 0x7c, 0x21, 0xd1, 0xa6, 0x4e, 0x2b, 0x23, 0x3f, 0x56, - 0xb0, 0x7b, 0xeb, 0xc3, 0x25, 0x32, 0xbe, 0x5d, 0x22, 0xc3, 0xff, 0x5c, 0x83, 0xb0, 0x32, 0x60, - 0x09, 0x78, 0xbb, 0xb2, 0x7e, 0xc8, 0x88, 0xb2, 0xb1, 0xd9, 0xb9, 0x1f, 0xfc, 0xa9, 0xad, 0x60, - 0xd5, 0x7d, 0xb4, 0x7b, 0x25, 0x91, 0x51, 0x48, 0xf4, 0x53, 0x4f, 0x73, 0x89, 0xd6, 0x18, 0x59, - 0x48, 0xd4, 0xd0, 0x46, 0x18, 0xf1, 0xe3, 0x66, 0xf5, 0xa7, 0x3e, 0xb1, 0x5e, 0xc1, 0x7a, 0x2e, - 0xb0, 0xa0, 0xaa, 0x89, 0xad, 0xce, 0xc3, 0x7f, 0xa9, 0x16, 0x1c, 0x94, 0x19, 0xfa, 0x82, 0x54, - 0x72, 0x75, 0x41, 0x2a, 0xf4, 0x63, 0x8d, 0xad, 0x27, 0x70, 0x63, 0x4a, 0xb3, 0x9c, 0xf1, 0xd4, - 0xae, 0x79, 0xa0, 0xdd, 0x8c, 0x76, 0xe6, 0x12, 0xdd, 0xa0, 0x85, 0x44, 0x5b, 0x3a, 0x69, 0x09, - 0xfc, 0xf8, 0xe6, 0xc8, 0xda, 0x81, 0x70, 0x98, 0x51, 0x2c, 0x28, 0x39, 0xc4, 0xc2, 0x36, 0x3d, - 0xd0, 0xae, 0xc5, 0x8d, 0x25, 0xd9, 0x17, 0xfe, 0x1b, 0x58, 0x57, 0x16, 0xac, 0x5d, 0xb8, 0xc1, - 0xd2, 0x29, 0x4e, 0x18, 0x69, 0x19, 0x8e, 0x73, 0x7e, 0xe1, 0xdd, 0xad, 0x5c, 0x2a, 0x45, 0x5f, - 0x9f, 0x5a, 0x1e, 0x5c, 0xc7, 0x43, 0xc1, 0xa6, 0xb4, 0x05, 0x9c, 0xed, 0xf3, 0x0b, 0xaf, 0x55, - 0xe9, 0xf6, 0x15, 0x2f, 0x15, 0xc3, 0x84, 0xe7, 0x94, 0xb4, 0xd6, 0x7e, 0x55, 0x3c, 0x53, 0xdc, - 0x31, 0xdf, 0x7d, 0x74, 0x8d, 0xae, 0xa9, 0x66, 0xf7, 0x09, 0xc0, 0x3b, 0x95, 0xe0, 0x39, 0x4b, - 0x04, 0xcd, 0xf2, 0xff, 0xf3, 0x82, 0xca, 0x2a, 0x7a, 0x64, 0xb5, 0xaa, 0xca, 0xdf, 0xc6, 0xa0, - 0x2d, 0x47, 0x2f, 0xae, 0x0a, 0x17, 0x5c, 0x17, 0x2e, 0xf8, 0x5a, 0xb8, 0xe0, 0xfd, 0xcc, 0x35, - 0xae, 0x67, 0xae, 0xf1, 0x65, 0xe6, 0x1a, 0x2f, 0x9f, 0x8e, 0x98, 0x38, 0x7a, 0x3d, 0x08, 0x86, - 0x7c, 0x1c, 0xaa, 0xf1, 0x3f, 0x4a, 0xa9, 0x38, 0xe5, 0xd9, 0xf1, 0x32, 0xc2, 0x13, 0x16, 0x8e, - 0x78, 0x98, 0x72, 0x42, 0x7f, 0xb3, 0x88, 0x83, 0x75, 0xb5, 0x4e, 0x7b, 0xdf, 0x03, 0x00, 0x00, - 0xff, 0xff, 0x1b, 0x82, 0x2a, 0xa8, 0xab, 0x03, 0x00, 0x00, -} - -func (m *DeploymentID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DSeq != 0 { - i = encodeVarintDeployment(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintDeployment(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Deployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - 
i := len(dAtA) - _ = i - var l int - _ = l - if m.CreatedAt != 0 { - i = encodeVarintDeployment(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintDeployment(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x1a - } - if m.State != 0 { - i = encodeVarintDeployment(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.DeploymentID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeployment(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *DeploymentFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintDeployment(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x1a - } - if m.DSeq != 0 { - i = encodeVarintDeployment(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintDeployment(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintDeployment(dAtA []byte, offset int, v uint64) int { - offset -= sovDeployment(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *DeploymentID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovDeployment(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovDeployment(uint64(m.DSeq)) - } - return n -} - -func (m *Deployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.DeploymentID.Size() - n += 1 + l + sovDeployment(uint64(l)) - if m.State != 0 { - n += 1 + sovDeployment(uint64(m.State)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovDeployment(uint64(l)) - } - if m.CreatedAt != 0 { - n += 1 + sovDeployment(uint64(m.CreatedAt)) - } - return n -} - -func (m *DeploymentFilters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovDeployment(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovDeployment(uint64(m.DSeq)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovDeployment(uint64(l)) - } - return n -} - -func sovDeployment(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozDeployment(x uint64) (n int) { - return sovDeployment(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *DeploymentID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentID: wiretype end group for non-group") - } - if fieldNum <= 0 { - 
return fmt.Errorf("proto: DeploymentID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Deployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Deployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeploymentID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.DeploymentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Deployment_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = append(m.Version[:0], dAtA[iNdEx:postIndex]...) - if m.Version == nil { - m.Version = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipDeployment(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeployment - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeployment - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeployment - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthDeployment - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupDeployment - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthDeployment - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthDeployment = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowDeployment = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupDeployment = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta2/deploymentmsg.pb.go b/go/node/deployment/v1beta2/deploymentmsg.pb.go deleted file mode 100644 index 571c3f61..00000000 --- a/go/node/deployment/v1beta2/deploymentmsg.pb.go +++ /dev/null @@ -1,1722 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta2/deploymentmsg.proto - -package v1beta2 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// MsgCreateDeployment defines an SDK message for creating deployment -type MsgCreateDeployment struct { - ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` - Groups []GroupSpec `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups" yaml:"groups"` - Version []byte `protobuf:"bytes,3,opt,name=version,proto3" json:"version" yaml:"version"` - Deposit types.Coin `protobuf:"bytes,4,opt,name=deposit,proto3" json:"deposit" yaml:"deposit"` - // Depositor pays for the deposit - Depositor string `protobuf:"bytes,5,opt,name=depositor,proto3" json:"depositor" yaml:"depositor"` -} - -func (m *MsgCreateDeployment) Reset() { *m = MsgCreateDeployment{} } -func (m *MsgCreateDeployment) String() string { return proto.CompactTextString(m) } -func (*MsgCreateDeployment) ProtoMessage() {} -func (*MsgCreateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_5e09213efb52c240, []int{0} -} -func (m *MsgCreateDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateDeployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateDeployment.Merge(m, src) -} -func (m *MsgCreateDeployment) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateDeployment proto.InternalMessageInfo - -func (m *MsgCreateDeployment) GetID() DeploymentID { - if m != nil { - return m.ID - } - return DeploymentID{} -} - -func (m *MsgCreateDeployment) GetGroups() []GroupSpec { - if m != nil { - return m.Groups - } - return nil -} - -func (m *MsgCreateDeployment) GetVersion() []byte { - if m != nil { - return m.Version - } - return nil -} - -func (m *MsgCreateDeployment) GetDeposit() types.Coin { - if m != nil { - return m.Deposit - } - return types.Coin{} -} - -func (m *MsgCreateDeployment) GetDepositor() string { - if m != nil { - return m.Depositor - } - return "" -} - -// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. 
-type MsgCreateDeploymentResponse struct { -} - -func (m *MsgCreateDeploymentResponse) Reset() { *m = MsgCreateDeploymentResponse{} } -func (m *MsgCreateDeploymentResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateDeploymentResponse) ProtoMessage() {} -func (*MsgCreateDeploymentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e09213efb52c240, []int{1} -} -func (m *MsgCreateDeploymentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateDeploymentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateDeploymentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateDeploymentResponse.Merge(m, src) -} -func (m *MsgCreateDeploymentResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateDeploymentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateDeploymentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateDeploymentResponse proto.InternalMessageInfo - -// MsgDepositDeployment deposits more funds into the deposit account -type MsgDepositDeployment struct { - ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` - Amount types.Coin `protobuf:"bytes,2,opt,name=amount,proto3" json:"amount" yaml:"amount"` - // Depositor pays for the deposit - Depositor string `protobuf:"bytes,3,opt,name=depositor,proto3" json:"depositor" yaml:"depositor"` -} - -func (m *MsgDepositDeployment) Reset() { *m = MsgDepositDeployment{} } -func (m *MsgDepositDeployment) String() string { return proto.CompactTextString(m) } -func (*MsgDepositDeployment) ProtoMessage() {} -func (*MsgDepositDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_5e09213efb52c240, []int{2} -} -func (m *MsgDepositDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDepositDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDepositDeployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDepositDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDepositDeployment.Merge(m, src) -} -func (m *MsgDepositDeployment) XXX_Size() int { - return m.Size() -} -func (m *MsgDepositDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDepositDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDepositDeployment proto.InternalMessageInfo - -func (m *MsgDepositDeployment) GetID() DeploymentID { - if m != nil { - return m.ID - } - return DeploymentID{} -} - -func (m *MsgDepositDeployment) GetAmount() types.Coin { - if m != nil { - return m.Amount - } - return types.Coin{} -} - -func (m *MsgDepositDeployment) GetDepositor() string { - if m != nil { - return m.Depositor - } - return "" -} - -// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. 
-type MsgDepositDeploymentResponse struct { -} - -func (m *MsgDepositDeploymentResponse) Reset() { *m = MsgDepositDeploymentResponse{} } -func (m *MsgDepositDeploymentResponse) String() string { return proto.CompactTextString(m) } -func (*MsgDepositDeploymentResponse) ProtoMessage() {} -func (*MsgDepositDeploymentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e09213efb52c240, []int{3} -} -func (m *MsgDepositDeploymentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDepositDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDepositDeploymentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDepositDeploymentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDepositDeploymentResponse.Merge(m, src) -} -func (m *MsgDepositDeploymentResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgDepositDeploymentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDepositDeploymentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDepositDeploymentResponse proto.InternalMessageInfo - -// MsgUpdateDeployment defines an SDK message for updating deployment -type MsgUpdateDeployment struct { - ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` - Version []byte `protobuf:"bytes,3,opt,name=version,proto3" json:"version" yaml:"version"` -} - -func (m *MsgUpdateDeployment) Reset() { *m = MsgUpdateDeployment{} } -func (m *MsgUpdateDeployment) String() string { return proto.CompactTextString(m) } -func (*MsgUpdateDeployment) ProtoMessage() {} -func (*MsgUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_5e09213efb52c240, []int{4} -} -func (m *MsgUpdateDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgUpdateDeployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgUpdateDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgUpdateDeployment.Merge(m, src) -} -func (m *MsgUpdateDeployment) XXX_Size() int { - return m.Size() -} -func (m *MsgUpdateDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_MsgUpdateDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgUpdateDeployment proto.InternalMessageInfo - -func (m *MsgUpdateDeployment) GetID() DeploymentID { - if m != nil { - return m.ID - } - return DeploymentID{} -} - -func (m *MsgUpdateDeployment) GetVersion() []byte { - if m != nil { - return m.Version - } - return nil -} - -// MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. 
-type MsgUpdateDeploymentResponse struct { -} - -func (m *MsgUpdateDeploymentResponse) Reset() { *m = MsgUpdateDeploymentResponse{} } -func (m *MsgUpdateDeploymentResponse) String() string { return proto.CompactTextString(m) } -func (*MsgUpdateDeploymentResponse) ProtoMessage() {} -func (*MsgUpdateDeploymentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e09213efb52c240, []int{5} -} -func (m *MsgUpdateDeploymentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgUpdateDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgUpdateDeploymentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgUpdateDeploymentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgUpdateDeploymentResponse.Merge(m, src) -} -func (m *MsgUpdateDeploymentResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgUpdateDeploymentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgUpdateDeploymentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgUpdateDeploymentResponse proto.InternalMessageInfo - -// MsgCloseDeployment defines an SDK message for closing deployment -type MsgCloseDeployment struct { - ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCloseDeployment) Reset() { *m = MsgCloseDeployment{} } -func (m *MsgCloseDeployment) String() string { return proto.CompactTextString(m) } -func (*MsgCloseDeployment) ProtoMessage() {} -func (*MsgCloseDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_5e09213efb52c240, []int{6} -} -func (m *MsgCloseDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseDeployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseDeployment.Merge(m, src) -} -func (m *MsgCloseDeployment) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseDeployment proto.InternalMessageInfo - -func (m *MsgCloseDeployment) GetID() DeploymentID { - if m != nil { - return m.ID - } - return DeploymentID{} -} - -// MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. 
-type MsgCloseDeploymentResponse struct { -} - -func (m *MsgCloseDeploymentResponse) Reset() { *m = MsgCloseDeploymentResponse{} } -func (m *MsgCloseDeploymentResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCloseDeploymentResponse) ProtoMessage() {} -func (*MsgCloseDeploymentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5e09213efb52c240, []int{7} -} -func (m *MsgCloseDeploymentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseDeploymentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseDeploymentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseDeploymentResponse.Merge(m, src) -} -func (m *MsgCloseDeploymentResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseDeploymentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseDeploymentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseDeploymentResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*MsgCreateDeployment)(nil), "akash.deployment.v1beta2.MsgCreateDeployment") - proto.RegisterType((*MsgCreateDeploymentResponse)(nil), "akash.deployment.v1beta2.MsgCreateDeploymentResponse") - proto.RegisterType((*MsgDepositDeployment)(nil), "akash.deployment.v1beta2.MsgDepositDeployment") - proto.RegisterType((*MsgDepositDeploymentResponse)(nil), "akash.deployment.v1beta2.MsgDepositDeploymentResponse") - proto.RegisterType((*MsgUpdateDeployment)(nil), "akash.deployment.v1beta2.MsgUpdateDeployment") - proto.RegisterType((*MsgUpdateDeploymentResponse)(nil), "akash.deployment.v1beta2.MsgUpdateDeploymentResponse") - proto.RegisterType((*MsgCloseDeployment)(nil), "akash.deployment.v1beta2.MsgCloseDeployment") - proto.RegisterType((*MsgCloseDeploymentResponse)(nil), "akash.deployment.v1beta2.MsgCloseDeploymentResponse") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta2/deploymentmsg.proto", fileDescriptor_5e09213efb52c240) -} - -var fileDescriptor_5e09213efb52c240 = []byte{ - // 534 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0x41, 0x8b, 0xd3, 0x40, - 0x14, 0x6e, 0xd2, 0x75, 0x97, 0xce, 0xaa, 0x48, 0xdc, 0x43, 0xac, 0xdb, 0x4c, 0x77, 0x04, 0xa9, - 0xa0, 0x09, 0xad, 0x07, 0x61, 0x41, 0x84, 0x6c, 0x41, 0xf6, 0xb0, 0x97, 0xc8, 0x2a, 0x88, 0x97, - 0xb4, 0x19, 0xb2, 0xc3, 0x36, 0x79, 0x31, 0x93, 0xae, 0xec, 0x3f, 0xf0, 0xe8, 0x4f, 0x10, 0xaf, - 0xfe, 0x91, 0x3d, 0xee, 0xd1, 0xd3, 0x20, 0xed, 0x45, 0x7a, 0xec, 0x2f, 0x90, 0x64, 0x26, 0x8d, - 0xd5, 0x16, 0x65, 0x61, 0xbd, 0x65, 0xde, 0xf7, 0x7d, 0x2f, 0xdf, 0x7c, 0xef, 0x31, 0xe8, 0xb1, - 0x7f, 0xea, 0xf3, 0x13, 0x27, 0xa0, 0xc9, 0x08, 0xce, 0x23, 0x1a, 0x67, 0xce, 0x59, 0x77, 0x40, - 0x33, 0xbf, 0xf7, 0x4b, 0x29, 0xe2, 0xa1, 0x9d, 0xa4, 0x90, 0x81, 0x61, 0x16, 0x6c, 0xbb, 0x82, - 0x6c, 0xc5, 0x6e, 0xee, 0x84, 0x10, 0x42, 0x41, 0x72, 0xf2, 0x2f, 0xc9, 0x6f, 0x3e, 0xfa, 0x87, - 0xee, 0x8a, 0xda, 0x59, 0x4b, 0x0d, 0x53, 0x18, 0x27, 0x3c, 0xa1, 0x43, 0xc5, 0xb4, 0x86, 0xc0, - 0x23, 0xe0, 0xce, 0xc0, 0xe7, 0x54, 0x91, 0xba, 0xce, 0x10, 0x58, 0x2c, 0x71, 0xf2, 0xa5, 0x8e, - 0xee, 0x1e, 0xf1, 0xf0, 0x20, 0xa5, 0x7e, 0x46, 0xfb, 0x8b, 0x7e, 0xc6, 0x31, 0xd2, 0x59, 0x60, - 0x6a, 0x6d, 0xad, 0xb3, 0xdd, 0x7b, 0x68, 0xaf, 
0xbb, 0x89, 0x5d, 0x29, 0x0e, 0xfb, 0x6e, 0xeb, - 0x42, 0xe0, 0xda, 0x44, 0x60, 0xfd, 0xb0, 0x3f, 0x13, 0x58, 0x67, 0xc1, 0x5c, 0xe0, 0xc6, 0xb9, - 0x1f, 0x8d, 0xf6, 0x09, 0x0b, 0x88, 0xa7, 0xb3, 0xc0, 0x78, 0x87, 0x36, 0xa5, 0x43, 0x53, 0x6f, - 0xd7, 0x3b, 0xdb, 0xbd, 0x07, 0xeb, 0x5b, 0xbf, 0xcc, 0x79, 0xaf, 0x12, 0x3a, 0x74, 0x71, 0xde, - 0x77, 0x26, 0xb0, 0x92, 0xce, 0x05, 0xbe, 0x25, 0xbb, 0xca, 0x33, 0xf1, 0x14, 0x60, 0x3c, 0x43, - 0x5b, 0x67, 0x34, 0xe5, 0x0c, 0x62, 0xb3, 0xde, 0xd6, 0x3a, 0x37, 0xdd, 0xd6, 0x4c, 0xe0, 0xb2, - 0x34, 0x17, 0xf8, 0xb6, 0x94, 0xa9, 0x02, 0xf1, 0x4a, 0xc8, 0x78, 0x8d, 0xb6, 0x02, 0x9a, 0x00, - 0x67, 0x99, 0xb9, 0x51, 0x5c, 0xf9, 0x9e, 0x2d, 0x73, 0xb3, 0xf3, 0xdc, 0x94, 0xa5, 0xae, 0x7d, - 0x00, 0x2c, 0x76, 0xf7, 0x94, 0x9b, 0x52, 0x51, 0xf5, 0x55, 0x05, 0xe2, 0x95, 0x90, 0xf1, 0x02, - 0x35, 0xd4, 0x27, 0xa4, 0xe6, 0x8d, 0xb6, 0xd6, 0x69, 0xb8, 0x7b, 0x33, 0x81, 0xab, 0xe2, 0x5c, - 0xe0, 0x3b, 0x4b, 0x62, 0x48, 0x89, 0x57, 0xc1, 0xfb, 0x1b, 0x3f, 0x3e, 0xe3, 0x1a, 0x69, 0xa1, - 0xfb, 0x2b, 0x66, 0xe4, 0x51, 0x9e, 0x40, 0xcc, 0x29, 0xf9, 0xa8, 0xa3, 0x9d, 0x23, 0x1e, 0xf6, - 0xa5, 0xea, 0xfa, 0x87, 0xe8, 0xa1, 0x4d, 0x3f, 0x82, 0x71, 0x9c, 0x99, 0xfa, 0xdf, 0xc2, 0x5a, - 0x8c, 0x4e, 0x0a, 0xaa, 0xd1, 0xc9, 0x33, 0xf1, 0x14, 0xb0, 0x9c, 0x54, 0xfd, 0xca, 0x49, 0x59, - 0x68, 0x77, 0x55, 0x12, 0x8b, 0xa8, 0xbe, 0x6a, 0xc5, 0xba, 0x1f, 0x27, 0xc1, 0x7f, 0x59, 0xf7, - 0xab, 0x2e, 0xe4, 0xd2, 0xdc, 0x7f, 0x37, 0xbb, 0xb8, 0xcc, 0x7b, 0x64, 0xe4, 0x6b, 0x31, 0x02, - 0x7e, 0xfd, 0x57, 0x51, 0x8e, 0x76, 0x51, 0xf3, 0xcf, 0x5f, 0x96, 0x86, 0xdc, 0x37, 0x17, 0x13, - 0x4b, 0xbb, 0x9c, 0x58, 0xda, 0xf7, 0x89, 0xa5, 0x7d, 0x9a, 0x5a, 0xb5, 0xcb, 0xa9, 0x55, 0xfb, - 0x36, 0xb5, 0x6a, 0x6f, 0x9f, 0x87, 0x2c, 0x3b, 0x19, 0x0f, 0xec, 0x21, 0x44, 0x4e, 0x61, 0xe9, - 0x49, 0x4c, 0xb3, 0x0f, 0x90, 0x9e, 0xaa, 0x93, 0x9f, 0x30, 0x27, 0x04, 0x27, 0x86, 0x80, 0xae, - 0x78, 0xd5, 0x06, 0x9b, 0xc5, 0x63, 0xf5, 0xf4, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0xad, 0xa1, - 0x02, 0x3a, 0x81, 0x05, 0x00, 0x00, -} - -func (m *MsgCreateDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Depositor) > 0 { - i -= len(m.Depositor) - copy(dAtA[i:], m.Depositor) - i = encodeVarintDeploymentmsg(dAtA, i, uint64(len(m.Depositor))) - i-- - dAtA[i] = 0x2a - } - { - size, err := m.Deposit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintDeploymentmsg(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x1a - } - if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa 
- return len(dAtA) - i, nil -} - -func (m *MsgCreateDeploymentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgDepositDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDepositDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDepositDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Depositor) > 0 { - i -= len(m.Depositor) - copy(dAtA[i:], m.Depositor) - i = encodeVarintDeploymentmsg(dAtA, i, uint64(len(m.Depositor))) - i-- - dAtA[i] = 0x1a - } - { - size, err := m.Amount.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgDepositDeploymentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDepositDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDepositDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgUpdateDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintDeploymentmsg(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x1a - } - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgUpdateDeploymentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgUpdateDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgUpdateDeploymentResponse) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgCloseDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCloseDeploymentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintDeploymentmsg(dAtA []byte, offset int, v uint64) int { - offset -= sovDeploymentmsg(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *MsgCreateDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovDeploymentmsg(uint64(l)) - if len(m.Groups) > 0 { - for _, e := range m.Groups { - l = e.Size() - n += 1 + l + sovDeploymentmsg(uint64(l)) - } - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovDeploymentmsg(uint64(l)) - } - l = m.Deposit.Size() - n += 1 + l + sovDeploymentmsg(uint64(l)) - l = len(m.Depositor) - if l > 0 { - n += 1 + l + sovDeploymentmsg(uint64(l)) - } - return n -} - -func (m *MsgCreateDeploymentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgDepositDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovDeploymentmsg(uint64(l)) - l = m.Amount.Size() - n += 1 + l + sovDeploymentmsg(uint64(l)) - l = len(m.Depositor) - if l > 0 { - n += 1 + l + sovDeploymentmsg(uint64(l)) - } - return n -} - -func (m *MsgDepositDeploymentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgUpdateDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovDeploymentmsg(uint64(l)) - l = len(m.Version) - if l > 0 { - n += 1 + l + sovDeploymentmsg(uint64(l)) - } - return n -} - -func (m *MsgUpdateDeploymentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgCloseDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovDeploymentmsg(uint64(l)) - return n -} - -func (m *MsgCloseDeploymentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovDeploymentmsg(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozDeploymentmsg(x uint64) (n int) { - return sovDeploymentmsg(uint64((x << 1) ^ 
uint64((int64(x) >> 63)))) -} -func (m *MsgCreateDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, GroupSpec{}) - if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = append(m.Version[:0], dAtA[iNdEx:postIndex]...) 
- if m.Version == nil { - m.Version = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deposit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Deposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Depositor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Depositor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeploymentmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateDeploymentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateDeploymentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeploymentmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDepositDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDepositDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDepositDeployment: illegal tag 
%d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Depositor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Depositor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeploymentmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDepositDeploymentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDepositDeploymentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDepositDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeploymentmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF 
- } - return nil -} -func (m *MsgUpdateDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgUpdateDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = append(m.Version[:0], dAtA[iNdEx:postIndex]...) 
- if m.Version == nil { - m.Version = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeploymentmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgUpdateDeploymentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgUpdateDeploymentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgUpdateDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeploymentmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeploymentmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseDeploymentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseDeploymentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeploymentmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipDeploymentmsg(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthDeploymentmsg - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupDeploymentmsg - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthDeploymentmsg - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthDeploymentmsg = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowDeploymentmsg = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupDeploymentmsg = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta2/deposit_deployment_authorization.go b/go/node/deployment/v1beta2/deposit_deployment_authorization.go deleted file mode 100644 index 53399448..00000000 --- a/go/node/deployment/v1beta2/deposit_deployment_authorization.go +++ /dev/null @@ -1,48 +0,0 @@ -package v1beta2 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - "github.com/cosmos/cosmos-sdk/x/authz" -) - -var ( - _ authz.Authorization = &DepositDeploymentAuthorization{} -) - -// NewDepositDeploymentAuthorization creates a new DepositDeploymentAuthorization object. -func NewDepositDeploymentAuthorization(spendLimit sdk.Coin) *DepositDeploymentAuthorization { - return &DepositDeploymentAuthorization{ - SpendLimit: spendLimit, - } -} - -// MsgTypeURL implements Authorization.MsgTypeURL. -func (m DepositDeploymentAuthorization) MsgTypeURL() string { - return sdk.MsgTypeURL(&MsgDepositDeployment{}) -} - -// Accept implements Authorization.Accept. 
-func (m DepositDeploymentAuthorization) Accept(_ sdk.Context, msg sdk.Msg) (authz.AcceptResponse, error) {
-	mDepositDeployment, ok := msg.(*MsgDepositDeployment)
-	if !ok {
-		return authz.AcceptResponse{}, sdkerrors.ErrInvalidType.Wrap("type mismatch")
-	}
-	if m.SpendLimit.IsLT(mDepositDeployment.Amount) {
-		return authz.AcceptResponse{}, sdkerrors.ErrInsufficientFunds.Wrapf("requested amount is more than spend limit")
-	}
-	limitLeft := m.SpendLimit.Sub(mDepositDeployment.Amount)
-	if limitLeft.IsZero() {
-		return authz.AcceptResponse{Accept: true, Delete: true}, nil
-	}
-
-	return authz.AcceptResponse{Accept: true, Delete: false, Updated: &DepositDeploymentAuthorization{SpendLimit: limitLeft}}, nil
-}
-
-// ValidateBasic implements Authorization.ValidateBasic.
-func (m DepositDeploymentAuthorization) ValidateBasic() error {
-	if !m.SpendLimit.IsPositive() {
-		return sdkerrors.ErrInvalidCoins.Wrapf("spend limit cannot be negative")
-	}
-	return nil
-}
diff --git a/go/node/deployment/v1beta2/errors.go b/go/node/deployment/v1beta2/errors.go
deleted file mode 100644
index a6865e9b..00000000
--- a/go/node/deployment/v1beta2/errors.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package v1beta2
-
-import (
-	"errors"
-)
-
-var (
-	// ErrNameDoesNotExist is the error when name does not exist
-	ErrNameDoesNotExist = errors.New("Name does not exist")
-	// ErrInvalidRequest is the error for invalid request
-	ErrInvalidRequest = errors.New("Invalid request")
-	// ErrDeploymentExists is the error when already deployment exists
-	ErrDeploymentExists = errors.New("Deployment exists")
-	// ErrDeploymentNotFound is the error when deployment not found
-	ErrDeploymentNotFound = errors.New("Deployment not found")
-	// ErrDeploymentClosed is the error when deployment is closed
-	ErrDeploymentClosed = errors.New("Deployment closed")
-	// ErrOwnerAcctMissing is the error for owner account missing
-	ErrOwnerAcctMissing = errors.New("Owner account missing")
-	// ErrInvalidGroups is the error when groups are empty
-	ErrInvalidGroups = errors.New("Invalid groups")
-	// ErrInvalidDeploymentID is the error for invalid deployment id
-	ErrInvalidDeploymentID = errors.New("Invalid: deployment id")
-	// ErrEmptyVersion is the error when version is empty
-	ErrEmptyVersion = errors.New("Invalid: empty version")
-	// ErrInvalidVersion is the error when version is invalid
-	ErrInvalidVersion = errors.New("Invalid: deployment version")
-	// ErrInternal is the error for internal error
-	ErrInternal = errors.New("internal error")
-	// ErrInvalidDeployment = is the error when deployment does not pass validation
-	ErrInvalidDeployment = errors.New("Invalid deployment")
-	// ErrInvalidGroupID is the error when already deployment exists
-	ErrInvalidGroupID = errors.New("Deployment exists")
-	// ErrGroupNotFound is the keeper's error for not finding a group
-	ErrGroupNotFound = errors.New("Group not found")
-	// ErrGroupClosed is the error when deployment is closed
-	ErrGroupClosed = errors.New("Group already closed")
-	// ErrGroupOpen is the error when deployment is closed
-	ErrGroupOpen = errors.New("Group open")
-	// ErrGroupPaused is the error when deployment is closed
-	ErrGroupPaused = errors.New("Group paused")
-	// ErrGroupNotOpen indicates the Group state has progressed beyond initial Open.
-	ErrGroupNotOpen = errors.New("Group not open")
-	// ErrGroupSpecInvalid indicates a GroupSpec has invalid configuration
-	ErrGroupSpecInvalid = errors.New("GroupSpec invalid")
-
-	// ErrInvalidDeposit indicates an invalid deposit
-	ErrInvalidDeposit = errors.New("Deposit invalid")
-	// ErrInvalidIDPath indicates an invalid ID path
-	ErrInvalidIDPath = errors.New("ID path invalid")
-	// ErrInvalidParam indicates an invalid chain parameter
-	ErrInvalidParam = errors.New("parameter invalid")
-)
diff --git a/go/node/deployment/v1beta2/escrow.go b/go/node/deployment/v1beta2/escrow.go
deleted file mode 100644
index 4ef8261f..00000000
--- a/go/node/deployment/v1beta2/escrow.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package v1beta2
-
-import (
-	etypes "github.com/akash-network/akash-api/go/node/escrow/v1beta2"
-)
-
-const (
-	EscrowScope = "deployment"
-)
-
-func EscrowAccountForDeployment(id DeploymentID) etypes.AccountID {
-	return etypes.AccountID{
-		Scope: EscrowScope,
-		XID:   id.String(),
-	}
-}
-
-func DeploymentIDFromEscrowAccount(id etypes.AccountID) (DeploymentID, bool) {
-	if id.Scope != EscrowScope {
-		return DeploymentID{}, false
-	}
-
-	did, err := ParseDeploymentID(id.XID)
-	return did, err == nil
-}
diff --git a/go/node/deployment/v1beta2/event.go b/go/node/deployment/v1beta2/event.go
deleted file mode 100644
index 0d5805a4..00000000
--- a/go/node/deployment/v1beta2/event.go
+++ /dev/null
@@ -1,309 +0,0 @@
-package v1beta2
-
-import (
-	"encoding/hex"
-	"strconv"
-
-	sdk "github.com/cosmos/cosmos-sdk/types"
-
-	"github.com/akash-network/akash-api/go/sdkutil"
-)
-
-const (
-	evActionDeploymentCreated = "deployment-created"
-	evActionDeploymentUpdated = "deployment-updated"
-	evActionDeploymentClosed  = "deployment-closed"
-	evActionGroupClosed       = "group-closed"
-	evActionGroupPaused       = "group-paused"
-	evActionGroupStarted      = "group-started"
-	evOwnerKey                = "owner"
-	evDSeqKey                 = "dseq"
-	evGSeqKey                 = "gseq"
-	evVersionKey              = "version"
-	encodedVersionHexLen      = 64
-)
-
-// EventDeploymentCreated struct
-type EventDeploymentCreated struct {
-	Context sdkutil.BaseModuleEvent `json:"context"`
-	ID      DeploymentID            `json:"id"`
-	Version []byte                  `json:"version"`
-}
-
-// NewEventDeploymentCreated initializes creation event.
-func NewEventDeploymentCreated(id DeploymentID, version []byte) EventDeploymentCreated { - return EventDeploymentCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionDeploymentCreated, - }, - ID: id, - Version: version, - } -} - -// ToSDKEvent method creates new sdk event for EventDeploymentCreated struct -func (ev EventDeploymentCreated) ToSDKEvent() sdk.Event { - version := encodeHex(ev.Version) - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionDeploymentCreated), - sdk.NewAttribute(evVersionKey, string(version)), - }, DeploymentIDEVAttributes(ev.ID)...)..., - ) -} - -// EventDeploymentUpdated struct -type EventDeploymentUpdated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID DeploymentID `json:"id"` - Version []byte `json:"version"` -} - -// NewEventDeploymentUpdated initializes SDK type -func NewEventDeploymentUpdated(id DeploymentID, version []byte) EventDeploymentUpdated { - return EventDeploymentUpdated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionDeploymentUpdated, - }, - ID: id, - Version: version, - } -} - -// ToSDKEvent method creates new sdk event for EventDeploymentUpdated struct -func (ev EventDeploymentUpdated) ToSDKEvent() sdk.Event { - version := encodeHex(ev.Version) - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionDeploymentUpdated), - sdk.NewAttribute(evVersionKey, string(version)), - }, DeploymentIDEVAttributes(ev.ID)...)..., - ) -} - -// EventDeploymentClosed struct -type EventDeploymentClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID DeploymentID `json:"id"` -} - -func NewEventDeploymentClosed(id DeploymentID) EventDeploymentClosed { - return EventDeploymentClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionDeploymentClosed, - }, - ID: id, - } -} - -// ToSDKEvent method creates new sdk event for EventDeploymentClosed struct -func (ev EventDeploymentClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionDeploymentClosed), - }, DeploymentIDEVAttributes(ev.ID)...)..., - ) -} - -// DeploymentIDEVAttributes returns event attribues for given DeploymentID -func DeploymentIDEVAttributes(id DeploymentID) []sdk.Attribute { - return []sdk.Attribute{ - sdk.NewAttribute(evOwnerKey, id.Owner), - sdk.NewAttribute(evDSeqKey, strconv.FormatUint(id.DSeq, 10)), - } -} - -// ParseEVDeploymentID returns deploymentID details for given event attributes -func ParseEVDeploymentID(attrs []sdk.Attribute) (DeploymentID, error) { - owner, err := sdkutil.GetAccAddress(attrs, evOwnerKey) - if err != nil { - return DeploymentID{}, err - } - dseq, err := sdkutil.GetUint64(attrs, evDSeqKey) - if err != nil { - return DeploymentID{}, err - } - - return DeploymentID{ - Owner: owner.String(), - DSeq: dseq, - }, nil -} - -// ParseEVDeploymentVersion returns the Deployment's SDL sha256 sum -func ParseEVDeploymentVersion(attrs []sdk.Attribute) ([]byte, error) { - v, err := sdkutil.GetString(attrs, evVersionKey) - if err != nil { - return nil, err - } - return decodeHex([]byte(v)) -} - -func encodeHex(src []byte) []byte { - dst := make([]byte, 
hex.EncodedLen(len(src))) - hex.Encode(dst, src) - return dst -} - -func decodeHex(src []byte) ([]byte, error) { - dst := make([]byte, hex.DecodedLen(len(src))) - if _, err := hex.Decode(dst, src); err != nil { - return []byte{}, err - } - return dst, nil -} - -// EventGroupClosed provides SDK event to signal group termination -type EventGroupClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID GroupID `json:"id"` -} - -func NewEventGroupClosed(id GroupID) EventGroupClosed { - return EventGroupClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionGroupClosed, - }, - ID: id, - } -} - -// ToSDKEvent produces the SDK notification for Event -func (ev EventGroupClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionGroupClosed), - }, GroupIDEVAttributes(ev.ID)...)..., - ) -} - -// EventGroupPaused provides SDK event to signal group termination -type EventGroupPaused struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID GroupID `json:"id"` -} - -func NewEventGroupPaused(id GroupID) EventGroupPaused { - return EventGroupPaused{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionGroupPaused, - }, - ID: id, - } -} - -// ToSDKEvent produces the SDK notification for Event -func (ev EventGroupPaused) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionGroupPaused), - }, GroupIDEVAttributes(ev.ID)...)..., - ) -} - -// EventGroupStarted provides SDK event to signal group termination -type EventGroupStarted struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID GroupID `json:"id"` -} - -func NewEventGroupStarted(id GroupID) EventGroupStarted { - return EventGroupStarted{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionGroupStarted, - }, - ID: id, - } -} - -// ToSDKEvent produces the SDK notification for Event -func (ev EventGroupStarted) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionGroupStarted), - }, GroupIDEVAttributes(ev.ID)...)..., - ) -} - -// GroupIDEVAttributes returns event attribues for given GroupID -func GroupIDEVAttributes(id GroupID) []sdk.Attribute { - return append(DeploymentIDEVAttributes(id.DeploymentID()), - sdk.NewAttribute(evGSeqKey, strconv.FormatUint(uint64(id.GSeq), 10))) -} - -// ParseEVGroupID returns GroupID details for given event attributes -func ParseEVGroupID(attrs []sdk.Attribute) (GroupID, error) { - did, err := ParseEVDeploymentID(attrs) - if err != nil { - return GroupID{}, err - } - - gseq, err := sdkutil.GetUint64(attrs, evGSeqKey) - if err != nil { - return GroupID{}, err - } - - return GroupID{ - Owner: did.Owner, - DSeq: did.DSeq, - GSeq: uint32(gseq), - }, nil -} - -// ParseEvent parses event and returns details of event and error if occurred -func ParseEvent(ev sdkutil.Event) (sdkutil.ModuleEvent, error) { - if ev.Type != sdkutil.EventTypeMessage { - return nil, sdkutil.ErrUnknownType - } - if ev.Module != ModuleName { - return nil, sdkutil.ErrUnknownModule - } - switch ev.Action { - case evActionDeploymentCreated: - did, err := ParseEVDeploymentID(ev.Attributes) - if err != nil 
{ - return nil, err - } - ver, err := ParseEVDeploymentVersion(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventDeploymentCreated(did, ver), nil - case evActionDeploymentUpdated: - did, err := ParseEVDeploymentID(ev.Attributes) - if err != nil { - return nil, err - } - ver, err := ParseEVDeploymentVersion(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventDeploymentUpdated(did, ver), nil - case evActionDeploymentClosed: - did, err := ParseEVDeploymentID(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventDeploymentClosed(did), nil - case evActionGroupClosed: - gid, err := ParseEVGroupID(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventGroupClosed(gid), nil - default: - return nil, sdkutil.ErrUnknownAction - } -} diff --git a/go/node/deployment/v1beta2/events_test.go b/go/node/deployment/v1beta2/events_test.go deleted file mode 100644 index 69ac0c15..00000000 --- a/go/node/deployment/v1beta2/events_test.go +++ /dev/null @@ -1,335 +0,0 @@ -package v1beta2 - -import ( - "crypto/sha256" - "strconv" - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/akash-network/akash-api/go/sdkutil" -) - -var ( - keyAcc, _ = sdk.AccAddressFromBech32("akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr") - errWildcard = errors.New("wildcard string error can't be matched") - tmpSum = sha256.Sum256([]byte(keyAcc)) - deploymentVersion = encodeHex(tmpSum[:]) -) - -type testEventParsing struct { - msg sdkutil.Event - expErr error -} - -func (tep testEventParsing) testMessageType() func(t *testing.T) { - _, err := ParseEvent(tep.msg) - return func(t *testing.T) { - // if the error expected is errWildcard to catch untyped errors, don't fail the test, the error was expected. 
- if errors.Is(tep.expErr, errWildcard) { - require.Error(t, err) - } else { - require.Equal(t, tep.expErr, err) - } - } -} - -var TEPS = []testEventParsing{ - { - msg: sdkutil.Event{ - Type: "nil", - }, - expErr: sdkutil.ErrUnknownType, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - }, - expErr: sdkutil.ErrUnknownModule, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - }, - expErr: sdkutil.ErrUnknownAction, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: "nil", - }, - expErr: sdkutil.ErrUnknownModule, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: "nil", - }, - expErr: sdkutil.ErrUnknownAction, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evVersionKey, - Value: string(deploymentVersion), - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "abc", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "5", - }, - }, - }, - expErr: errWildcard, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentUpdated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evVersionKey, - Value: string(deploymentVersion), - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentUpdated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "5", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionGroupClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "1", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "5", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "abc", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionGroupClosed, - Attributes: []sdk.Attribute{ - { - Key: 
evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "5", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionGroupClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evGSeqKey, - Value: "1", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentUpdated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "neh", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evVersionKey, - Value: string(deploymentVersion), - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentUpdated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - }, - }, - expErr: errWildcard, - }, -} - -func TestEventParsing(t *testing.T) { - for i, test := range TEPS { - t.Run(strconv.Itoa(i), test.testMessageType()) - } -} - -func TestVersionEncoding(t *testing.T) { - versionHex := encodeHex(tmpSum[:]) - assert.Len(t, versionHex, encodedVersionHexLen) - decodedVersion, err := decodeHex(versionHex) - assert.NoError(t, err) - assert.Equal(t, tmpSum[:], decodedVersion) -} diff --git a/go/node/deployment/v1beta2/genesis.pb.go b/go/node/deployment/v1beta2/genesis.pb.go deleted file mode 100644 index b4cd2837..00000000 --- a/go/node/deployment/v1beta2/genesis.pb.go +++ /dev/null @@ -1,630 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta2/genesis.proto - -package v1beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisDeployment defines the basic genesis state used by deployment module -type GenesisDeployment struct { - Deployment Deployment `protobuf:"bytes,1,opt,name=deployment,proto3" json:"deployment" yaml:"deployment"` - Groups []Group `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups" yaml:"groups"` -} - -func (m *GenesisDeployment) Reset() { *m = GenesisDeployment{} } -func (m *GenesisDeployment) String() string { return proto.CompactTextString(m) } -func (*GenesisDeployment) ProtoMessage() {} -func (*GenesisDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_5f37856962d6010c, []int{0} -} -func (m *GenesisDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisDeployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisDeployment.Merge(m, src) -} -func (m *GenesisDeployment) XXX_Size() int { - return m.Size() -} -func (m *GenesisDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisDeployment proto.InternalMessageInfo - -func (m *GenesisDeployment) GetDeployment() Deployment { - if m != nil { - return m.Deployment - } - return Deployment{} -} - -func (m *GenesisDeployment) GetGroups() []Group { - if m != nil { - return m.Groups - } - return nil -} - -// GenesisState stores slice of genesis deployment instance -type GenesisState struct { - Deployments []GenesisDeployment `protobuf:"bytes,1,rep,name=deployments,proto3" json:"deployments" yaml:"deployments"` - Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params" yaml:"params"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_5f37856962d6010c, []int{1} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetDeployments() []GenesisDeployment { - if m != nil { - return m.Deployments - } - return nil -} - -func (m *GenesisState) GetParams() Params { - if m != nil { - return m.Params - } - return Params{} -} - -func init() { - proto.RegisterType((*GenesisDeployment)(nil), "akash.deployment.v1beta2.GenesisDeployment") - proto.RegisterType((*GenesisState)(nil), "akash.deployment.v1beta2.GenesisState") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta2/genesis.proto", fileDescriptor_5f37856962d6010c) -} - -var 
fileDescriptor_5f37856962d6010c = []byte{ - // 358 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xbf, 0x4e, 0xf3, 0x30, - 0x14, 0xc5, 0xe3, 0x7e, 0x52, 0x07, 0xf7, 0x63, 0x68, 0xc4, 0x10, 0x75, 0x88, 0x2b, 0xab, 0x40, - 0x2b, 0x44, 0x2c, 0xca, 0x86, 0xc4, 0x12, 0x21, 0x75, 0x45, 0x61, 0xe0, 0xcf, 0xe6, 0x52, 0x2b, - 0xad, 0xda, 0xc4, 0x51, 0xe2, 0x02, 0x7d, 0x0b, 0x1e, 0xab, 0x63, 0x47, 0x06, 0x14, 0xa1, 0x66, - 0x63, 0xcc, 0x13, 0xa0, 0xda, 0x16, 0x8e, 0x2a, 0xc2, 0x96, 0x1b, 0xff, 0xce, 0x3d, 0xf7, 0x5c, - 0x5d, 0x78, 0x4c, 0xe7, 0x34, 0x9b, 0x92, 0x09, 0x4b, 0x16, 0x7c, 0x15, 0xb1, 0x58, 0x90, 0xe7, - 0xf3, 0x31, 0x13, 0x74, 0x48, 0x42, 0x16, 0xb3, 0x6c, 0x96, 0x79, 0x49, 0xca, 0x05, 0xb7, 0x1d, - 0xc9, 0x79, 0x86, 0xf3, 0x34, 0xd7, 0x39, 0x0c, 0x79, 0xc8, 0x25, 0x44, 0x76, 0x5f, 0x8a, 0xef, - 0x0c, 0x6a, 0xfb, 0x56, 0x5a, 0x28, 0xb4, 0x57, 0x3f, 0x42, 0xca, 0x97, 0x89, 0xa6, 0x8e, 0x6a, - 0xa9, 0x84, 0xa6, 0x34, 0xd2, 0x73, 0xe2, 0x0f, 0x00, 0xdb, 0x23, 0x35, 0xf9, 0xf5, 0x0f, 0x6a, - 0x47, 0x10, 0x1a, 0xa1, 0x03, 0xba, 0xa0, 0xdf, 0x1a, 0xf6, 0xbc, 0xba, 0x48, 0x9e, 0x51, 0xfa, - 0x27, 0xeb, 0x1c, 0x59, 0x5f, 0x39, 0xaa, 0xe8, 0xcb, 0x1c, 0xb5, 0x57, 0x34, 0x5a, 0x5c, 0x62, - 0xf3, 0x0f, 0x07, 0x15, 0xc0, 0xbe, 0x87, 0x4d, 0x39, 0x7a, 0xe6, 0x34, 0xba, 0xff, 0xfa, 0xad, - 0x21, 0xaa, 0xb7, 0x1a, 0xed, 0x38, 0x1f, 0x69, 0x17, 0x2d, 0x2b, 0x73, 0x74, 0xa0, 0x1c, 0x54, - 0x8d, 0x03, 0xfd, 0x80, 0x0b, 0x00, 0xff, 0xeb, 0x78, 0xb7, 0x82, 0x0a, 0x66, 0xbf, 0xc2, 0x96, - 0xe9, 0x9a, 0x39, 0x40, 0xfa, 0x9d, 0xfe, 0xe1, 0xb7, 0xbf, 0x1b, 0x7f, 0xa0, 0xbd, 0xab, 0x7d, - 0xca, 0x1c, 0xd9, 0xfb, 0x11, 0x33, 0x1c, 0x54, 0x11, 0xfb, 0x01, 0x36, 0xd5, 0xe6, 0x9d, 0x86, - 0xdc, 0x67, 0xb7, 0xde, 0xf4, 0x46, 0x72, 0x26, 0xa5, 0xd2, 0x99, 0x94, 0xaa, 0xc6, 0x81, 0x7e, - 0xf0, 0xef, 0xd6, 0x5b, 0x17, 0x6c, 0xb6, 0x2e, 0xf8, 0xdc, 0xba, 0xe0, 0xad, 0x70, 0xad, 0x4d, - 0xe1, 0x5a, 0xef, 0x85, 0x6b, 0x3d, 0x5e, 0x85, 0x33, 0x31, 0x5d, 0x8e, 0xbd, 0x27, 0x1e, 0x11, - 0x69, 0x77, 0x16, 0x33, 0xf1, 0xc2, 0xd3, 0xb9, 0xae, 0x68, 0x32, 0x23, 0x21, 0x27, 0x31, 0x9f, - 0xb0, 0x5f, 0x4e, 0x65, 0xdc, 0x94, 0x47, 0x72, 0xf1, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x83, 0x7c, - 0x61, 0x6c, 0xf6, 0x02, 0x00, 0x00, -} - -func (m *GenesisDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Deployment.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) 
(int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Deployments) > 0 { - for iNdEx := len(m.Deployments) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Deployments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Deployment.Size() - n += 1 + l + sovGenesis(uint64(l)) - if len(m.Groups) > 0 { - for _, e := range m.Groups { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - return n -} - -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Deployments) > 0 { - for _, e := range m.Deployments { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - l = m.Params.Size() - n += 1 + l + sovGenesis(uint64(l)) - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deployment", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Deployment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if 
postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, Group{}) - if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deployments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Deployments = append(m.Deployments, GenesisDeployment{}) - if err := m.Deployments[len(m.Deployments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta2/group.pb.go b/go/node/deployment/v1beta2/group.pb.go deleted file mode 100644 index 401e10b3..00000000 --- a/go/node/deployment/v1beta2/group.pb.go +++ /dev/null @@ -1,505 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta2/group.proto - -package v1beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of group -type Group_State int32 - -const ( - // Prefix should start with 0 in enum. 
So declaring dummy state - GroupStateInvalid Group_State = 0 - // GroupOpen denotes state for group open - GroupOpen Group_State = 1 - // GroupOrdered denotes state for group ordered - GroupPaused Group_State = 2 - // GroupInsufficientFunds denotes state for group insufficient_funds - GroupInsufficientFunds Group_State = 3 - // GroupClosed denotes state for group closed - GroupClosed Group_State = 4 -) - -var Group_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "paused", - 3: "insufficient_funds", - 4: "closed", -} - -var Group_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "paused": 2, - "insufficient_funds": 3, - "closed": 4, -} - -func (x Group_State) String() string { - return proto.EnumName(Group_State_name, int32(x)) -} - -func (Group_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_60637d8fd815b0bf, []int{0, 0} -} - -// Group stores group id, state and specifications of group -type Group struct { - GroupID GroupID `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"id" yaml:"id"` - State Group_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.deployment.v1beta2.Group_State" json:"state" yaml:"state"` - GroupSpec GroupSpec `protobuf:"bytes,3,opt,name=group_spec,json=groupSpec,proto3" json:"spec" yaml:"spec"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` -} - -func (m *Group) Reset() { *m = Group{} } -func (m *Group) String() string { return proto.CompactTextString(m) } -func (*Group) ProtoMessage() {} -func (*Group) Descriptor() ([]byte, []int) { - return fileDescriptor_60637d8fd815b0bf, []int{0} -} -func (m *Group) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Group.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Group) XXX_Merge(src proto.Message) { - xxx_messageInfo_Group.Merge(m, src) -} -func (m *Group) XXX_Size() int { - return m.Size() -} -func (m *Group) XXX_DiscardUnknown() { - xxx_messageInfo_Group.DiscardUnknown(m) -} - -var xxx_messageInfo_Group proto.InternalMessageInfo - -func (m *Group) GetGroupID() GroupID { - if m != nil { - return m.GroupID - } - return GroupID{} -} - -func (m *Group) GetState() Group_State { - if m != nil { - return m.State - } - return GroupStateInvalid -} - -func (m *Group) GetGroupSpec() GroupSpec { - if m != nil { - return m.GroupSpec - } - return GroupSpec{} -} - -func (m *Group) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -func init() { - proto.RegisterEnum("akash.deployment.v1beta2.Group_State", Group_State_name, Group_State_value) - proto.RegisterType((*Group)(nil), "akash.deployment.v1beta2.Group") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta2/group.proto", fileDescriptor_60637d8fd815b0bf) -} - -var fileDescriptor_60637d8fd815b0bf = []byte{ - // 487 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x3f, 0x8f, 0xd3, 0x3e, - 0x1c, 0xc6, 0x93, 0x36, 0xbd, 0xfe, 0xea, 0xfe, 0x80, 0x62, 0xf1, 0x27, 0xe4, 0x44, 0x12, 0xc2, - 0x1f, 0x75, 0x21, 0x11, 0x65, 0x3b, 0x89, 0x81, 0x82, 0x40, 0x9d, 0x40, 0x3d, 0x09, 0x24, 0x96, - 0xe2, 0xc6, 0x6e, 0xce, 0xba, 0xd6, 0xb6, 0x1a, 0xe7, 0xd0, 0xad, 0x4c, 0xa8, 0x13, 0x6f, 0xa0, - 0x12, 0x12, 0x6f, 
0x82, 0x97, 0x70, 0xe3, 0x8d, 0x4c, 0x11, 0x6a, 0x17, 0xd4, 0xb1, 0xaf, 0x00, - 0xd9, 0x0e, 0xe2, 0x06, 0xb8, 0xdb, 0xe2, 0xe7, 0xf9, 0x7c, 0x9f, 0x3c, 0xb6, 0xbe, 0xe0, 0x1e, - 0x3a, 0x44, 0xf9, 0x41, 0x82, 0x89, 0x98, 0xf2, 0xe3, 0x19, 0x61, 0x32, 0x39, 0x7a, 0x34, 0x26, - 0x12, 0xf5, 0x92, 0x6c, 0xce, 0x0b, 0x11, 0x8b, 0x39, 0x97, 0x1c, 0xba, 0x9a, 0x8a, 0xff, 0x50, - 0x71, 0x45, 0x79, 0xd7, 0x32, 0x9e, 0x71, 0x0d, 0x25, 0xea, 0xcb, 0xf0, 0xde, 0x83, 0xf3, 0x53, - 0x29, 0xae, 0xb8, 0xee, 0xf9, 0x5c, 0x2e, 0x48, 0x6a, 0xc8, 0xe8, 0xa3, 0x03, 0x1a, 0x2f, 0x95, - 0x06, 0xdf, 0x83, 0xff, 0xb4, 0x39, 0xa2, 0xd8, 0xb5, 0x43, 0xbb, 0xdb, 0xee, 0xdd, 0x89, 0xff, - 0x55, 0x2f, 0xd6, 0x23, 0x83, 0xe7, 0xfd, 0xe8, 0xa4, 0x0c, 0xac, 0x55, 0x19, 0x34, 0x2b, 0x61, - 0x53, 0x06, 0x35, 0x8a, 0xb7, 0x65, 0xd0, 0x3a, 0x46, 0xb3, 0xe9, 0x5e, 0x44, 0x71, 0x34, 0x6c, - 0xea, 0xd8, 0x01, 0x86, 0x6f, 0x40, 0x23, 0x97, 0x48, 0x12, 0xb7, 0x16, 0xda, 0xdd, 0xcb, 0xbd, - 0xfb, 0x17, 0xc4, 0xc7, 0xfb, 0x0a, 0xee, 0xdf, 0xda, 0x94, 0x81, 0x99, 0xdb, 0x96, 0xc1, 0xff, - 0x26, 0x56, 0x1f, 0xa3, 0xa1, 0x91, 0xe1, 0x08, 0x00, 0xd3, 0x5c, 0xdd, 0xcb, 0xad, 0xeb, 0xee, - 0x77, 0x2f, 0x08, 0xdf, 0x17, 0x24, 0xed, 0xef, 0xaa, 0xf6, 0x9b, 0x32, 0x70, 0xd4, 0xe0, 0xb6, - 0x0c, 0xda, 0x55, 0xba, 0x20, 0x69, 0x34, 0x6c, 0x65, 0xbf, 0x39, 0x78, 0x1b, 0x80, 0x74, 0x4e, - 0x90, 0x24, 0x78, 0x84, 0xa4, 0xeb, 0x84, 0x76, 0xb7, 0x3e, 0x6c, 0x55, 0xca, 0x53, 0x19, 0x7d, - 0xb3, 0x41, 0x43, 0x77, 0x85, 0x11, 0x68, 0x52, 0x76, 0x84, 0xa6, 0x14, 0x77, 0x2c, 0xef, 0xfa, - 0x62, 0x19, 0x5e, 0x35, 0x3f, 0x53, 0xe6, 0xc0, 0x18, 0xf0, 0x26, 0x70, 0xb8, 0x20, 0xac, 0x63, - 0x7b, 0x97, 0x16, 0xcb, 0xb0, 0xa5, 0x81, 0x57, 0x82, 0x30, 0xb8, 0x0b, 0x76, 0x04, 0x2a, 0x72, - 0x82, 0x3b, 0x35, 0xef, 0xca, 0x62, 0x19, 0xb6, 0xb5, 0xf5, 0x5a, 0x4b, 0xb0, 0x07, 0x20, 0x65, - 0x79, 0x31, 0x99, 0xd0, 0x94, 0x12, 0x26, 0x47, 0x93, 0x82, 0xe1, 0xbc, 0x53, 0xf7, 0xbc, 0xc5, - 0x32, 0xbc, 0x61, 0x1e, 0xff, 0x8c, 0xfd, 0x42, 0xb9, 0x2a, 0x30, 0x9d, 0x72, 0x15, 0xe8, 0x9c, - 0x09, 0x7c, 0xa6, 0x25, 0xcf, 0xf9, 0xf4, 0xd5, 0xb7, 0xf6, 0x9c, 0x9f, 0x5f, 0x02, 0xab, 0xff, - 0xf6, 0x64, 0xe5, 0xdb, 0xa7, 0x2b, 0xdf, 0xfe, 0xb1, 0xf2, 0xed, 0xcf, 0x6b, 0xdf, 0x3a, 0x5d, - 0xfb, 0xd6, 0xf7, 0xb5, 0x6f, 0xbd, 0x7b, 0x92, 0x51, 0x79, 0x50, 0x8c, 0xe3, 0x94, 0xcf, 0x12, - 0xfd, 0xa0, 0x0f, 0x19, 0x91, 0x1f, 0xf8, 0xfc, 0xb0, 0x3a, 0x21, 0x41, 0x93, 0x8c, 0x27, 0x8c, - 0x63, 0xf2, 0x97, 0x6d, 0x1b, 0xef, 0xe8, 0x25, 0x7b, 0xfc, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x40, - 0x44, 0x6a, 0xaf, 0x0e, 0x03, 0x00, 0x00, -} - -func (m *Group) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Group) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Group) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CreatedAt != 0 { - i = encodeVarintGroup(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - { - size, err := m.GroupSpec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroup(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.State != 0 { - i = encodeVarintGroup(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.GroupID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGroup(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGroup(dAtA []byte, offset int, v uint64) int { - offset -= sovGroup(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Group) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.GroupID.Size() - n += 1 + l + sovGroup(uint64(l)) - if m.State != 0 { - n += 1 + sovGroup(uint64(m.State)) - } - l = m.GroupSpec.Size() - n += 1 + l + sovGroup(uint64(l)) - if m.CreatedAt != 0 { - n += 1 + sovGroup(uint64(m.CreatedAt)) - } - return n -} - -func sovGroup(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGroup(x uint64) (n int) { - return sovGroup(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Group) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Group: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Group: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.GroupID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Group_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupSpec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.GroupSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGroup(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroup - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGroup(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroup - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroup - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroup - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGroup - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGroup - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGroup - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGroup = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGroup = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGroup = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta2/group_pricing_validation.go b/go/node/deployment/v1beta2/group_pricing_validation.go deleted file mode 100644 index a8fb3178..00000000 --- a/go/node/deployment/v1beta2/group_pricing_validation.go +++ /dev/null @@ -1,63 +0,0 @@ -package v1beta2 - -import ( - "fmt" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -func validateGroupPricing(gspec GroupSpec) error { - var price sdk.DecCoin - - mem := sdk.NewInt(0) - denom := "" - - for idx, resource := range gspec.Resources { - - if err := validateUnitPricing(resource); err != nil { - return fmt.Errorf("group %v: %w", gspec.GetName(), err) - } - - // all must be same denomination - if denom == "" { - denom = resource.FullPrice().Denom - } else if resource.FullPrice().Denom != denom { - return fmt.Errorf("%w: denomination must be %q", ErrInvalidDeployment, denom) - } - - if idx == 0 { - price = resource.FullPrice() - } else { - rprice := resource.FullPrice() - if rprice.Denom != price.Denom { - return fmt.Errorf("multi-denonimation group: (%v == %v fails)", rprice.Denom, price.Denom) - } - price = price.Add(rprice) - } - - memCount := sdk.NewInt(0) - if u := resource.Resources.Memory; u != nil { - memCount.Add(sdk.NewIntFromUint64(u.Quantity.Value())) - } - - mem = mem.Add(memCount.Mul(sdk.NewIntFromUint64(uint64(resource.Count)))) - } - - return nil -} - -func validateUnitPricing(rg Resource) error { - if !rg.GetPrice().IsValid() { - return fmt.Errorf("error: invalid price object") - } - - if 
rg.Price.Amount.GT(sdk.NewDecFromInt(sdk.NewIntFromUint64(validationConfig.MaxUnitPrice))) { - return fmt.Errorf("error: invalid unit price (%v > %v fails)", validationConfig.MaxUnitPrice, rg.Price) - } - - return nil -} - -func validateOrderBidDuration(_ GroupSpec) error { - return nil -} diff --git a/go/node/deployment/v1beta2/group_validation.go b/go/node/deployment/v1beta2/group_validation.go deleted file mode 100644 index 58a87877..00000000 --- a/go/node/deployment/v1beta2/group_validation.go +++ /dev/null @@ -1,37 +0,0 @@ -package v1beta2 - -import ( - "fmt" -) - -// ValidateDeploymentGroup does validation for provided deployment group -func validateDeploymentGroup(gspec GroupSpec) error { - if err := ValidateResourceList(gspec); err != nil { - return err - } - if err := validateGroupPricing(gspec); err != nil { - return err - } - return validateOrderBidDuration(gspec) -} - -// ValidateDeploymentGroups does validation for all deployment groups -func ValidateDeploymentGroups(gspecs []GroupSpec) error { - if len(gspecs) == 0 { - return ErrInvalidGroups - } - - names := make(map[string]int, len(gspecs)) // Used as set - for _, group := range gspecs { - if err := group.ValidateBasic(); err != nil { - return err - } - - if _, exists := names[group.GetName()]; exists { - return fmt.Errorf("duplicate deployment group name %q", group.GetName()) - } - names[group.GetName()] = 0 // Value stored does not matter - } - - return nil -} diff --git a/go/node/deployment/v1beta2/groupid.pb.go b/go/node/deployment/v1beta2/groupid.pb.go deleted file mode 100644 index 60ed5aa5..00000000 --- a/go/node/deployment/v1beta2/groupid.pb.go +++ /dev/null @@ -1,395 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta2/groupid.proto - -package v1beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GroupID stores owner, deployment sequence number and group sequence number -type GroupID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` -} - -func (m *GroupID) Reset() { *m = GroupID{} } -func (*GroupID) ProtoMessage() {} -func (*GroupID) Descriptor() ([]byte, []int) { - return fileDescriptor_bceb1533fce25dcc, []int{0} -} -func (m *GroupID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GroupID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GroupID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GroupID) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupID.Merge(m, src) -} -func (m *GroupID) XXX_Size() int { - return m.Size() -} -func (m *GroupID) XXX_DiscardUnknown() { - xxx_messageInfo_GroupID.DiscardUnknown(m) -} - -var xxx_messageInfo_GroupID proto.InternalMessageInfo - -func (m *GroupID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *GroupID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *GroupID) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func init() { - proto.RegisterType((*GroupID)(nil), "akash.deployment.v1beta2.GroupID") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta2/groupid.proto", fileDescriptor_bceb1533fce25dcc) -} - -var fileDescriptor_bceb1533fce25dcc = []byte{ - // 281 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4b, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, - 0x4a, 0x2d, 0x49, 0x34, 0xd2, 0x4f, 0x2f, 0xca, 0x2f, 0x2d, 0xc8, 0x4c, 0xd1, 0x2b, 0x28, 0xca, - 0x2f, 0xc9, 0x17, 0x92, 0x00, 0xab, 0xd3, 0x43, 0xa8, 0xd3, 0x83, 0xaa, 0x93, 0x12, 0x49, 0xcf, - 0x4f, 0xcf, 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 0x95, 0xd6, 0x31, 0x72, 0xb1, 0xbb, 0x83, - 0x4c, 0xf0, 0x74, 0x11, 0xd2, 0xe7, 0x62, 0xcd, 0x2f, 0xcf, 0x4b, 0x2d, 0x92, 0x60, 0x54, 0x60, - 0xd4, 0xe0, 0x74, 0x92, 0x7c, 0x75, 0x4f, 0x1e, 0x22, 0xf0, 0xe9, 0x9e, 0x3c, 0x4f, 0x65, 0x62, - 0x6e, 0x8e, 0x95, 0x12, 0x98, 0xab, 0x14, 0x04, 0x11, 0x16, 0x32, 0xe6, 0x62, 0x49, 0x29, 0x4e, - 0x2d, 0x94, 0x60, 0x52, 0x60, 0xd4, 0x60, 0x71, 0x92, 0x7f, 0x74, 0x4f, 0x9e, 0xc5, 0x25, 0x38, - 0xb5, 0xf0, 0xd5, 0x3d, 0x79, 0xb0, 0xf8, 0xa7, 0x7b, 0xf2, 0xdc, 0x10, 0x6d, 0x20, 0x9e, 0x52, - 0x10, 0x58, 0x10, 0xa4, 0x29, 0x1d, 0xa4, 0x89, 0x59, 0x81, 0x51, 0x83, 0x17, 0xa2, 0xc9, 0x1d, - 0xaa, 0x29, 0x1d, 0x45, 0x53, 0x3a, 0x44, 0x13, 0x88, 0xb2, 0xe2, 0x98, 0xb1, 0x40, 0x9e, 0xe1, - 0xc5, 0x02, 0x79, 0x06, 0xa7, 0xf0, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, - 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, - 0xb2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x07, 0x87, 0x82, 0x6e, - 0x5e, 0x6a, 0x49, 0x79, 0x7e, 0x51, 0x36, 0x94, 0x97, 0x58, 0x90, 0xa9, 0x9f, 0x9e, 0xaf, 0x9f, - 0x97, 0x9f, 0x92, 0x8a, 0x25, 0x1c, 0x93, 0xd8, 0xc0, 0x01, 0x62, 0x0c, 0x08, 0x00, 0x00, 0xff, - 0xff, 0x41, 0x15, 
0xe6, 0x58, 0x6a, 0x01, 0x00, 0x00, -} - -func (m *GroupID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GroupID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.GSeq != 0 { - i = encodeVarintGroupid(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintGroupid(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintGroupid(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintGroupid(dAtA []byte, offset int, v uint64) int { - offset -= sovGroupid(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GroupID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovGroupid(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovGroupid(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovGroupid(uint64(m.GSeq)) - } - return n -} - -func sovGroupid(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGroupid(x uint64) (n int) { - return sovGroupid(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GroupID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGroupid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGroupid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupid - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGroupid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroupid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGroupid(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupid - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupid - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupid - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGroupid - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGroupid - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGroupid - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGroupid = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGroupid = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGroupid = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta2/groupmsg.pb.go b/go/node/deployment/v1beta2/groupmsg.pb.go deleted file mode 100644 index 8b8ca98f..00000000 --- a/go/node/deployment/v1beta2/groupmsg.pb.go +++ /dev/null @@ -1,1034 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta2/groupmsg.proto - -package v1beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// MsgCloseGroup defines SDK message to close a single Group within a Deployment. 
-type MsgCloseGroup struct { - ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCloseGroup) Reset() { *m = MsgCloseGroup{} } -func (m *MsgCloseGroup) String() string { return proto.CompactTextString(m) } -func (*MsgCloseGroup) ProtoMessage() {} -func (*MsgCloseGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_28ffee979288602d, []int{0} -} -func (m *MsgCloseGroup) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseGroup.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseGroup.Merge(m, src) -} -func (m *MsgCloseGroup) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseGroup) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseGroup proto.InternalMessageInfo - -func (m *MsgCloseGroup) GetID() GroupID { - if m != nil { - return m.ID - } - return GroupID{} -} - -// MsgCloseGroupResponse defines the Msg/CloseGroup response type. -type MsgCloseGroupResponse struct { -} - -func (m *MsgCloseGroupResponse) Reset() { *m = MsgCloseGroupResponse{} } -func (m *MsgCloseGroupResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCloseGroupResponse) ProtoMessage() {} -func (*MsgCloseGroupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_28ffee979288602d, []int{1} -} -func (m *MsgCloseGroupResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseGroupResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseGroupResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseGroupResponse.Merge(m, src) -} -func (m *MsgCloseGroupResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseGroupResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseGroupResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseGroupResponse proto.InternalMessageInfo - -// MsgPauseGroup defines SDK message to close a single Group within a Deployment. 
-type MsgPauseGroup struct { - ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` -} - -func (m *MsgPauseGroup) Reset() { *m = MsgPauseGroup{} } -func (m *MsgPauseGroup) String() string { return proto.CompactTextString(m) } -func (*MsgPauseGroup) ProtoMessage() {} -func (*MsgPauseGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_28ffee979288602d, []int{2} -} -func (m *MsgPauseGroup) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgPauseGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgPauseGroup.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgPauseGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgPauseGroup.Merge(m, src) -} -func (m *MsgPauseGroup) XXX_Size() int { - return m.Size() -} -func (m *MsgPauseGroup) XXX_DiscardUnknown() { - xxx_messageInfo_MsgPauseGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgPauseGroup proto.InternalMessageInfo - -func (m *MsgPauseGroup) GetID() GroupID { - if m != nil { - return m.ID - } - return GroupID{} -} - -// MsgPauseGroupResponse defines the Msg/PauseGroup response type. -type MsgPauseGroupResponse struct { -} - -func (m *MsgPauseGroupResponse) Reset() { *m = MsgPauseGroupResponse{} } -func (m *MsgPauseGroupResponse) String() string { return proto.CompactTextString(m) } -func (*MsgPauseGroupResponse) ProtoMessage() {} -func (*MsgPauseGroupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_28ffee979288602d, []int{3} -} -func (m *MsgPauseGroupResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgPauseGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgPauseGroupResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgPauseGroupResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgPauseGroupResponse.Merge(m, src) -} -func (m *MsgPauseGroupResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgPauseGroupResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgPauseGroupResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgPauseGroupResponse proto.InternalMessageInfo - -// MsgStartGroup defines SDK message to close a single Group within a Deployment. 
-type MsgStartGroup struct { - ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` -} - -func (m *MsgStartGroup) Reset() { *m = MsgStartGroup{} } -func (m *MsgStartGroup) String() string { return proto.CompactTextString(m) } -func (*MsgStartGroup) ProtoMessage() {} -func (*MsgStartGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_28ffee979288602d, []int{4} -} -func (m *MsgStartGroup) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgStartGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgStartGroup.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgStartGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgStartGroup.Merge(m, src) -} -func (m *MsgStartGroup) XXX_Size() int { - return m.Size() -} -func (m *MsgStartGroup) XXX_DiscardUnknown() { - xxx_messageInfo_MsgStartGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgStartGroup proto.InternalMessageInfo - -func (m *MsgStartGroup) GetID() GroupID { - if m != nil { - return m.ID - } - return GroupID{} -} - -// MsgStartGroupResponse defines the Msg/StartGroup response type. -type MsgStartGroupResponse struct { -} - -func (m *MsgStartGroupResponse) Reset() { *m = MsgStartGroupResponse{} } -func (m *MsgStartGroupResponse) String() string { return proto.CompactTextString(m) } -func (*MsgStartGroupResponse) ProtoMessage() {} -func (*MsgStartGroupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_28ffee979288602d, []int{5} -} -func (m *MsgStartGroupResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgStartGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgStartGroupResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgStartGroupResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgStartGroupResponse.Merge(m, src) -} -func (m *MsgStartGroupResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgStartGroupResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgStartGroupResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgStartGroupResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*MsgCloseGroup)(nil), "akash.deployment.v1beta2.MsgCloseGroup") - proto.RegisterType((*MsgCloseGroupResponse)(nil), "akash.deployment.v1beta2.MsgCloseGroupResponse") - proto.RegisterType((*MsgPauseGroup)(nil), "akash.deployment.v1beta2.MsgPauseGroup") - proto.RegisterType((*MsgPauseGroupResponse)(nil), "akash.deployment.v1beta2.MsgPauseGroupResponse") - proto.RegisterType((*MsgStartGroup)(nil), "akash.deployment.v1beta2.MsgStartGroup") - proto.RegisterType((*MsgStartGroupResponse)(nil), "akash.deployment.v1beta2.MsgStartGroupResponse") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta2/groupmsg.proto", fileDescriptor_28ffee979288602d) -} - -var fileDescriptor_28ffee979288602d = []byte{ - // 286 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4f, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, - 0x4a, 0x2d, 0x49, 0x34, 0xd2, 0x4f, 0x2f, 0xca, 0x2f, 0x2d, 0xc8, 0x2d, 0x4e, 0xd7, 0x2b, 0x28, - 0xca, 
0x2f, 0xc9, 0x17, 0x92, 0x00, 0x2b, 0xd4, 0x43, 0x28, 0xd4, 0x83, 0x2a, 0x94, 0x12, 0x49, - 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 0xa5, 0xd4, 0xf0, 0x1b, 0x9c, 0x99, - 0x02, 0x51, 0xa7, 0x94, 0xce, 0xc5, 0xeb, 0x5b, 0x9c, 0xee, 0x9c, 0x93, 0x5f, 0x9c, 0xea, 0x0e, - 0x92, 0x10, 0x0a, 0xe0, 0x62, 0xca, 0x4c, 0x91, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x36, 0x52, 0xd4, - 0xc3, 0x65, 0xab, 0x1e, 0x58, 0xb1, 0xa7, 0x8b, 0x93, 0xec, 0x89, 0x7b, 0xf2, 0x0c, 0x8f, 0xee, - 0xc9, 0x33, 0x79, 0xba, 0xbc, 0xba, 0x27, 0xcf, 0x94, 0x99, 0xf2, 0xe9, 0x9e, 0x3c, 0x67, 0x65, - 0x62, 0x6e, 0x8e, 0x95, 0x52, 0x66, 0x8a, 0x52, 0x10, 0x53, 0x66, 0x8a, 0x15, 0xcb, 0x8b, 0x05, - 0xf2, 0x0c, 0x4a, 0xe2, 0x5c, 0xa2, 0x28, 0x16, 0x05, 0xa5, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, - 0x42, 0x5d, 0x10, 0x90, 0x58, 0x4a, 0x1f, 0x17, 0x20, 0x2c, 0x42, 0x73, 0x41, 0x70, 0x49, 0x62, - 0x51, 0x09, 0x3d, 0x5c, 0x80, 0xb0, 0x08, 0xe6, 0x02, 0xa7, 0xf0, 0x13, 0x8f, 0xe4, 0x18, 0x2f, - 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, - 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xb2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, - 0xd5, 0x07, 0x3b, 0x44, 0x37, 0x2f, 0xb5, 0xa4, 0x3c, 0xbf, 0x28, 0x1b, 0xca, 0x4b, 0x2c, 0xc8, - 0xd4, 0x4f, 0xcf, 0xd7, 0xcf, 0xcb, 0x4f, 0x49, 0xc5, 0x12, 0xd9, 0x49, 0x6c, 0xe0, 0x58, 0x36, - 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x1c, 0x19, 0xbb, 0xfa, 0x68, 0x02, 0x00, 0x00, -} - -func (m *MsgCloseGroup) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseGroup) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroupmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCloseGroupResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseGroupResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgPauseGroup) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgPauseGroup) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgPauseGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroupmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgPauseGroupResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - 
return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgPauseGroupResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgPauseGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgStartGroup) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgStartGroup) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgStartGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroupmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgStartGroupResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgStartGroupResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgStartGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintGroupmsg(dAtA []byte, offset int, v uint64) int { - offset -= sovGroupmsg(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *MsgCloseGroup) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovGroupmsg(uint64(l)) - return n -} - -func (m *MsgCloseGroupResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgPauseGroup) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovGroupmsg(uint64(l)) - return n -} - -func (m *MsgPauseGroupResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgStartGroup) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovGroupmsg(uint64(l)) - return n -} - -func (m *MsgStartGroupResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovGroupmsg(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGroupmsg(x uint64) (n int) { - return sovGroupmsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *MsgCloseGroup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseGroup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseGroup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { 
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroupmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroupmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGroupmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroupmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseGroupResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseGroupResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGroupmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroupmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgPauseGroup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgPauseGroup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgPauseGroup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroupmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroupmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGroupmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return 
ErrInvalidLengthGroupmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgPauseGroupResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgPauseGroupResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgPauseGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGroupmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroupmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgStartGroup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgStartGroup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgStartGroup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroupmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroupmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGroupmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroupmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgStartGroupResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgStartGroupResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
MsgStartGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGroupmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroupmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGroupmsg(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGroupmsg - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGroupmsg - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGroupmsg - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGroupmsg = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGroupmsg = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGroupmsg = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta2/groupspec.go b/go/node/deployment/v1beta2/groupspec.go deleted file mode 100644 index 7640afa0..00000000 --- a/go/node/deployment/v1beta2/groupspec.go +++ /dev/null @@ -1,107 +0,0 @@ -package v1beta2 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - - atypes "github.com/akash-network/akash-api/go/node/audit/v1beta2" - types "github.com/akash-network/akash-api/go/node/types/v1beta2" -) - -// ValidateBasic asserts non-zero values -// TODO: This is causing an import cycle. I think there is some pattern here I'm missing tho.. 
-func (g GroupSpec) ValidateBasic() error { - return validateDeploymentGroup(g) -} - -// GetResources method returns resources list in group -func (g GroupSpec) GetResources() []types.Resources { - resources := make([]types.Resources, 0, len(g.Resources)) - for _, r := range g.Resources { - resources = append(resources, types.Resources{ - Resources: r.Resources, - Count: r.Count, - }) - } - - return resources -} - -// GetName method returns group name -func (g GroupSpec) GetName() string { - return g.Name -} - -// Price method returns price of group -func (g GroupSpec) Price() sdk.DecCoin { - var price sdk.DecCoin - for idx, resource := range g.Resources { - if idx == 0 { - price = resource.FullPrice() - continue - } - price = price.Add(resource.FullPrice()) - } - return price -} - -// MatchResourcesRequirements check if resources attributes match provider's capabilities -func (g GroupSpec) MatchResourcesRequirements(pattr types.Attributes) bool { - for _, rgroup := range g.GetResources() { - pgroup := pattr.GetCapabilitiesGroup("storage") - for _, storage := range rgroup.Resources.Storage { - if len(storage.Attributes) == 0 { - continue - } - - if !storage.Attributes.IN(pgroup) { - return false - } - } - } - - return true -} - -// MatchRequirements method compares provided attributes with specific group attributes. -// Argument provider is a bit cumbersome. First element is attributes from x/provider store -// in case tenant does not need signed attributes at all -// rest of elements (if any) are attributes signed by various auditors -func (g GroupSpec) MatchRequirements(provider []atypes.Provider) bool { - if (len(g.Requirements.SignedBy.AnyOf) != 0) || (len(g.Requirements.SignedBy.AllOf) != 0) { - // we cannot match if there is no signed attributes - if len(provider) < 2 { - return false - } - - existingRequirements := make(attributesMatching) - - for _, existing := range provider[1:] { - existingRequirements[existing.Auditor] = existing.Attributes - } - - if len(g.Requirements.SignedBy.AllOf) != 0 { - for _, validator := range g.Requirements.SignedBy.AllOf { - // if at least one signature does not exist or no match on attributes - requirements cannot match - if existingAttr, exists := existingRequirements[validator]; !exists || - !types.AttributesSubsetOf(g.Requirements.Attributes, existingAttr) { - return false - } - } - } - - if len(g.Requirements.SignedBy.AnyOf) != 0 { - for _, validator := range g.Requirements.SignedBy.AnyOf { - if existingAttr, exists := existingRequirements[validator]; exists && - types.AttributesSubsetOf(g.Requirements.Attributes, existingAttr) { - return true - } - } - - return false - } - - return true - } - - return types.AttributesSubsetOf(g.Requirements.Attributes, provider[0].Attributes) -} diff --git a/go/node/deployment/v1beta2/groupspec.pb.go b/go/node/deployment/v1beta2/groupspec.pb.go deleted file mode 100644 index d909c6b3..00000000 --- a/go/node/deployment/v1beta2/groupspec.pb.go +++ /dev/null @@ -1,426 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta2/groupspec.proto - -package v1beta2 - -import ( - fmt "fmt" - v1beta2 "github.com/akash-network/akash-api/go/node/types/v1beta2" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GroupSpec stores group specifications -type GroupSpec struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` - Requirements v1beta2.PlacementRequirements `protobuf:"bytes,2,opt,name=requirements,proto3" json:"requirements" yaml:"requirements"` - Resources []Resource `protobuf:"bytes,3,rep,name=resources,proto3" json:"resources" yaml:"resources"` -} - -func (m *GroupSpec) Reset() { *m = GroupSpec{} } -func (m *GroupSpec) String() string { return proto.CompactTextString(m) } -func (*GroupSpec) ProtoMessage() {} -func (*GroupSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_8afb9070f2e843b2, []int{0} -} -func (m *GroupSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GroupSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GroupSpec.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GroupSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupSpec.Merge(m, src) -} -func (m *GroupSpec) XXX_Size() int { - return m.Size() -} -func (m *GroupSpec) XXX_DiscardUnknown() { - xxx_messageInfo_GroupSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_GroupSpec proto.InternalMessageInfo - -func init() { - proto.RegisterType((*GroupSpec)(nil), "akash.deployment.v1beta2.GroupSpec") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta2/groupspec.proto", fileDescriptor_8afb9070f2e843b2) -} - -var fileDescriptor_8afb9070f2e843b2 = []byte{ - // 358 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x31, 0x4b, 0xfb, 0x40, - 0x18, 0xc6, 0x93, 0xf6, 0xcf, 0x1f, 0x9b, 0x3a, 0x48, 0x14, 0x0c, 0x1d, 0x72, 0xe5, 0x40, 0x8c, - 0x14, 0xef, 0xb0, 0x6e, 0x05, 0x97, 0x2c, 0xae, 0x12, 0x07, 0xc1, 0xed, 0x92, 0x1e, 0x69, 0x68, - 0x93, 0x8b, 0x97, 0x8b, 0x52, 0x3f, 0x81, 0xa3, 0x1f, 0xa1, 0x9b, 0x5f, 0xa5, 0x63, 0x47, 0xa7, - 0x20, 0xed, 0x22, 0x1d, 0xfb, 0x09, 0x24, 0x97, 0xab, 0x6d, 0xc1, 0x6e, 0x79, 0xdf, 0xfc, 0xde, - 0xf7, 0x79, 0x9e, 0x7b, 0x0d, 0x87, 0x0c, 0x49, 0x36, 0xc0, 0x7d, 0x9a, 0x8e, 0xd8, 0x38, 0xa6, - 0x89, 0xc0, 0xcf, 0x57, 0x3e, 0x15, 0xa4, 0x8b, 0x43, 0xce, 0xf2, 0x34, 0x4b, 0x69, 0x80, 0x52, - 0xce, 0x04, 0x33, 0x2d, 0x49, 0xa2, 0x0d, 0x89, 0x14, 0xd9, 0x3a, 0x09, 0x59, 0xc8, 0x24, 0x84, - 0xcb, 0xaf, 0x8a, 0x6f, 0xc1, 0x6a, 0xb3, 0x4f, 0x32, 0xfa, 0xbb, 0x93, 0x08, 0xc1, 0x23, 0x3f, - 0x17, 0x54, 0x31, 0xe7, 0x7b, 0xd5, 0x39, 0xcd, 0x58, 0xce, 0x03, 0x05, 0xc2, 0x8f, 0x9a, 0xd1, - 0xb8, 0x2d, 0x0d, 0xdd, 0xa7, 0x34, 0x30, 0x3b, 0xc6, 0xbf, 0x84, 0xc4, 0xd4, 0xd2, 0xdb, 0xba, - 0xd3, 0x70, 0x4f, 0x97, 0x05, 0x90, 0xf5, 0xaa, 0x00, 0xcd, 0x31, 0x89, 0x47, 0x3d, 0x58, 0x56, - 0xd0, 0x93, 0x4d, 0xf3, 0xd5, 0x38, 0xe4, 0xf4, 0x29, 0x8f, 0x38, 0x2d, 0x05, 0x32, 0xab, 0xd6, - 0xd6, 0x9d, 0x66, 0xf7, 0x02, 0x55, 0x71, 0x4a, 0x7b, 0xeb, 0x20, 0xe8, 0x6e, 0x44, 0x02, 0x49, - 0x79, 0x5b, 0x03, 0x6e, 0x67, 0x5a, 0x00, 0x6d, 0x59, 0x80, 0x9d, 0x35, 0xab, 0x02, 0x1c, 0x57, - 0x5a, 0xdb, 0x5d, 0xe8, 0xed, 
0x40, 0x66, 0x68, 0x34, 0xd6, 0x41, 0x32, 0xab, 0xde, 0xae, 0x3b, - 0xcd, 0x2e, 0x44, 0xfb, 0xde, 0x11, 0x79, 0x0a, 0x75, 0xcf, 0x94, 0xe2, 0x66, 0x78, 0x55, 0x80, - 0xa3, 0xb5, 0x9c, 0x6a, 0x41, 0x6f, 0xf3, 0xbb, 0x77, 0xf0, 0x36, 0x01, 0xda, 0xf7, 0x04, 0x68, - 0xee, 0xc3, 0x74, 0x6e, 0xeb, 0xb3, 0xb9, 0xad, 0x7f, 0xcd, 0x6d, 0xfd, 0x7d, 0x61, 0x6b, 0xb3, - 0x85, 0xad, 0x7d, 0x2e, 0x6c, 0xed, 0xf1, 0x26, 0x8c, 0xc4, 0x20, 0xf7, 0x51, 0xc0, 0x62, 0x2c, - 0x3d, 0x5c, 0x26, 0x54, 0xbc, 0x30, 0x3e, 0x54, 0x15, 0x49, 0x23, 0x1c, 0x32, 0x9c, 0xb0, 0x3e, - 0xfd, 0xe3, 0x22, 0xfe, 0x7f, 0x79, 0x89, 0xeb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x80, 0x88, - 0x8d, 0xf5, 0x32, 0x02, 0x00, 0x00, -} - -func (m *GroupSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GroupSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Resources) > 0 { - for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroupspec(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - { - size, err := m.Requirements.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroupspec(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGroupspec(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintGroupspec(dAtA []byte, offset int, v uint64) int { - offset -= sovGroupspec(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GroupSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovGroupspec(uint64(l)) - } - l = m.Requirements.Size() - n += 1 + l + sovGroupspec(uint64(l)) - if len(m.Resources) > 0 { - for _, e := range m.Resources { - l = e.Size() - n += 1 + l + sovGroupspec(uint64(l)) - } - } - return n -} - -func sovGroupspec(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGroupspec(x uint64) (n int) { - return sovGroupspec(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GroupSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupspec - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupspec - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGroupspec - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGroupspec - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupspec - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroupspec - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroupspec - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Requirements.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupspec - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroupspec - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroupspec - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resources = append(m.Resources, Resource{}) - if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGroupspec(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroupspec - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGroupspec(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupspec - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupspec - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupspec - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGroupspec - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGroupspec - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGroupspec - } - if depth == 0 { - return iNdEx, nil - } - } 
- return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGroupspec = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGroupspec = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGroupspec = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta2/id.go b/go/node/deployment/v1beta2/id.go deleted file mode 100644 index b499d914..00000000 --- a/go/node/deployment/v1beta2/id.go +++ /dev/null @@ -1,103 +0,0 @@ -package v1beta2 - -import ( - fmt "fmt" - "strconv" - "strings" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -// Equals method compares specific deployment with provided deployment -func (id DeploymentID) Equals(other DeploymentID) bool { - return id.Owner == other.Owner && id.DSeq == other.DSeq -} - -// Validate method for DeploymentID and returns nil -func (id DeploymentID) Validate() error { - _, err := sdk.AccAddressFromBech32(id.Owner) - switch { - case err != nil: - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "DeploymentID: Invalid Owner Address") - case id.DSeq == 0: - return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "DeploymentID: Invalid Deployment Sequence") - } - return nil -} - -// String method for deployment IDs -func (id DeploymentID) String() string { - return fmt.Sprintf("%s/%d", id.Owner, id.DSeq) -} - -func (id DeploymentID) GetOwnerAddress() (sdk.Address, error) { - return sdk.AccAddressFromBech32(id.Owner) -} - -func ParseDeploymentID(val string) (DeploymentID, error) { - parts := strings.Split(val, "/") - return ParseDeploymentPath(parts) -} - -// ParseDeploymentPath returns DeploymentID details with provided queries, and return -// error if occurred due to wrong query -func ParseDeploymentPath(parts []string) (DeploymentID, error) { - if len(parts) != 2 { - return DeploymentID{}, ErrInvalidIDPath - } - - owner, err := sdk.AccAddressFromBech32(parts[0]) - if err != nil { - return DeploymentID{}, err - } - - dseq, err := strconv.ParseUint(parts[1], 10, 64) - if err != nil { - return DeploymentID{}, err - } - - return DeploymentID{ - Owner: owner.String(), - DSeq: dseq, - }, nil -} - -// MakeGroupID returns GroupID instance with provided deployment details -// and group sequence number. -func MakeGroupID(id DeploymentID, gseq uint32) GroupID { - return GroupID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: gseq, - } -} - -// DeploymentID method returns DeploymentID details with specific group details -func (id GroupID) DeploymentID() DeploymentID { - return DeploymentID{ - Owner: id.Owner, - DSeq: id.DSeq, - } -} - -// Equals method compares specific group with provided group -func (id GroupID) Equals(other GroupID) bool { - return id.DeploymentID().Equals(other.DeploymentID()) && id.GSeq == other.GSeq -} - -// Validate method for GroupID and returns nil -func (id GroupID) Validate() error { - if err := id.DeploymentID().Validate(); err != nil { - return sdkerrors.Wrap(err, "GroupID: Invalid DeploymentID") - } - if id.GSeq == 0 { - return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "GroupID: Invalid Group Sequence") - } - return nil -} - -// String method provides human readable representation of GroupID. 
-func (id GroupID) String() string { - return fmt.Sprintf("%s/%d", id.DeploymentID(), id.GSeq) -} diff --git a/go/node/deployment/v1beta2/key.go b/go/node/deployment/v1beta2/key.go deleted file mode 100644 index c1b3e036..00000000 --- a/go/node/deployment/v1beta2/key.go +++ /dev/null @@ -1,20 +0,0 @@ -package v1beta2 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "deployment" - - // StoreKey is the store key string for deployment - StoreKey = ModuleName - - // RouterKey is the message route for deployment - RouterKey = ModuleName -) - -func DeploymentPrefix() []byte { - return []byte{0x01} -} - -func GroupPrefix() []byte { - return []byte{0x02} -} diff --git a/go/node/deployment/v1beta2/migrate/v1beta1.go b/go/node/deployment/v1beta2/migrate/v1beta1.go deleted file mode 100644 index d1dbe463..00000000 --- a/go/node/deployment/v1beta2/migrate/v1beta1.go +++ /dev/null @@ -1,52 +0,0 @@ -package migrate - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/akash-network/akash-api/go/node/deployment/v1beta1" - "github.com/akash-network/akash-api/go/node/deployment/v1beta2" - amigrate "github.com/akash-network/akash-api/go/node/types/v1beta2/migrate" -) - -func ResourceFromV1Beta1(from v1beta1.Resource) v1beta2.Resource { - return v1beta2.Resource{ - Resources: amigrate.ResourceUnitsFromV1Beta1(from.Resources), - Count: from.Count, - Price: sdk.NewDecCoinFromCoin(from.Price), - } -} - -func ResourcesFromV1Beta1(from []v1beta1.Resource) []v1beta2.Resource { - res := make([]v1beta2.Resource, 0, len(from)) - - for _, oval := range from { - res = append(res, ResourceFromV1Beta1(oval)) - } - - return res -} - -func GroupIDFromV1Beta1(from v1beta1.GroupID) v1beta2.GroupID { - return v1beta2.GroupID{ - Owner: from.Owner, - DSeq: from.DSeq, - GSeq: from.GSeq, - } -} - -func GroupSpecFromV1Beta1(from v1beta1.GroupSpec) v1beta2.GroupSpec { - return v1beta2.GroupSpec{ - Name: from.Name, - Requirements: amigrate.PlacementRequirementsFromV1Beta1(from.Requirements), - Resources: ResourcesFromV1Beta1(from.Resources), - } -} - -func GroupFromV1Beta1(from v1beta1.Group) v1beta2.Group { - return v1beta2.Group{ - GroupID: GroupIDFromV1Beta1(from.GroupID), - State: v1beta2.Group_State(from.State), - GroupSpec: GroupSpecFromV1Beta1(from.GroupSpec), - CreatedAt: from.CreatedAt, - } -} diff --git a/go/node/deployment/v1beta2/msgs.go b/go/node/deployment/v1beta2/msgs.go deleted file mode 100644 index d2ebf78c..00000000 --- a/go/node/deployment/v1beta2/msgs.go +++ /dev/null @@ -1,323 +0,0 @@ -package v1beta2 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - MsgTypeCreateDeployment = "create-deployment" - MsgTypeDepositDeployment = "deposit-deployment" - MsgTypeUpdateDeployment = "update-deployment" - MsgTypeCloseDeployment = "close-deployment" - MsgTypeCloseGroup = "close-group" - MsgTypePauseGroup = "pause-group" - MsgTypeStartGroup = "start-group" -) - -var ( - _, _, _, _ sdk.Msg = &MsgCreateDeployment{}, &MsgUpdateDeployment{}, &MsgCloseDeployment{}, &MsgCloseGroup{} -) - -// NewMsgCreateDeployment creates a new MsgCreateDeployment instance -func NewMsgCreateDeployment(id DeploymentID, groups []GroupSpec, version []byte, - deposit sdk.Coin, depositor sdk.AccAddress) *MsgCreateDeployment { - return &MsgCreateDeployment{ - ID: id, - Groups: groups, - Version: version, - Deposit: deposit, - Depositor: depositor.String(), - } -} - -// Route implements the sdk.Msg interface -func (msg 
MsgCreateDeployment) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCreateDeployment) Type() string { return MsgTypeCreateDeployment } - -// GetSignBytes encodes the message for signing -func (msg MsgCreateDeployment) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCreateDeployment) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// ValidateBasic does basic validation like check owner and groups length -func (msg MsgCreateDeployment) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - if len(msg.Groups) == 0 { - return ErrInvalidGroups - } - - if len(msg.Version) == 0 { - return ErrEmptyVersion - } - - if len(msg.Version) != ManifestVersionLength { - return ErrInvalidVersion - } - - for _, gs := range msg.Groups { - err := gs.ValidateBasic() - if err != nil { - return err - } - } - - _, err := sdk.AccAddressFromBech32(msg.Depositor) - if err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreateDeployment: Invalid Depositor Address") - } - - return nil -} - -// NewMsgDepositDeployment creates a new MsgDepositDeployment instance -func NewMsgDepositDeployment(id DeploymentID, amount sdk.Coin, depositor string) *MsgDepositDeployment { - return &MsgDepositDeployment{ - ID: id, - Amount: amount, - Depositor: depositor, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgDepositDeployment) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgDepositDeployment) Type() string { return MsgTypeDepositDeployment } - -// GetSignBytes encodes the message for signing -func (msg MsgDepositDeployment) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgDepositDeployment) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// ValidateBasic does basic validation like check owner and groups length -func (msg MsgDepositDeployment) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - - if msg.Amount.IsZero() { - return ErrInvalidDeposit - } - - _, err := sdk.AccAddressFromBech32(msg.Depositor) - if err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgDepositDeployment: Invalid Depositor Address") - } - - return nil -} - -// NewMsgUpdateDeployment creates a new MsgUpdateDeployment instance -func NewMsgUpdateDeployment(id DeploymentID, version []byte) *MsgUpdateDeployment { - return &MsgUpdateDeployment{ - ID: id, - Version: version, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgUpdateDeployment) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgUpdateDeployment) Type() string { return MsgTypeUpdateDeployment } - -// ValidateBasic does basic validation -func (msg MsgUpdateDeployment) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - - if len(msg.Version) == 0 { - return ErrEmptyVersion - } - - if len(msg.Version) != ManifestVersionLength { - return ErrInvalidVersion - } - - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgUpdateDeployment) GetSignBytes() 
[]byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgUpdateDeployment) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgCloseDeployment creates a new MsgCloseDeployment instance -func NewMsgCloseDeployment(id DeploymentID) *MsgCloseDeployment { - return &MsgCloseDeployment{ - ID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCloseDeployment) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCloseDeployment) Type() string { return MsgTypeCloseDeployment } - -// ValidateBasic does basic validation with deployment details -func (msg MsgCloseDeployment) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgCloseDeployment) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCloseDeployment) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgCloseGroup creates a new MsgCloseGroup instance -func NewMsgCloseGroup(id GroupID) *MsgCloseGroup { - return &MsgCloseGroup{ - ID: id, - } -} - -// Route implements the sdk.Msg interface for routing -func (msg MsgCloseGroup) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface exposing message type -func (msg MsgCloseGroup) Type() string { return MsgTypeCloseGroup } - -// ValidateBasic calls underlying GroupID.Validate() check and returns result -func (msg MsgCloseGroup) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgCloseGroup) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCloseGroup) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgPauseGroup creates a new MsgPauseGroup instance -func NewMsgPauseGroup(id GroupID) *MsgPauseGroup { - return &MsgPauseGroup{ - ID: id, - } -} - -// Route implements the sdk.Msg interface for routing -func (msg MsgPauseGroup) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface exposing message type -func (msg MsgPauseGroup) Type() string { return MsgTypePauseGroup } - -// ValidateBasic calls underlying GroupID.Validate() check and returns result -func (msg MsgPauseGroup) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgPauseGroup) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgPauseGroup) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgStartGroup creates a new MsgStartGroup instance -func NewMsgStartGroup(id GroupID) *MsgStartGroup { - return &MsgStartGroup{ - ID: id, - } -} - -// Route 
implements the sdk.Msg interface for routing -func (msg MsgStartGroup) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface exposing message type -func (msg MsgStartGroup) Type() string { return MsgTypeStartGroup } - -// ValidateBasic calls underlying GroupID.Validate() check and returns result -func (msg MsgStartGroup) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgStartGroup) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgStartGroup) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} diff --git a/go/node/deployment/v1beta2/params.go b/go/node/deployment/v1beta2/params.go deleted file mode 100644 index 0144c7fa..00000000 --- a/go/node/deployment/v1beta2/params.go +++ /dev/null @@ -1,50 +0,0 @@ -package v1beta2 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" - "github.com/pkg/errors" -) - -var _ paramtypes.ParamSet = (*Params)(nil) - -var ( - DefaultDeploymentMinDeposit = sdk.NewCoin("uakt", sdk.NewInt(5000000)) -) - -const ( - keyDeploymentMinDeposit = "DeploymentMinDeposit" -) - -func ParamKeyTable() paramtypes.KeyTable { - return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) -} - -func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { - return paramtypes.ParamSetPairs{ - paramtypes.NewParamSetPair([]byte(keyDeploymentMinDeposit), &p.DeploymentMinDeposit, validateCoin), - } -} - -func DefaultParams() Params { - return Params{ - DeploymentMinDeposit: DefaultDeploymentMinDeposit, - } -} - -func (p Params) Validate() error { - if err := validateCoin(p.DeploymentMinDeposit); err != nil { - return err - } - - return nil -} - -func validateCoin(i interface{}) error { - _, ok := i.(sdk.Coin) - if !ok { - return errors.Wrapf(ErrInvalidParam, "%T", i) - } - - return nil -} diff --git a/go/node/deployment/v1beta2/params.pb.go b/go/node/deployment/v1beta2/params.pb.go deleted file mode 100644 index 01291348..00000000 --- a/go/node/deployment/v1beta2/params.pb.go +++ /dev/null @@ -1,329 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta2/params.proto - -package v1beta2 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Params defines the parameters for the x/deployment package -type Params struct { - DeploymentMinDeposit types.Coin `protobuf:"bytes,1,opt,name=deployment_min_deposit,json=deploymentMinDeposit,proto3" json:"deployment_min_deposit" yaml:"deployment_min_deposit"` -} - -func (m *Params) Reset() { *m = Params{} } -func (m *Params) String() string { return proto.CompactTextString(m) } -func (*Params) ProtoMessage() {} -func (*Params) Descriptor() ([]byte, []int) { - return fileDescriptor_0ce4ce202971abe5, []int{0} -} -func (m *Params) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Params.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Params) XXX_Merge(src proto.Message) { - xxx_messageInfo_Params.Merge(m, src) -} -func (m *Params) XXX_Size() int { - return m.Size() -} -func (m *Params) XXX_DiscardUnknown() { - xxx_messageInfo_Params.DiscardUnknown(m) -} - -var xxx_messageInfo_Params proto.InternalMessageInfo - -func (m *Params) GetDeploymentMinDeposit() types.Coin { - if m != nil { - return m.DeploymentMinDeposit - } - return types.Coin{} -} - -func init() { - proto.RegisterType((*Params)(nil), "akash.deployment.v1beta2.Params") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta2/params.proto", fileDescriptor_0ce4ce202971abe5) -} - -var fileDescriptor_0ce4ce202971abe5 = []byte{ - // 274 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, - 0x4a, 0x2d, 0x49, 0x34, 0xd2, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, - 0xc9, 0x17, 0x92, 0x00, 0x2b, 0xd3, 0x43, 0x28, 0xd3, 0x83, 0x2a, 0x93, 0x12, 0x49, 0xcf, 0x4f, - 0xcf, 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 0xa5, 0xe4, 0x92, 0xf3, 0x8b, 0x73, 0xf3, 0x8b, - 0xf5, 0x93, 0x12, 0x8b, 0x53, 0xa1, 0x26, 0x1a, 0xea, 0x27, 0xe7, 0x67, 0xe6, 0x41, 0xe4, 0x95, - 0x36, 0x33, 0x72, 0xb1, 0x05, 0x80, 0x2d, 0x10, 0x5a, 0xc1, 0xc8, 0x25, 0x86, 0x30, 0x37, 0x3e, - 0x37, 0x33, 0x2f, 0x3e, 0x25, 0xb5, 0x20, 0xbf, 0x38, 0xb3, 0x44, 0x82, 0x51, 0x81, 0x51, 0x83, - 0xdb, 0x48, 0x52, 0x0f, 0x62, 0x98, 0x1e, 0xc8, 0x30, 0xa8, 0xbd, 0x86, 0x7a, 0xce, 0xf9, 0x99, - 0x79, 0x4e, 0xe1, 0x27, 0xee, 0xc9, 0x33, 0x3c, 0xba, 0x27, 0x2f, 0xe2, 0x02, 0x37, 0xc0, 0x37, - 0x33, 0xcf, 0x05, 0xa2, 0xfd, 0xd5, 0x3d, 0x79, 0x1c, 0x06, 0x7f, 0xba, 0x27, 0x2f, 0x5b, 0x99, - 0x98, 0x9b, 0x63, 0xa5, 0x84, 0x5d, 0x5e, 0x29, 0x48, 0x24, 0x05, 0x8b, 0x81, 0x4e, 0xe1, 0x27, - 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, - 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x65, 0x9b, 0x9e, 0x59, 0x92, 0x51, 0x9a, - 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x0f, 0x0e, 0x2a, 0xdd, 0xbc, 0xd4, 0x92, 0xf2, 0xfc, 0xa2, 0x6c, - 0x28, 0x2f, 0xb1, 0x20, 0x53, 0x3f, 0x3d, 0x5f, 0x3f, 0x2f, 0x3f, 0x25, 0x15, 0x4b, 0x58, 0x27, - 0xb1, 0x81, 0x43, 0xc5, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x01, 0xc8, 0x38, 0xe2, 0x8e, 0x01, - 0x00, 0x00, -} - -func (m *Params) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil 
{ - return nil, err - } - return dAtA[:n], nil -} - -func (m *Params) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.DeploymentMinDeposit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintParams(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintParams(dAtA []byte, offset int, v uint64) int { - offset -= sovParams(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Params) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.DeploymentMinDeposit.Size() - n += 1 + l + sovParams(uint64(l)) - return n -} - -func sovParams(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozParams(x uint64) (n int) { - return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Params) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Params: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeploymentMinDeposit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthParams - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthParams - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.DeploymentMinDeposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipParams(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipParams(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthParams - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupParams - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthParams - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta2/query.pb.go b/go/node/deployment/v1beta2/query.pb.go deleted file mode 100644 index 50421246..00000000 --- a/go/node/deployment/v1beta2/query.pb.go +++ /dev/null @@ -1,1628 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta2/query.proto - -package v1beta2 - -import ( - context "context" - fmt "fmt" - v1beta2 "github.com/akash-network/akash-api/go/node/escrow/v1beta2" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryDeploymentsRequest is request type for the Query/Deployments RPC method -type QueryDeploymentsRequest struct { - Filters DeploymentFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryDeploymentsRequest) Reset() { *m = QueryDeploymentsRequest{} } -func (m *QueryDeploymentsRequest) String() string { return proto.CompactTextString(m) } -func (*QueryDeploymentsRequest) ProtoMessage() {} -func (*QueryDeploymentsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_61430a03c00e84b2, []int{0} -} -func (m *QueryDeploymentsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryDeploymentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryDeploymentsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryDeploymentsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryDeploymentsRequest.Merge(m, src) -} -func (m *QueryDeploymentsRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryDeploymentsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryDeploymentsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryDeploymentsRequest proto.InternalMessageInfo - -func (m *QueryDeploymentsRequest) GetFilters() DeploymentFilters { - if m != nil { - return m.Filters - } - return DeploymentFilters{} -} - -func (m *QueryDeploymentsRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryDeploymentsResponse is response type for the Query/Deployments RPC method -type QueryDeploymentsResponse struct { - Deployments DeploymentResponses `protobuf:"bytes,1,rep,name=deployments,proto3,castrepeated=DeploymentResponses" json:"deployments"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryDeploymentsResponse) Reset() { *m = QueryDeploymentsResponse{} } -func (m *QueryDeploymentsResponse) String() string { return proto.CompactTextString(m) } -func (*QueryDeploymentsResponse) ProtoMessage() {} -func (*QueryDeploymentsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_61430a03c00e84b2, []int{1} -} -func (m *QueryDeploymentsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryDeploymentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryDeploymentsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryDeploymentsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryDeploymentsResponse.Merge(m, src) -} -func (m *QueryDeploymentsResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryDeploymentsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryDeploymentsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryDeploymentsResponse proto.InternalMessageInfo - -func (m *QueryDeploymentsResponse) GetDeployments() DeploymentResponses { - if m != nil { - return m.Deployments - } - return nil -} - -func (m *QueryDeploymentsResponse) GetPagination() 
*query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryDeploymentRequest is request type for the Query/Deployment RPC method -type QueryDeploymentRequest struct { - ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m *QueryDeploymentRequest) Reset() { *m = QueryDeploymentRequest{} } -func (m *QueryDeploymentRequest) String() string { return proto.CompactTextString(m) } -func (*QueryDeploymentRequest) ProtoMessage() {} -func (*QueryDeploymentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_61430a03c00e84b2, []int{2} -} -func (m *QueryDeploymentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryDeploymentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryDeploymentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryDeploymentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryDeploymentRequest.Merge(m, src) -} -func (m *QueryDeploymentRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryDeploymentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryDeploymentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryDeploymentRequest proto.InternalMessageInfo - -func (m *QueryDeploymentRequest) GetID() DeploymentID { - if m != nil { - return m.ID - } - return DeploymentID{} -} - -// QueryDeploymentResponse is response type for the Query/Deployment RPC method -type QueryDeploymentResponse struct { - Deployment Deployment `protobuf:"bytes,1,opt,name=deployment,proto3" json:"deployment" yaml:"deployment"` - Groups []Group `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups" yaml:"groups"` - EscrowAccount v1beta2.Account `protobuf:"bytes,3,opt,name=escrow_account,json=escrowAccount,proto3" json:"escrow_account"` -} - -func (m *QueryDeploymentResponse) Reset() { *m = QueryDeploymentResponse{} } -func (m *QueryDeploymentResponse) String() string { return proto.CompactTextString(m) } -func (*QueryDeploymentResponse) ProtoMessage() {} -func (*QueryDeploymentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_61430a03c00e84b2, []int{3} -} -func (m *QueryDeploymentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryDeploymentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryDeploymentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryDeploymentResponse.Merge(m, src) -} -func (m *QueryDeploymentResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryDeploymentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryDeploymentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryDeploymentResponse proto.InternalMessageInfo - -func (m *QueryDeploymentResponse) GetDeployment() Deployment { - if m != nil { - return m.Deployment - } - return Deployment{} -} - -func (m *QueryDeploymentResponse) GetGroups() []Group { - if m != nil { - return m.Groups - } - return nil -} - -func (m *QueryDeploymentResponse) GetEscrowAccount() v1beta2.Account { - if m != nil { - return m.EscrowAccount - } - return v1beta2.Account{} -} - -// QueryGroupRequest is request 
type for the Query/Group RPC method -type QueryGroupRequest struct { - ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m *QueryGroupRequest) Reset() { *m = QueryGroupRequest{} } -func (m *QueryGroupRequest) String() string { return proto.CompactTextString(m) } -func (*QueryGroupRequest) ProtoMessage() {} -func (*QueryGroupRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_61430a03c00e84b2, []int{4} -} -func (m *QueryGroupRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryGroupRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryGroupRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryGroupRequest.Merge(m, src) -} -func (m *QueryGroupRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryGroupRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryGroupRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryGroupRequest proto.InternalMessageInfo - -func (m *QueryGroupRequest) GetID() GroupID { - if m != nil { - return m.ID - } - return GroupID{} -} - -// QueryGroupResponse is response type for the Query/Group RPC method -type QueryGroupResponse struct { - Group Group `protobuf:"bytes,1,opt,name=group,proto3" json:"group"` -} - -func (m *QueryGroupResponse) Reset() { *m = QueryGroupResponse{} } -func (m *QueryGroupResponse) String() string { return proto.CompactTextString(m) } -func (*QueryGroupResponse) ProtoMessage() {} -func (*QueryGroupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_61430a03c00e84b2, []int{5} -} -func (m *QueryGroupResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryGroupResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryGroupResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryGroupResponse.Merge(m, src) -} -func (m *QueryGroupResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryGroupResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryGroupResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryGroupResponse proto.InternalMessageInfo - -func (m *QueryGroupResponse) GetGroup() Group { - if m != nil { - return m.Group - } - return Group{} -} - -func init() { - proto.RegisterType((*QueryDeploymentsRequest)(nil), "akash.deployment.v1beta2.QueryDeploymentsRequest") - proto.RegisterType((*QueryDeploymentsResponse)(nil), "akash.deployment.v1beta2.QueryDeploymentsResponse") - proto.RegisterType((*QueryDeploymentRequest)(nil), "akash.deployment.v1beta2.QueryDeploymentRequest") - proto.RegisterType((*QueryDeploymentResponse)(nil), "akash.deployment.v1beta2.QueryDeploymentResponse") - proto.RegisterType((*QueryGroupRequest)(nil), "akash.deployment.v1beta2.QueryGroupRequest") - proto.RegisterType((*QueryGroupResponse)(nil), "akash.deployment.v1beta2.QueryGroupResponse") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta2/query.proto", fileDescriptor_61430a03c00e84b2) -} - -var fileDescriptor_61430a03c00e84b2 = []byte{ - // 681 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0x8d, 0xdd, 0x9f, 0x4f, 0x9a, 0xa8, 0x9f, 0xd4, 0x01, 0x81, 0x15, 0xc0, 0x2e, 0x56, 0x69, - 0x4a, 0x7f, 0x3c, 0xc4, 0xec, 0x8a, 0xba, 0xc0, 0x8a, 0x5a, 0x15, 0x36, 0xd4, 0x1b, 0x10, 0x42, - 0x42, 0x4e, 0x32, 0x75, 0xad, 0x26, 0x1e, 0xd7, 0x33, 0xa1, 0xca, 0x96, 0x27, 0x00, 0xf1, 0x02, - 0x6c, 0x90, 0x10, 0x0b, 0x56, 0x3c, 0x44, 0x97, 0x95, 0x10, 0x12, 0xab, 0x80, 0x12, 0x16, 0x88, - 0x05, 0x8b, 0x3e, 0x01, 0xf2, 0xcc, 0x24, 0x36, 0x24, 0x69, 0x92, 0x5d, 0xe2, 0x39, 0xf7, 0x9c, - 0x73, 0xef, 0xb9, 0x63, 0x83, 0x65, 0xef, 0xc8, 0xa3, 0x87, 0xa8, 0x86, 0xa3, 0x3a, 0x69, 0x35, - 0x70, 0xc8, 0xd0, 0x8b, 0x52, 0x05, 0x33, 0xcf, 0x46, 0xc7, 0x4d, 0x1c, 0xb7, 0xac, 0x28, 0x26, - 0x8c, 0x40, 0x8d, 0xa3, 0xac, 0x14, 0x65, 0x49, 0x54, 0xe1, 0xb2, 0x4f, 0x7c, 0xc2, 0x41, 0x28, - 0xf9, 0x25, 0xf0, 0x85, 0xeb, 0x3e, 0x21, 0x7e, 0x1d, 0x23, 0x2f, 0x0a, 0x90, 0x17, 0x86, 0x84, - 0x79, 0x2c, 0x20, 0x21, 0x95, 0xa7, 0x6b, 0x55, 0x42, 0x1b, 0x84, 0xa2, 0x8a, 0x47, 0xb1, 0x90, - 0x91, 0xa2, 0x25, 0x14, 0x79, 0x7e, 0x10, 0x72, 0xb0, 0xc4, 0xde, 0x1e, 0xe9, 0x2f, 0x63, 0x46, - 0x40, 0x47, 0xb7, 0xe2, 0xc7, 0xa4, 0x19, 0x49, 0xd4, 0xca, 0xc5, 0xa8, 0xa0, 0x26, 0x71, 0x4b, - 0x02, 0x87, 0x69, 0x35, 0x26, 0x27, 0x7d, 0x0c, 0x6b, 0x45, 0x58, 0xb6, 0x61, 0x7e, 0x54, 0xc0, - 0xd5, 0xfd, 0xc4, 0x7d, 0xb9, 0xcf, 0x45, 0x5d, 0x7c, 0xdc, 0xc4, 0x94, 0xc1, 0x87, 0xe0, 0xbf, - 0x83, 0xa0, 0xce, 0x70, 0x4c, 0x35, 0x65, 0x49, 0x59, 0xcd, 0xdb, 0xeb, 0xd6, 0xa8, 0x11, 0x5a, - 0x69, 0xf9, 0x8e, 0x28, 0x71, 0x66, 0x4f, 0xdb, 0x46, 0xce, 0xed, 0x31, 0xc0, 0x1d, 0x00, 0xd2, - 0xb9, 0x68, 0x2a, 0xe7, 0x5b, 0xb1, 0xc4, 0x10, 0xad, 0x64, 0x88, 0x96, 0xc8, 0x4a, 0x0e, 0xd1, - 0x7a, 0xe4, 0xf9, 0x58, 0x1a, 0x71, 0x33, 0x95, 0xe6, 0x17, 0x05, 0x68, 0x83, 0x86, 0x69, 0x44, - 0x42, 0x8a, 0x61, 0x04, 0xf2, 0xa9, 0xb7, 0xc4, 0xf5, 0xcc, 0x6a, 0xde, 0x2e, 0x8d, 0x76, 0xfd, - 0x0f, 0x51, 0x8f, 0xc7, 0xb9, 0x96, 0x78, 0xff, 0xf0, 0xcd, 0xb8, 0x34, 0x78, 0x46, 0xdd, 0xac, - 0x04, 0xdc, 0x1d, 0xd2, 0x56, 0x71, 0x6c, 0x5b, 0x82, 0xea, 0xaf, 0xbe, 0x9e, 0x81, 0x2b, 0x03, - 0x6e, 0x44, 0x0c, 0x0e, 0x50, 0x83, 0x9a, 0x4c, 0x60, 0x65, 0x92, 0x04, 0xf6, 0xca, 0x0e, 0x48, - 0x1a, 0xe8, 0xb4, 0x0d, 0x75, 0xaf, 0xec, 0xaa, 0x41, 0xcd, 0xfc, 0xa4, 0x0e, 0xc4, 0xdc, 0x1f, - 0x5a, 0x03, 0x80, 0x94, 0x4e, 0xea, 0x2c, 0x4f, 0xa2, 0xe3, 0x14, 0x13, 0x95, 0x5f, 0x6d, 0x23, - 0x53, 0x7f, 0xde, 0x36, 0x16, 0x5b, 0x5e, 0xa3, 0xbe, 0x65, 0xa6, 0xcf, 0x4c, 0x37, 0x03, 0x80, - 0x4f, 0xc0, 0x3c, 0x5f, 0x52, 0xaa, 0xa9, 0x3c, 0x1e, 0x63, 0xb4, 0xd4, 0x6e, 0x82, 0x73, 0x0c, - 0xa9, 0x22, 0xcb, 0xce, 0xdb, 0xc6, 0x82, 0x50, 0x10, 0xff, 0x4d, 0x57, 0x1e, 0xc0, 0x07, 0xe0, - 0x7f, 0xb1, 0xe9, 0xcf, 0xbd, 0x6a, 0x95, 0x34, 0x43, 0xa6, 0xcd, 0xf0, 0x66, 0x6e, 0x48, 0x05, - 0x71, 0xd8, 0x67, 0xbf, 0x2f, 0x40, 0x72, 0x51, 0x17, 0xc4, 0xa9, 0x7c, 0xb8, 0x35, 0xfb, 0xf3, - 0xad, 0x91, 0x33, 0x5d, 0xb0, 0xc8, 0xa7, 0xc6, 0x8d, 0xf4, 0xf2, 0xd8, 0xce, 0xe4, 0x71, 0x73, - 0x8c, 0xf9, 0x21, 0x51, 0xec, 0x03, 0x98, 0xe5, 0x94, 0x21, 0xdc, 0x03, 0x73, 0xbc, 0x0b, 0xc9, - 0x3b, 0x76, 0x28, 0xc2, 0xb4, 0xa8, 0xb1, 0x7f, 0xcf, 0x80, 0x39, 0xce, 0x09, 0xdf, 0x2b, 0x20, - 0x9f, 0xb9, 0x18, 0x70, 0xf2, 0xdd, 0xef, 0xdd, 0xfa, 0x82, 0x3d, 0x4d, 0x89, 0x70, 0x6f, 0xda, - 0x2f, 0x3f, 0xff, 0x78, 0xa3, 0x6e, 0xc0, 0x35, 0x34, 0xc1, 0x9b, 0x8e, 0xa2, 0x7a, 0x40, 0x19, - 0x7c, 0xa7, 0x00, 0x90, 0x72, 0xc1, 0x3b, 0x53, 0xdc, 0x52, 0x61, 0x74, 0xfa, 0x7b, 0x3d, 0xad, - 0xcf, 0x20, 0x3c, 0x20, 0xf0, 0xb5, 0x02, 0xe6, 0xf8, 
0xcc, 0xe1, 0xfa, 0x18, 0xc1, 0xec, 0x96, - 0x14, 0x36, 0x26, 0x03, 0x4b, 0x63, 0x9b, 0xdc, 0x58, 0x11, 0xde, 0x42, 0x17, 0xbf, 0xd9, 0x85, - 0x27, 0xe7, 0xf1, 0x69, 0x47, 0x57, 0xce, 0x3a, 0xba, 0xf2, 0xbd, 0xa3, 0x2b, 0xaf, 0xba, 0x7a, - 0xee, 0xac, 0xab, 0xe7, 0xbe, 0x76, 0xf5, 0xdc, 0xd3, 0x6d, 0x3f, 0x60, 0x87, 0xcd, 0x8a, 0x55, - 0x25, 0x0d, 0x41, 0xb5, 0x19, 0x62, 0x76, 0x42, 0xe2, 0x23, 0xf9, 0x2f, 0xf9, 0x98, 0xf9, 0x04, - 0x85, 0xa4, 0x86, 0x87, 0x88, 0x54, 0xe6, 0xf9, 0x57, 0xe1, 0xee, 0x9f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0xbc, 0xfa, 0xaa, 0xa7, 0x52, 0x07, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // Deployments queries deployments - Deployments(ctx context.Context, in *QueryDeploymentsRequest, opts ...grpc.CallOption) (*QueryDeploymentsResponse, error) - // Deployment queries deployment details - Deployment(ctx context.Context, in *QueryDeploymentRequest, opts ...grpc.CallOption) (*QueryDeploymentResponse, error) - // Group queries group details - Group(ctx context.Context, in *QueryGroupRequest, opts ...grpc.CallOption) (*QueryGroupResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Deployments(ctx context.Context, in *QueryDeploymentsRequest, opts ...grpc.CallOption) (*QueryDeploymentsResponse, error) { - out := new(QueryDeploymentsResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta2.Query/Deployments", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Deployment(ctx context.Context, in *QueryDeploymentRequest, opts ...grpc.CallOption) (*QueryDeploymentResponse, error) { - out := new(QueryDeploymentResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta2.Query/Deployment", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Group(ctx context.Context, in *QueryGroupRequest, opts ...grpc.CallOption) (*QueryGroupResponse, error) { - out := new(QueryGroupResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta2.Query/Group", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // Deployments queries deployments - Deployments(context.Context, *QueryDeploymentsRequest) (*QueryDeploymentsResponse, error) - // Deployment queries deployment details - Deployment(context.Context, *QueryDeploymentRequest) (*QueryDeploymentResponse, error) - // Group queries group details - Group(context.Context, *QueryGroupRequest) (*QueryGroupResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
-type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Deployments(ctx context.Context, req *QueryDeploymentsRequest) (*QueryDeploymentsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Deployments not implemented") -} -func (*UnimplementedQueryServer) Deployment(ctx context.Context, req *QueryDeploymentRequest) (*QueryDeploymentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Deployment not implemented") -} -func (*UnimplementedQueryServer) Group(ctx context.Context, req *QueryGroupRequest) (*QueryGroupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Group not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Deployments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryDeploymentsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Deployments(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta2.Query/Deployments", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Deployments(ctx, req.(*QueryDeploymentsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Deployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryDeploymentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Deployment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta2.Query/Deployment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Deployment(ctx, req.(*QueryDeploymentRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Group_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryGroupRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Group(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta2.Query/Group", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Group(ctx, req.(*QueryGroupRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.deployment.v1beta2.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Deployments", - Handler: _Query_Deployments_Handler, - }, - { - MethodName: "Deployment", - Handler: _Query_Deployment_Handler, - }, - { - MethodName: "Group", - Handler: _Query_Group_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/deployment/v1beta2/query.proto", -} - -func (m *QueryDeploymentsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryDeploymentsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryDeploymentsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryDeploymentsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryDeploymentsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryDeploymentsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Deployments) > 0 { - for iNdEx := len(m.Deployments) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Deployments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryDeploymentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryDeploymentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryDeploymentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryDeploymentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.EscrowAccount.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Deployment.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - 
i, nil -} - -func (m *QueryGroupRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryGroupRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryGroupRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryGroupResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryGroupResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryDeploymentsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filters.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryDeploymentsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Deployments) > 0 { - for _, e := range m.Deployments { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryDeploymentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryDeploymentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Deployment.Size() - n += 1 + l + sovQuery(uint64(l)) - if len(m.Groups) > 0 { - for _, e := range m.Groups { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - l = m.EscrowAccount.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryGroupRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryGroupResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Group.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryDeploymentsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l 
{ - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryDeploymentsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryDeploymentsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryDeploymentsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryDeploymentsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryDeploymentsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deployments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Deployments = append(m.Deployments, QueryDeploymentResponse{}) - if err := 
m.Deployments[len(m.Deployments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryDeploymentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryDeploymentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryDeploymentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryDeploymentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryDeploymentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryDeploymentResponse: illegal tag %d 
(wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deployment", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Deployment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, Group{}) - if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EscrowAccount", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EscrowAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryGroupRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryGroupRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryGroupRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryGroupResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryGroupResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, 
ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta2/query.pb.gw.go b/go/node/deployment/v1beta2/query.pb.gw.go deleted file mode 100644 index 415ad7d6..00000000 --- a/go/node/deployment/v1beta2/query.pb.gw.go +++ /dev/null @@ -1,337 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/deployment/v1beta2/query.proto - -/* -Package v1beta2 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package v1beta2 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_Deployments_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Deployments_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryDeploymentsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployments_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Deployments(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Deployments_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryDeploymentsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployments_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Deployments(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Deployment_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Deployment_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryDeploymentRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_Query_Deployment_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Deployment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Deployment_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryDeploymentRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployment_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Deployment(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Group_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Group_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryGroupRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Group_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Group(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Group_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryGroupRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Group_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Group(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
-func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_Deployments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Deployments_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Deployments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Deployment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Deployment_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Deployment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Group_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Group_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Group_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_Deployments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Deployments_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Deployments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Deployment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Deployment_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Deployment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_Group_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Group_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Group_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Query_Deployments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "deployment", "v1beta2", "deployments", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Deployment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "deployment", "v1beta2", "deployments", "info"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Group_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "deployment", "v1beta2", "groups", "info"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_Deployments_0 = runtime.ForwardResponseMessage - - forward_Query_Deployment_0 = runtime.ForwardResponseMessage - - forward_Query_Group_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/deployment/v1beta2/resource.pb.go b/go/node/deployment/v1beta2/resource.pb.go deleted file mode 100644 index f9cbe05a..00000000 --- a/go/node/deployment/v1beta2/resource.pb.go +++ /dev/null @@ -1,422 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta2/resource.proto - -package v1beta2 - -import ( - fmt "fmt" - v1beta2 "github.com/akash-network/akash-api/go/node/types/v1beta2" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Resource stores unit, total count and price of resource -type Resource struct { - Resources v1beta2.ResourceUnits `protobuf:"bytes,1,opt,name=resources,proto3" json:"unit" yaml:"unit"` - Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count" yaml:"count"` - Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` -} - -func (m *Resource) Reset() { *m = Resource{} } -func (m *Resource) String() string { return proto.CompactTextString(m) } -func (*Resource) ProtoMessage() {} -func (*Resource) Descriptor() ([]byte, []int) { - return fileDescriptor_93085c4a2e404198, []int{0} -} -func (m *Resource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Resource.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Resource) XXX_Merge(src proto.Message) { - xxx_messageInfo_Resource.Merge(m, src) -} -func (m *Resource) XXX_Size() int { - return m.Size() -} -func (m *Resource) XXX_DiscardUnknown() { - xxx_messageInfo_Resource.DiscardUnknown(m) -} - -var xxx_messageInfo_Resource proto.InternalMessageInfo - -func (m *Resource) GetResources() v1beta2.ResourceUnits { - if m != nil { - return m.Resources - } - return v1beta2.ResourceUnits{} -} - -func (m *Resource) GetCount() uint32 { - if m != nil { - return m.Count - } - return 0 -} - -func (m *Resource) GetPrice() types.DecCoin { - if m != nil { - return m.Price - } - return types.DecCoin{} -} - -func init() { - proto.RegisterType((*Resource)(nil), "akash.deployment.v1beta2.Resource") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta2/resource.proto", fileDescriptor_93085c4a2e404198) -} - -var fileDescriptor_93085c4a2e404198 = []byte{ - // 341 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xb1, 0x4e, 0xeb, 0x30, - 0x18, 0x85, 0x93, 0x7b, 0xdb, 0xab, 0x4b, 0x0a, 0x4b, 0xc4, 0x10, 0x0a, 0x38, 0x25, 0x03, 0x74, - 0xc1, 0x56, 0xcb, 0x56, 0x89, 0x25, 0xf0, 0x02, 0x44, 0x42, 0x48, 0x88, 0x25, 0x71, 0xad, 0x34, - 0x6a, 0xe3, 0x3f, 0x8a, 0x1d, 0x50, 0xdf, 0x82, 0x47, 0xe0, 0x71, 0x3a, 0x76, 0x64, 0x8a, 0x50, - 0xbb, 0xa0, 0x8e, 0x7d, 0x02, 0x14, 0x3b, 0x55, 0x3a, 0xb0, 0xe5, 0x1c, 0x7f, 0xff, 0xf1, 0xf9, - 0x63, 0xeb, 0x2a, 0x9c, 0x86, 0x62, 0x42, 0xc6, 0x2c, 0x9b, 0xc1, 0x3c, 0x65, 0x5c, 0x92, 0xd7, - 0x41, 0xc4, 0x64, 0x38, 0x24, 0x39, 0x13, 0x50, 0xe4, 0x94, 0xe1, 0x2c, 0x07, 0x09, 0xb6, 0xa3, - 0x40, 0xdc, 0x80, 0xb8, 0x06, 0xbb, 0xc7, 0x31, 0xc4, 0xa0, 0x20, 0x52, 0x7d, 0x69, 0xbe, 0x8b, - 0x28, 0x88, 0x14, 0x04, 0x89, 0x42, 0xc1, 0xea, 0xcc, 0x01, 0xa1, 0x90, 0xf0, 0xfa, 0xfc, 0x52, - 0x5f, 0xbc, 0x77, 0xdc, 0x5c, 0x59, 0xf0, 0x44, 0x0a, 0xcd, 0x79, 0x5b, 0xd3, 0xfa, 0x1f, 0xd4, - 0xbe, 0xfd, 0x62, 0x1d, 0xec, 0x18, 0xe1, 0x98, 0x3d, 0xb3, 0xdf, 0x19, 0x5e, 0x60, 0x5d, 0xac, - 0x0a, 0xda, 0x55, 0xc2, 0xbb, 0x81, 0xc7, 0x2a, 0xc8, 0x3f, 0x5d, 0x94, 0xae, 0xb1, 0x29, 0xdd, - 0x56, 0x95, 0xbb, 0x2d, 0xdd, 0xce, 0x3c, 0x4c, 0x67, 0x23, 0xaf, 0x52, 0x5e, 0xd0, 0x04, 0xda, - 0xc4, 0x6a, 0x53, 0x28, 0xb8, 0x74, 0xfe, 0xf4, 0xcc, 0xfe, 0x91, 0x7f, 0xb2, 0x29, 0x5d, 0x6d, - 0x6c, 0x4b, 0xf7, 0x50, 0xcf, 0x28, 0xe9, 0x05, 0xda, 0xb6, 0x1f, 0xac, 0x76, 0x96, 0x27, 0x94, - 0x39, 0x7f, 0x55, 0x95, 0x33, 0xac, 
0x77, 0xde, 0xef, 0x32, 0xc0, 0xf7, 0x8c, 0xde, 0x41, 0xc2, - 0xfd, 0xf3, 0xba, 0x85, 0x1e, 0x69, 0x22, 0x95, 0xf4, 0x02, 0x6d, 0x8f, 0x5a, 0xdf, 0x1f, 0xae, - 0xe1, 0x3f, 0x2d, 0x56, 0xc8, 0x5c, 0xae, 0x90, 0xf9, 0xb5, 0x42, 0xe6, 0xfb, 0x1a, 0x19, 0xcb, - 0x35, 0x32, 0x3e, 0xd7, 0xc8, 0x78, 0xbe, 0x8d, 0x13, 0x39, 0x29, 0x22, 0x4c, 0x21, 0x25, 0x6a, - 0xf1, 0x6b, 0xce, 0xe4, 0x1b, 0xe4, 0xd3, 0x5a, 0x85, 0x59, 0x42, 0x62, 0x20, 0x1c, 0xc6, 0xec, - 0x97, 0x47, 0x8d, 0xfe, 0xa9, 0x9f, 0x7a, 0xf3, 0x13, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xe1, 0x47, - 0xfd, 0xf7, 0x01, 0x00, 0x00, -} - -func (m *Resource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Resource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Resource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.Count != 0 { - i = encodeVarintResource(dAtA, i, uint64(m.Count)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintResource(dAtA []byte, offset int, v uint64) int { - offset -= sovResource(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Resource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Resources.Size() - n += 1 + l + sovResource(uint64(l)) - if m.Count != 0 { - n += 1 + sovResource(uint64(m.Count)) - } - l = m.Price.Size() - n += 1 + l + sovResource(uint64(l)) - return n -} - -func sovResource(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozResource(x uint64) (n int) { - return sovResource(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Resource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Resource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Resource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipResource(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthResource - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipResource(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResource - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResource - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResource - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthResource - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupResource - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthResource - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthResource = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowResource = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupResource = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta2/resource_list_validation.go b/go/node/deployment/v1beta2/resource_list_validation.go deleted file mode 100644 index ed1c13d9..00000000 --- a/go/node/deployment/v1beta2/resource_list_validation.go +++ /dev/null @@ -1,172 +0,0 @@ -package v1beta2 - -import ( - "errors" - "fmt" - - sdk "github.com/cosmos/cosmos-sdk/types" - - types "github.com/akash-network/akash-api/go/node/types/v1beta2" -) - -var ( - 
ErrNoGroupsPresent = errors.New("validation: no groups present") - ErrGroupEmptyName = errors.New("validation: group has empty name") -) - -func ValidateResourceList(rlist types.ResourceGroup) error { - if rlist.GetName() == "" { - return ErrGroupEmptyName - } - - units := rlist.GetResources() - - if count := len(units); count > validationConfig.MaxGroupUnits { - return fmt.Errorf("group %v: too many units (%v > %v)", rlist.GetName(), count, validationConfig.MaxGroupUnits) - } - - limits := newLimits() - - for _, resource := range units { - gLimits, err := validateResourceGroup(resource) - if err != nil { - return fmt.Errorf("group %v: %w", rlist.GetName(), err) - } - - gLimits.mul(resource.Count) - - limits.add(gLimits) - } - - if limits.cpu.GT(sdk.NewIntFromUint64(validationConfig.MaxGroupCPU)) || limits.cpu.LTE(sdk.ZeroInt()) { - return fmt.Errorf("group %v: invalid total CPU (%v > %v > %v fails)", - rlist.GetName(), validationConfig.MaxGroupCPU, limits.cpu, 0) - } - - if limits.memory.GT(sdk.NewIntFromUint64(validationConfig.MaxGroupMemory)) || limits.memory.LTE(sdk.ZeroInt()) { - return fmt.Errorf("group %v: invalid total memory (%v > %v > %v fails)", - rlist.GetName(), validationConfig.MaxGroupMemory, limits.memory, 0) - } - - for i := range limits.storage { - if limits.storage[i].GT(sdk.NewIntFromUint64(validationConfig.MaxGroupStorage)) || limits.storage[i].LTE(sdk.ZeroInt()) { - return fmt.Errorf("group %v: invalid total storage (%v > %v > %v fails)", - rlist.GetName(), validationConfig.MaxGroupStorage, limits.storage, 0) - } - } - - return nil -} - -func validateResourceGroup(rg types.Resources) (resourceLimits, error) { - limits, err := validateResourceUnit(rg.Resources) - if err != nil { - return resourceLimits{}, err - } - - if rg.Count > uint32(validationConfig.MaxUnitCount) || rg.Count < uint32(validationConfig.MinUnitCount) { - return resourceLimits{}, fmt.Errorf("error: invalid unit count (%v > %v > %v fails)", - validationConfig.MaxUnitCount, rg.Count, validationConfig.MinUnitCount) - } - - return limits, nil -} - -func validateResourceUnit(units types.ResourceUnits) (resourceLimits, error) { - limits := newLimits() - - val, err := validateCPU(units.CPU) - if err != nil { - return resourceLimits{}, err - } - limits.cpu = limits.cpu.Add(val) - - val, err = validateMemory(units.Memory) - if err != nil { - return resourceLimits{}, err - } - limits.memory = limits.memory.Add(val) - - var storage []sdk.Int - storage, err = validateStorage(units.Storage) - if err != nil { - return resourceLimits{}, err - } - - // fixme this is not actually sum for storage usecase. - // do we really need sum here? 
- limits.storage = storage - - return limits, nil -} - -func validateCPU(u *types.CPU) (sdk.Int, error) { - if u == nil { - return sdk.Int{}, fmt.Errorf("error: invalid unit CPU, cannot be nil") - } - if (u.Units.Value() > uint64(validationConfig.MaxUnitCPU)) || (u.Units.Value() < uint64(validationConfig.MinUnitCPU)) { - return sdk.Int{}, fmt.Errorf("error: invalid unit CPU (%v > %v > %v fails)", - validationConfig.MaxUnitCPU, u.Units.Value(), validationConfig.MinUnitCPU) - } - - return u.Units.Val, nil -} - -func validateMemory(u *types.Memory) (sdk.Int, error) { - if u == nil { - return sdk.Int{}, fmt.Errorf("error: invalid unit memory, cannot be nil") - } - if (u.Quantity.Value() > validationConfig.MaxUnitMemory) || (u.Quantity.Value() < validationConfig.MinUnitMemory) { - return sdk.Int{}, fmt.Errorf("error: invalid unit memory (%v > %v > %v fails)", - validationConfig.MaxUnitMemory, u.Quantity.Value(), validationConfig.MinUnitMemory) - } - - return u.Quantity.Val, nil -} - -func validateStorage(u types.Volumes) ([]sdk.Int, error) { - if u == nil { - return nil, fmt.Errorf("error: invalid unit storage, cannot be nil") - } - - storage := make([]sdk.Int, 0, len(u)) - - for i := range u { - if (u[i].Quantity.Value() > validationConfig.MaxUnitStorage) || (u[i].Quantity.Value() < validationConfig.MinUnitStorage) { - return nil, fmt.Errorf("error: invalid unit storage (%v > %v > %v fails)", - validationConfig.MaxUnitStorage, u[i].Quantity.Value(), validationConfig.MinUnitStorage) - } - - storage = append(storage, u[i].Quantity.Val) - } - - return storage, nil -} - -type resourceLimits struct { - cpu sdk.Int - memory sdk.Int - storage []sdk.Int -} - -func newLimits() resourceLimits { - return resourceLimits{ - cpu: sdk.ZeroInt(), - memory: sdk.ZeroInt(), - } -} - -func (u *resourceLimits) add(rhs resourceLimits) { - u.cpu = u.cpu.Add(rhs.cpu) - u.memory = u.memory.Add(rhs.memory) - - // u.storage = u.storage.Add(rhs.storage) -} - -func (u *resourceLimits) mul(count uint32) { - u.cpu = u.cpu.MulRaw(int64(count)) - u.memory = u.memory.MulRaw(int64(count)) - for i := range u.storage { - u.storage[i] = u.storage[i].MulRaw(int64(count)) - } -} diff --git a/go/node/deployment/v1beta2/service.pb.go b/go/node/deployment/v1beta2/service.pb.go deleted file mode 100644 index 2334cab6..00000000 --- a/go/node/deployment/v1beta2/service.pb.go +++ /dev/null @@ -1,365 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta2/service.proto - -package v1beta2 - -import ( - context "context" - fmt "fmt" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func init() { - proto.RegisterFile("akash/deployment/v1beta2/service.proto", fileDescriptor_0e37360c059968cc) -} - -var fileDescriptor_0e37360c059968cc = []byte{ - // 326 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0xd3, 0x3d, 0x4e, 0xf3, 0x30, - 0x18, 0xc0, 0xf1, 0x56, 0xaf, 0xd4, 0xc1, 0xcb, 0x5b, 0x32, 0x21, 0x0f, 0x1e, 0xe9, 0xd2, 0xda, - 0xa2, 0x7c, 0x6c, 0x2c, 0xb4, 0x12, 0x53, 0x25, 0x04, 0x42, 0x48, 0x6c, 0x6e, 0xfb, 0xe0, 0x46, - 0x6d, 0x63, 0xcb, 0x76, 0x4a, 0x11, 0x97, 0xe0, 0x12, 0xdc, 0x85, 0xb1, 0x23, 0x23, 0x4a, 0x2e, - 0x82, 0x08, 0x25, 0x0e, 0x81, 0x34, 0xc9, 0x98, 0xe4, 0x97, 0xe7, 0x1f, 0x47, 0x36, 0x3a, 0xe0, - 0x73, 0x6e, 0x66, 0x6c, 0x0a, 0x6a, 0x21, 0x1f, 0x97, 0x10, 0x58, 0xb6, 0x3a, 0x1c, 0x83, 0xe5, - 0x7d, 0x66, 0x40, 0xaf, 0xfc, 0x09, 0x50, 0xa5, 0xa5, 0x95, 0xde, 0x7e, 0xe2, 0xa8, 0x73, 0x74, - 0xeb, 0x70, 0xb7, 0x70, 0x82, 0xbb, 0xb5, 0x34, 0xe2, 0x6b, 0x0e, 0xee, 0x14, 0x6a, 0xa1, 0x65, - 0xa8, 0x52, 0xd8, 0x7f, 0x69, 0xa1, 0x7f, 0x23, 0x23, 0xbc, 0x35, 0x6a, 0x0f, 0x34, 0x70, 0x0b, - 0xc3, 0xf4, 0x15, 0xaf, 0x47, 0x8b, 0xbe, 0x86, 0x8e, 0x8c, 0xc8, 0x73, 0x7c, 0x52, 0x8b, 0x5f, - 0x81, 0x51, 0x32, 0x30, 0xe0, 0x3d, 0xa1, 0xbd, 0x21, 0x28, 0x69, 0x7c, 0x9b, 0x49, 0xd3, 0x9d, - 0xb3, 0x7e, 0x79, 0x7c, 0x5a, 0xcf, 0xa7, 0xf1, 0x35, 0x6a, 0xdf, 0xa8, 0x69, 0x9d, 0x65, 0xe7, - 0x79, 0xc9, 0xb2, 0xf3, 0x3c, 0x2d, 0x87, 0xe8, 0xff, 0x60, 0x21, 0x4d, 0x36, 0xdc, 0xdd, 0xfd, - 0x03, 0x7f, 0x6a, 0x7c, 0x5c, 0x47, 0xa7, 0xd9, 0x7b, 0x84, 0x92, 0x47, 0x17, 0x9f, 0xdb, 0xc0, - 0xeb, 0x94, 0xcf, 0x48, 0x20, 0x66, 0x15, 0x61, 0xb6, 0x73, 0xc9, 0xc3, 0x6a, 0x1d, 0x07, 0x4b, - 0x3a, 0x0e, 0x66, 0x3b, 0xd7, 0x96, 0x6b, 0x5b, 0xa5, 0xe3, 0x60, 0x49, 0xc7, 0xc1, 0xef, 0xce, - 0xf9, 0xed, 0x6b, 0x44, 0x9a, 0x9b, 0x88, 0x34, 0xdf, 0x23, 0xd2, 0x7c, 0x8e, 0x49, 0x63, 0x13, - 0x93, 0xc6, 0x5b, 0x4c, 0x1a, 0x77, 0x67, 0xc2, 0xb7, 0xb3, 0x70, 0x4c, 0x27, 0x72, 0xc9, 0x92, - 0xa1, 0xbd, 0x00, 0xec, 0x83, 0xd4, 0xf3, 0xed, 0x15, 0x57, 0x3e, 0x13, 0x92, 0x05, 0x72, 0x0a, - 0x7f, 0x9c, 0xc7, 0x71, 0x2b, 0x39, 0x87, 0x47, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x95, 0x3a, - 0x9a, 0x16, 0x22, 0x04, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MsgClient interface { - // CreateDeployment defines a method to create new deployment given proper inputs. - CreateDeployment(ctx context.Context, in *MsgCreateDeployment, opts ...grpc.CallOption) (*MsgCreateDeploymentResponse, error) - // DepositDeployment deposits more funds into the deployment account - DepositDeployment(ctx context.Context, in *MsgDepositDeployment, opts ...grpc.CallOption) (*MsgDepositDeploymentResponse, error) - // UpdateDeployment defines a method to update a deployment given proper inputs. - UpdateDeployment(ctx context.Context, in *MsgUpdateDeployment, opts ...grpc.CallOption) (*MsgUpdateDeploymentResponse, error) - // CloseDeployment defines a method to close a deployment given proper inputs. 
- CloseDeployment(ctx context.Context, in *MsgCloseDeployment, opts ...grpc.CallOption) (*MsgCloseDeploymentResponse, error) - // CloseGroup defines a method to close a group of a deployment given proper inputs. - CloseGroup(ctx context.Context, in *MsgCloseGroup, opts ...grpc.CallOption) (*MsgCloseGroupResponse, error) - // PauseGroup defines a method to close a group of a deployment given proper inputs. - PauseGroup(ctx context.Context, in *MsgPauseGroup, opts ...grpc.CallOption) (*MsgPauseGroupResponse, error) - // StartGroup defines a method to close a group of a deployment given proper inputs. - StartGroup(ctx context.Context, in *MsgStartGroup, opts ...grpc.CallOption) (*MsgStartGroupResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) CreateDeployment(ctx context.Context, in *MsgCreateDeployment, opts ...grpc.CallOption) (*MsgCreateDeploymentResponse, error) { - out := new(MsgCreateDeploymentResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta2.Msg/CreateDeployment", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) DepositDeployment(ctx context.Context, in *MsgDepositDeployment, opts ...grpc.CallOption) (*MsgDepositDeploymentResponse, error) { - out := new(MsgDepositDeploymentResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta2.Msg/DepositDeployment", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) UpdateDeployment(ctx context.Context, in *MsgUpdateDeployment, opts ...grpc.CallOption) (*MsgUpdateDeploymentResponse, error) { - out := new(MsgUpdateDeploymentResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta2.Msg/UpdateDeployment", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CloseDeployment(ctx context.Context, in *MsgCloseDeployment, opts ...grpc.CallOption) (*MsgCloseDeploymentResponse, error) { - out := new(MsgCloseDeploymentResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta2.Msg/CloseDeployment", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CloseGroup(ctx context.Context, in *MsgCloseGroup, opts ...grpc.CallOption) (*MsgCloseGroupResponse, error) { - out := new(MsgCloseGroupResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta2.Msg/CloseGroup", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) PauseGroup(ctx context.Context, in *MsgPauseGroup, opts ...grpc.CallOption) (*MsgPauseGroupResponse, error) { - out := new(MsgPauseGroupResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta2.Msg/PauseGroup", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) StartGroup(ctx context.Context, in *MsgStartGroup, opts ...grpc.CallOption) (*MsgStartGroupResponse, error) { - out := new(MsgStartGroupResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta2.Msg/StartGroup", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. -type MsgServer interface { - // CreateDeployment defines a method to create new deployment given proper inputs. 
- CreateDeployment(context.Context, *MsgCreateDeployment) (*MsgCreateDeploymentResponse, error) - // DepositDeployment deposits more funds into the deployment account - DepositDeployment(context.Context, *MsgDepositDeployment) (*MsgDepositDeploymentResponse, error) - // UpdateDeployment defines a method to update a deployment given proper inputs. - UpdateDeployment(context.Context, *MsgUpdateDeployment) (*MsgUpdateDeploymentResponse, error) - // CloseDeployment defines a method to close a deployment given proper inputs. - CloseDeployment(context.Context, *MsgCloseDeployment) (*MsgCloseDeploymentResponse, error) - // CloseGroup defines a method to close a group of a deployment given proper inputs. - CloseGroup(context.Context, *MsgCloseGroup) (*MsgCloseGroupResponse, error) - // PauseGroup defines a method to close a group of a deployment given proper inputs. - PauseGroup(context.Context, *MsgPauseGroup) (*MsgPauseGroupResponse, error) - // StartGroup defines a method to close a group of a deployment given proper inputs. - StartGroup(context.Context, *MsgStartGroup) (*MsgStartGroupResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. -type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) CreateDeployment(ctx context.Context, req *MsgCreateDeployment) (*MsgCreateDeploymentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateDeployment not implemented") -} -func (*UnimplementedMsgServer) DepositDeployment(ctx context.Context, req *MsgDepositDeployment) (*MsgDepositDeploymentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DepositDeployment not implemented") -} -func (*UnimplementedMsgServer) UpdateDeployment(ctx context.Context, req *MsgUpdateDeployment) (*MsgUpdateDeploymentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateDeployment not implemented") -} -func (*UnimplementedMsgServer) CloseDeployment(ctx context.Context, req *MsgCloseDeployment) (*MsgCloseDeploymentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseDeployment not implemented") -} -func (*UnimplementedMsgServer) CloseGroup(ctx context.Context, req *MsgCloseGroup) (*MsgCloseGroupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseGroup not implemented") -} -func (*UnimplementedMsgServer) PauseGroup(ctx context.Context, req *MsgPauseGroup) (*MsgPauseGroupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method PauseGroup not implemented") -} -func (*UnimplementedMsgServer) StartGroup(ctx context.Context, req *MsgStartGroup) (*MsgStartGroupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method StartGroup not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_CreateDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateDeployment) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateDeployment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta2.Msg/CreateDeployment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CreateDeployment(ctx, req.(*MsgCreateDeployment)) - } - return interceptor(ctx, in, info, 
handler) -} - -func _Msg_DepositDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgDepositDeployment) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).DepositDeployment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta2.Msg/DepositDeployment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).DepositDeployment(ctx, req.(*MsgDepositDeployment)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_UpdateDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgUpdateDeployment) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).UpdateDeployment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta2.Msg/UpdateDeployment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).UpdateDeployment(ctx, req.(*MsgUpdateDeployment)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CloseDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCloseDeployment) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CloseDeployment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta2.Msg/CloseDeployment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CloseDeployment(ctx, req.(*MsgCloseDeployment)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CloseGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCloseGroup) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CloseGroup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta2.Msg/CloseGroup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CloseGroup(ctx, req.(*MsgCloseGroup)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_PauseGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgPauseGroup) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).PauseGroup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta2.Msg/PauseGroup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).PauseGroup(ctx, req.(*MsgPauseGroup)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_StartGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgStartGroup) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return 
srv.(MsgServer).StartGroup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta2.Msg/StartGroup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).StartGroup(ctx, req.(*MsgStartGroup)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.deployment.v1beta2.Msg", - HandlerType: (*MsgServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateDeployment", - Handler: _Msg_CreateDeployment_Handler, - }, - { - MethodName: "DepositDeployment", - Handler: _Msg_DepositDeployment_Handler, - }, - { - MethodName: "UpdateDeployment", - Handler: _Msg_UpdateDeployment_Handler, - }, - { - MethodName: "CloseDeployment", - Handler: _Msg_CloseDeployment_Handler, - }, - { - MethodName: "CloseGroup", - Handler: _Msg_CloseGroup_Handler, - }, - { - MethodName: "PauseGroup", - Handler: _Msg_PauseGroup_Handler, - }, - { - MethodName: "StartGroup", - Handler: _Msg_StartGroup_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/deployment/v1beta2/service.proto", -} diff --git a/go/node/deployment/v1beta2/types.go b/go/node/deployment/v1beta2/types.go deleted file mode 100644 index e24872b9..00000000 --- a/go/node/deployment/v1beta2/types.go +++ /dev/null @@ -1,129 +0,0 @@ -package v1beta2 - -import ( - "bytes" - - sdk "github.com/cosmos/cosmos-sdk/types" - - types "github.com/akash-network/akash-api/go/node/types/v1beta2" -) - -type attributesMatching map[string]types.Attributes - -const ( - // ManifestVersionLength is the length of manifest version - ManifestVersionLength = 32 - - // DefaultOrderBiddingDuration is the default time limit for an Order being active. - // After the duration, the Order is automatically closed. - // ( 24(hr) * 3600(seconds per hour) ) / 7s-Block - DefaultOrderBiddingDuration = int64(12342) - - // MaxBiddingDuration is roughly 30 days of block height - MaxBiddingDuration = DefaultOrderBiddingDuration * int64(30) -) - -// ID method returns DeploymentID details of specific deployment -func (obj Deployment) ID() DeploymentID { - return obj.DeploymentID -} - -// MatchAttributes method compares provided attributes with specific group attributes -func (g GroupSpec) MatchAttributes(attr types.Attributes) bool { - return types.AttributesSubsetOf(g.Requirements.Attributes, attr) -} - -// ID method returns GroupID details of specific group -func (g Group) ID() GroupID { - return g.GroupID -} - -// ValidateClosable provides error response if group is already closed, -// and thus should not be closed again, else nil. 
-func (g Group) ValidateClosable() error { - switch g.State { - case GroupClosed: - return ErrGroupClosed - default: - return nil - } -} - -// ValidatePausable provides error response if group is not pausable -func (g Group) ValidatePausable() error { - switch g.State { - case GroupClosed: - return ErrGroupClosed - case GroupPaused: - return ErrGroupPaused - default: - return nil - } -} - -// ValidatePausable provides error response if group is not pausable -func (g Group) ValidateStartable() error { - switch g.State { - case GroupClosed: - return ErrGroupClosed - case GroupOpen: - return ErrGroupOpen - default: - return nil - } -} - -// GetName method returns group name -func (g Group) GetName() string { - return g.GroupSpec.Name -} - -// GetResources method returns resources list in group -func (g Group) GetResources() []types.Resources { - return g.GroupSpec.GetResources() -} - -// FullPrice method returns full price of resource -func (r Resource) FullPrice() sdk.DecCoin { - return sdk.NewDecCoinFromDec(r.Price.Denom, r.Price.Amount.MulInt64(int64(r.Count))) -} - -// DeploymentResponses is a collection of DeploymentResponse -type DeploymentResponses []QueryDeploymentResponse - -func (ds DeploymentResponses) String() string { - var buf bytes.Buffer - - const sep = "\n\n" - - for _, d := range ds { - buf.WriteString(d.String()) - buf.WriteString(sep) - } - - if len(ds) > 0 { - buf.Truncate(buf.Len() - len(sep)) - } - - return buf.String() -} - -// Accept returns whether deployment filters valid or not -func (filters DeploymentFilters) Accept(obj Deployment, stateVal Deployment_State) bool { - // Checking owner filter - if filters.Owner != "" && filters.Owner != obj.DeploymentID.Owner { - return false - } - - // Checking dseq filter - if filters.DSeq != 0 && filters.DSeq != obj.DeploymentID.DSeq { - return false - } - - // Checking state filter - if stateVal != 0 && stateVal != obj.State { - return false - } - - return true -} diff --git a/go/node/deployment/v1beta2/validation_config.go b/go/node/deployment/v1beta2/validation_config.go deleted file mode 100644 index f7d01703..00000000 --- a/go/node/deployment/v1beta2/validation_config.go +++ /dev/null @@ -1,62 +0,0 @@ -package v1beta2 - -import "github.com/akash-network/akash-api/go/node/types/unit" - -// This is the validation configuration that acts as a hard limit -// on what the network accepts for deployments. This is never changed -// and is the same across all members of the network - -type ValidationConfig struct { - - // MaxUnitCPU is the maximum number of milli (1/1000) cpu units a unit can consume. 
- MaxUnitCPU uint - // MaxUnitMemory is the maximum number of bytes of memory that a unit can consume - MaxUnitMemory uint64 - // MaxUnitStorage is the maximum number of bytes of storage that a unit can consume - MaxUnitStorage uint64 - // MaxUnitCount is the maximum number of replias of a service - MaxUnitCount uint - // MaxUnitPrice is the maximum price that a unit can have - MaxUnitPrice uint64 - - MinUnitCPU uint - MinUnitMemory uint64 - MinUnitStorage uint64 - MinUnitCount uint - - // MaxGroupCount is the maximum number of groups allowed per deployment - MaxGroupCount int - // MaxGroupUnits is the maximum number services per group - MaxGroupUnits int - - // MaxGroupCPU is the maximum total amount of CPU requested per group - MaxGroupCPU uint64 - // MaxGroupMemory is the maximum total amount of memory requested per group - MaxGroupMemory uint64 - // MaxGroupStorage is the maximum total amount of storage requested per group - MaxGroupStorage uint64 -} - -var validationConfig = ValidationConfig{ - MaxUnitCPU: 256 * 1000, // 256 CPUs - MaxUnitMemory: 512 * unit.Gi, // 512 Gi - MaxUnitStorage: 32 * unit.Ti, // 32 Ti - MaxUnitCount: 50, - MaxUnitPrice: 10000000, // 10akt - - MinUnitCPU: 10, - MinUnitMemory: unit.Mi, - MinUnitStorage: 5 * unit.Mi, - MinUnitCount: 1, - - MaxGroupCount: 20, - MaxGroupUnits: 20, - - MaxGroupCPU: 512 * 1000, - MaxGroupMemory: 1024 * unit.Gi, - MaxGroupStorage: 32 * unit.Ti, -} - -func GetValidationConfig() ValidationConfig { - return validationConfig -} diff --git a/go/node/deployment/v1beta3/authz.pb.go b/go/node/deployment/v1beta3/authz.pb.go deleted file mode 100644 index 7ed92ff0..00000000 --- a/go/node/deployment/v1beta3/authz.pb.go +++ /dev/null @@ -1,333 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta3/authz.proto - -package v1beta3 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - _ "github.com/regen-network/cosmos-proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from -// the granter's account for a deployment. -type DepositDeploymentAuthorization struct { - // SpendLimit is the amount the grantee is authorized to spend from the granter's account for - // the purpose of deployment. 
- SpendLimit types.Coin `protobuf:"bytes,1,opt,name=spend_limit,json=spendLimit,proto3" json:"spend_limit"` -} - -func (m *DepositDeploymentAuthorization) Reset() { *m = DepositDeploymentAuthorization{} } -func (m *DepositDeploymentAuthorization) String() string { return proto.CompactTextString(m) } -func (*DepositDeploymentAuthorization) ProtoMessage() {} -func (*DepositDeploymentAuthorization) Descriptor() ([]byte, []int) { - return fileDescriptor_7e478d3fe036c639, []int{0} -} -func (m *DepositDeploymentAuthorization) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DepositDeploymentAuthorization) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DepositDeploymentAuthorization.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DepositDeploymentAuthorization) XXX_Merge(src proto.Message) { - xxx_messageInfo_DepositDeploymentAuthorization.Merge(m, src) -} -func (m *DepositDeploymentAuthorization) XXX_Size() int { - return m.Size() -} -func (m *DepositDeploymentAuthorization) XXX_DiscardUnknown() { - xxx_messageInfo_DepositDeploymentAuthorization.DiscardUnknown(m) -} - -var xxx_messageInfo_DepositDeploymentAuthorization proto.InternalMessageInfo - -func (m *DepositDeploymentAuthorization) GetSpendLimit() types.Coin { - if m != nil { - return m.SpendLimit - } - return types.Coin{} -} - -func init() { - proto.RegisterType((*DepositDeploymentAuthorization)(nil), "akash.deployment.v1beta3.DepositDeploymentAuthorization") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta3/authz.proto", fileDescriptor_7e478d3fe036c639) -} - -var fileDescriptor_7e478d3fe036c639 = []byte{ - // 280 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, - 0x4a, 0x2d, 0x49, 0x34, 0xd6, 0x4f, 0x2c, 0x2d, 0xc9, 0xa8, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, - 0x17, 0x92, 0x00, 0xab, 0xd2, 0x43, 0xa8, 0xd2, 0x83, 0xaa, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, - 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 0xa5, 0x24, 0x93, 0xf3, 0x8b, 0x73, 0xf3, 0x8b, 0xe3, - 0x21, 0x12, 0x10, 0x0e, 0x54, 0x4a, 0x0e, 0xc2, 0xd3, 0x4f, 0x4a, 0x2c, 0x4e, 0x85, 0xda, 0x65, - 0xa8, 0x9f, 0x9c, 0x9f, 0x99, 0x07, 0x91, 0x57, 0x6a, 0x61, 0xe4, 0x92, 0x73, 0x49, 0x2d, 0xc8, - 0x2f, 0xce, 0x2c, 0x71, 0x81, 0x5b, 0xe7, 0x58, 0x5a, 0x92, 0x91, 0x5f, 0x94, 0x59, 0x95, 0x58, - 0x92, 0x99, 0x9f, 0x27, 0xe4, 0xcf, 0xc5, 0x5d, 0x5c, 0x90, 0x9a, 0x97, 0x12, 0x9f, 0x93, 0x99, - 0x9b, 0x59, 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x6d, 0x24, 0xa9, 0x07, 0xb5, 0x06, 0x64, 0x30, - 0xd4, 0x79, 0x86, 0x7a, 0xce, 0xf9, 0x99, 0x79, 0x4e, 0xc2, 0x27, 0xee, 0xc9, 0x33, 0xbc, 0xba, - 0x27, 0x8f, 0xac, 0x2b, 0x88, 0x0b, 0xcc, 0xf1, 0x01, 0xb1, 0xad, 0x04, 0x2f, 0x6d, 0xd1, 0xe5, - 0x45, 0xb1, 0xc3, 0x29, 0xfc, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, - 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x6c, - 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xc1, 0xc1, 0xa2, 0x9b, 0x97, - 0x5a, 0x52, 0x9e, 0x5f, 0x94, 0x0d, 0xe5, 0x25, 0x16, 0x64, 0xea, 0xa7, 0xe7, 0xeb, 0xe7, 0xe5, - 0xa7, 0xa4, 0x62, 0x09, 0xd6, 0x24, 0x36, 0xb0, 0x37, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, - 0x97, 0xe9, 0xc3, 0xb8, 0x79, 0x01, 
0x00, 0x00, -} - -func (m *DepositDeploymentAuthorization) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DepositDeploymentAuthorization) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DepositDeploymentAuthorization) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.SpendLimit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAuthz(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintAuthz(dAtA []byte, offset int, v uint64) int { - offset -= sovAuthz(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *DepositDeploymentAuthorization) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.SpendLimit.Size() - n += 1 + l + sovAuthz(uint64(l)) - return n -} - -func sovAuthz(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozAuthz(x uint64) (n int) { - return sovAuthz(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *DepositDeploymentAuthorization) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuthz - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DepositDeploymentAuthorization: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DepositDeploymentAuthorization: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SpendLimit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAuthz - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAuthz - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAuthz - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SpendLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAuthz(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAuthz - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAuthz(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuthz - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuthz - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAuthz - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthAuthz - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupAuthz - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthAuthz - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthAuthz = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAuthz = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupAuthz = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta3/codec.go b/go/node/deployment/v1beta3/codec.go deleted file mode 100644 index 49f4a680..00000000 --- a/go/node/deployment/v1beta3/codec.go +++ /dev/null @@ -1,58 +0,0 @@ -package v1beta3 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/msgservice" - "github.com/cosmos/cosmos-sdk/x/authz" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/deployment module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/deployment and - // defined at the application level. 
- ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterLegacyAminoCodec register concrete types on codec -func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { - cdc.RegisterConcrete(&MsgCreateDeployment{}, ModuleName+"/"+MsgTypeCreateDeployment, nil) - cdc.RegisterConcrete(&MsgUpdateDeployment{}, ModuleName+"/"+MsgTypeUpdateDeployment, nil) - cdc.RegisterConcrete(&MsgDepositDeployment{}, ModuleName+"/"+MsgTypeDepositDeployment, nil) - cdc.RegisterConcrete(&MsgCloseDeployment{}, ModuleName+"/"+MsgTypeCloseDeployment, nil) - cdc.RegisterConcrete(&MsgCloseGroup{}, ModuleName+"/"+MsgTypeCloseGroup, nil) - cdc.RegisterConcrete(&MsgPauseGroup{}, ModuleName+"/"+MsgTypePauseGroup, nil) - cdc.RegisterConcrete(&MsgStartGroup{}, ModuleName+"/"+MsgTypeStartGroup, nil) -} - -// RegisterInterfaces registers the x/deployment interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil), - &MsgCreateDeployment{}, - &MsgUpdateDeployment{}, - &MsgDepositDeployment{}, - &MsgCloseDeployment{}, - &MsgCloseGroup{}, - &MsgPauseGroup{}, - &MsgStartGroup{}, - ) - registry.RegisterImplementations( - (*authz.Authorization)(nil), - &DepositDeploymentAuthorization{}, - ) - - msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) -} diff --git a/go/node/deployment/v1beta3/deployment.pb.go b/go/node/deployment/v1beta3/deployment.pb.go deleted file mode 100644 index 6f5d4c96..00000000 --- a/go/node/deployment/v1beta3/deployment.pb.go +++ /dev/null @@ -1,960 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta3/deployment.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of deployment -type Deployment_State int32 - -const ( - // Prefix should start with 0 in enum. 
So declaring dummy state - DeploymentStateInvalid Deployment_State = 0 - // DeploymentActive denotes state for deployment active - DeploymentActive Deployment_State = 1 - // DeploymentClosed denotes state for deployment closed - DeploymentClosed Deployment_State = 2 -) - -var Deployment_State_name = map[int32]string{ - 0: "invalid", - 1: "active", - 2: "closed", -} - -var Deployment_State_value = map[string]int32{ - "invalid": 0, - "active": 1, - "closed": 2, -} - -func (x Deployment_State) String() string { - return proto.EnumName(Deployment_State_name, int32(x)) -} - -func (Deployment_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_16b7a55888f623cc, []int{1, 0} -} - -// DeploymentID stores owner and sequence number -type DeploymentID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` -} - -func (m *DeploymentID) Reset() { *m = DeploymentID{} } -func (*DeploymentID) ProtoMessage() {} -func (*DeploymentID) Descriptor() ([]byte, []int) { - return fileDescriptor_16b7a55888f623cc, []int{0} -} -func (m *DeploymentID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeploymentID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeploymentID) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentID.Merge(m, src) -} -func (m *DeploymentID) XXX_Size() int { - return m.Size() -} -func (m *DeploymentID) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentID.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentID proto.InternalMessageInfo - -func (m *DeploymentID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *DeploymentID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -// Deployment stores deploymentID, state and version details -type Deployment struct { - DeploymentID DeploymentID `protobuf:"bytes,1,opt,name=deployment_id,json=deploymentId,proto3" json:"id" yaml:"id"` - State Deployment_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.deployment.v1beta3.Deployment_State" json:"state" yaml:"state"` - Version []byte `protobuf:"bytes,3,opt,name=version,proto3" json:"version" yaml:"version"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` -} - -func (m *Deployment) Reset() { *m = Deployment{} } -func (m *Deployment) String() string { return proto.CompactTextString(m) } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_16b7a55888f623cc, []int{1} -} -func (m *Deployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Deployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Deployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_Deployment.Merge(m, src) -} -func (m *Deployment) XXX_Size() int { - return m.Size() -} -func (m *Deployment) XXX_DiscardUnknown() { - xxx_messageInfo_Deployment.DiscardUnknown(m) -} - -var xxx_messageInfo_Deployment 
proto.InternalMessageInfo - -func (m *Deployment) GetDeploymentID() DeploymentID { - if m != nil { - return m.DeploymentID - } - return DeploymentID{} -} - -func (m *Deployment) GetState() Deployment_State { - if m != nil { - return m.State - } - return DeploymentStateInvalid -} - -func (m *Deployment) GetVersion() []byte { - if m != nil { - return m.Version - } - return nil -} - -func (m *Deployment) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -// DeploymentFilters defines filters used to filter deployments -type DeploymentFilters struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *DeploymentFilters) Reset() { *m = DeploymentFilters{} } -func (m *DeploymentFilters) String() string { return proto.CompactTextString(m) } -func (*DeploymentFilters) ProtoMessage() {} -func (*DeploymentFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_16b7a55888f623cc, []int{2} -} -func (m *DeploymentFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DeploymentFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DeploymentFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentFilters.Merge(m, src) -} -func (m *DeploymentFilters) XXX_Size() int { - return m.Size() -} -func (m *DeploymentFilters) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentFilters proto.InternalMessageInfo - -func (m *DeploymentFilters) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *DeploymentFilters) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *DeploymentFilters) GetState() string { - if m != nil { - return m.State - } - return "" -} - -func init() { - proto.RegisterEnum("akash.deployment.v1beta3.Deployment_State", Deployment_State_name, Deployment_State_value) - proto.RegisterType((*DeploymentID)(nil), "akash.deployment.v1beta3.DeploymentID") - proto.RegisterType((*Deployment)(nil), "akash.deployment.v1beta3.Deployment") - proto.RegisterType((*DeploymentFilters)(nil), "akash.deployment.v1beta3.DeploymentFilters") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta3/deployment.proto", fileDescriptor_16b7a55888f623cc) -} - -var fileDescriptor_16b7a55888f623cc = []byte{ - // 506 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x93, 0x31, 0x6f, 0xd3, 0x40, - 0x14, 0xc7, 0x7d, 0x8d, 0xd3, 0x92, 0x6b, 0xa8, 0x82, 0x55, 0x21, 0x63, 0xa9, 0x3e, 0xcb, 0x03, - 0x0d, 0x48, 0xd8, 0xa2, 0x19, 0x90, 0x22, 0x31, 0xd4, 0x44, 0x48, 0x59, 0xdd, 0x01, 0x09, 0x86, - 0xea, 0x92, 0x3b, 0xa5, 0xa7, 0x3a, 0xbe, 0xd4, 0x3e, 0x52, 0x95, 0x81, 0x19, 0x75, 0x62, 0x64, - 0xa9, 0x54, 0x89, 0x2f, 0xc0, 0xca, 0x37, 0xe8, 0xd8, 0x91, 0xe9, 0x84, 0x9c, 0x05, 0x65, 0xcc, - 0x27, 0x40, 0xbe, 0x4b, 0x71, 0x40, 0x80, 0x98, 0xd8, 0xfc, 0x7e, 0xf7, 0x7f, 0x7e, 0xff, 0x77, - 0xef, 0x1e, 0x7c, 0x80, 0x8f, 0x71, 0x7e, 0x14, 0x12, 0x3a, 0x49, 0xf8, 0xd9, 0x98, 0xa6, 0x22, - 0x9c, 0x3e, 0x1e, 0x50, 0x81, 0x3b, 0x2b, 0x28, 
0x98, 0x64, 0x5c, 0x70, 0xcb, 0x56, 0xd2, 0x60, - 0x85, 0x2f, 0xa5, 0xce, 0xf6, 0x88, 0x8f, 0xb8, 0x12, 0x85, 0xe5, 0x97, 0xd6, 0xfb, 0x6f, 0x61, - 0xb3, 0xf7, 0x43, 0xdb, 0xef, 0x59, 0x21, 0xac, 0xf3, 0xd3, 0x94, 0x66, 0x36, 0xf0, 0x40, 0xbb, - 0x11, 0xdd, 0x9b, 0x4b, 0xa4, 0xc1, 0x42, 0xa2, 0xe6, 0x19, 0x1e, 0x27, 0x5d, 0x5f, 0x85, 0x7e, - 0xac, 0xb1, 0xd5, 0x81, 0x26, 0xc9, 0xe9, 0x89, 0xbd, 0xe6, 0x81, 0xb6, 0x19, 0xa1, 0x42, 0x22, - 0xb3, 0x77, 0x40, 0x4f, 0xe6, 0x12, 0x29, 0xbe, 0x90, 0x68, 0x53, 0xa7, 0x95, 0x91, 0x1f, 0x2b, - 0xd8, 0xbd, 0xf5, 0xe1, 0x12, 0x19, 0xdf, 0x2e, 0x91, 0xe1, 0x7f, 0xae, 0x41, 0x58, 0x19, 0xb0, - 0x04, 0xbc, 0x5d, 0x59, 0x3f, 0x64, 0x44, 0xd9, 0xd8, 0xdc, 0xbb, 0x1f, 0xfc, 0xa9, 0xad, 0x60, - 0xd5, 0x7d, 0xb4, 0x7b, 0x25, 0x91, 0x51, 0x48, 0xf4, 0x53, 0x4f, 0x73, 0x89, 0xd6, 0x18, 0x59, - 0x48, 0xd4, 0xd0, 0x46, 0x18, 0xf1, 0xe3, 0x66, 0xf5, 0xa7, 0x3e, 0xb1, 0x5e, 0xc1, 0x7a, 0x2e, - 0xb0, 0xa0, 0xaa, 0x89, 0xad, 0xbd, 0x87, 0xff, 0x52, 0x2d, 0x38, 0x28, 0x33, 0xf4, 0x05, 0xa9, - 0xe4, 0xea, 0x82, 0x54, 0xe8, 0xc7, 0x1a, 0x5b, 0x4f, 0xe0, 0xc6, 0x94, 0x66, 0x39, 0xe3, 0xa9, - 0x5d, 0xf3, 0x40, 0xbb, 0x19, 0xed, 0xcc, 0x25, 0xba, 0x41, 0x0b, 0x89, 0xb6, 0x74, 0xd2, 0x12, - 0xf8, 0xf1, 0xcd, 0x91, 0xb5, 0x03, 0xe1, 0x30, 0xa3, 0x58, 0x50, 0x72, 0x88, 0x85, 0x6d, 0x7a, - 0xa0, 0x5d, 0x8b, 0x1b, 0x4b, 0xb2, 0x2f, 0xfc, 0x37, 0xb0, 0xae, 0x2c, 0x58, 0xbb, 0x70, 0x83, - 0xa5, 0x53, 0x9c, 0x30, 0xd2, 0x32, 0x1c, 0xe7, 0xfc, 0xc2, 0xbb, 0x5b, 0xb9, 0x54, 0x8a, 0xbe, - 0x3e, 0xb5, 0x3c, 0xb8, 0x8e, 0x87, 0x82, 0x4d, 0x69, 0x0b, 0x38, 0xdb, 0xe7, 0x17, 0x5e, 0xab, - 0xd2, 0xed, 0x2b, 0x5e, 0x2a, 0x86, 0x09, 0xcf, 0x29, 0x69, 0xad, 0xfd, 0xaa, 0x78, 0xa6, 0xb8, - 0x63, 0xbe, 0xfb, 0xe8, 0x1a, 0x5d, 0x53, 0xcd, 0xee, 0x13, 0x80, 0x77, 0x2a, 0xc1, 0x73, 0x96, - 0x08, 0x9a, 0xe5, 0xff, 0xe7, 0x05, 0x95, 0x55, 0xf4, 0xc8, 0x6a, 0x55, 0x95, 0xbf, 0x8d, 0x41, - 0x5b, 0x8e, 0x5e, 0x5c, 0x15, 0x2e, 0xb8, 0x2e, 0x5c, 0xf0, 0xb5, 0x70, 0xc1, 0xfb, 0x99, 0x6b, - 0x5c, 0xcf, 0x5c, 0xe3, 0xcb, 0xcc, 0x35, 0x5e, 0x3e, 0x1d, 0x31, 0x71, 0xf4, 0x7a, 0x10, 0x0c, - 0xf9, 0x38, 0x54, 0xe3, 0x7f, 0x94, 0x52, 0x71, 0xca, 0xb3, 0xe3, 0x65, 0x84, 0x27, 0x2c, 0x1c, - 0xf1, 0x30, 0xe5, 0x84, 0xfe, 0x66, 0x11, 0x07, 0xeb, 0x6a, 0x9d, 0x3a, 0xdf, 0x03, 0x00, 0x00, - 0xff, 0xff, 0x7a, 0xff, 0xca, 0x08, 0xab, 0x03, 0x00, 0x00, -} - -func (m *DeploymentID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DSeq != 0 { - i = encodeVarintDeployment(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintDeployment(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Deployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - 
i := len(dAtA) - _ = i - var l int - _ = l - if m.CreatedAt != 0 { - i = encodeVarintDeployment(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintDeployment(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x1a - } - if m.State != 0 { - i = encodeVarintDeployment(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.DeploymentID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeployment(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *DeploymentFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintDeployment(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x1a - } - if m.DSeq != 0 { - i = encodeVarintDeployment(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintDeployment(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintDeployment(dAtA []byte, offset int, v uint64) int { - offset -= sovDeployment(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *DeploymentID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovDeployment(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovDeployment(uint64(m.DSeq)) - } - return n -} - -func (m *Deployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.DeploymentID.Size() - n += 1 + l + sovDeployment(uint64(l)) - if m.State != 0 { - n += 1 + sovDeployment(uint64(m.State)) - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovDeployment(uint64(l)) - } - if m.CreatedAt != 0 { - n += 1 + sovDeployment(uint64(m.CreatedAt)) - } - return n -} - -func (m *DeploymentFilters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovDeployment(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovDeployment(uint64(m.DSeq)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovDeployment(uint64(l)) - } - return n -} - -func sovDeployment(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozDeployment(x uint64) (n int) { - return sovDeployment(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *DeploymentID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentID: wiretype end group for non-group") - } - if fieldNum <= 0 { - 
return fmt.Errorf("proto: DeploymentID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Deployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Deployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeploymentID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.DeploymentID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Deployment_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = append(m.Version[:0], dAtA[iNdEx:postIndex]...) - if m.Version == nil { - m.Version = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeployment - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDeployment - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDeployment - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeployment(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeployment - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipDeployment(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeployment - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeployment - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeployment - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthDeployment - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupDeployment - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthDeployment - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthDeployment = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowDeployment = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupDeployment = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta3/deployment_validation_test.go b/go/node/deployment/v1beta3/deployment_validation_test.go deleted file mode 100644 index 30d35a91..00000000 --- a/go/node/deployment/v1beta3/deployment_validation_test.go +++ /dev/null @@ -1,202 +0,0 @@ -package v1beta3_test - -import ( - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/stretchr/testify/require" - - types "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - akashtypes "github.com/akash-network/akash-api/go/node/types/v1beta3" - tutil "github.com/akash-network/akash-api/go/testutil" - testutil "github.com/akash-network/akash-api/go/testutil/v1beta3" -) - -const ( - regexInvalidUnitBoundaries = `^.*invalid unit count|CPU|GPU|memory|storage \(\d+ > 0 > \d+ fails\)$` -) - -func TestZeroValueGroupSpec(t *testing.T) { - did := testutil.DeploymentID(t) - - dgroup := testutil.DeploymentGroup(t, did, uint32(6)) - gspec := dgroup.GroupSpec - - t.Run("assert nominal test success", func(t *testing.T) { - err := gspec.ValidateBasic() - require.NoError(t, err) - }) -} - -func TestZeroValueGroupSpecs(t *testing.T) { - did := testutil.DeploymentID(t) - dgroups := testutil.DeploymentGroups(t, did, uint32(6)) - gspecs := make([]types.GroupSpec, 0) - for _, d := range dgroups { - gspecs = append(gspecs, d.GroupSpec) - } - - t.Run("assert nominal test success", func(t *testing.T) { - err := types.ValidateDeploymentGroups(gspecs) - require.NoError(t, err) - }) - - gspecZeroed := 
make([]types.GroupSpec, len(gspecs)) - gspecZeroed = append(gspecZeroed, gspecs...) - t.Run("assert error for zero value bid duration", func(t *testing.T) { - err := types.ValidateDeploymentGroups(gspecZeroed) - require.Error(t, err) - }) -} - -func TestEmptyGroupSpecIsInvalid(t *testing.T) { - err := types.ValidateDeploymentGroups(make([]types.GroupSpec, 0)) - require.Equal(t, types.ErrInvalidGroups, err) -} - -func validSimpleGroupSpec() types.GroupSpec { - resources := make(types.ResourceUnits, 1) - resources[0] = types.ResourceUnit{ - Resources: akashtypes.Resources{ - ID: 1, - CPU: &akashtypes.CPU{ - Units: akashtypes.ResourceValue{ - Val: sdk.NewInt(10), - }, - Attributes: nil, - }, - GPU: &akashtypes.GPU{ - Units: akashtypes.ResourceValue{ - Val: sdk.NewInt(0), - }, - Attributes: nil, - }, - Memory: &akashtypes.Memory{ - Quantity: akashtypes.ResourceValue{ - Val: sdk.NewIntFromUint64(types.GetValidationConfig().Unit.Min.Memory), - }, - Attributes: nil, - }, - Storage: akashtypes.Volumes{ - akashtypes.Storage{ - Quantity: akashtypes.ResourceValue{ - Val: sdk.NewIntFromUint64(types.GetValidationConfig().Unit.Min.Storage), - }, - Attributes: nil, - }, - }, - Endpoints: akashtypes.Endpoints{}, - }, - Count: 1, - Price: sdk.NewInt64DecCoin(tutil.CoinDenom, 1), - } - return types.GroupSpec{ - Name: "testGroup", - Requirements: akashtypes.PlacementRequirements{}, - Resources: resources, - } -} - -func validSimpleGroupSpecs() []types.GroupSpec { - result := make([]types.GroupSpec, 1) - result[0] = validSimpleGroupSpec() - - return result -} - -func TestSimpleGroupSpecIsValid(t *testing.T) { - groups := validSimpleGroupSpecs() - err := types.ValidateDeploymentGroups(groups) - require.NoError(t, err) -} - -func TestDuplicateSimpleGroupSpecIsInvalid(t *testing.T) { - groups := validSimpleGroupSpecs() - groupsDuplicate := make([]types.GroupSpec, 2) - groupsDuplicate[0] = groups[0] - groupsDuplicate[1] = groups[0] - err := types.ValidateDeploymentGroups(groupsDuplicate) - require.Error(t, err) // TODO - specific error - require.Regexp(t, "^.*duplicate.*$", err) -} - -func TestGroupWithZeroCount(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].Count = 0 - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, regexInvalidUnitBoundaries, err) -} - -func TestGroupWithZeroCPU(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].CPU.Units.Val = sdk.NewInt(0) - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, regexInvalidUnitBoundaries, err) -} - -func TestGroupWithZeroMemory(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].Memory.Quantity.Val = sdk.NewInt(0) - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, regexInvalidUnitBoundaries, err) -} - -func TestGroupWithZeroStorage(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].Storage[0].Quantity.Val = sdk.NewInt(0) - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, regexInvalidUnitBoundaries, err) -} - -func TestGroupWithNilCPU(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].CPU = nil - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, "^.*invalid unit CPU.*$", err) -} - -func TestGroupWithNilGPU(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].GPU = nil - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, "^.*invalid unit GPU.*$", err) -} - -func TestGroupWithNilMemory(t *testing.T) { - group 
:= validSimpleGroupSpec() - group.Resources[0].Memory = nil - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, "^.*invalid unit memory.*$", err) -} - -func TestGroupWithNilStorage(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].Storage = nil - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, "^.*invalid unit storage.*$", err) -} - -func TestGroupWithInvalidPrice(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].Price = sdk.DecCoin{} - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, "^.*invalid price object.*$", err) -} - -func TestGroupWithNegativePrice(t *testing.T) { - group := validSimpleGroupSpec() - group.Resources[0].Price.Amount = sdk.NewDec(-1) - err := group.ValidateBasic() - require.Error(t, err) - require.Regexp(t, "^.*invalid price object.*$", err) -} diff --git a/go/node/deployment/v1beta3/deploymentmsg.pb.go b/go/node/deployment/v1beta3/deploymentmsg.pb.go deleted file mode 100644 index 5ec7259d..00000000 --- a/go/node/deployment/v1beta3/deploymentmsg.pb.go +++ /dev/null @@ -1,1722 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta3/deploymentmsg.proto - -package v1beta3 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
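The validation tests removed just above exercise GroupSpec.ValidateBasic and ValidateDeploymentGroups against zero counts, nil resources, and malformed prices. A hedged usage sketch of that API, mirroring what the deleted tests call and assuming the v1beta3 package still exposes both functions:

package example // illustrative sketch; mirrors the API exercised by the removed tests

import (
	types "github.com/akash-network/akash-api/go/node/deployment/v1beta3"
)

// validateGroups runs the per-group checks (unit bounds, price) and then the
// cross-group checks (empty set, duplicates) before the specs go into a
// MsgCreateDeployment.
func validateGroups(groups []types.GroupSpec) error {
	for _, g := range groups {
		if err := g.ValidateBasic(); err != nil {
			return err
		}
	}
	return types.ValidateDeploymentGroups(groups)
}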
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// MsgCreateDeployment defines an SDK message for creating deployment -type MsgCreateDeployment struct { - ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` - Groups []GroupSpec `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups" yaml:"groups"` - Version []byte `protobuf:"bytes,3,opt,name=version,proto3" json:"version" yaml:"version"` - Deposit types.Coin `protobuf:"bytes,4,opt,name=deposit,proto3" json:"deposit" yaml:"deposit"` - // Depositor pays for the deposit - Depositor string `protobuf:"bytes,5,opt,name=depositor,proto3" json:"depositor" yaml:"depositor"` -} - -func (m *MsgCreateDeployment) Reset() { *m = MsgCreateDeployment{} } -func (m *MsgCreateDeployment) String() string { return proto.CompactTextString(m) } -func (*MsgCreateDeployment) ProtoMessage() {} -func (*MsgCreateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_f364faefc2dcd8b7, []int{0} -} -func (m *MsgCreateDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateDeployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateDeployment.Merge(m, src) -} -func (m *MsgCreateDeployment) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateDeployment proto.InternalMessageInfo - -func (m *MsgCreateDeployment) GetID() DeploymentID { - if m != nil { - return m.ID - } - return DeploymentID{} -} - -func (m *MsgCreateDeployment) GetGroups() []GroupSpec { - if m != nil { - return m.Groups - } - return nil -} - -func (m *MsgCreateDeployment) GetVersion() []byte { - if m != nil { - return m.Version - } - return nil -} - -func (m *MsgCreateDeployment) GetDeposit() types.Coin { - if m != nil { - return m.Deposit - } - return types.Coin{} -} - -func (m *MsgCreateDeployment) GetDepositor() string { - if m != nil { - return m.Depositor - } - return "" -} - -// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. 
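For context on the message removed above: MsgCreateDeployment carries the deployment ID, the group specs, a version hash, and an escrow deposit, with an optional separate depositor account. A hedged construction sketch, assuming the removed v1beta3 definitions; the denom and amount are purely illustrative:

package example // illustrative sketch; assumes the removed v1beta3 message definitions

import (
	sdk "github.com/cosmos/cosmos-sdk/types"

	types "github.com/akash-network/akash-api/go/node/deployment/v1beta3"
)

// newCreateDeployment assembles the create message; Depositor may differ from
// the owner, in which case that account funds the escrow deposit.
func newCreateDeployment(owner string, dseq uint64, groups []types.GroupSpec, version []byte) types.MsgCreateDeployment {
	return types.MsgCreateDeployment{
		ID:        types.DeploymentID{Owner: owner, DSeq: dseq},
		Groups:    groups,
		Version:   version,
		Deposit:   sdk.NewInt64Coin("uakt", 5000000), // denom and amount are placeholders
		Depositor: owner,
	}
}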
-type MsgCreateDeploymentResponse struct { -} - -func (m *MsgCreateDeploymentResponse) Reset() { *m = MsgCreateDeploymentResponse{} } -func (m *MsgCreateDeploymentResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateDeploymentResponse) ProtoMessage() {} -func (*MsgCreateDeploymentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f364faefc2dcd8b7, []int{1} -} -func (m *MsgCreateDeploymentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateDeploymentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateDeploymentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateDeploymentResponse.Merge(m, src) -} -func (m *MsgCreateDeploymentResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateDeploymentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateDeploymentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateDeploymentResponse proto.InternalMessageInfo - -// MsgDepositDeployment deposits more funds into the deposit account -type MsgDepositDeployment struct { - ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` - Amount types.Coin `protobuf:"bytes,2,opt,name=amount,proto3" json:"amount" yaml:"amount"` - // Depositor pays for the deposit - Depositor string `protobuf:"bytes,3,opt,name=depositor,proto3" json:"depositor" yaml:"depositor"` -} - -func (m *MsgDepositDeployment) Reset() { *m = MsgDepositDeployment{} } -func (m *MsgDepositDeployment) String() string { return proto.CompactTextString(m) } -func (*MsgDepositDeployment) ProtoMessage() {} -func (*MsgDepositDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_f364faefc2dcd8b7, []int{2} -} -func (m *MsgDepositDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDepositDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDepositDeployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDepositDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDepositDeployment.Merge(m, src) -} -func (m *MsgDepositDeployment) XXX_Size() int { - return m.Size() -} -func (m *MsgDepositDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDepositDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDepositDeployment proto.InternalMessageInfo - -func (m *MsgDepositDeployment) GetID() DeploymentID { - if m != nil { - return m.ID - } - return DeploymentID{} -} - -func (m *MsgDepositDeployment) GetAmount() types.Coin { - if m != nil { - return m.Amount - } - return types.Coin{} -} - -func (m *MsgDepositDeployment) GetDepositor() string { - if m != nil { - return m.Depositor - } - return "" -} - -// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. 
-type MsgDepositDeploymentResponse struct { -} - -func (m *MsgDepositDeploymentResponse) Reset() { *m = MsgDepositDeploymentResponse{} } -func (m *MsgDepositDeploymentResponse) String() string { return proto.CompactTextString(m) } -func (*MsgDepositDeploymentResponse) ProtoMessage() {} -func (*MsgDepositDeploymentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f364faefc2dcd8b7, []int{3} -} -func (m *MsgDepositDeploymentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDepositDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDepositDeploymentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDepositDeploymentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDepositDeploymentResponse.Merge(m, src) -} -func (m *MsgDepositDeploymentResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgDepositDeploymentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDepositDeploymentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDepositDeploymentResponse proto.InternalMessageInfo - -// MsgUpdateDeployment defines an SDK message for updating deployment -type MsgUpdateDeployment struct { - ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` - Version []byte `protobuf:"bytes,3,opt,name=version,proto3" json:"version" yaml:"version"` -} - -func (m *MsgUpdateDeployment) Reset() { *m = MsgUpdateDeployment{} } -func (m *MsgUpdateDeployment) String() string { return proto.CompactTextString(m) } -func (*MsgUpdateDeployment) ProtoMessage() {} -func (*MsgUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_f364faefc2dcd8b7, []int{4} -} -func (m *MsgUpdateDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgUpdateDeployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgUpdateDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgUpdateDeployment.Merge(m, src) -} -func (m *MsgUpdateDeployment) XXX_Size() int { - return m.Size() -} -func (m *MsgUpdateDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_MsgUpdateDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgUpdateDeployment proto.InternalMessageInfo - -func (m *MsgUpdateDeployment) GetID() DeploymentID { - if m != nil { - return m.ID - } - return DeploymentID{} -} - -func (m *MsgUpdateDeployment) GetVersion() []byte { - if m != nil { - return m.Version - } - return nil -} - -// MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. 
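The encodeVarintDeployment/encodeVarintDeploymentmsg helpers that appear further down write varints backwards, because the whole buffer is filled from the end toward the front. A standalone restatement of that shape, kept deliberately dependency-free:

package example // illustrative restatement of the removed encodeVarint* helpers

// putVarint writes v into the bytes immediately before position offset and
// returns the index of the varint's first byte, i.e. the new write cursor.
func putVarint(buf []byte, offset int, v uint64) int {
	offset -= varintLen(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = byte(v&0x7F | 0x80) // continuation bit set
		v >>= 7
		offset++
	}
	buf[offset] = byte(v) // final byte, continuation bit clear
	return base
}

// varintLen counts how many 7-bit groups v needs (at least one).
func varintLen(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}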
-type MsgUpdateDeploymentResponse struct { -} - -func (m *MsgUpdateDeploymentResponse) Reset() { *m = MsgUpdateDeploymentResponse{} } -func (m *MsgUpdateDeploymentResponse) String() string { return proto.CompactTextString(m) } -func (*MsgUpdateDeploymentResponse) ProtoMessage() {} -func (*MsgUpdateDeploymentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f364faefc2dcd8b7, []int{5} -} -func (m *MsgUpdateDeploymentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgUpdateDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgUpdateDeploymentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgUpdateDeploymentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgUpdateDeploymentResponse.Merge(m, src) -} -func (m *MsgUpdateDeploymentResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgUpdateDeploymentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgUpdateDeploymentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgUpdateDeploymentResponse proto.InternalMessageInfo - -// MsgCloseDeployment defines an SDK message for closing deployment -type MsgCloseDeployment struct { - ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCloseDeployment) Reset() { *m = MsgCloseDeployment{} } -func (m *MsgCloseDeployment) String() string { return proto.CompactTextString(m) } -func (*MsgCloseDeployment) ProtoMessage() {} -func (*MsgCloseDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_f364faefc2dcd8b7, []int{6} -} -func (m *MsgCloseDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseDeployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseDeployment.Merge(m, src) -} -func (m *MsgCloseDeployment) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseDeployment proto.InternalMessageInfo - -func (m *MsgCloseDeployment) GetID() DeploymentID { - if m != nil { - return m.ID - } - return DeploymentID{} -} - -// MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. 
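All of the removed gogoproto marshalers below follow the same two-pass pattern: Size() computes the exact encoded length up front, and MarshalToSizedBuffer() writes fields in reverse from the end of the buffer, so nested messages serialize without intermediate allocations. A generic sketch of the wrapper, assuming only that method pair:

package example // illustrative sketch of the gogoproto two-pass marshal pattern

// sizedMarshaler is the method pair every removed *.pb.go type implements.
type sizedMarshaler interface {
	Size() int
	MarshalToSizedBuffer([]byte) (int, error)
}

// marshal allocates the exact buffer and returns the trailing n bytes that
// MarshalToSizedBuffer filled in; n equals len(buf) when Size is exact.
func marshal(m sizedMarshaler) ([]byte, error) {
	buf := make([]byte, m.Size())
	n, err := m.MarshalToSizedBuffer(buf)
	if err != nil {
		return nil, err
	}
	return buf[len(buf)-n:], nil
}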
-type MsgCloseDeploymentResponse struct { -} - -func (m *MsgCloseDeploymentResponse) Reset() { *m = MsgCloseDeploymentResponse{} } -func (m *MsgCloseDeploymentResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCloseDeploymentResponse) ProtoMessage() {} -func (*MsgCloseDeploymentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f364faefc2dcd8b7, []int{7} -} -func (m *MsgCloseDeploymentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseDeploymentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseDeploymentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseDeploymentResponse.Merge(m, src) -} -func (m *MsgCloseDeploymentResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseDeploymentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseDeploymentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseDeploymentResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*MsgCreateDeployment)(nil), "akash.deployment.v1beta3.MsgCreateDeployment") - proto.RegisterType((*MsgCreateDeploymentResponse)(nil), "akash.deployment.v1beta3.MsgCreateDeploymentResponse") - proto.RegisterType((*MsgDepositDeployment)(nil), "akash.deployment.v1beta3.MsgDepositDeployment") - proto.RegisterType((*MsgDepositDeploymentResponse)(nil), "akash.deployment.v1beta3.MsgDepositDeploymentResponse") - proto.RegisterType((*MsgUpdateDeployment)(nil), "akash.deployment.v1beta3.MsgUpdateDeployment") - proto.RegisterType((*MsgUpdateDeploymentResponse)(nil), "akash.deployment.v1beta3.MsgUpdateDeploymentResponse") - proto.RegisterType((*MsgCloseDeployment)(nil), "akash.deployment.v1beta3.MsgCloseDeployment") - proto.RegisterType((*MsgCloseDeploymentResponse)(nil), "akash.deployment.v1beta3.MsgCloseDeploymentResponse") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta3/deploymentmsg.proto", fileDescriptor_f364faefc2dcd8b7) -} - -var fileDescriptor_f364faefc2dcd8b7 = []byte{ - // 535 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0xc1, 0x6e, 0xd3, 0x4a, - 0x14, 0x8d, 0x9d, 0xbe, 0x54, 0x99, 0x3e, 0x10, 0x32, 0x5d, 0x98, 0xd0, 0x78, 0xd2, 0x41, 0x42, - 0x41, 0x02, 0x5b, 0x69, 0x17, 0x48, 0x95, 0x10, 0x92, 0x1b, 0x09, 0x75, 0xd1, 0x8d, 0x51, 0x41, - 0x42, 0x6c, 0x9c, 0x78, 0xe4, 0x8e, 0x1a, 0xfb, 0x1a, 0x8f, 0x53, 0xd4, 0x3f, 0x60, 0xc9, 0x27, - 0x20, 0xb6, 0xfc, 0x48, 0x97, 0x5d, 0xb2, 0x1a, 0xa1, 0x64, 0x83, 0xb2, 0xcc, 0x17, 0x20, 0x7b, - 0xc6, 0x31, 0x81, 0x44, 0xa0, 0x4a, 0x65, 0xe7, 0xb9, 0xe7, 0x9c, 0xeb, 0x33, 0xe7, 0x5e, 0x0d, - 0x7a, 0xec, 0x9f, 0xf9, 0xfc, 0xd4, 0x09, 0x68, 0x32, 0x82, 0x8b, 0x88, 0xc6, 0x99, 0x73, 0xde, - 0x1b, 0xd0, 0xcc, 0xdf, 0xff, 0xa9, 0x14, 0xf1, 0xd0, 0x4e, 0x52, 0xc8, 0xc0, 0x30, 0x0b, 0xb6, - 0x5d, 0x41, 0xb6, 0x62, 0xb7, 0xb6, 0x43, 0x08, 0xa1, 0x20, 0x39, 0xf9, 0x97, 0xe4, 0xb7, 0x1e, - 0xfd, 0x45, 0x77, 0x45, 0xed, 0xae, 0xa5, 0x86, 0x29, 0x8c, 0x13, 0x9e, 0xd0, 0xa1, 0x62, 0x5a, - 0x43, 0xe0, 0x11, 0x70, 0x67, 0xe0, 0x73, 0xaa, 0x48, 0x3d, 0x67, 0x08, 0x2c, 0x96, 0x38, 0xf9, - 0x5c, 0x47, 0x77, 0x8f, 0x79, 0x78, 0x98, 0x52, 0x3f, 0xa3, 0xfd, 0x45, 0x3f, 0xe3, 0x04, 0xe9, - 0x2c, 0x30, 0xb5, 0x8e, 0xd6, 0xdd, 0xda, 0x7b, 
0x68, 0xaf, 0xbb, 0x89, 0x5d, 0x29, 0x8e, 0xfa, - 0x6e, 0xfb, 0x52, 0xe0, 0xda, 0x44, 0x60, 0xfd, 0xa8, 0x3f, 0x13, 0x58, 0x67, 0xc1, 0x5c, 0xe0, - 0xe6, 0x85, 0x1f, 0x8d, 0x0e, 0x08, 0x0b, 0x88, 0xa7, 0xb3, 0xc0, 0x78, 0x8b, 0x1a, 0xd2, 0xa1, - 0xa9, 0x77, 0xea, 0xdd, 0xad, 0xbd, 0x07, 0xeb, 0x5b, 0xbf, 0xc8, 0x79, 0x2f, 0x13, 0x3a, 0x74, - 0x71, 0xde, 0x77, 0x26, 0xb0, 0x92, 0xce, 0x05, 0xbe, 0x25, 0xbb, 0xca, 0x33, 0xf1, 0x14, 0x60, - 0x3c, 0x45, 0x9b, 0xe7, 0x34, 0xe5, 0x0c, 0x62, 0xb3, 0xde, 0xd1, 0xba, 0xff, 0xbb, 0xed, 0x99, - 0xc0, 0x65, 0x69, 0x2e, 0xf0, 0x6d, 0x29, 0x53, 0x05, 0xe2, 0x95, 0x90, 0xf1, 0x0a, 0x6d, 0x06, - 0x34, 0x01, 0xce, 0x32, 0x73, 0xa3, 0xb8, 0xf2, 0x3d, 0x5b, 0xe6, 0x66, 0xe7, 0xb9, 0x29, 0x4b, - 0x3d, 0xfb, 0x10, 0x58, 0xec, 0xee, 0x2a, 0x37, 0xa5, 0xa2, 0xea, 0xab, 0x0a, 0xc4, 0x2b, 0x21, - 0xe3, 0x39, 0x6a, 0xaa, 0x4f, 0x48, 0xcd, 0xff, 0x3a, 0x5a, 0xb7, 0xe9, 0xee, 0xce, 0x04, 0xae, - 0x8a, 0x73, 0x81, 0xef, 0x2c, 0x89, 0x21, 0x25, 0x5e, 0x05, 0x1f, 0x6c, 0x7c, 0xff, 0x84, 0x6b, - 0xa4, 0x8d, 0xee, 0xaf, 0x98, 0x91, 0x47, 0x79, 0x02, 0x31, 0xa7, 0xe4, 0x83, 0x8e, 0xb6, 0x8f, - 0x79, 0xd8, 0x97, 0xaa, 0x9b, 0x1f, 0xa2, 0x87, 0x1a, 0x7e, 0x04, 0xe3, 0x38, 0x33, 0xf5, 0x3f, - 0x85, 0xb5, 0x18, 0x9d, 0x14, 0x54, 0xa3, 0x93, 0x67, 0xe2, 0x29, 0x60, 0x39, 0xa9, 0xfa, 0xb5, - 0x93, 0xb2, 0xd0, 0xce, 0xaa, 0x24, 0x16, 0x51, 0x7d, 0xd1, 0x8a, 0x75, 0x3f, 0x49, 0x82, 0x7f, - 0xb2, 0xee, 0xd7, 0x5d, 0xc8, 0xa5, 0xb9, 0xff, 0x6a, 0x76, 0x71, 0x99, 0x77, 0xc8, 0xc8, 0xd7, - 0x62, 0x04, 0xfc, 0xe6, 0xaf, 0xa2, 0x1c, 0xed, 0xa0, 0xd6, 0xef, 0xbf, 0x2c, 0x0d, 0xb9, 0xaf, - 0x2f, 0x27, 0x96, 0x76, 0x35, 0xb1, 0xb4, 0x6f, 0x13, 0x4b, 0xfb, 0x38, 0xb5, 0x6a, 0x57, 0x53, - 0xab, 0xf6, 0x75, 0x6a, 0xd5, 0xde, 0x3c, 0x0b, 0x59, 0x76, 0x3a, 0x1e, 0xd8, 0x43, 0x88, 0x9c, - 0xc2, 0xd2, 0x93, 0x98, 0x66, 0xef, 0x21, 0x3d, 0x53, 0x27, 0x3f, 0x61, 0x4e, 0x08, 0x4e, 0x0c, - 0x01, 0x5d, 0xf1, 0xaa, 0x0d, 0x1a, 0xc5, 0x63, 0xb5, 0xff, 0x23, 0x00, 0x00, 0xff, 0xff, 0x8f, - 0x5e, 0x1e, 0x24, 0x81, 0x05, 0x00, 0x00, -} - -func (m *MsgCreateDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Depositor) > 0 { - i -= len(m.Depositor) - copy(dAtA[i:], m.Depositor) - i = encodeVarintDeploymentmsg(dAtA, i, uint64(len(m.Depositor))) - i-- - dAtA[i] = 0x2a - } - { - size, err := m.Deposit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintDeploymentmsg(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x1a - } - if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] 
= 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCreateDeploymentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgDepositDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDepositDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDepositDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Depositor) > 0 { - i -= len(m.Depositor) - copy(dAtA[i:], m.Depositor) - i = encodeVarintDeploymentmsg(dAtA, i, uint64(len(m.Depositor))) - i-- - dAtA[i] = 0x1a - } - { - size, err := m.Amount.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgDepositDeploymentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDepositDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDepositDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgUpdateDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Version) > 0 { - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintDeploymentmsg(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x1a - } - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgUpdateDeploymentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgUpdateDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgUpdateDeploymentResponse) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgCloseDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCloseDeploymentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintDeploymentmsg(dAtA []byte, offset int, v uint64) int { - offset -= sovDeploymentmsg(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *MsgCreateDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovDeploymentmsg(uint64(l)) - if len(m.Groups) > 0 { - for _, e := range m.Groups { - l = e.Size() - n += 1 + l + sovDeploymentmsg(uint64(l)) - } - } - l = len(m.Version) - if l > 0 { - n += 1 + l + sovDeploymentmsg(uint64(l)) - } - l = m.Deposit.Size() - n += 1 + l + sovDeploymentmsg(uint64(l)) - l = len(m.Depositor) - if l > 0 { - n += 1 + l + sovDeploymentmsg(uint64(l)) - } - return n -} - -func (m *MsgCreateDeploymentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgDepositDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovDeploymentmsg(uint64(l)) - l = m.Amount.Size() - n += 1 + l + sovDeploymentmsg(uint64(l)) - l = len(m.Depositor) - if l > 0 { - n += 1 + l + sovDeploymentmsg(uint64(l)) - } - return n -} - -func (m *MsgDepositDeploymentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgUpdateDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovDeploymentmsg(uint64(l)) - l = len(m.Version) - if l > 0 { - n += 1 + l + sovDeploymentmsg(uint64(l)) - } - return n -} - -func (m *MsgUpdateDeploymentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgCloseDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovDeploymentmsg(uint64(l)) - return n -} - -func (m *MsgCloseDeploymentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovDeploymentmsg(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozDeploymentmsg(x uint64) (n int) { - return 
sovDeploymentmsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *MsgCreateDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, GroupSpec{}) - if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = append(m.Version[:0], dAtA[iNdEx:postIndex]...) 
- if m.Version == nil { - m.Version = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deposit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Deposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Depositor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Depositor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeploymentmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateDeploymentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateDeploymentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeploymentmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDepositDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDepositDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDepositDeployment: illegal tag 
%d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Depositor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Depositor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeploymentmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDepositDeploymentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDepositDeploymentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDepositDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeploymentmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF 
- } - return nil -} -func (m *MsgUpdateDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgUpdateDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = append(m.Version[:0], dAtA[iNdEx:postIndex]...) 
- if m.Version == nil { - m.Version = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeploymentmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgUpdateDeploymentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgUpdateDeploymentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgUpdateDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeploymentmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthDeploymentmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthDeploymentmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeploymentmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseDeploymentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseDeploymentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthDeploymentmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipDeploymentmsg(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowDeploymentmsg - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthDeploymentmsg - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupDeploymentmsg - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthDeploymentmsg - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthDeploymentmsg = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowDeploymentmsg = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupDeploymentmsg = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta3/deposit_deployment_authorization.go b/go/node/deployment/v1beta3/deposit_deployment_authorization.go deleted file mode 100644 index c8ff0de3..00000000 --- a/go/node/deployment/v1beta3/deposit_deployment_authorization.go +++ /dev/null @@ -1,45 +0,0 @@ -package v1beta3 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - "github.com/cosmos/cosmos-sdk/x/authz" -) - -var ( - _ authz.Authorization = &DepositDeploymentAuthorization{} -) - -// NewDepositDeploymentAuthorization creates a new DepositDeploymentAuthorization object. -func NewDepositDeploymentAuthorization(spendLimit sdk.Coin) *DepositDeploymentAuthorization { - return &DepositDeploymentAuthorization{ - SpendLimit: spendLimit, - } -} - -// MsgTypeURL implements Authorization.MsgTypeURL. -func (m DepositDeploymentAuthorization) MsgTypeURL() string { - return sdk.MsgTypeURL(&MsgDepositDeployment{}) -} - -// Accept implements Authorization.Accept. 
-func (m DepositDeploymentAuthorization) Accept(_ sdk.Context, msg sdk.Msg) (authz.AcceptResponse, error) { - mDepositDeployment, ok := msg.(*MsgDepositDeployment) - if !ok { - return authz.AcceptResponse{}, sdkerrors.ErrInvalidType.Wrap("type mismatch") - } - if m.SpendLimit.IsLT(mDepositDeployment.Amount) { - return authz.AcceptResponse{}, sdkerrors.ErrInsufficientFunds.Wrapf("requested amount is more than spend limit") - } - limitLeft := m.SpendLimit.Sub(mDepositDeployment.Amount) - - return authz.AcceptResponse{Accept: true, Delete: false, Updated: &DepositDeploymentAuthorization{SpendLimit: limitLeft}}, nil -} - -// ValidateBasic implements Authorization.ValidateBasic. -func (m DepositDeploymentAuthorization) ValidateBasic() error { - if !m.SpendLimit.IsPositive() { - return sdkerrors.ErrInvalidCoins.Wrapf("spend limit cannot be negative") - } - return nil -} diff --git a/go/node/deployment/v1beta3/errors.go b/go/node/deployment/v1beta3/errors.go deleted file mode 100644 index 55a61678..00000000 --- a/go/node/deployment/v1beta3/errors.go +++ /dev/null @@ -1,81 +0,0 @@ -package v1beta3 - -import ( - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - errNameDoesNotExist uint32 = iota + 1 - errInvalidRequest - errDeploymentExists - errDeploymentNotFound - errDeploymentClosed - errOwnerAcctMissing - errInvalidGroups - errInvalidDeploymentID - errEmptyVersion - errInvalidVersion - errInternal - errInvalidDeployment - errInvalidGroupID - errGroupNotFound - errGroupClosed - errGroupOpen - errGroupPaused - errGroupNotOpen - errGroupSpec - errInvalidDeposit - errInvalidIDPath - errInvalidParam - errInvalidDeploymentDepositor -) - -var ( - // ErrNameDoesNotExist is the error when name does not exist - ErrNameDoesNotExist = sdkerrors.Register(ModuleName, errNameDoesNotExist, "Name does not exist") - // ErrInvalidRequest is the error for invalid request - ErrInvalidRequest = sdkerrors.Register(ModuleName, errInvalidRequest, "Invalid request") - // ErrDeploymentExists is the error when already deployment exists - ErrDeploymentExists = sdkerrors.Register(ModuleName, errDeploymentExists, "Deployment exists") - // ErrDeploymentNotFound is the error when deployment not found - ErrDeploymentNotFound = sdkerrors.Register(ModuleName, errDeploymentNotFound, "Deployment not found") - // ErrDeploymentClosed is the error when deployment is closed - ErrDeploymentClosed = sdkerrors.Register(ModuleName, errDeploymentClosed, "Deployment closed") - // ErrOwnerAcctMissing is the error for owner account missing - ErrOwnerAcctMissing = sdkerrors.Register(ModuleName, errOwnerAcctMissing, "Owner account missing") - // ErrInvalidGroups is the error when groups are empty - ErrInvalidGroups = sdkerrors.Register(ModuleName, errInvalidGroups, "Invalid groups") - // ErrInvalidDeploymentID is the error for invalid deployment id - ErrInvalidDeploymentID = sdkerrors.Register(ModuleName, errInvalidDeploymentID, "Invalid: deployment id") - // ErrEmptyVersion is the error when version is empty - ErrEmptyVersion = sdkerrors.Register(ModuleName, errEmptyVersion, "Invalid: empty version") - // ErrInvalidVersion is the error when version is invalid - ErrInvalidVersion = sdkerrors.Register(ModuleName, errInvalidVersion, "Invalid: deployment version") - // ErrInternal is the error for internal error - ErrInternal = sdkerrors.Register(ModuleName, errInternal, "internal error") - // ErrInvalidDeployment = is the error when deployment does not pass validation - ErrInvalidDeployment = sdkerrors.Register(ModuleName, 
errInvalidDeployment, "Invalid deployment") - // ErrInvalidGroupID is the error when already deployment exists - ErrInvalidGroupID = sdkerrors.Register(ModuleName, errInvalidGroupID, "Deployment exists") - // ErrGroupNotFound is the keeper's error for not finding a group - ErrGroupNotFound = sdkerrors.Register(ModuleName, errGroupNotFound, "Group not found") - // ErrGroupClosed is the error when deployment is closed - ErrGroupClosed = sdkerrors.Register(ModuleName, errGroupClosed, "Group already closed") - // ErrGroupOpen is the error when deployment is closed - ErrGroupOpen = sdkerrors.Register(ModuleName, errGroupOpen, "Group open") - // ErrGroupPaused is the error when deployment is closed - ErrGroupPaused = sdkerrors.Register(ModuleName, errGroupPaused, "Group paused") - // ErrGroupNotOpen indicates the Group state has progressed beyond initial Open. - ErrGroupNotOpen = sdkerrors.Register(ModuleName, errGroupNotOpen, "Group not open") - // ErrGroupSpecInvalid indicates a GroupSpec has invalid configuration - ErrGroupSpecInvalid = sdkerrors.Register(ModuleName, errGroupSpec, "GroupSpec invalid") - - // ErrInvalidDeposit indicates an invalid deposit - ErrInvalidDeposit = sdkerrors.Register(ModuleName, errInvalidDeposit, "Deposit invalid") - // ErrInvalidIDPath indicates an invalid ID path - ErrInvalidIDPath = sdkerrors.Register(ModuleName, errInvalidIDPath, "ID path invalid") - // ErrInvalidParam indicates an invalid chain parameter - ErrInvalidParam = sdkerrors.Register(ModuleName, errInvalidParam, "parameter invalid") - // ErrInvalidDeploymentDepositor indicates an invalid chain parameter - ErrInvalidDeploymentDepositor = sdkerrors.Register(ModuleName, errInvalidDeploymentDepositor, "invalid deployment depositor") -) diff --git a/go/node/deployment/v1beta3/escrow.go b/go/node/deployment/v1beta3/escrow.go deleted file mode 100644 index bda33845..00000000 --- a/go/node/deployment/v1beta3/escrow.go +++ /dev/null @@ -1,25 +0,0 @@ -package v1beta3 - -import ( - etypes "github.com/akash-network/akash-api/go/node/escrow/v1beta3" -) - -const ( - EscrowScope = "deployment" -) - -func EscrowAccountForDeployment(id DeploymentID) etypes.AccountID { - return etypes.AccountID{ - Scope: EscrowScope, - XID: id.String(), - } -} - -func DeploymentIDFromEscrowAccount(id etypes.AccountID) (DeploymentID, bool) { - if id.Scope != EscrowScope { - return DeploymentID{}, false - } - - did, err := ParseDeploymentID(id.XID) - return did, err == nil -} diff --git a/go/node/deployment/v1beta3/event.go b/go/node/deployment/v1beta3/event.go deleted file mode 100644 index a7e015fc..00000000 --- a/go/node/deployment/v1beta3/event.go +++ /dev/null @@ -1,309 +0,0 @@ -package v1beta3 - -import ( - "encoding/hex" - "strconv" - - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/akash-network/akash-api/go/sdkutil" -) - -const ( - evActionDeploymentCreated = "deployment-created" - evActionDeploymentUpdated = "deployment-updated" - evActionDeploymentClosed = "deployment-closed" - evActionGroupClosed = "group-closed" - evActionGroupPaused = "group-paused" - evActionGroupStarted = "group-started" - evOwnerKey = "owner" - evDSeqKey = "dseq" - evGSeqKey = "gseq" - evVersionKey = "version" - encodedVersionHexLen = 64 -) - -// EventDeploymentCreated struct -type EventDeploymentCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID DeploymentID `json:"id"` - Version []byte `json:"version"` -} - -// NewEventDeploymentCreated initializes creation event. 
-func NewEventDeploymentCreated(id DeploymentID, version []byte) EventDeploymentCreated { - return EventDeploymentCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionDeploymentCreated, - }, - ID: id, - Version: version, - } -} - -// ToSDKEvent method creates new sdk event for EventDeploymentCreated struct -func (ev EventDeploymentCreated) ToSDKEvent() sdk.Event { - version := encodeHex(ev.Version) - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionDeploymentCreated), - sdk.NewAttribute(evVersionKey, string(version)), - }, DeploymentIDEVAttributes(ev.ID)...)..., - ) -} - -// EventDeploymentUpdated struct -type EventDeploymentUpdated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID DeploymentID `json:"id"` - Version []byte `json:"version"` -} - -// NewEventDeploymentUpdated initializes SDK type -func NewEventDeploymentUpdated(id DeploymentID, version []byte) EventDeploymentUpdated { - return EventDeploymentUpdated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionDeploymentUpdated, - }, - ID: id, - Version: version, - } -} - -// ToSDKEvent method creates new sdk event for EventDeploymentUpdated struct -func (ev EventDeploymentUpdated) ToSDKEvent() sdk.Event { - version := encodeHex(ev.Version) - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionDeploymentUpdated), - sdk.NewAttribute(evVersionKey, string(version)), - }, DeploymentIDEVAttributes(ev.ID)...)..., - ) -} - -// EventDeploymentClosed struct -type EventDeploymentClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID DeploymentID `json:"id"` -} - -func NewEventDeploymentClosed(id DeploymentID) EventDeploymentClosed { - return EventDeploymentClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionDeploymentClosed, - }, - ID: id, - } -} - -// ToSDKEvent method creates new sdk event for EventDeploymentClosed struct -func (ev EventDeploymentClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionDeploymentClosed), - }, DeploymentIDEVAttributes(ev.ID)...)..., - ) -} - -// DeploymentIDEVAttributes returns event attribues for given DeploymentID -func DeploymentIDEVAttributes(id DeploymentID) []sdk.Attribute { - return []sdk.Attribute{ - sdk.NewAttribute(evOwnerKey, id.Owner), - sdk.NewAttribute(evDSeqKey, strconv.FormatUint(id.DSeq, 10)), - } -} - -// ParseEVDeploymentID returns deploymentID details for given event attributes -func ParseEVDeploymentID(attrs []sdk.Attribute) (DeploymentID, error) { - owner, err := sdkutil.GetAccAddress(attrs, evOwnerKey) - if err != nil { - return DeploymentID{}, err - } - dseq, err := sdkutil.GetUint64(attrs, evDSeqKey) - if err != nil { - return DeploymentID{}, err - } - - return DeploymentID{ - Owner: owner.String(), - DSeq: dseq, - }, nil -} - -// ParseEVDeploymentVersion returns the Deployment's SDL sha256 sum -func ParseEVDeploymentVersion(attrs []sdk.Attribute) ([]byte, error) { - v, err := sdkutil.GetString(attrs, evVersionKey) - if err != nil { - return nil, err - } - return decodeHex([]byte(v)) -} - -func encodeHex(src []byte) []byte { - dst := make([]byte, 
hex.EncodedLen(len(src))) - hex.Encode(dst, src) - return dst -} - -func decodeHex(src []byte) ([]byte, error) { - dst := make([]byte, hex.DecodedLen(len(src))) - if _, err := hex.Decode(dst, src); err != nil { - return []byte{}, err - } - return dst, nil -} - -// EventGroupClosed provides SDK event to signal group termination -type EventGroupClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID GroupID `json:"id"` -} - -func NewEventGroupClosed(id GroupID) EventGroupClosed { - return EventGroupClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionGroupClosed, - }, - ID: id, - } -} - -// ToSDKEvent produces the SDK notification for Event -func (ev EventGroupClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionGroupClosed), - }, GroupIDEVAttributes(ev.ID)...)..., - ) -} - -// EventGroupPaused provides SDK event to signal group termination -type EventGroupPaused struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID GroupID `json:"id"` -} - -func NewEventGroupPaused(id GroupID) EventGroupPaused { - return EventGroupPaused{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionGroupPaused, - }, - ID: id, - } -} - -// ToSDKEvent produces the SDK notification for Event -func (ev EventGroupPaused) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionGroupPaused), - }, GroupIDEVAttributes(ev.ID)...)..., - ) -} - -// EventGroupStarted provides SDK event to signal group termination -type EventGroupStarted struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID GroupID `json:"id"` -} - -func NewEventGroupStarted(id GroupID) EventGroupStarted { - return EventGroupStarted{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionGroupStarted, - }, - ID: id, - } -} - -// ToSDKEvent produces the SDK notification for Event -func (ev EventGroupStarted) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionGroupStarted), - }, GroupIDEVAttributes(ev.ID)...)..., - ) -} - -// GroupIDEVAttributes returns event attribues for given GroupID -func GroupIDEVAttributes(id GroupID) []sdk.Attribute { - return append(DeploymentIDEVAttributes(id.DeploymentID()), - sdk.NewAttribute(evGSeqKey, strconv.FormatUint(uint64(id.GSeq), 10))) -} - -// ParseEVGroupID returns GroupID details for given event attributes -func ParseEVGroupID(attrs []sdk.Attribute) (GroupID, error) { - did, err := ParseEVDeploymentID(attrs) - if err != nil { - return GroupID{}, err - } - - gseq, err := sdkutil.GetUint64(attrs, evGSeqKey) - if err != nil { - return GroupID{}, err - } - - return GroupID{ - Owner: did.Owner, - DSeq: did.DSeq, - GSeq: uint32(gseq), - }, nil -} - -// ParseEvent parses event and returns details of event and error if occurred -func ParseEvent(ev sdkutil.Event) (sdkutil.ModuleEvent, error) { - if ev.Type != sdkutil.EventTypeMessage { - return nil, sdkutil.ErrUnknownType - } - if ev.Module != ModuleName { - return nil, sdkutil.ErrUnknownModule - } - switch ev.Action { - case evActionDeploymentCreated: - did, err := ParseEVDeploymentID(ev.Attributes) - if err != nil 
{ - return nil, err - } - ver, err := ParseEVDeploymentVersion(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventDeploymentCreated(did, ver), nil - case evActionDeploymentUpdated: - did, err := ParseEVDeploymentID(ev.Attributes) - if err != nil { - return nil, err - } - ver, err := ParseEVDeploymentVersion(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventDeploymentUpdated(did, ver), nil - case evActionDeploymentClosed: - did, err := ParseEVDeploymentID(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventDeploymentClosed(did), nil - case evActionGroupClosed: - gid, err := ParseEVGroupID(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventGroupClosed(gid), nil - default: - return nil, sdkutil.ErrUnknownAction - } -} diff --git a/go/node/deployment/v1beta3/events_test.go b/go/node/deployment/v1beta3/events_test.go deleted file mode 100644 index cafd3f39..00000000 --- a/go/node/deployment/v1beta3/events_test.go +++ /dev/null @@ -1,335 +0,0 @@ -package v1beta3 - -import ( - "crypto/sha256" - "strconv" - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/akash-network/akash-api/go/sdkutil" -) - -var ( - keyAcc, _ = sdk.AccAddressFromBech32("akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr") - errWildcard = errors.New("wildcard string error can't be matched") - tmpSum = sha256.Sum256([]byte(keyAcc)) - deploymentVersion = encodeHex(tmpSum[:]) -) - -type testEventParsing struct { - msg sdkutil.Event - expErr error -} - -func (tep testEventParsing) testMessageType() func(t *testing.T) { - _, err := ParseEvent(tep.msg) - return func(t *testing.T) { - // if the error expected is errWildcard to catch untyped errors, don't fail the test, the error was expected. 
- if errors.Is(tep.expErr, errWildcard) { - require.Error(t, err) - } else { - require.Equal(t, tep.expErr, err) - } - } -} - -var TEPS = []testEventParsing{ - { - msg: sdkutil.Event{ - Type: "nil", - }, - expErr: sdkutil.ErrUnknownType, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - }, - expErr: sdkutil.ErrUnknownModule, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - }, - expErr: sdkutil.ErrUnknownAction, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: "nil", - }, - expErr: sdkutil.ErrUnknownModule, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: "nil", - }, - expErr: sdkutil.ErrUnknownAction, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evVersionKey, - Value: string(deploymentVersion), - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "abc", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "5", - }, - }, - }, - expErr: errWildcard, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentUpdated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evVersionKey, - Value: string(deploymentVersion), - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentUpdated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "5", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionGroupClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "1", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "5", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "abc", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionGroupClosed, - Attributes: []sdk.Attribute{ - { - Key: 
evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evDSeqKey, - Value: "5", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionGroupClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - { - Key: evGSeqKey, - Value: "1", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentUpdated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "neh", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evVersionKey, - Value: string(deploymentVersion), - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionDeploymentUpdated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: keyAcc.String(), - }, - }, - }, - expErr: errWildcard, - }, -} - -func TestEventParsing(t *testing.T) { - for i, test := range TEPS { - t.Run(strconv.Itoa(i), test.testMessageType()) - } -} - -func TestVersionEncoding(t *testing.T) { - versionHex := encodeHex(tmpSum[:]) - assert.Len(t, versionHex, encodedVersionHexLen) - decodedVersion, err := decodeHex(versionHex) - assert.NoError(t, err) - assert.Equal(t, tmpSum[:], decodedVersion) -} diff --git a/go/node/deployment/v1beta3/genesis.pb.go b/go/node/deployment/v1beta3/genesis.pb.go deleted file mode 100644 index e035c7f0..00000000 --- a/go/node/deployment/v1beta3/genesis.pb.go +++ /dev/null @@ -1,630 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta3/genesis.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisDeployment defines the basic genesis state used by deployment module -type GenesisDeployment struct { - Deployment Deployment `protobuf:"bytes,1,opt,name=deployment,proto3" json:"deployment" yaml:"deployment"` - Groups []Group `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups" yaml:"groups"` -} - -func (m *GenesisDeployment) Reset() { *m = GenesisDeployment{} } -func (m *GenesisDeployment) String() string { return proto.CompactTextString(m) } -func (*GenesisDeployment) ProtoMessage() {} -func (*GenesisDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_57b3dff44af2837c, []int{0} -} -func (m *GenesisDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisDeployment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisDeployment.Merge(m, src) -} -func (m *GenesisDeployment) XXX_Size() int { - return m.Size() -} -func (m *GenesisDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisDeployment proto.InternalMessageInfo - -func (m *GenesisDeployment) GetDeployment() Deployment { - if m != nil { - return m.Deployment - } - return Deployment{} -} - -func (m *GenesisDeployment) GetGroups() []Group { - if m != nil { - return m.Groups - } - return nil -} - -// GenesisState stores slice of genesis deployment instance -type GenesisState struct { - Deployments []GenesisDeployment `protobuf:"bytes,1,rep,name=deployments,proto3" json:"deployments" yaml:"deployments"` - Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params" yaml:"params"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_57b3dff44af2837c, []int{1} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetDeployments() []GenesisDeployment { - if m != nil { - return m.Deployments - } - return nil -} - -func (m *GenesisState) GetParams() Params { - if m != nil { - return m.Params - } - return Params{} -} - -func init() { - proto.RegisterType((*GenesisDeployment)(nil), "akash.deployment.v1beta3.GenesisDeployment") - proto.RegisterType((*GenesisState)(nil), "akash.deployment.v1beta3.GenesisState") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta3/genesis.proto", fileDescriptor_57b3dff44af2837c) -} - -var 
fileDescriptor_57b3dff44af2837c = []byte{ - // 357 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xbf, 0x4e, 0xc3, 0x30, - 0x10, 0xc6, 0xe3, 0x22, 0x75, 0x70, 0x61, 0x68, 0xc4, 0x10, 0x75, 0x88, 0x2b, 0xab, 0x40, 0x2b, - 0x44, 0x2c, 0xda, 0x0d, 0x89, 0x25, 0x42, 0xea, 0x8a, 0xc2, 0xc0, 0x9f, 0xcd, 0xa5, 0x56, 0x5a, - 0xb5, 0x89, 0xa3, 0xc4, 0x05, 0xfa, 0x16, 0x3c, 0x56, 0xc7, 0x8e, 0x0c, 0x28, 0x42, 0xcd, 0xc6, - 0xd8, 0x27, 0x40, 0xb5, 0x2d, 0x1c, 0x55, 0x84, 0x2d, 0x17, 0xff, 0xbe, 0xfb, 0xee, 0x3b, 0x1d, - 0x3c, 0xa5, 0x33, 0x9a, 0x4d, 0xc8, 0x98, 0x25, 0x73, 0xbe, 0x8c, 0x58, 0x2c, 0xc8, 0xcb, 0xe5, - 0x88, 0x09, 0x3a, 0x20, 0x21, 0x8b, 0x59, 0x36, 0xcd, 0xbc, 0x24, 0xe5, 0x82, 0xdb, 0x8e, 0xe4, - 0x3c, 0xc3, 0x79, 0x9a, 0x6b, 0x1d, 0x87, 0x3c, 0xe4, 0x12, 0x22, 0xbb, 0x2f, 0xc5, 0xb7, 0x7a, - 0x95, 0x7d, 0x4b, 0x2d, 0x14, 0xda, 0xa9, 0x1e, 0x21, 0xe5, 0x8b, 0x44, 0x53, 0x27, 0x95, 0x54, - 0x42, 0x53, 0x1a, 0xe9, 0x39, 0xf1, 0x27, 0x80, 0xcd, 0xa1, 0x9a, 0xfc, 0xe6, 0x17, 0xb5, 0x23, - 0x08, 0x8d, 0xd0, 0x01, 0x6d, 0xd0, 0x6d, 0xf4, 0x3b, 0x5e, 0x55, 0x24, 0xcf, 0x28, 0xfd, 0xb3, - 0x55, 0x8e, 0xac, 0xef, 0x1c, 0x95, 0xf4, 0xdb, 0x1c, 0x35, 0x97, 0x34, 0x9a, 0x5f, 0x61, 0xf3, - 0x0f, 0x07, 0x25, 0xc0, 0x7e, 0x80, 0x75, 0x39, 0x7a, 0xe6, 0xd4, 0xda, 0x07, 0xdd, 0x46, 0x1f, - 0x55, 0x5b, 0x0d, 0x77, 0x9c, 0x8f, 0xb4, 0x8b, 0x96, 0x6d, 0x73, 0x74, 0xa4, 0x1c, 0x54, 0x8d, - 0x03, 0xfd, 0x80, 0x0b, 0x00, 0x0f, 0x75, 0xbc, 0x3b, 0x41, 0x05, 0xb3, 0xdf, 0x60, 0xc3, 0x74, - 0xcd, 0x1c, 0x20, 0xfd, 0xce, 0xff, 0xf1, 0xdb, 0xdf, 0x8d, 0xdf, 0xd3, 0xde, 0xe5, 0x3e, 0xdb, - 0x1c, 0xd9, 0xfb, 0x11, 0x33, 0x1c, 0x94, 0x11, 0xfb, 0x11, 0xd6, 0xd5, 0xe6, 0x9d, 0x9a, 0xdc, - 0x67, 0xbb, 0xda, 0xf4, 0x56, 0x72, 0x26, 0xa5, 0xd2, 0x99, 0x94, 0xaa, 0xc6, 0x81, 0x7e, 0xf0, - 0xef, 0x57, 0x1b, 0x17, 0xac, 0x37, 0x2e, 0xf8, 0xda, 0xb8, 0xe0, 0xbd, 0x70, 0xad, 0x75, 0xe1, - 0x5a, 0x1f, 0x85, 0x6b, 0x3d, 0x5d, 0x87, 0x53, 0x31, 0x59, 0x8c, 0xbc, 0x67, 0x1e, 0x11, 0x69, - 0x77, 0x11, 0x33, 0xf1, 0xca, 0xd3, 0x99, 0xae, 0x68, 0x32, 0x25, 0x21, 0x27, 0x31, 0x1f, 0xb3, - 0x3f, 0x4e, 0x65, 0x54, 0x97, 0x47, 0x32, 0xf8, 0x09, 0x00, 0x00, 0xff, 0xff, 0x50, 0x26, 0x51, - 0x3c, 0xf6, 0x02, 0x00, 0x00, -} - -func (m *GenesisDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Deployment.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, 
error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Deployments) > 0 { - for iNdEx := len(m.Deployments) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Deployments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisDeployment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Deployment.Size() - n += 1 + l + sovGenesis(uint64(l)) - if len(m.Groups) > 0 { - for _, e := range m.Groups { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - return n -} - -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Deployments) > 0 { - for _, e := range m.Deployments { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - l = m.Params.Size() - n += 1 + l + sovGenesis(uint64(l)) - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deployment", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Deployment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 
{ - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, Group{}) - if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deployments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Deployments = append(m.Deployments, GenesisDeployment{}) - if err := m.Deployments[len(m.Deployments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta3/group.pb.go b/go/node/deployment/v1beta3/group.pb.go deleted file mode 100644 index 85f657b1..00000000 --- a/go/node/deployment/v1beta3/group.pb.go +++ /dev/null @@ -1,505 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta3/group.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of group -type Group_State int32 - -const ( - // Prefix should start with 0 in enum. 
So declaring dummy state - GroupStateInvalid Group_State = 0 - // GroupOpen denotes state for group open - GroupOpen Group_State = 1 - // GroupOrdered denotes state for group ordered - GroupPaused Group_State = 2 - // GroupInsufficientFunds denotes state for group insufficient_funds - GroupInsufficientFunds Group_State = 3 - // GroupClosed denotes state for group closed - GroupClosed Group_State = 4 -) - -var Group_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "paused", - 3: "insufficient_funds", - 4: "closed", -} - -var Group_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "paused": 2, - "insufficient_funds": 3, - "closed": 4, -} - -func (x Group_State) String() string { - return proto.EnumName(Group_State_name, int32(x)) -} - -func (Group_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_6dcebaf6050ef7f1, []int{0, 0} -} - -// Group stores group id, state and specifications of group -type Group struct { - GroupID GroupID `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"id" yaml:"id"` - State Group_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.deployment.v1beta3.Group_State" json:"state" yaml:"state"` - GroupSpec GroupSpec `protobuf:"bytes,3,opt,name=group_spec,json=groupSpec,proto3" json:"spec" yaml:"spec"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` -} - -func (m *Group) Reset() { *m = Group{} } -func (m *Group) String() string { return proto.CompactTextString(m) } -func (*Group) ProtoMessage() {} -func (*Group) Descriptor() ([]byte, []int) { - return fileDescriptor_6dcebaf6050ef7f1, []int{0} -} -func (m *Group) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Group.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Group) XXX_Merge(src proto.Message) { - xxx_messageInfo_Group.Merge(m, src) -} -func (m *Group) XXX_Size() int { - return m.Size() -} -func (m *Group) XXX_DiscardUnknown() { - xxx_messageInfo_Group.DiscardUnknown(m) -} - -var xxx_messageInfo_Group proto.InternalMessageInfo - -func (m *Group) GetGroupID() GroupID { - if m != nil { - return m.GroupID - } - return GroupID{} -} - -func (m *Group) GetState() Group_State { - if m != nil { - return m.State - } - return GroupStateInvalid -} - -func (m *Group) GetGroupSpec() GroupSpec { - if m != nil { - return m.GroupSpec - } - return GroupSpec{} -} - -func (m *Group) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -func init() { - proto.RegisterEnum("akash.deployment.v1beta3.Group_State", Group_State_name, Group_State_value) - proto.RegisterType((*Group)(nil), "akash.deployment.v1beta3.Group") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta3/group.proto", fileDescriptor_6dcebaf6050ef7f1) -} - -var fileDescriptor_6dcebaf6050ef7f1 = []byte{ - // 487 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x3d, 0x8f, 0xd3, 0x30, - 0x1c, 0xc6, 0x93, 0x6b, 0x7a, 0xa5, 0x2e, 0x2f, 0xc5, 0xe2, 0x25, 0xe4, 0x44, 0x12, 0xc2, 0x8b, - 0xba, 0x90, 0x88, 0xde, 0x76, 0x12, 0x03, 0x05, 0x81, 0x3a, 0x81, 0x7a, 0x12, 0x48, 0x2c, 0xc5, - 0x8d, 0xdd, 0x9c, 0x75, 0xad, 0x6d, 0x35, 0xce, 0xa1, 0x5b, 0x99, 0x50, 0x27, 0xbe, 0x40, 0x25, - 0x24, 0xbe, 0x04, 
0x1f, 0xe1, 0xc6, 0x1b, 0x99, 0x22, 0xd4, 0x2e, 0xa8, 0x63, 0x3f, 0x01, 0xb2, - 0x1d, 0xc4, 0x0d, 0x70, 0xb7, 0xc5, 0xcf, 0xf3, 0xfb, 0x3f, 0x79, 0x6c, 0xfd, 0xc1, 0x03, 0x74, - 0x88, 0xf2, 0x83, 0x04, 0x13, 0x31, 0xe1, 0xc7, 0x53, 0xc2, 0x64, 0x72, 0xf4, 0x64, 0x44, 0x24, - 0xda, 0x4d, 0xb2, 0x19, 0x2f, 0x44, 0x2c, 0x66, 0x5c, 0x72, 0xe8, 0x6a, 0x2a, 0xfe, 0x4b, 0xc5, - 0x15, 0xe5, 0xdd, 0xc8, 0x78, 0xc6, 0x35, 0x94, 0xa8, 0x2f, 0xc3, 0x7b, 0x8f, 0xce, 0x4f, 0xa5, - 0xb8, 0xe2, 0x3a, 0xe7, 0x73, 0xb9, 0x20, 0xa9, 0x21, 0xa3, 0x4f, 0x0e, 0xa8, 0xbf, 0x52, 0x1a, - 0xfc, 0x00, 0x2e, 0x69, 0x73, 0x48, 0xb1, 0x6b, 0x87, 0x76, 0xa7, 0xd5, 0xbd, 0x17, 0xff, 0xaf, - 0x5e, 0xac, 0x47, 0xfa, 0x2f, 0x7a, 0xd1, 0x49, 0x19, 0x58, 0xcb, 0x32, 0x68, 0x54, 0xc2, 0xba, - 0x0c, 0xb6, 0x28, 0xde, 0x94, 0x41, 0xf3, 0x18, 0x4d, 0x27, 0x7b, 0x11, 0xc5, 0xd1, 0xa0, 0xa1, - 0x63, 0xfb, 0x18, 0xbe, 0x05, 0xf5, 0x5c, 0x22, 0x49, 0xdc, 0xad, 0xd0, 0xee, 0x5c, 0xed, 0x3e, - 0xbc, 0x20, 0x3e, 0xde, 0x57, 0x70, 0xef, 0xce, 0xba, 0x0c, 0xcc, 0xdc, 0xa6, 0x0c, 0x2e, 0x9b, - 0x58, 0x7d, 0x8c, 0x06, 0x46, 0x86, 0x43, 0x00, 0x4c, 0x73, 0x75, 0x2f, 0xb7, 0xa6, 0xbb, 0xdf, - 0xbf, 0x20, 0x7c, 0x5f, 0x90, 0xb4, 0xb7, 0xa3, 0xda, 0xaf, 0xcb, 0xc0, 0x51, 0x83, 0x9b, 0x32, - 0x68, 0x55, 0xe9, 0x82, 0xa4, 0xd1, 0xa0, 0x99, 0xfd, 0xe1, 0xe0, 0x5d, 0x00, 0xd2, 0x19, 0x41, - 0x92, 0xe0, 0x21, 0x92, 0xae, 0x13, 0xda, 0x9d, 0xda, 0xa0, 0x59, 0x29, 0xcf, 0x64, 0xf4, 0xdd, - 0x06, 0x75, 0xdd, 0x15, 0x46, 0xa0, 0x41, 0xd9, 0x11, 0x9a, 0x50, 0xdc, 0xb6, 0xbc, 0x9b, 0xf3, - 0x45, 0x78, 0xdd, 0xfc, 0x4c, 0x99, 0x7d, 0x63, 0xc0, 0xdb, 0xc0, 0xe1, 0x82, 0xb0, 0xb6, 0xed, - 0x5d, 0x99, 0x2f, 0xc2, 0xa6, 0x06, 0x5e, 0x0b, 0xc2, 0xe0, 0x0e, 0xd8, 0x16, 0xa8, 0xc8, 0x09, - 0x6e, 0x6f, 0x79, 0xd7, 0xe6, 0x8b, 0xb0, 0xa5, 0xad, 0x37, 0x5a, 0x82, 0x5d, 0x00, 0x29, 0xcb, - 0x8b, 0xf1, 0x98, 0xa6, 0x94, 0x30, 0x39, 0x1c, 0x17, 0x0c, 0xe7, 0xed, 0x9a, 0xe7, 0xcd, 0x17, - 0xe1, 0x2d, 0xf3, 0xf8, 0x67, 0xec, 0x97, 0xca, 0x55, 0x81, 0xe9, 0x84, 0xab, 0x40, 0xe7, 0x4c, - 0xe0, 0x73, 0x2d, 0x79, 0xce, 0xe7, 0x6f, 0xbe, 0xb5, 0xe7, 0xfc, 0xfa, 0x1a, 0x58, 0xbd, 0x77, - 0x27, 0x4b, 0xdf, 0x3e, 0x5d, 0xfa, 0xf6, 0xcf, 0xa5, 0x6f, 0x7f, 0x59, 0xf9, 0xd6, 0xe9, 0xca, - 0xb7, 0x7e, 0xac, 0x7c, 0xeb, 0xfd, 0xd3, 0x8c, 0xca, 0x83, 0x62, 0x14, 0xa7, 0x7c, 0x9a, 0xe8, - 0x07, 0x7d, 0xcc, 0x88, 0xfc, 0xc8, 0x67, 0x87, 0xd5, 0x09, 0x09, 0x9a, 0x64, 0x3c, 0x61, 0x1c, - 0x93, 0x7f, 0x6c, 0xdb, 0x68, 0x5b, 0x2f, 0xd9, 0xee, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa9, - 0xde, 0x99, 0xe7, 0x0e, 0x03, 0x00, 0x00, -} - -func (m *Group) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Group) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Group) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CreatedAt != 0 { - i = encodeVarintGroup(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - { - size, err := m.GroupSpec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroup(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.State != 0 { - i = encodeVarintGroup(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.GroupID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGroup(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGroup(dAtA []byte, offset int, v uint64) int { - offset -= sovGroup(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Group) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.GroupID.Size() - n += 1 + l + sovGroup(uint64(l)) - if m.State != 0 { - n += 1 + sovGroup(uint64(m.State)) - } - l = m.GroupSpec.Size() - n += 1 + l + sovGroup(uint64(l)) - if m.CreatedAt != 0 { - n += 1 + sovGroup(uint64(m.CreatedAt)) - } - return n -} - -func sovGroup(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGroup(x uint64) (n int) { - return sovGroup(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Group) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Group: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Group: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.GroupID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Group_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupSpec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroup - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroup - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.GroupSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroup - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGroup(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroup - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGroup(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroup - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroup - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroup - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGroup - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGroup - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGroup - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGroup = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGroup = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGroup = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta3/group_validation.go b/go/node/deployment/v1beta3/group_validation.go deleted file mode 100644 index 370dbd2e..00000000 --- a/go/node/deployment/v1beta3/group_validation.go +++ /dev/null @@ -1,35 +0,0 @@ -package v1beta3 - -import ( - "fmt" -) - -// ValidateDeploymentGroups does validation for all deployment groups -func ValidateDeploymentGroups(gspecs []GroupSpec) error { - if len(gspecs) == 0 { - return ErrInvalidGroups - } - - names := make(map[string]int, len(gspecs)) // Used as set - denom := "" - for idx, group := range gspecs { - // all must be same denomination - if idx == 0 { - denom = group.Price().Denom - } else if group.Price().Denom != denom { - return fmt.Errorf("inconsistent denomination: %v != %v", denom, group.Price().Denom) - } - - if err := group.ValidateBasic(); err != nil { - return err - } - - if _, exists := names[group.GetName()]; exists { - return fmt.Errorf("duplicate deployment group name %q", group.GetName()) - } - - names[group.GetName()] = 0 // Value stored does not matter - } - - return nil -} diff --git a/go/node/deployment/v1beta3/groupid.pb.go b/go/node/deployment/v1beta3/groupid.pb.go deleted file mode 100644 index bf83a2c1..00000000 --- a/go/node/deployment/v1beta3/groupid.pb.go +++ /dev/null @@ -1,395 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: akash/deployment/v1beta3/groupid.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GroupID stores owner, deployment sequence number and group sequence number -type GroupID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` -} - -func (m *GroupID) Reset() { *m = GroupID{} } -func (*GroupID) ProtoMessage() {} -func (*GroupID) Descriptor() ([]byte, []int) { - return fileDescriptor_9f2340787793cb10, []int{0} -} -func (m *GroupID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GroupID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GroupID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GroupID) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupID.Merge(m, src) -} -func (m *GroupID) XXX_Size() int { - return m.Size() -} -func (m *GroupID) XXX_DiscardUnknown() { - xxx_messageInfo_GroupID.DiscardUnknown(m) -} - -var xxx_messageInfo_GroupID proto.InternalMessageInfo - -func (m *GroupID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *GroupID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *GroupID) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func init() { - proto.RegisterType((*GroupID)(nil), "akash.deployment.v1beta3.GroupID") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta3/groupid.proto", fileDescriptor_9f2340787793cb10) -} - -var fileDescriptor_9f2340787793cb10 = []byte{ - // 281 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4b, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, - 0x4a, 0x2d, 0x49, 0x34, 0xd6, 0x4f, 0x2f, 0xca, 0x2f, 0x2d, 0xc8, 0x4c, 0xd1, 0x2b, 0x28, 0xca, - 0x2f, 0xc9, 0x17, 0x92, 0x00, 0xab, 0xd3, 0x43, 0xa8, 0xd3, 0x83, 0xaa, 0x93, 0x12, 0x49, 0xcf, - 0x4f, 0xcf, 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 0x95, 0xd6, 0x31, 0x72, 0xb1, 0xbb, 0x83, - 0x4c, 0xf0, 0x74, 0x11, 0xd2, 0xe7, 0x62, 0xcd, 0x2f, 0xcf, 0x4b, 0x2d, 0x92, 0x60, 0x54, 0x60, - 0xd4, 0xe0, 0x74, 0x92, 0x7c, 0x75, 0x4f, 0x1e, 0x22, 0xf0, 0xe9, 0x9e, 0x3c, 0x4f, 0x65, 0x62, - 0x6e, 0x8e, 0x95, 0x12, 0x98, 0xab, 0x14, 0x04, 0x11, 0x16, 0x32, 0xe6, 0x62, 0x49, 0x29, 0x4e, - 0x2d, 0x94, 0x60, 0x52, 0x60, 0xd4, 0x60, 0x71, 0x92, 0x7f, 0x74, 0x4f, 0x9e, 0xc5, 0x25, 0x38, - 0xb5, 0xf0, 0xd5, 0x3d, 0x79, 0xb0, 0xf8, 0xa7, 0x7b, 0xf2, 0xdc, 0x10, 0x6d, 0x20, 0x9e, 0x52, - 0x10, 0x58, 0x10, 0xa4, 0x29, 0x1d, 0xa4, 0x89, 0x59, 0x81, 0x51, 0x83, 0x17, 0xa2, 0xc9, 
0x1d, - 0xaa, 0x29, 0x1d, 0x45, 0x53, 0x3a, 0x44, 0x13, 0x88, 0xb2, 0xe2, 0x98, 0xb1, 0x40, 0x9e, 0xe1, - 0xc5, 0x02, 0x79, 0x06, 0xa7, 0xf0, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, - 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, - 0xb2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x07, 0x87, 0x82, 0x6e, - 0x5e, 0x6a, 0x49, 0x79, 0x7e, 0x51, 0x36, 0x94, 0x97, 0x58, 0x90, 0xa9, 0x9f, 0x9e, 0xaf, 0x9f, - 0x97, 0x9f, 0x92, 0x8a, 0x25, 0x1c, 0x93, 0xd8, 0xc0, 0x01, 0x62, 0x0c, 0x08, 0x00, 0x00, 0xff, - 0xff, 0xf0, 0x6f, 0xad, 0xb9, 0x6a, 0x01, 0x00, 0x00, -} - -func (m *GroupID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GroupID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.GSeq != 0 { - i = encodeVarintGroupid(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintGroupid(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintGroupid(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintGroupid(dAtA []byte, offset int, v uint64) int { - offset -= sovGroupid(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GroupID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovGroupid(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovGroupid(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovGroupid(uint64(m.GSeq)) - } - return n -} - -func sovGroupid(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGroupid(x uint64) (n int) { - return sovGroupid(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GroupID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGroupid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGroupid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex 
- case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGroupid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroupid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGroupid(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupid - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupid - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupid - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGroupid - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGroupid - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGroupid - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGroupid = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGroupid = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGroupid = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta3/groupmsg.pb.go b/go/node/deployment/v1beta3/groupmsg.pb.go deleted file mode 100644 index cc238f88..00000000 --- a/go/node/deployment/v1beta3/groupmsg.pb.go +++ /dev/null @@ -1,1034 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta3/groupmsg.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// MsgCloseGroup defines SDK message to close a single Group within a Deployment. -type MsgCloseGroup struct { - ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCloseGroup) Reset() { *m = MsgCloseGroup{} } -func (m *MsgCloseGroup) String() string { return proto.CompactTextString(m) } -func (*MsgCloseGroup) ProtoMessage() {} -func (*MsgCloseGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_5d408bb350bcd886, []int{0} -} -func (m *MsgCloseGroup) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseGroup.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseGroup.Merge(m, src) -} -func (m *MsgCloseGroup) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseGroup) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseGroup proto.InternalMessageInfo - -func (m *MsgCloseGroup) GetID() GroupID { - if m != nil { - return m.ID - } - return GroupID{} -} - -// MsgCloseGroupResponse defines the Msg/CloseGroup response type. -type MsgCloseGroupResponse struct { -} - -func (m *MsgCloseGroupResponse) Reset() { *m = MsgCloseGroupResponse{} } -func (m *MsgCloseGroupResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCloseGroupResponse) ProtoMessage() {} -func (*MsgCloseGroupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5d408bb350bcd886, []int{1} -} -func (m *MsgCloseGroupResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseGroupResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseGroupResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseGroupResponse.Merge(m, src) -} -func (m *MsgCloseGroupResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseGroupResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseGroupResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseGroupResponse proto.InternalMessageInfo - -// MsgPauseGroup defines SDK message to close a single Group within a Deployment. 
-type MsgPauseGroup struct { - ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` -} - -func (m *MsgPauseGroup) Reset() { *m = MsgPauseGroup{} } -func (m *MsgPauseGroup) String() string { return proto.CompactTextString(m) } -func (*MsgPauseGroup) ProtoMessage() {} -func (*MsgPauseGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_5d408bb350bcd886, []int{2} -} -func (m *MsgPauseGroup) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgPauseGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgPauseGroup.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgPauseGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgPauseGroup.Merge(m, src) -} -func (m *MsgPauseGroup) XXX_Size() int { - return m.Size() -} -func (m *MsgPauseGroup) XXX_DiscardUnknown() { - xxx_messageInfo_MsgPauseGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgPauseGroup proto.InternalMessageInfo - -func (m *MsgPauseGroup) GetID() GroupID { - if m != nil { - return m.ID - } - return GroupID{} -} - -// MsgPauseGroupResponse defines the Msg/PauseGroup response type. -type MsgPauseGroupResponse struct { -} - -func (m *MsgPauseGroupResponse) Reset() { *m = MsgPauseGroupResponse{} } -func (m *MsgPauseGroupResponse) String() string { return proto.CompactTextString(m) } -func (*MsgPauseGroupResponse) ProtoMessage() {} -func (*MsgPauseGroupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5d408bb350bcd886, []int{3} -} -func (m *MsgPauseGroupResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgPauseGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgPauseGroupResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgPauseGroupResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgPauseGroupResponse.Merge(m, src) -} -func (m *MsgPauseGroupResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgPauseGroupResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgPauseGroupResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgPauseGroupResponse proto.InternalMessageInfo - -// MsgStartGroup defines SDK message to close a single Group within a Deployment. 
-type MsgStartGroup struct { - ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` -} - -func (m *MsgStartGroup) Reset() { *m = MsgStartGroup{} } -func (m *MsgStartGroup) String() string { return proto.CompactTextString(m) } -func (*MsgStartGroup) ProtoMessage() {} -func (*MsgStartGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_5d408bb350bcd886, []int{4} -} -func (m *MsgStartGroup) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgStartGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgStartGroup.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgStartGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgStartGroup.Merge(m, src) -} -func (m *MsgStartGroup) XXX_Size() int { - return m.Size() -} -func (m *MsgStartGroup) XXX_DiscardUnknown() { - xxx_messageInfo_MsgStartGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgStartGroup proto.InternalMessageInfo - -func (m *MsgStartGroup) GetID() GroupID { - if m != nil { - return m.ID - } - return GroupID{} -} - -// MsgStartGroupResponse defines the Msg/StartGroup response type. -type MsgStartGroupResponse struct { -} - -func (m *MsgStartGroupResponse) Reset() { *m = MsgStartGroupResponse{} } -func (m *MsgStartGroupResponse) String() string { return proto.CompactTextString(m) } -func (*MsgStartGroupResponse) ProtoMessage() {} -func (*MsgStartGroupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_5d408bb350bcd886, []int{5} -} -func (m *MsgStartGroupResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgStartGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgStartGroupResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgStartGroupResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgStartGroupResponse.Merge(m, src) -} -func (m *MsgStartGroupResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgStartGroupResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgStartGroupResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgStartGroupResponse proto.InternalMessageInfo - -func init() { - proto.RegisterType((*MsgCloseGroup)(nil), "akash.deployment.v1beta3.MsgCloseGroup") - proto.RegisterType((*MsgCloseGroupResponse)(nil), "akash.deployment.v1beta3.MsgCloseGroupResponse") - proto.RegisterType((*MsgPauseGroup)(nil), "akash.deployment.v1beta3.MsgPauseGroup") - proto.RegisterType((*MsgPauseGroupResponse)(nil), "akash.deployment.v1beta3.MsgPauseGroupResponse") - proto.RegisterType((*MsgStartGroup)(nil), "akash.deployment.v1beta3.MsgStartGroup") - proto.RegisterType((*MsgStartGroupResponse)(nil), "akash.deployment.v1beta3.MsgStartGroupResponse") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta3/groupmsg.proto", fileDescriptor_5d408bb350bcd886) -} - -var fileDescriptor_5d408bb350bcd886 = []byte{ - // 286 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4f, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, - 0x4a, 0x2d, 0x49, 0x34, 0xd6, 0x4f, 0x2f, 0xca, 0x2f, 0x2d, 0xc8, 0x2d, 0x4e, 0xd7, 0x2b, 0x28, - 0xca, 
0x2f, 0xc9, 0x17, 0x92, 0x00, 0x2b, 0xd4, 0x43, 0x28, 0xd4, 0x83, 0x2a, 0x94, 0x12, 0x49, - 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 0xa5, 0xd4, 0xf0, 0x1b, 0x9c, 0x99, - 0x02, 0x51, 0xa7, 0x94, 0xce, 0xc5, 0xeb, 0x5b, 0x9c, 0xee, 0x9c, 0x93, 0x5f, 0x9c, 0xea, 0x0e, - 0x92, 0x10, 0x0a, 0xe0, 0x62, 0xca, 0x4c, 0x91, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x36, 0x52, 0xd4, - 0xc3, 0x65, 0xab, 0x1e, 0x58, 0xb1, 0xa7, 0x8b, 0x93, 0xec, 0x89, 0x7b, 0xf2, 0x0c, 0x8f, 0xee, - 0xc9, 0x33, 0x79, 0xba, 0xbc, 0xba, 0x27, 0xcf, 0x94, 0x99, 0xf2, 0xe9, 0x9e, 0x3c, 0x67, 0x65, - 0x62, 0x6e, 0x8e, 0x95, 0x52, 0x66, 0x8a, 0x52, 0x10, 0x53, 0x66, 0x8a, 0x15, 0xcb, 0x8b, 0x05, - 0xf2, 0x0c, 0x4a, 0xe2, 0x5c, 0xa2, 0x28, 0x16, 0x05, 0xa5, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, - 0x42, 0x5d, 0x10, 0x90, 0x58, 0x4a, 0x1f, 0x17, 0x20, 0x2c, 0x42, 0x73, 0x41, 0x70, 0x49, 0x62, - 0x51, 0x09, 0x3d, 0x5c, 0x80, 0xb0, 0x08, 0xe6, 0x02, 0xa7, 0xf0, 0x13, 0x8f, 0xe4, 0x18, 0x2f, - 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, - 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xb2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, - 0xd5, 0x07, 0x3b, 0x44, 0x37, 0x2f, 0xb5, 0xa4, 0x3c, 0xbf, 0x28, 0x1b, 0xca, 0x4b, 0x2c, 0xc8, - 0xd4, 0x4f, 0xcf, 0xd7, 0xcf, 0xcb, 0x4f, 0x49, 0xc5, 0x12, 0xd9, 0x49, 0x6c, 0xe0, 0x58, 0x36, - 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x36, 0x40, 0x58, 0x93, 0x68, 0x02, 0x00, 0x00, -} - -func (m *MsgCloseGroup) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseGroup) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroupmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCloseGroupResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseGroupResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgPauseGroup) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgPauseGroup) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgPauseGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroupmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgPauseGroupResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - 
return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgPauseGroupResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgPauseGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgStartGroup) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgStartGroup) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgStartGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroupmsg(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgStartGroupResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgStartGroupResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgStartGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintGroupmsg(dAtA []byte, offset int, v uint64) int { - offset -= sovGroupmsg(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *MsgCloseGroup) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovGroupmsg(uint64(l)) - return n -} - -func (m *MsgCloseGroupResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgPauseGroup) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovGroupmsg(uint64(l)) - return n -} - -func (m *MsgPauseGroupResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgStartGroup) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovGroupmsg(uint64(l)) - return n -} - -func (m *MsgStartGroupResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovGroupmsg(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGroupmsg(x uint64) (n int) { - return sovGroupmsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *MsgCloseGroup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseGroup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseGroup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { 
- return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroupmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroupmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGroupmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroupmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseGroupResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseGroupResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGroupmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroupmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgPauseGroup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgPauseGroup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgPauseGroup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroupmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroupmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGroupmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return 
ErrInvalidLengthGroupmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgPauseGroupResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgPauseGroupResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgPauseGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGroupmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroupmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgStartGroup) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgStartGroup: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgStartGroup: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroupmsg - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroupmsg - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGroupmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroupmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgStartGroupResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgStartGroupResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
MsgStartGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGroupmsg(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroupmsg - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGroupmsg(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupmsg - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGroupmsg - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGroupmsg - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGroupmsg - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGroupmsg = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGroupmsg = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGroupmsg = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta3/groupspec.go b/go/node/deployment/v1beta3/groupspec.go deleted file mode 100644 index 05b357e4..00000000 --- a/go/node/deployment/v1beta3/groupspec.go +++ /dev/null @@ -1,196 +0,0 @@ -package v1beta3 - -import ( - "fmt" - - sdk "github.com/cosmos/cosmos-sdk/types" - - atypes "github.com/akash-network/akash-api/go/node/audit/v1beta3" - types "github.com/akash-network/akash-api/go/node/types/v1beta3" -) - -type ResourceGroup interface { - GetName() string - GetResourceUnits() ResourceUnits -} - -var _ ResourceGroup = (*GroupSpec)(nil) - -type GroupSpecs []*GroupSpec - -func (gspecs GroupSpecs) Dup() GroupSpecs { - res := make(GroupSpecs, 0, len(gspecs)) - - for _, gspec := range gspecs { - gs := gspec.Dup() - res = append(res, &gs) - } - return res -} - -func (g GroupSpec) Dup() GroupSpec { - res := GroupSpec{ - Name: g.Name, - Requirements: g.Requirements.Dup(), - Resources: g.Resources, - } - - return res -} - -// ValidateBasic asserts non-zero values -func (g GroupSpec) ValidateBasic() error { - return g.validate() -} - -// GetResourceUnits method returns resources list in group -func (g GroupSpec) GetResourceUnits() ResourceUnits { - resources := make(ResourceUnits, 0, len(g.Resources)) - - for _, r := range g.Resources { - resources = append(resources, r) - } - - return resources -} - -// GetName method returns group name -func (g GroupSpec) GetName() string { - return g.Name -} - -// Price method returns price of group -func (g 
GroupSpec) Price() sdk.DecCoin { - var price sdk.DecCoin - for idx, resource := range g.Resources { - if idx == 0 { - price = resource.FullPrice() - continue - } - price = price.Add(resource.FullPrice()) - } - return price -} - -// MatchResourcesRequirements check if resources attributes match provider's capabilities -func (g GroupSpec) MatchResourcesRequirements(pattr types.Attributes) bool { - for _, rgroup := range g.GetResourceUnits() { - pgroup := pattr.GetCapabilitiesGroup("storage") - for _, storage := range rgroup.Storage { - if len(storage.Attributes) == 0 { - continue - } - - if !storage.Attributes.IN(pgroup) { - return false - } - } - if gpu := rgroup.GPU; gpu.Units.Val.Uint64() > 0 { - attr := gpu.Attributes - if len(attr) == 0 { - continue - } - - pgroup = pattr.GetCapabilitiesMap("gpu") - - if !gpu.Attributes.AnyIN(pgroup) { - return false - } - } - } - - return true -} - -// MatchRequirements method compares provided attributes with specific group attributes. -// Argument provider is a bit cumbersome. First element is attributes from x/provider store -// in case tenant does not need signed attributes at all -// rest of elements (if any) are attributes signed by various auditors -func (g GroupSpec) MatchRequirements(provider []atypes.Provider) bool { - if (len(g.Requirements.SignedBy.AnyOf) != 0) || (len(g.Requirements.SignedBy.AllOf) != 0) { - // we cannot match if there is no signed attributes - if len(provider) < 2 { - return false - } - - existingRequirements := make(attributesMatching) - - for _, existing := range provider[1:] { - existingRequirements[existing.Auditor] = existing.Attributes - } - - if len(g.Requirements.SignedBy.AllOf) != 0 { - for _, validator := range g.Requirements.SignedBy.AllOf { - // if at least one signature does not exist or no match on attributes - requirements cannot match - if existingAttr, exists := existingRequirements[validator]; !exists || - !types.AttributesSubsetOf(g.Requirements.Attributes, existingAttr) { - return false - } - } - } - - if len(g.Requirements.SignedBy.AnyOf) != 0 { - for _, validator := range g.Requirements.SignedBy.AnyOf { - if existingAttr, exists := existingRequirements[validator]; exists && - types.AttributesSubsetOf(g.Requirements.Attributes, existingAttr) { - return true - } - } - - return false - } - - return true - } - - return types.AttributesSubsetOf(g.Requirements.Attributes, provider[0].Attributes) -} - -// validate does validation for provided deployment group -func (g *GroupSpec) validate() error { - if g.Name == "" { - return fmt.Errorf("empty group spec name denomination") - } - - if err := g.GetResourceUnits().Validate(); err != nil { - return err - } - - if err := g.validatePricing(); err != nil { - return err - } - - return nil -} - -func (g *GroupSpec) validatePricing() error { - var price sdk.DecCoin - - mem := sdk.NewInt(0) - - for idx, resource := range g.Resources { - if err := resource.validatePricing(); err != nil { - return fmt.Errorf("group %v: %w", g.GetName(), err) - } - - // all must be same denomination - if idx == 0 { - price = resource.FullPrice() - } else { - rprice := resource.FullPrice() - if rprice.Denom != price.Denom { - return fmt.Errorf("multi-denonimation group: (%v == %v fails)", rprice.Denom, price.Denom) - } - price = price.Add(rprice) - } - - memCount := sdk.NewInt(0) - if u := resource.Memory; u != nil { - memCount.Add(sdk.NewIntFromUint64(u.Quantity.Value())) - } - - mem = mem.Add(memCount.Mul(sdk.NewIntFromUint64(uint64(resource.Count)))) - } - - return nil -} diff --git 
a/go/node/deployment/v1beta3/groupspec.pb.go b/go/node/deployment/v1beta3/groupspec.pb.go deleted file mode 100644 index 5614eb05..00000000 --- a/go/node/deployment/v1beta3/groupspec.pb.go +++ /dev/null @@ -1,427 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta3/groupspec.proto - -package v1beta3 - -import ( - fmt "fmt" - v1beta3 "github.com/akash-network/akash-api/go/node/types/v1beta3" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GroupSpec stores group specifications -type GroupSpec struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` - Requirements v1beta3.PlacementRequirements `protobuf:"bytes,2,opt,name=requirements,proto3" json:"requirements" yaml:"requirements"` - Resources ResourceUnits `protobuf:"bytes,3,rep,name=resources,proto3,castrepeated=ResourceUnits" json:"resources" yaml:"resources"` -} - -func (m *GroupSpec) Reset() { *m = GroupSpec{} } -func (m *GroupSpec) String() string { return proto.CompactTextString(m) } -func (*GroupSpec) ProtoMessage() {} -func (*GroupSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_8f47a9fa0e046026, []int{0} -} -func (m *GroupSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GroupSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GroupSpec.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GroupSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupSpec.Merge(m, src) -} -func (m *GroupSpec) XXX_Size() int { - return m.Size() -} -func (m *GroupSpec) XXX_DiscardUnknown() { - xxx_messageInfo_GroupSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_GroupSpec proto.InternalMessageInfo - -func init() { - proto.RegisterType((*GroupSpec)(nil), "akash.deployment.v1beta3.GroupSpec") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta3/groupspec.proto", fileDescriptor_8f47a9fa0e046026) -} - -var fileDescriptor_8f47a9fa0e046026 = []byte{ - // 376 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x48, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, - 0x4a, 0x2d, 0x49, 0x34, 0xd6, 0x4f, 0x2f, 0xca, 0x2f, 0x2d, 0x28, 0x2e, 0x48, 0x4d, 0xd6, 0x2b, - 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x00, 0xab, 0xd4, 0x43, 0xa8, 0xd4, 0x83, 0xaa, 0x94, 0x12, - 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 0xa5, 0x94, 0x20, 0x26, 0x27, - 0x25, 0x16, 0xa7, 0xc2, 0xcd, 0x4c, 0x2c, 0x29, 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0x49, 0x85, 0xaa, - 0xd1, 0xc6, 0x69, 0x7b, 0x51, 0x6a, 0x71, 0x7e, 0x69, 0x51, 0x72, 0x6a, 0x69, 0x5e, 0x66, 0x09, - 0x44, 0xb1, 0xd2, 0x1e, 0x26, 0x2e, 0x4e, 0x77, 0x90, 0xa3, 0x82, 0x0b, 0x52, 0x93, 0x85, 0xb4, - 0xb9, 0x58, 0xf2, 0x12, 0x73, 0x53, 0x25, 
0x18, 0x15, 0x18, 0x35, 0x38, 0x9d, 0xc4, 0x5f, 0xdd, - 0x93, 0x07, 0xf3, 0x3f, 0xdd, 0x93, 0xe7, 0xae, 0x4c, 0xcc, 0xcd, 0xb1, 0x52, 0x02, 0xf1, 0x94, - 0x82, 0xc0, 0x82, 0x42, 0x55, 0x5c, 0x3c, 0x45, 0xa9, 0x85, 0xa5, 0x99, 0x45, 0xa9, 0x20, 0x4b, - 0x8a, 0x25, 0x98, 0x14, 0x18, 0x35, 0xb8, 0x8d, 0x34, 0xf5, 0x20, 0x5e, 0x02, 0x39, 0x11, 0xe6, - 0x19, 0xbd, 0x80, 0x9c, 0xc4, 0x64, 0xb0, 0xaa, 0x20, 0x24, 0x0d, 0x4e, 0xda, 0x27, 0xee, 0xc9, - 0x33, 0xbc, 0xba, 0x27, 0x8f, 0x62, 0xcc, 0xa7, 0x7b, 0xf2, 0xc2, 0x10, 0xbb, 0x90, 0x45, 0x95, - 0x82, 0x50, 0x14, 0x09, 0xd5, 0x70, 0x71, 0xc2, 0x3c, 0x53, 0x2c, 0xc1, 0xac, 0xc0, 0xac, 0xc1, - 0x6d, 0xa4, 0xa6, 0x87, 0x2b, 0x2c, 0xf5, 0x82, 0xa0, 0x4a, 0x43, 0xf3, 0x32, 0x4b, 0x9c, 0xcc, - 0xa0, 0xb6, 0x22, 0x0c, 0xf8, 0x74, 0x4f, 0x5e, 0x00, 0x66, 0x25, 0x54, 0x48, 0x69, 0xd5, 0x7d, - 0x79, 0x5e, 0x64, 0x6d, 0xc5, 0x41, 0x08, 0xf5, 0x56, 0x1c, 0x1d, 0x0b, 0xe4, 0x19, 0x5e, 0x2c, - 0x90, 0x67, 0x70, 0x0a, 0x3f, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xdb, - 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0xc3, 0x74, 0xf3, 0x52, - 0x4b, 0xca, 0xf3, 0x8b, 0xb2, 0xa1, 0xbc, 0xc4, 0x82, 0x4c, 0xfd, 0xf4, 0x7c, 0xfd, 0xbc, 0xfc, - 0x94, 0x54, 0x2c, 0x51, 0x95, 0xc4, 0x06, 0x8e, 0x1e, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x1f, 0xe1, 0xdf, 0xbc, 0x4b, 0x02, 0x00, 0x00, -} - -func (m *GroupSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GroupSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Resources) > 0 { - for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroupspec(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - { - size, err := m.Requirements.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGroupspec(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGroupspec(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintGroupspec(dAtA []byte, offset int, v uint64) int { - offset -= sovGroupspec(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GroupSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovGroupspec(uint64(l)) - } - l = m.Requirements.Size() - n += 1 + l + sovGroupspec(uint64(l)) - if len(m.Resources) > 0 { - for _, e := range m.Resources { - l = e.Size() - n += 1 + l + sovGroupspec(uint64(l)) - } - } - return n -} - -func sovGroupspec(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGroupspec(x uint64) (n int) { - return sovGroupspec(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GroupSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupspec - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupspec - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGroupspec - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGroupspec - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupspec - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroupspec - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroupspec - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Requirements.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGroupspec - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGroupspec - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGroupspec - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resources = append(m.Resources, ResourceUnit{}) - if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGroupspec(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGroupspec - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGroupspec(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupspec - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupspec - } - if 
iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGroupspec - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGroupspec - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGroupspec - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGroupspec - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGroupspec = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGroupspec = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGroupspec = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta3/id.go b/go/node/deployment/v1beta3/id.go deleted file mode 100644 index ab6b1ddb..00000000 --- a/go/node/deployment/v1beta3/id.go +++ /dev/null @@ -1,103 +0,0 @@ -package v1beta3 - -import ( - "fmt" - "strconv" - "strings" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -// Equals method compares specific deployment with provided deployment -func (id DeploymentID) Equals(other DeploymentID) bool { - return id.Owner == other.Owner && id.DSeq == other.DSeq -} - -// Validate method for DeploymentID and returns nil -func (id DeploymentID) Validate() error { - _, err := sdk.AccAddressFromBech32(id.Owner) - switch { - case err != nil: - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "DeploymentID: Invalid Owner Address") - case id.DSeq == 0: - return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "DeploymentID: Invalid Deployment Sequence") - } - return nil -} - -// String method for deployment IDs -func (id DeploymentID) String() string { - return fmt.Sprintf("%s/%d", id.Owner, id.DSeq) -} - -func (id DeploymentID) GetOwnerAddress() (sdk.Address, error) { - return sdk.AccAddressFromBech32(id.Owner) -} - -func ParseDeploymentID(val string) (DeploymentID, error) { - parts := strings.Split(val, "/") - return ParseDeploymentPath(parts) -} - -// ParseDeploymentPath returns DeploymentID details with provided queries, and return -// error if occurred due to wrong query -func ParseDeploymentPath(parts []string) (DeploymentID, error) { - if len(parts) != 2 { - return DeploymentID{}, ErrInvalidIDPath - } - - owner, err := sdk.AccAddressFromBech32(parts[0]) - if err != nil { - return DeploymentID{}, err - } - - dseq, err := strconv.ParseUint(parts[1], 10, 64) - if err != nil { - return DeploymentID{}, err - } - - return DeploymentID{ - Owner: owner.String(), - DSeq: dseq, - }, nil -} - -// MakeGroupID returns GroupID instance with provided deployment details -// and group sequence number. 
-func MakeGroupID(id DeploymentID, gseq uint32) GroupID { - return GroupID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: gseq, - } -} - -// DeploymentID method returns DeploymentID details with specific group details -func (id GroupID) DeploymentID() DeploymentID { - return DeploymentID{ - Owner: id.Owner, - DSeq: id.DSeq, - } -} - -// Equals method compares specific group with provided group -func (id GroupID) Equals(other GroupID) bool { - return id.DeploymentID().Equals(other.DeploymentID()) && id.GSeq == other.GSeq -} - -// Validate method for GroupID and returns nil -func (id GroupID) Validate() error { - if err := id.DeploymentID().Validate(); err != nil { - return sdkerrors.Wrap(err, "GroupID: Invalid DeploymentID") - } - if id.GSeq == 0 { - return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "GroupID: Invalid Group Sequence") - } - return nil -} - -// String method provides human readable representation of GroupID. -func (id GroupID) String() string { - return fmt.Sprintf("%s/%d", id.DeploymentID(), id.GSeq) -} diff --git a/go/node/deployment/v1beta3/key.go b/go/node/deployment/v1beta3/key.go deleted file mode 100644 index 400c355e..00000000 --- a/go/node/deployment/v1beta3/key.go +++ /dev/null @@ -1,20 +0,0 @@ -package v1beta3 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "deployment" - - // StoreKey is the store key string for deployment - StoreKey = ModuleName - - // RouterKey is the message route for deployment - RouterKey = ModuleName -) - -func DeploymentPrefix() []byte { - return []byte{0x01} -} - -func GroupPrefix() []byte { - return []byte{0x02} -} diff --git a/go/node/deployment/v1beta3/migrate/v1beta2.go b/go/node/deployment/v1beta3/migrate/v1beta2.go deleted file mode 100644 index 1b503689..00000000 --- a/go/node/deployment/v1beta3/migrate/v1beta2.go +++ /dev/null @@ -1,50 +0,0 @@ -package migrate - -import ( - "github.com/akash-network/akash-api/go/node/deployment/v1beta2" - "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - amigrate "github.com/akash-network/akash-api/go/node/types/v1beta3/migrate" -) - -func ResourceUnitFromV1Beta2(id uint32, from v1beta2.Resource) v1beta3.ResourceUnit { - return v1beta3.ResourceUnit{ - Resources: amigrate.ResourcesFromV1Beta2(id, from.Resources), - Count: from.Count, - Price: from.Price, - } -} - -func ResourcesUnitsFromV1Beta2(from []v1beta2.Resource) v1beta3.ResourceUnits { - res := make(v1beta3.ResourceUnits, 0, len(from)) - - for i, oval := range from { - res = append(res, ResourceUnitFromV1Beta2(uint32(i+1), oval)) - } - - return res -} - -func GroupIDFromV1Beta2(from v1beta2.GroupID) v1beta3.GroupID { - return v1beta3.GroupID{ - Owner: from.Owner, - DSeq: from.DSeq, - GSeq: from.GSeq, - } -} - -func GroupSpecFromV1Beta2(from v1beta2.GroupSpec) v1beta3.GroupSpec { - return v1beta3.GroupSpec{ - Name: from.Name, - Requirements: amigrate.PlacementRequirementsFromV1Beta2(from.Requirements), - Resources: ResourcesUnitsFromV1Beta2(from.Resources), - } -} - -func GroupFromV1Beta2(from v1beta2.Group) v1beta3.Group { - return v1beta3.Group{ - GroupID: GroupIDFromV1Beta2(from.GroupID), - State: v1beta3.Group_State(from.State), - GroupSpec: GroupSpecFromV1Beta2(from.GroupSpec), - CreatedAt: from.CreatedAt, - } -} diff --git a/go/node/deployment/v1beta3/msgs.go b/go/node/deployment/v1beta3/msgs.go deleted file mode 100644 index 23603a38..00000000 --- a/go/node/deployment/v1beta3/msgs.go +++ /dev/null @@ -1,334 +0,0 @@ -package v1beta3 - -import ( - sdkerrors 
"github.com/cosmos/cosmos-sdk/types/errors" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -const ( - MsgTypeCreateDeployment = "create-deployment" - MsgTypeDepositDeployment = "deposit-deployment" - MsgTypeUpdateDeployment = "update-deployment" - MsgTypeCloseDeployment = "close-deployment" - MsgTypeCloseGroup = "close-group" - MsgTypePauseGroup = "pause-group" - MsgTypeStartGroup = "start-group" -) - -var ( - _, _, _, _ sdk.Msg = &MsgCreateDeployment{}, &MsgUpdateDeployment{}, &MsgCloseDeployment{}, &MsgCloseGroup{} -) - -// NewMsgCreateDeployment creates a new MsgCreateDeployment instance -func NewMsgCreateDeployment(id DeploymentID, groups []GroupSpec, version []byte, - deposit sdk.Coin, depositor sdk.AccAddress) *MsgCreateDeployment { - return &MsgCreateDeployment{ - ID: id, - Groups: groups, - Version: version, - Deposit: deposit, - Depositor: depositor.String(), - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCreateDeployment) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCreateDeployment) Type() string { return MsgTypeCreateDeployment } - -// GetSignBytes encodes the message for signing -func (msg MsgCreateDeployment) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCreateDeployment) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// ValidateBasic does basic validation like check owner and groups length -func (msg MsgCreateDeployment) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - if err := msg.Deposit.Validate(); err != nil { - return err - } - if len(msg.Groups) == 0 { - return ErrInvalidGroups - } - - if len(msg.Version) == 0 { - return ErrEmptyVersion - } - - if len(msg.Version) != ManifestVersionLength { - return ErrInvalidVersion - } - - for _, gs := range msg.Groups { - err := gs.ValidateBasic() - if err != nil { - return err - } - - // deposit must be same denom as price - if !msg.Deposit.IsZero() { - if gdenom := gs.Price().Denom; gdenom != msg.Deposit.Denom { - return sdkerrors.Wrapf(ErrInvalidDeposit, "Mismatched denominations (%v != %v)", msg.Deposit.Denom, gdenom) - } - } - } - - _, err := sdk.AccAddressFromBech32(msg.Depositor) - if err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreateDeployment: Invalid Depositor Address") - } - - return nil -} - -// NewMsgDepositDeployment creates a new MsgDepositDeployment instance -func NewMsgDepositDeployment(id DeploymentID, amount sdk.Coin, depositor string) *MsgDepositDeployment { - return &MsgDepositDeployment{ - ID: id, - Amount: amount, - Depositor: depositor, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgDepositDeployment) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgDepositDeployment) Type() string { return MsgTypeDepositDeployment } - -// GetSignBytes encodes the message for signing -func (msg MsgDepositDeployment) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgDepositDeployment) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// ValidateBasic does basic validation like check owner and groups 
length -func (msg MsgDepositDeployment) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - - if msg.Amount.IsZero() { - return ErrInvalidDeposit - } - - _, err := sdk.AccAddressFromBech32(msg.Depositor) - if err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgDepositDeployment: Invalid Depositor Address") - } - - return nil -} - -// NewMsgUpdateDeployment creates a new MsgUpdateDeployment instance -func NewMsgUpdateDeployment(id DeploymentID, version []byte) *MsgUpdateDeployment { - return &MsgUpdateDeployment{ - ID: id, - Version: version, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgUpdateDeployment) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgUpdateDeployment) Type() string { return MsgTypeUpdateDeployment } - -// ValidateBasic does basic validation -func (msg MsgUpdateDeployment) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - - if len(msg.Version) == 0 { - return ErrEmptyVersion - } - - if len(msg.Version) != ManifestVersionLength { - return ErrInvalidVersion - } - - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgUpdateDeployment) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgUpdateDeployment) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgCloseDeployment creates a new MsgCloseDeployment instance -func NewMsgCloseDeployment(id DeploymentID) *MsgCloseDeployment { - return &MsgCloseDeployment{ - ID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCloseDeployment) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCloseDeployment) Type() string { return MsgTypeCloseDeployment } - -// ValidateBasic does basic validation with deployment details -func (msg MsgCloseDeployment) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgCloseDeployment) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCloseDeployment) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgCloseGroup creates a new MsgCloseGroup instance -func NewMsgCloseGroup(id GroupID) *MsgCloseGroup { - return &MsgCloseGroup{ - ID: id, - } -} - -// Route implements the sdk.Msg interface for routing -func (msg MsgCloseGroup) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface exposing message type -func (msg MsgCloseGroup) Type() string { return MsgTypeCloseGroup } - -// ValidateBasic calls underlying GroupID.Validate() check and returns result -func (msg MsgCloseGroup) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgCloseGroup) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCloseGroup) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - 
if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgPauseGroup creates a new MsgPauseGroup instance -func NewMsgPauseGroup(id GroupID) *MsgPauseGroup { - return &MsgPauseGroup{ - ID: id, - } -} - -// Route implements the sdk.Msg interface for routing -func (msg MsgPauseGroup) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface exposing message type -func (msg MsgPauseGroup) Type() string { return MsgTypePauseGroup } - -// ValidateBasic calls underlying GroupID.Validate() check and returns result -func (msg MsgPauseGroup) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgPauseGroup) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgPauseGroup) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgStartGroup creates a new MsgStartGroup instance -func NewMsgStartGroup(id GroupID) *MsgStartGroup { - return &MsgStartGroup{ - ID: id, - } -} - -// Route implements the sdk.Msg interface for routing -func (msg MsgStartGroup) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface exposing message type -func (msg MsgStartGroup) Type() string { return MsgTypeStartGroup } - -// ValidateBasic calls underlying GroupID.Validate() check and returns result -func (msg MsgStartGroup) ValidateBasic() error { - if err := msg.ID.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgStartGroup) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgStartGroup) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} diff --git a/go/node/deployment/v1beta3/msgs_test.go b/go/node/deployment/v1beta3/msgs_test.go deleted file mode 100644 index 15b606af..00000000 --- a/go/node/deployment/v1beta3/msgs_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package v1beta3_test - -import ( - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/stretchr/testify/require" - - types "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - sdktestutil "github.com/akash-network/akash-api/go/testutil" - testutil "github.com/akash-network/akash-api/go/testutil/v1beta3" -) - -type testMsg struct { - msg sdk.Msg - err error -} - -func TestVersionValidation(t *testing.T) { - tests := []testMsg{ - { - msg: &types.MsgCreateDeployment{ - ID: testutil.DeploymentID(t), - Version: testutil.DeploymentVersion(t), - Groups: []types.GroupSpec{ - testutil.GroupSpec(t), - }, - Depositor: testutil.AccAddress(t).String(), - Deposit: sdktestutil.AkashCoin(t, 0), - }, - err: nil, - }, - { - msg: &types.MsgCreateDeployment{ - ID: testutil.DeploymentID(t), - Version: []byte(""), - Groups: []types.GroupSpec{ - testutil.GroupSpec(t), - }, - Depositor: testutil.AccAddress(t).String(), - Deposit: sdktestutil.AkashCoin(t, 0), - }, - err: types.ErrEmptyVersion, - }, - { - msg: &types.MsgCreateDeployment{ - ID: testutil.DeploymentID(t), - Version: []byte("invalidversion"), - Groups: []types.GroupSpec{ - testutil.GroupSpec(t), - }, - Depositor: 
testutil.AccAddress(t).String(), - Deposit: sdktestutil.AkashCoin(t, 0), - }, - err: types.ErrInvalidVersion, - }, - { - msg: &types.MsgUpdateDeployment{ - ID: testutil.DeploymentID(t), - Version: testutil.DeploymentVersion(t), - }, - err: nil, - }, - { - msg: &types.MsgUpdateDeployment{ - ID: testutil.DeploymentID(t), - Version: []byte(""), - }, - err: types.ErrEmptyVersion, - }, - { - msg: &types.MsgUpdateDeployment{ - ID: testutil.DeploymentID(t), - Version: []byte("invalidversion"), - }, - err: types.ErrInvalidVersion, - }, - } - - for _, test := range tests { - require.Equal(t, test.err, test.msg.ValidateBasic()) - } -} diff --git a/go/node/deployment/v1beta3/params.go b/go/node/deployment/v1beta3/params.go deleted file mode 100644 index 6847892e..00000000 --- a/go/node/deployment/v1beta3/params.go +++ /dev/null @@ -1,92 +0,0 @@ -package v1beta3 - -import ( - "fmt" - "math" - - sdk "github.com/cosmos/cosmos-sdk/types" - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" - "github.com/pkg/errors" -) - -var _ paramtypes.ParamSet = (*Params)(nil) - -const ( - keyMinDeposits = "MinDeposits" -) - -func ParamKeyTable() paramtypes.KeyTable { - return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) -} - -func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { - return paramtypes.ParamSetPairs{ - paramtypes.NewParamSetPair([]byte(keyMinDeposits), &p.MinDeposits, validateMinDeposits), - } -} - -func DefaultParams() Params { - return Params{ - MinDeposits: sdk.Coins{ - sdk.NewCoin("uakt", sdk.NewInt(500000)), - }, - } -} - -func (p Params) Validate() error { - if err := validateMinDeposits(p.MinDeposits); err != nil { - return err - } - return nil -} - -func (p Params) ValidateDeposit(amt sdk.Coin) error { - min, err := p.MinDepositFor(amt.Denom) - - if err != nil { - return err - } - - if amt.IsGTE(min) { - return nil - } - - return errors.Wrapf(ErrInvalidDeposit, "Deposit too low - %v < %v", amt.Amount, min) -} - -func (p Params) MinDepositFor(denom string) (sdk.Coin, error) { - for _, minDeposit := range p.MinDeposits { - if minDeposit.Denom == denom { - return sdk.NewCoin(minDeposit.Denom, minDeposit.Amount), nil - } - } - - return sdk.NewInt64Coin(denom, math.MaxInt64), errors.Wrapf(ErrInvalidDeposit, "Invalid deposit denomination %v", denom) -} - -func validateMinDeposits(i interface{}) error { - vals, ok := i.(sdk.Coins) - if !ok { - return errors.Wrapf(ErrInvalidParam, "Min Deposits - invalid type: %T", i) - } - - check := make(map[string]bool) - - for _, minDeposit := range vals { - if _, exists := check[minDeposit.Denom]; exists { - return fmt.Errorf("duplicate Min Deposit for denom (%#v)", minDeposit) - } - - check[minDeposit.Denom] = true - - if minDeposit.Amount.Uint64() >= math.MaxInt32 { - return errors.Wrapf(ErrInvalidParam, "Min Deposit (%v) - too large: %v", minDeposit.Denom, minDeposit.Amount.Uint64()) - } - } - - if _, exists := check["uakt"]; !exists { - return errors.Wrapf(ErrInvalidParam, "Min Deposits - uakt not given: %#v", vals) - } - - return nil -} diff --git a/go/node/deployment/v1beta3/params.pb.go b/go/node/deployment/v1beta3/params.pb.go deleted file mode 100644 index 087b5ab5..00000000 --- a/go/node/deployment/v1beta3/params.pb.go +++ /dev/null @@ -1,340 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: akash/deployment/v1beta3/params.proto - -package v1beta3 - -import ( - fmt "fmt" - github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Params defines the parameters for the x/deployment package -type Params struct { - MinDeposits github_com_cosmos_cosmos_sdk_types.Coins `protobuf:"bytes,1,rep,name=min_deposits,json=minDeposits,proto3,castrepeated=github.com/cosmos/cosmos-sdk/types.Coins" json:"min_deposits" yaml:"min_deposits"` -} - -func (m *Params) Reset() { *m = Params{} } -func (m *Params) String() string { return proto.CompactTextString(m) } -func (*Params) ProtoMessage() {} -func (*Params) Descriptor() ([]byte, []int) { - return fileDescriptor_9a677e8b392d6c91, []int{0} -} -func (m *Params) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Params.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Params) XXX_Merge(src proto.Message) { - xxx_messageInfo_Params.Merge(m, src) -} -func (m *Params) XXX_Size() int { - return m.Size() -} -func (m *Params) XXX_DiscardUnknown() { - xxx_messageInfo_Params.DiscardUnknown(m) -} - -var xxx_messageInfo_Params proto.InternalMessageInfo - -func (m *Params) GetMinDeposits() github_com_cosmos_cosmos_sdk_types.Coins { - if m != nil { - return m.MinDeposits - } - return nil -} - -func init() { - proto.RegisterType((*Params)(nil), "akash.deployment.v1beta3.Params") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta3/params.proto", fileDescriptor_9a677e8b392d6c91) -} - -var fileDescriptor_9a677e8b392d6c91 = []byte{ - // 292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xbf, 0x4a, 0x03, 0x31, - 0x1c, 0xc7, 0x2f, 0x08, 0x1d, 0x5a, 0xa7, 0xea, 0x50, 0x3b, 0xe4, 0xe4, 0x40, 0xe8, 0xd2, 0x84, - 0xb3, 0x9b, 0xe0, 0x52, 0x5d, 0x05, 0x71, 0x11, 0x5c, 0x24, 0x77, 0x17, 0xaf, 0xe1, 0x9a, 0xfc, - 0xc2, 0x25, 0x2a, 0xf7, 0x16, 0x3e, 0x84, 0x93, 0x8b, 0xaf, 0xd1, 0xb1, 0xa3, 0xd3, 0x29, 0x77, - 0x9b, 0xa3, 0x4f, 0x20, 0xcd, 0x05, 0x5a, 0xc1, 0x29, 0xff, 0x3e, 0xf9, 0x7c, 0x7f, 0x7c, 0xfb, - 0x27, 0xac, 0x60, 0x66, 0x41, 0x33, 0xae, 0x97, 0x50, 0x49, 0xae, 0x2c, 0x7d, 0x8a, 0x13, 0x6e, - 0xd9, 0x8c, 0x6a, 0x56, 0x32, 0x69, 0x88, 0x2e, 0xc1, 0xc2, 0x70, 0xe4, 0x30, 0xb2, 0xc5, 0x88, - 0xc7, 0xc6, 0x87, 0x39, 0xe4, 0xe0, 0x20, 0xba, 0xd9, 0x75, 0xfc, 0x18, 0xa7, 0x60, 0x24, 0x18, - 0x9a, 0x30, 0xc3, 0xbd, 0x31, 0xa6, 0x29, 0x08, 0xd5, 0xbd, 0x47, 0xef, 0xa8, 0xdf, 0xbb, 0x76, - 0x01, 0xc3, 0x57, 0xd4, 0xdf, 0x97, 0x42, 0xdd, 0x67, 0x5c, 0x83, 0x11, 0xd6, 0x8c, 0xd0, 0xf1, - 0xde, 0x64, 0x70, 0x7a, 0x44, 0x3a, 0x05, 0xd9, 0x28, 0x7c, 0x5a, 0x4c, 0x2e, 0x40, 0xa8, 0xf9, - 
0xc3, 0xaa, 0x0e, 0x83, 0xa6, 0x0e, 0x07, 0x57, 0x42, 0x5d, 0xfa, 0x5f, 0xdf, 0x75, 0xf8, 0xc7, - 0xf2, 0x53, 0x87, 0x07, 0x15, 0x93, 0xcb, 0xb3, 0x68, 0xf7, 0x36, 0x7a, 0xfb, 0x0c, 0x27, 0xb9, - 0xb0, 0x8b, 0xc7, 0x84, 0xa4, 0x20, 0xa9, 0x9f, 0xb2, 0x5b, 0xa6, 0x26, 0x2b, 0xa8, 0xad, 0x34, - 0x37, 0x2e, 0xc6, 0xdc, 0x0c, 0xe4, 0xd6, 0x3f, 0xbf, 0x5d, 0x35, 0x18, 0xad, 0x1b, 0x8c, 0xbe, - 0x1a, 0x8c, 0x5e, 0x5a, 0x1c, 0xac, 0x5b, 0x1c, 0x7c, 0xb4, 0x38, 0xb8, 0x3b, 0xdf, 0x11, 0xba, - 0x9a, 0xa6, 0x8a, 0xdb, 0x67, 0x28, 0x0b, 0x7f, 0x62, 0x5a, 0xd0, 0x1c, 0xa8, 0x82, 0x8c, 0xff, - 0xd3, 0x73, 0xd2, 0x73, 0x8d, 0xcc, 0x7e, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc7, 0xd3, 0xdf, 0x39, - 0x8a, 0x01, 0x00, 0x00, -} - -func (m *Params) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Params) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.MinDeposits) > 0 { - for iNdEx := len(m.MinDeposits) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.MinDeposits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintParams(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintParams(dAtA []byte, offset int, v uint64) int { - offset -= sovParams(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Params) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.MinDeposits) > 0 { - for _, e := range m.MinDeposits { - l = e.Size() - n += 1 + l + sovParams(uint64(l)) - } - } - return n -} - -func sovParams(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozParams(x uint64) (n int) { - return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Params) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Params: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MinDeposits", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthParams - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthParams - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MinDeposits = append(m.MinDeposits, types.Coin{}) - if err := m.MinDeposits[len(m.MinDeposits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - 
default: - iNdEx = preIndex - skippy, err := skipParams(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipParams(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthParams - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupParams - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthParams - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta3/query.pb.go b/go/node/deployment/v1beta3/query.pb.go deleted file mode 100644 index 9a17b4c6..00000000 --- a/go/node/deployment/v1beta3/query.pb.go +++ /dev/null @@ -1,1628 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta3/query.proto - -package v1beta3 - -import ( - context "context" - fmt "fmt" - v1beta3 "github.com/akash-network/akash-api/go/node/escrow/v1beta3" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryDeploymentsRequest is request type for the Query/Deployments RPC method -type QueryDeploymentsRequest struct { - Filters DeploymentFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryDeploymentsRequest) Reset() { *m = QueryDeploymentsRequest{} } -func (m *QueryDeploymentsRequest) String() string { return proto.CompactTextString(m) } -func (*QueryDeploymentsRequest) ProtoMessage() {} -func (*QueryDeploymentsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_98f8fa16759df714, []int{0} -} -func (m *QueryDeploymentsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryDeploymentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryDeploymentsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryDeploymentsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryDeploymentsRequest.Merge(m, src) -} -func (m *QueryDeploymentsRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryDeploymentsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryDeploymentsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryDeploymentsRequest proto.InternalMessageInfo - -func (m *QueryDeploymentsRequest) GetFilters() DeploymentFilters { - if m != nil { - return m.Filters - } - return DeploymentFilters{} -} - -func (m *QueryDeploymentsRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryDeploymentsResponse is response type for the Query/Deployments RPC method -type QueryDeploymentsResponse struct { - Deployments DeploymentResponses `protobuf:"bytes,1,rep,name=deployments,proto3,castrepeated=DeploymentResponses" json:"deployments"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryDeploymentsResponse) Reset() { *m = QueryDeploymentsResponse{} } -func (m *QueryDeploymentsResponse) String() string { return proto.CompactTextString(m) } -func (*QueryDeploymentsResponse) ProtoMessage() {} -func (*QueryDeploymentsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_98f8fa16759df714, []int{1} -} -func (m *QueryDeploymentsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryDeploymentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryDeploymentsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryDeploymentsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryDeploymentsResponse.Merge(m, src) -} -func (m *QueryDeploymentsResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryDeploymentsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryDeploymentsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryDeploymentsResponse proto.InternalMessageInfo - -func (m *QueryDeploymentsResponse) GetDeployments() DeploymentResponses { - if m != nil { - return m.Deployments - } - return nil -} - -func (m *QueryDeploymentsResponse) GetPagination() 
*query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryDeploymentRequest is request type for the Query/Deployment RPC method -type QueryDeploymentRequest struct { - ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m *QueryDeploymentRequest) Reset() { *m = QueryDeploymentRequest{} } -func (m *QueryDeploymentRequest) String() string { return proto.CompactTextString(m) } -func (*QueryDeploymentRequest) ProtoMessage() {} -func (*QueryDeploymentRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_98f8fa16759df714, []int{2} -} -func (m *QueryDeploymentRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryDeploymentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryDeploymentRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryDeploymentRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryDeploymentRequest.Merge(m, src) -} -func (m *QueryDeploymentRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryDeploymentRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryDeploymentRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryDeploymentRequest proto.InternalMessageInfo - -func (m *QueryDeploymentRequest) GetID() DeploymentID { - if m != nil { - return m.ID - } - return DeploymentID{} -} - -// QueryDeploymentResponse is response type for the Query/Deployment RPC method -type QueryDeploymentResponse struct { - Deployment Deployment `protobuf:"bytes,1,opt,name=deployment,proto3" json:"deployment" yaml:"deployment"` - Groups []Group `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups" yaml:"groups"` - EscrowAccount v1beta3.Account `protobuf:"bytes,3,opt,name=escrow_account,json=escrowAccount,proto3" json:"escrow_account"` -} - -func (m *QueryDeploymentResponse) Reset() { *m = QueryDeploymentResponse{} } -func (m *QueryDeploymentResponse) String() string { return proto.CompactTextString(m) } -func (*QueryDeploymentResponse) ProtoMessage() {} -func (*QueryDeploymentResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_98f8fa16759df714, []int{3} -} -func (m *QueryDeploymentResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryDeploymentResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryDeploymentResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryDeploymentResponse.Merge(m, src) -} -func (m *QueryDeploymentResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryDeploymentResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryDeploymentResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryDeploymentResponse proto.InternalMessageInfo - -func (m *QueryDeploymentResponse) GetDeployment() Deployment { - if m != nil { - return m.Deployment - } - return Deployment{} -} - -func (m *QueryDeploymentResponse) GetGroups() []Group { - if m != nil { - return m.Groups - } - return nil -} - -func (m *QueryDeploymentResponse) GetEscrowAccount() v1beta3.Account { - if m != nil { - return m.EscrowAccount - } - return v1beta3.Account{} -} - -// QueryGroupRequest is request 
type for the Query/Group RPC method -type QueryGroupRequest struct { - ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m *QueryGroupRequest) Reset() { *m = QueryGroupRequest{} } -func (m *QueryGroupRequest) String() string { return proto.CompactTextString(m) } -func (*QueryGroupRequest) ProtoMessage() {} -func (*QueryGroupRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_98f8fa16759df714, []int{4} -} -func (m *QueryGroupRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryGroupRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryGroupRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryGroupRequest.Merge(m, src) -} -func (m *QueryGroupRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryGroupRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryGroupRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryGroupRequest proto.InternalMessageInfo - -func (m *QueryGroupRequest) GetID() GroupID { - if m != nil { - return m.ID - } - return GroupID{} -} - -// QueryGroupResponse is response type for the Query/Group RPC method -type QueryGroupResponse struct { - Group Group `protobuf:"bytes,1,opt,name=group,proto3" json:"group"` -} - -func (m *QueryGroupResponse) Reset() { *m = QueryGroupResponse{} } -func (m *QueryGroupResponse) String() string { return proto.CompactTextString(m) } -func (*QueryGroupResponse) ProtoMessage() {} -func (*QueryGroupResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_98f8fa16759df714, []int{5} -} -func (m *QueryGroupResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryGroupResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryGroupResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryGroupResponse.Merge(m, src) -} -func (m *QueryGroupResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryGroupResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryGroupResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryGroupResponse proto.InternalMessageInfo - -func (m *QueryGroupResponse) GetGroup() Group { - if m != nil { - return m.Group - } - return Group{} -} - -func init() { - proto.RegisterType((*QueryDeploymentsRequest)(nil), "akash.deployment.v1beta3.QueryDeploymentsRequest") - proto.RegisterType((*QueryDeploymentsResponse)(nil), "akash.deployment.v1beta3.QueryDeploymentsResponse") - proto.RegisterType((*QueryDeploymentRequest)(nil), "akash.deployment.v1beta3.QueryDeploymentRequest") - proto.RegisterType((*QueryDeploymentResponse)(nil), "akash.deployment.v1beta3.QueryDeploymentResponse") - proto.RegisterType((*QueryGroupRequest)(nil), "akash.deployment.v1beta3.QueryGroupRequest") - proto.RegisterType((*QueryGroupResponse)(nil), "akash.deployment.v1beta3.QueryGroupResponse") -} - -func init() { - proto.RegisterFile("akash/deployment/v1beta3/query.proto", fileDescriptor_98f8fa16759df714) -} - -var fileDescriptor_98f8fa16759df714 = []byte{ - // 680 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xcb, 0x6e, 0xd3, 0x40, - 0x14, 0x8d, 0xdd, 0x07, 0xd2, 0x44, 0x45, 0xea, 0x80, 0xc0, 0x0a, 0x60, 0x17, 0xab, 0x24, 0xa5, - 0x0f, 0x0f, 0x49, 0x77, 0x45, 0x5d, 0x60, 0x45, 0xad, 0x0a, 0x1b, 0xea, 0x0d, 0x08, 0x21, 0x21, - 0x27, 0x99, 0xba, 0x56, 0x13, 0x8f, 0xeb, 0x99, 0x50, 0x65, 0xcb, 0x17, 0x80, 0xf8, 0x01, 0x36, - 0x48, 0x88, 0x05, 0x2b, 0x3e, 0xa2, 0xcb, 0x4a, 0x08, 0x89, 0x55, 0x40, 0x09, 0x0b, 0xc4, 0x82, - 0x45, 0xbf, 0x00, 0x79, 0x66, 0x12, 0x1b, 0x92, 0x34, 0xc9, 0x2e, 0xf1, 0x9c, 0x7b, 0xce, 0xb9, - 0xf7, 0xdc, 0xb1, 0xc1, 0xb2, 0x7b, 0xe4, 0xd2, 0x43, 0x54, 0xc3, 0x61, 0x9d, 0xb4, 0x1a, 0x38, - 0x60, 0xe8, 0x65, 0xb1, 0x82, 0x99, 0xbb, 0x89, 0x8e, 0x9b, 0x38, 0x6a, 0x59, 0x61, 0x44, 0x18, - 0x81, 0x1a, 0x47, 0x59, 0x09, 0xca, 0x92, 0xa8, 0xdc, 0x55, 0x8f, 0x78, 0x84, 0x83, 0x50, 0xfc, - 0x4b, 0xe0, 0x73, 0x37, 0x3d, 0x42, 0xbc, 0x3a, 0x46, 0x6e, 0xe8, 0x23, 0x37, 0x08, 0x08, 0x73, - 0x99, 0x4f, 0x02, 0x2a, 0x4f, 0x57, 0xab, 0x84, 0x36, 0x08, 0x45, 0x15, 0x97, 0x62, 0x21, 0x23, - 0x45, 0x8b, 0x28, 0x74, 0x3d, 0x3f, 0xe0, 0x60, 0x89, 0xbd, 0x3b, 0xd2, 0x5f, 0xca, 0x8c, 0x80, - 0x8e, 0x6e, 0xc5, 0x8b, 0x48, 0x33, 0x94, 0xa8, 0xfc, 0xc5, 0x28, 0xbf, 0x26, 0x71, 0x4b, 0x02, - 0x87, 0x69, 0x35, 0x22, 0x27, 0x7d, 0x0c, 0x6b, 0x85, 0x58, 0xb6, 0x61, 0x7e, 0x52, 0xc0, 0xf5, - 0xfd, 0xd8, 0x7d, 0xb9, 0xcf, 0x45, 0x1d, 0x7c, 0xdc, 0xc4, 0x94, 0xc1, 0x47, 0xe0, 0xd2, 0x81, - 0x5f, 0x67, 0x38, 0xa2, 0x9a, 0xb2, 0xa4, 0xac, 0x64, 0x4b, 0x6b, 0xd6, 0xa8, 0x11, 0x5a, 0x49, - 0xf9, 0x8e, 0x28, 0xb1, 0x67, 0x4f, 0xdb, 0x46, 0xc6, 0xe9, 0x31, 0xc0, 0x1d, 0x00, 0x92, 0xb9, - 0x68, 0x2a, 0xe7, 0xcb, 0x5b, 0x62, 0x88, 0x56, 0x3c, 0x44, 0x4b, 0x64, 0x25, 0x87, 0x68, 0x3d, - 0x76, 0x3d, 0x2c, 0x8d, 0x38, 0xa9, 0x4a, 0xf3, 0xab, 0x02, 0xb4, 0x41, 0xc3, 0x34, 0x24, 0x01, - 0xc5, 0x30, 0x04, 0xd9, 0xc4, 0x5b, 0xec, 0x7a, 0x66, 0x25, 0x5b, 0x2a, 0x8e, 0x76, 0xfd, 0x1f, - 0x51, 0x8f, 0xc7, 0xbe, 0x11, 0x7b, 0xff, 0xf8, 0xdd, 0xb8, 0x32, 0x78, 0x46, 0x9d, 0xb4, 0x04, - 0xdc, 0x1d, 0xd2, 0x56, 0x61, 0x6c, 0x5b, 0x82, 0xea, 0x9f, 0xbe, 0x9e, 0x83, 0x6b, 0x03, 0x6e, - 0x44, 0x0c, 0x36, 0x50, 0xfd, 0x9a, 0x4c, 0x20, 0x3f, 0x49, 0x02, 0x7b, 0x65, 0x1b, 0xc4, 0x0d, - 0x74, 0xda, 0x86, 0xba, 0x57, 0x76, 0x54, 0xbf, 0x66, 0x7e, 0x56, 0x07, 0x62, 0xee, 0x0f, 0xad, - 0x01, 0x40, 0x42, 0x27, 0x75, 0x96, 0x27, 0xd1, 0xb1, 0x0b, 0xb1, 0xca, 0xef, 0xb6, 0x91, 0xaa, - 0x3f, 0x6f, 0x1b, 0x8b, 0x2d, 0xb7, 0x51, 0xdf, 0x32, 0x93, 0x67, 0xa6, 0x93, 0x02, 0xc0, 0xa7, - 0x60, 0x9e, 0x2f, 0x29, 0xd5, 0x54, 0x1e, 0x8f, 0x31, 0x5a, 0x6a, 0x37, 0xc6, 0xd9, 0x86, 0x54, - 0x91, 0x65, 0xe7, 0x6d, 0x63, 0x41, 0x28, 0x88, 0xff, 0xa6, 0x23, 0x0f, 0xe0, 0x43, 0x70, 0x59, - 0x6c, 0xfa, 0x0b, 0xb7, 0x5a, 0x25, 0xcd, 0x80, 0x69, 0x33, 0xbc, 0x99, 0x5b, 0x52, 0x41, 0x1c, - 0xf6, 0xd9, 0x1f, 0x08, 0x90, 0x5c, 0xd4, 0x05, 0x71, 0x2a, 0x1f, 0x6e, 0xcd, 0xfe, 0x7a, 0x67, - 0x64, 0x4c, 0x07, 0x2c, 0xf2, 0xa9, 0x71, 0x23, 0xbd, 0x3c, 0xb6, 0x53, 0x79, 0xdc, 0x1e, 0x63, - 0x7e, 0x48, 0x14, 0xfb, 0x00, 0xa6, 0x39, 0x65, 0x08, 0xf7, 0xc1, 0x1c, 0xef, 0x42, 0xf2, 0x8e, - 0x1d, 0x8a, 0x30, 0x2d, 0x6a, 0x4a, 0x7f, 0x66, 0xc0, 0x1c, 0xe7, 0x84, 0x1f, 0x14, 0x90, 0x4d, - 0x5d, 0x0c, 0x38, 0xf9, 0xee, 0xf7, 0x6e, 0x7d, 0xae, 0x34, 0x4d, 0x89, 0x70, 0x6f, 0x96, 0x5e, - 0x7d, 0xf9, 0xf9, 0x56, 0x5d, 0x87, 0xab, 0x68, 0x82, 0x37, 0x1d, 0x45, 0x75, 0x9f, 0x32, 0xf8, - 0x5e, 0x01, 0x20, 0xe1, 0x82, 0xf7, 0xa6, 0xb8, 0xa5, 0xc2, 0xe8, 0xf4, 0xf7, 0x7a, 0x5a, 0x9f, - 0x7e, 0x70, 0x40, 0xe0, 0x1b, 0x05, 0xcc, 0xf1, 0x99, 
0xc3, 0xb5, 0x31, 0x82, 0xe9, 0x2d, 0xc9, - 0xad, 0x4f, 0x06, 0x96, 0xc6, 0x36, 0xb8, 0xb1, 0x02, 0xbc, 0x83, 0x2e, 0x7e, 0xb3, 0x0b, 0x4f, - 0xf6, 0x93, 0xd3, 0x8e, 0xae, 0x9c, 0x75, 0x74, 0xe5, 0x47, 0x47, 0x57, 0x5e, 0x77, 0xf5, 0xcc, - 0x59, 0x57, 0xcf, 0x7c, 0xeb, 0xea, 0x99, 0x67, 0xdb, 0x9e, 0xcf, 0x0e, 0x9b, 0x15, 0xab, 0x4a, - 0x1a, 0x82, 0x6a, 0x23, 0xc0, 0xec, 0x84, 0x44, 0x47, 0xf2, 0x5f, 0xfc, 0x31, 0xf3, 0x08, 0x0a, - 0x48, 0x0d, 0x0f, 0x11, 0xa9, 0xcc, 0xf3, 0xaf, 0xc2, 0xe6, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x57, 0xf2, 0xb2, 0x41, 0x52, 0x07, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // Deployments queries deployments - Deployments(ctx context.Context, in *QueryDeploymentsRequest, opts ...grpc.CallOption) (*QueryDeploymentsResponse, error) - // Deployment queries deployment details - Deployment(ctx context.Context, in *QueryDeploymentRequest, opts ...grpc.CallOption) (*QueryDeploymentResponse, error) - // Group queries group details - Group(ctx context.Context, in *QueryGroupRequest, opts ...grpc.CallOption) (*QueryGroupResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Deployments(ctx context.Context, in *QueryDeploymentsRequest, opts ...grpc.CallOption) (*QueryDeploymentsResponse, error) { - out := new(QueryDeploymentsResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta3.Query/Deployments", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Deployment(ctx context.Context, in *QueryDeploymentRequest, opts ...grpc.CallOption) (*QueryDeploymentResponse, error) { - out := new(QueryDeploymentResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta3.Query/Deployment", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Group(ctx context.Context, in *QueryGroupRequest, opts ...grpc.CallOption) (*QueryGroupResponse, error) { - out := new(QueryGroupResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta3.Query/Group", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // Deployments queries deployments - Deployments(context.Context, *QueryDeploymentsRequest) (*QueryDeploymentsResponse, error) - // Deployment queries deployment details - Deployment(context.Context, *QueryDeploymentRequest) (*QueryDeploymentResponse, error) - // Group queries group details - Group(context.Context, *QueryGroupRequest) (*QueryGroupResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
-type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Deployments(ctx context.Context, req *QueryDeploymentsRequest) (*QueryDeploymentsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Deployments not implemented") -} -func (*UnimplementedQueryServer) Deployment(ctx context.Context, req *QueryDeploymentRequest) (*QueryDeploymentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Deployment not implemented") -} -func (*UnimplementedQueryServer) Group(ctx context.Context, req *QueryGroupRequest) (*QueryGroupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Group not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Deployments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryDeploymentsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Deployments(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta3.Query/Deployments", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Deployments(ctx, req.(*QueryDeploymentsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Deployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryDeploymentRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Deployment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta3.Query/Deployment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Deployment(ctx, req.(*QueryDeploymentRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Group_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryGroupRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Group(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta3.Query/Group", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Group(ctx, req.(*QueryGroupRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.deployment.v1beta3.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Deployments", - Handler: _Query_Deployments_Handler, - }, - { - MethodName: "Deployment", - Handler: _Query_Deployment_Handler, - }, - { - MethodName: "Group", - Handler: _Query_Group_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/deployment/v1beta3/query.proto", -} - -func (m *QueryDeploymentsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryDeploymentsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryDeploymentsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryDeploymentsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryDeploymentsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryDeploymentsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Deployments) > 0 { - for iNdEx := len(m.Deployments) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Deployments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryDeploymentRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryDeploymentRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryDeploymentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryDeploymentResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.EscrowAccount.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Deployment.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - 
i, nil -} - -func (m *QueryGroupRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryGroupRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryGroupRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryGroupResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryGroupResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryDeploymentsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filters.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryDeploymentsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Deployments) > 0 { - for _, e := range m.Deployments { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryDeploymentRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryDeploymentResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Deployment.Size() - n += 1 + l + sovQuery(uint64(l)) - if len(m.Groups) > 0 { - for _, e := range m.Groups { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - l = m.EscrowAccount.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryGroupRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryGroupResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Group.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryDeploymentsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l 
{ - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryDeploymentsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryDeploymentsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryDeploymentsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryDeploymentsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryDeploymentsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deployments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Deployments = append(m.Deployments, QueryDeploymentResponse{}) - if err := 
m.Deployments[len(m.Deployments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryDeploymentRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryDeploymentRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryDeploymentRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryDeploymentResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryDeploymentResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryDeploymentResponse: illegal tag %d 
(wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deployment", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Deployment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, Group{}) - if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EscrowAccount", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EscrowAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryGroupRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryGroupRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryGroupRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryGroupResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryGroupResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, 
ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/deployment/v1beta3/query.pb.gw.go b/go/node/deployment/v1beta3/query.pb.gw.go deleted file mode 100644 index 40020037..00000000 --- a/go/node/deployment/v1beta3/query.pb.gw.go +++ /dev/null @@ -1,337 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/deployment/v1beta3/query.proto - -/* -Package v1beta3 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package v1beta3 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_Deployments_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Deployments_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryDeploymentsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployments_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Deployments(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Deployments_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryDeploymentsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployments_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Deployments(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Deployment_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Deployment_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryDeploymentRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, 
filter_Query_Deployment_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Deployment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Deployment_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryDeploymentRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployment_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Deployment(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Group_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Group_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryGroupRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Group_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Group(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Group_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryGroupRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Group_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Group(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
-func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_Deployments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Deployments_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Deployments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Deployment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Deployment_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Deployment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Group_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Group_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Group_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_Deployments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Deployments_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Deployments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Deployment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Deployment_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Deployment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_Group_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Group_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Group_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Query_Deployments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "deployment", "v1beta3", "deployments", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Deployment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "deployment", "v1beta3", "deployments", "info"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Group_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "deployment", "v1beta3", "groups", "info"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_Deployments_0 = runtime.ForwardResponseMessage - - forward_Query_Deployment_0 = runtime.ForwardResponseMessage - - forward_Query_Group_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/deployment/v1beta3/resource_list_validation.go b/go/node/deployment/v1beta3/resource_list_validation.go deleted file mode 100644 index 5dced741..00000000 --- a/go/node/deployment/v1beta3/resource_list_validation.go +++ /dev/null @@ -1,187 +0,0 @@ -package v1beta3 - -import ( - "errors" -) - -var ( - ErrNoGroupsPresent = errors.New("validation: no groups present") - ErrGroupEmptyName = errors.New("validation: group has empty name") -) - -// func ValidateResourceList(rlist GSpec) error { -// if rlist.GetName() == "" { -// return ErrGroupEmptyName -// } -// -// units := rlist.GetResources() -// -// if count := len(units); count > validationConfig.MaxGroupUnits { -// return fmt.Errorf("group %v: too many units (%v > %v)", rlist.GetName(), count, validationConfig.MaxGroupUnits) -// } -// -// if err := units.Validate(); err != nil { -// return fmt.Errorf("group %v: %w", rlist.GetName(), err) -// } -// -// limits := newLimits() -// -// for _, resource := range units { -// gLimits, err := validateGroupResource(resource) -// if err != nil { -// return fmt.Errorf("group %v: %w", rlist.GetName(), err) -// } -// -// limits.add(gLimits) -// } -// -// if limits.cpu.GT(sdk.NewIntFromUint64(validationConfig.MaxGroupCPU)) || limits.cpu.LTE(sdk.ZeroInt()) { -// return fmt.Errorf("group %v: invalid total CPU (%v > %v > %v fails)", -// rlist.GetName(), validationConfig.MaxGroupCPU, limits.cpu, 0) -// } -// -// if !limits.gpu.IsZero() && (limits.gpu.GT(sdk.NewIntFromUint64(validationConfig.MaxGroupGPU)) || limits.gpu.LTE(sdk.ZeroInt())) { -// return fmt.Errorf("group %v: invalid total GPU (%v > %v > %v fails)", -// rlist.GetName(), validationConfig.MaxGroupGPU, limits.gpu, 0) -// } -// -// if limits.memory.GT(sdk.NewIntFromUint64(validationConfig.MaxGroupMemory)) || limits.memory.LTE(sdk.ZeroInt()) { -// return fmt.Errorf("group %v: invalid total memory (%v > %v > %v fails)", -// 
rlist.GetName(), validationConfig.MaxGroupMemory, limits.memory, 0) -// } -// -// for i := range limits.storage { -// if limits.storage[i].GT(sdk.NewIntFromUint64(validationConfig.MaxGroupStorage)) || limits.storage[i].LTE(sdk.ZeroInt()) { -// return fmt.Errorf("group %v: invalid total storage (%v > %v > %v fails)", -// rlist.GetName(), validationConfig.MaxGroupStorage, limits.storage, 0) -// } -// } -// -// return nil -// } - -// func validateGroupResource(rg GroupResource) (resourceLimits, error) { -// limits, err := validateResourceUnits(rg.Resource.Units) -// if err != nil { -// return resourceLimits{}, err -// } -// -// if rg.Count > uint32(validationConfig.MaxUnitCount) || rg.Count < uint32(validationConfig.MinUnitCount) { -// return resourceLimits{}, fmt.Errorf("error: invalid unit count (%v > %v > %v fails)", -// validationConfig.MaxUnitCount, rg.Count, validationConfig.MinUnitCount) -// } -// -// limits.mul(rg.Count) -// -// return limits, nil -// } - -// func validateResourceUnits(units types.ResourceUnits) (resourceLimits, error) { -// limits := newLimits() -// -// val, err := validateCPU(units.CPU) -// if err != nil { -// return resourceLimits{}, err -// } -// limits.cpu = limits.cpu.Add(val) -// -// val, err = validateGPU(units.GPU) -// if err != nil { -// return resourceLimits{}, err -// } -// limits.gpu = limits.gpu.Add(val) -// -// val, err = validateMemory(units.Memory) -// if err != nil { -// return resourceLimits{}, err -// } -// limits.memory = limits.memory.Add(val) -// -// var storage []sdk.Int -// storage, err = validateStorage(units.Storage) -// if err != nil { -// return resourceLimits{}, err -// } -// -// // fixme this is not actually sum for storage usecase. -// // do we really need sum here? -// limits.storage = storage -// -// return limits, nil -// } -// -// func validateCPU(u *types.CPU) (sdk.Int, error) { -// if u == nil { -// return sdk.Int{}, fmt.Errorf("error: invalid unit CPU, cannot be nil") -// } -// if (u.Units.Value() > uint64(validationConfig.MaxUnitCPU)) || (u.Units.Value() < uint64(validationConfig.MinUnitCPU)) { -// return sdk.Int{}, fmt.Errorf("error: invalid unit CPU (%v > %v > %v fails)", -// validationConfig.MaxUnitCPU, u.Units.Value(), validationConfig.MinUnitCPU) -// } -// -// if err := u.Attributes.Validate(); err != nil { -// return sdk.Int{}, fmt.Errorf("error: invalid CPU attributes: %w", err) -// } -// -// return u.Units.Val, nil -// } -// -// func validateGPU(u *types.GPU) (sdk.Int, error) { -// if u == nil { -// return sdk.Int{}, fmt.Errorf("error: invalid unit GPU, cannot be nil") -// } -// -// if (u.Units.Value() > uint64(validationConfig.MaxUnitGPU)) || (u.Units.Value() < uint64(validationConfig.MinUnitGPU)) { -// return sdk.Int{}, fmt.Errorf("error: invalid unit GPU (%v > %v > %v fails)", -// validationConfig.MaxUnitGPU, u.Units.Value(), validationConfig.MinUnitGPU) -// } -// -// if u.Units.Value() == 0 && len(u.Attributes) > 0 { -// return sdk.Int{}, fmt.Errorf("error: invalid GPU state. 
attributes cannot be present if units == 0") -// } -// -// if err := u.Attributes.Validate(); err != nil { -// return sdk.Int{}, fmt.Errorf("error: invalid GPU attributes: %w", err) -// } -// -// return u.Units.Val, nil -// } -// -// func validateMemory(u *types.Memory) (sdk.Int, error) { -// if u == nil { -// return sdk.Int{}, fmt.Errorf("error: invalid unit memory, cannot be nil") -// } -// if (u.Quantity.Value() > validationConfig.MaxUnitMemory) || (u.Quantity.Value() < validationConfig.MinUnitMemory) { -// return sdk.Int{}, fmt.Errorf("error: invalid unit memory (%v > %v > %v fails)", -// validationConfig.MaxUnitMemory, u.Quantity.Value(), validationConfig.MinUnitMemory) -// } -// -// if err := u.Attributes.Validate(); err != nil { -// return sdk.Int{}, fmt.Errorf("error: invalid Memory attributes: %w", err) -// } -// -// return u.Quantity.Val, nil -// } -// -// func validateStorage(u types.Volumes) ([]sdk.Int, error) { -// if u == nil { -// return nil, fmt.Errorf("error: invalid unit storage, cannot be nil") -// } -// -// storage := make([]sdk.Int, 0, len(u)) -// -// for i := range u { -// if (u[i].Quantity.Value() > validationConfig.MaxUnitStorage) || (u[i].Quantity.Value() < validationConfig.MinUnitStorage) { -// return nil, fmt.Errorf("error: invalid unit storage (%v > %v > %v fails)", -// validationConfig.MaxUnitStorage, u[i].Quantity.Value(), validationConfig.MinUnitStorage) -// } -// -// if err := u[i].Attributes.Validate(); err != nil { -// return []sdk.Int{}, fmt.Errorf("error: invalid Storage attributes: %w", err) -// } -// -// storage = append(storage, u[i].Quantity.Val) -// } -// -// return storage, nil -// } diff --git a/go/node/deployment/v1beta3/resourcelimits.go b/go/node/deployment/v1beta3/resourcelimits.go deleted file mode 100644 index e4f52418..00000000 --- a/go/node/deployment/v1beta3/resourcelimits.go +++ /dev/null @@ -1,38 +0,0 @@ -package v1beta3 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" -) - -type resourceLimits struct { - cpu sdk.Int - gpu sdk.Int - memory sdk.Int - storage []sdk.Int -} - -func newLimits() resourceLimits { - return resourceLimits{ - cpu: sdk.ZeroInt(), - gpu: sdk.ZeroInt(), - memory: sdk.ZeroInt(), - } -} - -func (u *resourceLimits) add(rhs resourceLimits) { - u.cpu = u.cpu.Add(rhs.cpu) - u.gpu = u.gpu.Add(rhs.gpu) - u.memory = u.memory.Add(rhs.memory) - - // u.storage = u.storage.Add(rhs.storage) -} - -func (u *resourceLimits) mul(count uint32) { - u.cpu = u.cpu.MulRaw(int64(count)) - u.gpu = u.gpu.MulRaw(int64(count)) - u.memory = u.memory.MulRaw(int64(count)) - - for i := range u.storage { - u.storage[i] = u.storage[i].MulRaw(int64(count)) - } -} diff --git a/go/node/deployment/v1beta3/service.pb.go b/go/node/deployment/v1beta3/service.pb.go deleted file mode 100644 index e4722681..00000000 --- a/go/node/deployment/v1beta3/service.pb.go +++ /dev/null @@ -1,365 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta3/service.proto - -package v1beta3 - -import ( - context "context" - fmt "fmt" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func init() { - proto.RegisterFile("akash/deployment/v1beta3/service.proto", fileDescriptor_d88380ad17449a3c) -} - -var fileDescriptor_d88380ad17449a3c = []byte{ - // 325 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0xd3, 0x31, 0x4e, 0xeb, 0x30, - 0x18, 0xc0, 0xf1, 0x56, 0x4f, 0xea, 0xe0, 0xe5, 0x95, 0x4c, 0xc8, 0x83, 0x47, 0xba, 0xb4, 0xb6, - 0xa0, 0xc0, 0xc6, 0x42, 0x2b, 0x31, 0x55, 0x42, 0x20, 0x84, 0xc4, 0xe6, 0xb6, 0x1f, 0x6e, 0xd4, - 0x36, 0xb6, 0x6c, 0xa7, 0x14, 0x71, 0x09, 0x2e, 0xc1, 0x5d, 0x18, 0x3b, 0x32, 0xa2, 0xe4, 0x22, - 0x88, 0x50, 0xe2, 0x10, 0x48, 0x93, 0x8c, 0x49, 0x7e, 0xf9, 0xfe, 0x71, 0x64, 0xa3, 0x03, 0x3e, - 0xe7, 0x66, 0xc6, 0xa6, 0xa0, 0x16, 0xf2, 0x71, 0x09, 0x81, 0x65, 0xab, 0xc3, 0x31, 0x58, 0xde, - 0x67, 0x06, 0xf4, 0xca, 0x9f, 0x00, 0x55, 0x5a, 0x5a, 0xe9, 0xed, 0x27, 0x8e, 0x3a, 0x47, 0xb7, - 0x0e, 0x77, 0x0b, 0x27, 0xb8, 0x5b, 0x4b, 0x23, 0xbe, 0xe6, 0xe0, 0x4e, 0xa1, 0x16, 0x5a, 0x86, - 0x2a, 0x85, 0x47, 0x2f, 0x2d, 0xf4, 0x6f, 0x64, 0x84, 0xb7, 0x46, 0xed, 0x81, 0x06, 0x6e, 0x61, - 0x98, 0xbe, 0xe2, 0xf5, 0x68, 0xd1, 0xd7, 0xd0, 0x91, 0x11, 0x79, 0x8e, 0x4f, 0x6a, 0xf1, 0x2b, - 0x30, 0x4a, 0x06, 0x06, 0xbc, 0x27, 0xb4, 0x37, 0x04, 0x25, 0x8d, 0x6f, 0x33, 0x69, 0xba, 0x73, - 0xd6, 0x2f, 0x8f, 0x4f, 0xeb, 0xf9, 0x34, 0xbe, 0x46, 0xed, 0x1b, 0x35, 0xad, 0xb3, 0xec, 0x3c, - 0x2f, 0x59, 0x76, 0x9e, 0xa7, 0xe5, 0x10, 0xfd, 0x1f, 0x2c, 0xa4, 0xc9, 0x86, 0xbb, 0xbb, 0x7f, - 0xe0, 0x4f, 0x8d, 0x8f, 0xeb, 0xe8, 0x34, 0x7b, 0x8f, 0x50, 0xf2, 0xe8, 0xe2, 0x73, 0x1b, 0x78, - 0x9d, 0xf2, 0x19, 0x09, 0xc4, 0xac, 0x22, 0xcc, 0x76, 0x2e, 0x79, 0x58, 0xad, 0xe3, 0x60, 0x49, - 0xc7, 0xc1, 0x6c, 0xe7, 0xda, 0x72, 0x6d, 0xab, 0x74, 0x1c, 0x2c, 0xe9, 0x38, 0xf8, 0xdd, 0x39, - 0xbf, 0x7d, 0x8d, 0x48, 0x73, 0x13, 0x91, 0xe6, 0x7b, 0x44, 0x9a, 0xcf, 0x31, 0x69, 0x6c, 0x62, - 0xd2, 0x78, 0x8b, 0x49, 0xe3, 0xee, 0x4c, 0xf8, 0x76, 0x16, 0x8e, 0xe9, 0x44, 0x2e, 0x59, 0x32, - 0xb4, 0x17, 0x80, 0x7d, 0x90, 0x7a, 0xbe, 0xbd, 0xe2, 0xca, 0x67, 0x42, 0xb2, 0x40, 0x4e, 0xe1, - 0x8f, 0xf3, 0x38, 0x6e, 0x25, 0xe7, 0xb0, 0xff, 0x11, 0x00, 0x00, 0xff, 0xff, 0x17, 0xb5, 0xb4, - 0x10, 0x22, 0x04, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MsgClient interface { - // CreateDeployment defines a method to create new deployment given proper inputs. - CreateDeployment(ctx context.Context, in *MsgCreateDeployment, opts ...grpc.CallOption) (*MsgCreateDeploymentResponse, error) - // DepositDeployment deposits more funds into the deployment account - DepositDeployment(ctx context.Context, in *MsgDepositDeployment, opts ...grpc.CallOption) (*MsgDepositDeploymentResponse, error) - // UpdateDeployment defines a method to update a deployment given proper inputs. 
- UpdateDeployment(ctx context.Context, in *MsgUpdateDeployment, opts ...grpc.CallOption) (*MsgUpdateDeploymentResponse, error) - // CloseDeployment defines a method to close a deployment given proper inputs. - CloseDeployment(ctx context.Context, in *MsgCloseDeployment, opts ...grpc.CallOption) (*MsgCloseDeploymentResponse, error) - // CloseGroup defines a method to close a group of a deployment given proper inputs. - CloseGroup(ctx context.Context, in *MsgCloseGroup, opts ...grpc.CallOption) (*MsgCloseGroupResponse, error) - // PauseGroup defines a method to close a group of a deployment given proper inputs. - PauseGroup(ctx context.Context, in *MsgPauseGroup, opts ...grpc.CallOption) (*MsgPauseGroupResponse, error) - // StartGroup defines a method to close a group of a deployment given proper inputs. - StartGroup(ctx context.Context, in *MsgStartGroup, opts ...grpc.CallOption) (*MsgStartGroupResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) CreateDeployment(ctx context.Context, in *MsgCreateDeployment, opts ...grpc.CallOption) (*MsgCreateDeploymentResponse, error) { - out := new(MsgCreateDeploymentResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta3.Msg/CreateDeployment", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) DepositDeployment(ctx context.Context, in *MsgDepositDeployment, opts ...grpc.CallOption) (*MsgDepositDeploymentResponse, error) { - out := new(MsgDepositDeploymentResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta3.Msg/DepositDeployment", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) UpdateDeployment(ctx context.Context, in *MsgUpdateDeployment, opts ...grpc.CallOption) (*MsgUpdateDeploymentResponse, error) { - out := new(MsgUpdateDeploymentResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta3.Msg/UpdateDeployment", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CloseDeployment(ctx context.Context, in *MsgCloseDeployment, opts ...grpc.CallOption) (*MsgCloseDeploymentResponse, error) { - out := new(MsgCloseDeploymentResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta3.Msg/CloseDeployment", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CloseGroup(ctx context.Context, in *MsgCloseGroup, opts ...grpc.CallOption) (*MsgCloseGroupResponse, error) { - out := new(MsgCloseGroupResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta3.Msg/CloseGroup", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) PauseGroup(ctx context.Context, in *MsgPauseGroup, opts ...grpc.CallOption) (*MsgPauseGroupResponse, error) { - out := new(MsgPauseGroupResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta3.Msg/PauseGroup", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) StartGroup(ctx context.Context, in *MsgStartGroup, opts ...grpc.CallOption) (*MsgStartGroupResponse, error) { - out := new(MsgStartGroupResponse) - err := c.cc.Invoke(ctx, "/akash.deployment.v1beta3.Msg/StartGroup", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. 
-type MsgServer interface { - // CreateDeployment defines a method to create new deployment given proper inputs. - CreateDeployment(context.Context, *MsgCreateDeployment) (*MsgCreateDeploymentResponse, error) - // DepositDeployment deposits more funds into the deployment account - DepositDeployment(context.Context, *MsgDepositDeployment) (*MsgDepositDeploymentResponse, error) - // UpdateDeployment defines a method to update a deployment given proper inputs. - UpdateDeployment(context.Context, *MsgUpdateDeployment) (*MsgUpdateDeploymentResponse, error) - // CloseDeployment defines a method to close a deployment given proper inputs. - CloseDeployment(context.Context, *MsgCloseDeployment) (*MsgCloseDeploymentResponse, error) - // CloseGroup defines a method to close a group of a deployment given proper inputs. - CloseGroup(context.Context, *MsgCloseGroup) (*MsgCloseGroupResponse, error) - // PauseGroup defines a method to close a group of a deployment given proper inputs. - PauseGroup(context.Context, *MsgPauseGroup) (*MsgPauseGroupResponse, error) - // StartGroup defines a method to close a group of a deployment given proper inputs. - StartGroup(context.Context, *MsgStartGroup) (*MsgStartGroupResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. -type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) CreateDeployment(ctx context.Context, req *MsgCreateDeployment) (*MsgCreateDeploymentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateDeployment not implemented") -} -func (*UnimplementedMsgServer) DepositDeployment(ctx context.Context, req *MsgDepositDeployment) (*MsgDepositDeploymentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DepositDeployment not implemented") -} -func (*UnimplementedMsgServer) UpdateDeployment(ctx context.Context, req *MsgUpdateDeployment) (*MsgUpdateDeploymentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateDeployment not implemented") -} -func (*UnimplementedMsgServer) CloseDeployment(ctx context.Context, req *MsgCloseDeployment) (*MsgCloseDeploymentResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseDeployment not implemented") -} -func (*UnimplementedMsgServer) CloseGroup(ctx context.Context, req *MsgCloseGroup) (*MsgCloseGroupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseGroup not implemented") -} -func (*UnimplementedMsgServer) PauseGroup(ctx context.Context, req *MsgPauseGroup) (*MsgPauseGroupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method PauseGroup not implemented") -} -func (*UnimplementedMsgServer) StartGroup(ctx context.Context, req *MsgStartGroup) (*MsgStartGroupResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method StartGroup not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_CreateDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateDeployment) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateDeployment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta3.Msg/CreateDeployment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - 
return srv.(MsgServer).CreateDeployment(ctx, req.(*MsgCreateDeployment)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_DepositDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgDepositDeployment) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).DepositDeployment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta3.Msg/DepositDeployment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).DepositDeployment(ctx, req.(*MsgDepositDeployment)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_UpdateDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgUpdateDeployment) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).UpdateDeployment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta3.Msg/UpdateDeployment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).UpdateDeployment(ctx, req.(*MsgUpdateDeployment)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CloseDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCloseDeployment) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CloseDeployment(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta3.Msg/CloseDeployment", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CloseDeployment(ctx, req.(*MsgCloseDeployment)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CloseGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCloseGroup) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CloseGroup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta3.Msg/CloseGroup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CloseGroup(ctx, req.(*MsgCloseGroup)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_PauseGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgPauseGroup) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).PauseGroup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta3.Msg/PauseGroup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).PauseGroup(ctx, req.(*MsgPauseGroup)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_StartGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgStartGroup) - if 
err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).StartGroup(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.deployment.v1beta3.Msg/StartGroup", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).StartGroup(ctx, req.(*MsgStartGroup)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.deployment.v1beta3.Msg", - HandlerType: (*MsgServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateDeployment", - Handler: _Msg_CreateDeployment_Handler, - }, - { - MethodName: "DepositDeployment", - Handler: _Msg_DepositDeployment_Handler, - }, - { - MethodName: "UpdateDeployment", - Handler: _Msg_UpdateDeployment_Handler, - }, - { - MethodName: "CloseDeployment", - Handler: _Msg_CloseDeployment_Handler, - }, - { - MethodName: "CloseGroup", - Handler: _Msg_CloseGroup_Handler, - }, - { - MethodName: "PauseGroup", - Handler: _Msg_PauseGroup_Handler, - }, - { - MethodName: "StartGroup", - Handler: _Msg_StartGroup_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/deployment/v1beta3/service.proto", -} diff --git a/go/node/deployment/v1beta3/types.go b/go/node/deployment/v1beta3/types.go deleted file mode 100644 index bd761149..00000000 --- a/go/node/deployment/v1beta3/types.go +++ /dev/null @@ -1,122 +0,0 @@ -package v1beta3 - -import ( - "bytes" - - types "github.com/akash-network/akash-api/go/node/types/v1beta3" -) - -type attributesMatching map[string]types.Attributes - -const ( - // ManifestVersionLength is the length of manifest version - ManifestVersionLength = 32 - - // DefaultOrderBiddingDuration is the default time limit for an Order being active. - // After the duration, the Order is automatically closed. - // ( 24(hr) * 3600(seconds per hour) ) / 7s-Block - DefaultOrderBiddingDuration = int64(12342) - - // MaxBiddingDuration is roughly 30 days of block height - MaxBiddingDuration = DefaultOrderBiddingDuration * int64(30) -) - -// ID method returns DeploymentID details of specific deployment -func (obj Deployment) ID() DeploymentID { - return obj.DeploymentID -} - -// MatchAttributes method compares provided attributes with specific group attributes -func (g *GroupSpec) MatchAttributes(attr types.Attributes) bool { - return types.AttributesSubsetOf(g.Requirements.Attributes, attr) -} - -// ID method returns GroupID details of specific group -func (g Group) ID() GroupID { - return g.GroupID -} - -// ValidateClosable provides error response if group is already closed, -// and thus should not be closed again, else nil. 
-func (g Group) ValidateClosable() error { - switch g.State { - case GroupClosed: - return ErrGroupClosed - default: - return nil - } -} - -// ValidatePausable provides error response if group is not pausable -func (g Group) ValidatePausable() error { - switch g.State { - case GroupClosed: - return ErrGroupClosed - case GroupPaused: - return ErrGroupPaused - default: - return nil - } -} - -// ValidatePausable provides error response if group is not pausable -func (g Group) ValidateStartable() error { - switch g.State { - case GroupClosed: - return ErrGroupClosed - case GroupOpen: - return ErrGroupOpen - default: - return nil - } -} - -// GetName method returns group name -func (g Group) GetName() string { - return g.GroupSpec.Name -} - -// GetResourceUnits method returns resources list in group -func (g Group) GetResourceUnits() ResourceUnits { - return g.GroupSpec.Resources -} - -// DeploymentResponses is a collection of DeploymentResponse -type DeploymentResponses []QueryDeploymentResponse - -func (ds DeploymentResponses) String() string { - var buf bytes.Buffer - - const sep = "\n\n" - - for _, d := range ds { - buf.WriteString(d.String()) - buf.WriteString(sep) - } - - if len(ds) > 0 { - buf.Truncate(buf.Len() - len(sep)) - } - - return buf.String() -} - -// Accept returns whether deployment filters valid or not -func (filters DeploymentFilters) Accept(obj Deployment, stateVal Deployment_State) bool { - // Checking owner filter - if filters.Owner != "" && filters.Owner != obj.DeploymentID.Owner { - return false - } - - // Checking dseq filter - if filters.DSeq != 0 && filters.DSeq != obj.DeploymentID.DSeq { - return false - } - - // Checking state filter - if stateVal != 0 && stateVal != obj.State { - return false - } - - return true -} diff --git a/go/node/deployment/v1beta3/types_test.go b/go/node/deployment/v1beta3/types_test.go deleted file mode 100644 index b996c87e..00000000 --- a/go/node/deployment/v1beta3/types_test.go +++ /dev/null @@ -1,461 +0,0 @@ -package v1beta3_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - sdk "github.com/cosmos/cosmos-sdk/types" - abci "github.com/tendermint/tendermint/abci/types" - - atypes "github.com/akash-network/akash-api/go/node/audit/v1beta3" - types "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - akashtypes "github.com/akash-network/akash-api/go/node/types/v1beta3" - "github.com/akash-network/akash-api/go/sdkutil" - tutil "github.com/akash-network/akash-api/go/testutil" - "github.com/akash-network/akash-api/go/testutil/v1beta3" -) - -type gStateTest struct { - state types.Group_State - expValidateClosable error -} - -func TestGroupState(t *testing.T) { - tests := []gStateTest{ - { - state: types.GroupOpen, - }, - { - state: types.GroupOpen, - }, - { - state: types.GroupInsufficientFunds, - }, - { - state: types.GroupClosed, - expValidateClosable: types.ErrGroupClosed, - }, - { - state: types.Group_State(99), - }, - } - - for _, test := range tests { - group := types.Group{ - GroupID: testutil.GroupID(t), - State: test.state, - } - - assert.Equal(t, group.ValidateClosable(), test.expValidateClosable, group.State) - } -} - -func TestDeploymentVersionAttributeLifecycle(t *testing.T) { - d := testutil.Deployment(t) - - t.Run("deployment created", func(t *testing.T) { - edc := types.NewEventDeploymentCreated(d.ID(), d.Version) - sdkEvent := edc.ToSDKEvent() - strEvent := sdk.StringifyEvent(abci.Event(sdkEvent)) - - ev, err := sdkutil.ParseEvent(strEvent) - 
require.NoError(t, err) - - versionString, err := types.ParseEVDeploymentVersion(ev.Attributes) - require.NoError(t, err) - assert.Equal(t, d.Version, versionString) - }) - - t.Run("deployment updated", func(t *testing.T) { - edu := types.NewEventDeploymentUpdated(d.ID(), d.Version) - - sdkEvent := edu.ToSDKEvent() - strEvent := sdk.StringifyEvent(abci.Event(sdkEvent)) - - ev, err := sdkutil.ParseEvent(strEvent) - require.NoError(t, err) - - versionString, err := types.ParseEVDeploymentVersion(ev.Attributes) - require.NoError(t, err) - assert.Equal(t, d.Version, versionString) - }) - - t.Run("deployment closed error", func(t *testing.T) { - edc := types.NewEventDeploymentClosed(d.ID()) - - sdkEvent := edc.ToSDKEvent() - strEvent := sdk.StringifyEvent(abci.Event(sdkEvent)) - - ev, err := sdkutil.ParseEvent(strEvent) - require.NoError(t, err) - - versionString, err := types.ParseEVDeploymentVersion(ev.Attributes) - require.Error(t, err) - assert.NotEqual(t, d.Version, versionString) - }) -} - -func TestGroupSpecValidation(t *testing.T) { - tests := []struct { - desc string - gspec types.GroupSpec - expErr error - }{ - { - desc: "groupspec requires name", - gspec: types.GroupSpec{ - Name: "", - Requirements: testutil.PlacementRequirements(t), - Resources: testutil.ResourcesList(t, 1), - }, - expErr: types.ErrInvalidGroups, - }, - { - desc: "groupspec valid", - gspec: types.GroupSpec{ - Name: "hihi", - Requirements: testutil.PlacementRequirements(t), - Resources: testutil.ResourcesList(t, 1), - }, - expErr: nil, - }, - } - - for _, test := range tests { - err := test.gspec.ValidateBasic() - if test.expErr != nil { - assert.Error(t, err, test.desc) - continue - } - assert.Equal(t, test.expErr, err, test.desc) - } -} - -func TestGroupPlacementRequirementsNoSigners(t *testing.T) { - group := types.GroupSpec{ - Name: "spec", - Requirements: testutil.PlacementRequirements(t), - Resources: testutil.ResourcesList(t, 1), - } - - providerAttr := []atypes.Provider{ - { - Owner: "test", - Attributes: group.Requirements.Attributes, - }, - } - - require.True(t, group.MatchRequirements(providerAttr)) -} - -func TestGroupPlacementRequirementsSignerAllOf(t *testing.T) { - group := types.GroupSpec{ - Name: "spec", - Requirements: testutil.PlacementRequirements(t), - Resources: testutil.ResourcesList(t, 1), - } - - group.Requirements.SignedBy.AllOf = append(group.Requirements.SignedBy.AllOf, "auditor1") - group.Requirements.SignedBy.AllOf = append(group.Requirements.SignedBy.AllOf, "auditor2") - - providerAttr := []atypes.Provider{ - { - Owner: "test", - Attributes: group.Requirements.Attributes, - }, - } - - require.False(t, group.MatchRequirements(providerAttr)) - - providerAttr = append(providerAttr, atypes.Provider{ - Owner: "test", - Auditor: "auditor2", - Attributes: group.Requirements.Attributes, - }) - - require.False(t, group.MatchRequirements(providerAttr)) - - providerAttr = append(providerAttr, atypes.Provider{ - Owner: "test", - Auditor: "auditor1", - Attributes: group.Requirements.Attributes, - }) - - require.True(t, group.MatchRequirements(providerAttr)) -} - -func TestGroupPlacementRequirementsSignerAnyOf(t *testing.T) { - group := types.GroupSpec{ - Name: "spec", - Requirements: testutil.PlacementRequirements(t), - Resources: testutil.ResourcesList(t, 1), - } - - group.Requirements.SignedBy.AnyOf = append(group.Requirements.SignedBy.AnyOf, "auditor1") - - providerAttr := []atypes.Provider{ - { - Owner: "test", - Attributes: group.Requirements.Attributes, - }, - } - - require.False(t, 
group.MatchRequirements(providerAttr)) - - providerAttr = append(providerAttr, atypes.Provider{ - Owner: "test", - Auditor: "auditor2", - Attributes: group.Requirements.Attributes, - }) - - require.False(t, group.MatchRequirements(providerAttr)) - - providerAttr = append(providerAttr, atypes.Provider{ - Owner: "test", - Auditor: "auditor1", - Attributes: group.Requirements.Attributes, - }) - - require.True(t, group.MatchRequirements(providerAttr)) -} - -func TestGroupPlacementRequirementsSignerAllOfAnyOf(t *testing.T) { - group := types.GroupSpec{ - Name: "spec", - Requirements: testutil.PlacementRequirements(t), - Resources: testutil.ResourcesList(t, 1), - } - - group.Requirements.SignedBy.AllOf = append(group.Requirements.SignedBy.AllOf, "auditor1") - group.Requirements.SignedBy.AllOf = append(group.Requirements.SignedBy.AllOf, "auditor2") - - group.Requirements.SignedBy.AnyOf = append(group.Requirements.SignedBy.AnyOf, "auditor3") - group.Requirements.SignedBy.AnyOf = append(group.Requirements.SignedBy.AnyOf, "auditor4") - - providerAttr := []atypes.Provider{ - { - Owner: "test", - Attributes: group.Requirements.Attributes, - }, - { - Owner: "test", - Auditor: "auditor3", - Attributes: group.Requirements.Attributes, - }, - { - Owner: "test", - Auditor: "auditor4", - Attributes: group.Requirements.Attributes, - }, - } - - require.False(t, group.MatchRequirements(providerAttr)) - - providerAttr = append(providerAttr, atypes.Provider{ - Owner: "test", - Auditor: "auditor2", - Attributes: group.Requirements.Attributes, - }) - - require.False(t, group.MatchRequirements(providerAttr)) - - providerAttr = append(providerAttr, atypes.Provider{ - Owner: "test", - Auditor: "auditor1", - Attributes: group.Requirements.Attributes, - }) - - require.True(t, group.MatchRequirements(providerAttr)) -} - -func TestGroupSpec_MatchResourcesAttributes(t *testing.T) { - group := types.GroupSpec{ - Name: "spec", - Requirements: testutil.PlacementRequirements(t), - Resources: testutil.ResourcesList(t, 1), - } - - group.Resources[0].Storage[0].Attributes = akashtypes.Attributes{ - { - Key: "persistent", - Value: "true", - }, - { - Key: "class", - Value: "default", - }, - } - - provAttributes := akashtypes.Attributes{ - { - Key: "capabilities/storage/1/class", - Value: "default", - }, - { - Key: "capabilities/storage/1/persistent", - Value: "true", - }, - } - - prov2Attributes := akashtypes.Attributes{ - { - Key: "capabilities/storage/1/class", - Value: "default", - }, - } - - prov3Attributes := akashtypes.Attributes{ - { - Key: "capabilities/storage/1/class", - Value: "beta2", - }, - } - - match := group.MatchResourcesRequirements(provAttributes) - require.True(t, match) - match = group.MatchResourcesRequirements(prov2Attributes) - require.False(t, match) - match = group.MatchResourcesRequirements(prov3Attributes) - require.False(t, match) -} - -func TestGroupSpec_MatchGPUAttributes(t *testing.T) { - group := types.GroupSpec{ - Name: "spec", - Requirements: testutil.PlacementRequirements(t), - Resources: testutil.ResourcesList(t, 1), - } - - group.Resources[0].GPU.Attributes = akashtypes.Attributes{ - { - Key: "vendor/nvidia/model/a100", - Value: "true", - }, - } - - provAttributes := akashtypes.Attributes{ - { - Key: "capabilities/storage/1/class", - Value: "default", - }, - { - Key: "capabilities/storage/1/persistent", - Value: "true", - }, - { - Key: "capabilities/gpu/vendor/nvidia/model/a100", - Value: "true", - }, - } - - prov2Attributes := akashtypes.Attributes{ - { - Key: "capabilities/storage/1/class", - 
Value: "default", - }, - } - - prov3Attributes := akashtypes.Attributes{ - { - Key: "capabilities/storage/1/class", - Value: "beta2", - }, - } - - match := group.MatchResourcesRequirements(provAttributes) - require.True(t, match) - match = group.MatchResourcesRequirements(prov2Attributes) - require.False(t, match) - match = group.MatchResourcesRequirements(prov3Attributes) - require.False(t, match) -} - -func TestGroupSpec_MatchGPUAttributesWildcard(t *testing.T) { - group := types.GroupSpec{ - Name: "spec", - Requirements: testutil.PlacementRequirements(t), - Resources: testutil.ResourcesList(t, 1), - } - - group.Resources[0].GPU.Attributes = akashtypes.Attributes{ - { - Key: "vendor/nvidia/model/*", - Value: "true", - }, - } - - provAttributes := akashtypes.Attributes{ - { - Key: "capabilities/storage/1/class", - Value: "default", - }, - { - Key: "capabilities/storage/1/persistent", - Value: "true", - }, - { - Key: "capabilities/gpu/vendor/nvidia/model/a100", - Value: "true", - }, - } - - prov2Attributes := akashtypes.Attributes{ - { - Key: "capabilities/storage/1/class", - Value: "default", - }, - } - - prov3Attributes := akashtypes.Attributes{ - { - Key: "capabilities/storage/1/class", - Value: "beta2", - }, - } - - match := group.MatchResourcesRequirements(provAttributes) - require.True(t, match) - match = group.MatchResourcesRequirements(prov2Attributes) - require.False(t, match) - match = group.MatchResourcesRequirements(prov3Attributes) - require.False(t, match) -} - -func TestDepositDeploymentAuthorization_Accept(t *testing.T) { - limit := sdk.NewInt64Coin(tutil.CoinDenom, 333) - dda := types.NewDepositDeploymentAuthorization(limit) - - // Send the wrong type of message, expect an error - var msg sdk.Msg - response, err := dda.Accept(sdk.Context{}, msg) - require.Error(t, err) - require.Contains(t, err.Error(), "invalid type") - require.Zero(t, response) - - // Try to deposit too much coin, expect an error - msg = types.NewMsgDepositDeployment(testutil.DeploymentID(t), limit.Add(sdk.NewInt64Coin(tutil.CoinDenom, 1)), testutil.AccAddress(t).String()) - response, err = dda.Accept(sdk.Context{}, msg) - require.Error(t, err) - require.Contains(t, err.Error(), "requested amount is more than spend limit") - require.Zero(t, response) - - // Deposit 1 less than the limit, expect an updated deposit - msg = types.NewMsgDepositDeployment(testutil.DeploymentID(t), limit.Sub(sdk.NewInt64Coin(tutil.CoinDenom, 1)), testutil.AccAddress(t).String()) - response, err = dda.Accept(sdk.Context{}, msg) - require.NoError(t, err) - require.True(t, response.Accept) - require.False(t, response.Delete) - - ok := false - dda, ok = response.Updated.(*types.DepositDeploymentAuthorization) - require.True(t, ok) - - // Deposit the limit (now 1), expect that it is not to be deleted - msg = types.NewMsgDepositDeployment(testutil.DeploymentID(t), sdk.NewInt64Coin(tutil.CoinDenom, 1), testutil.AccAddress(t).String()) - response, err = dda.Accept(sdk.Context{}, msg) - require.NoError(t, err) - require.True(t, response.Accept) - require.False(t, response.Delete) -} diff --git a/go/node/deployment/v1beta3/validation_config.go b/go/node/deployment/v1beta3/validation_config.go deleted file mode 100644 index 9c661f68..00000000 --- a/go/node/deployment/v1beta3/validation_config.go +++ /dev/null @@ -1,118 +0,0 @@ -package v1beta3 - -import ( - "github.com/akash-network/akash-api/go/node/types/unit" -) - -const ( - maxUnitCPU = 384 * 1000 // max amount of CPU units single replicate of service can request - maxUnitGPU = 24 - 
maxUnitMemory = 2 * unit.Ti - maxUnitStorage = 32 * unit.Ti - maxUnitCount = 50 // max amount of service replicas allowed - maxUnitPrice = 10000000 // 10akt - maxGroupCount = 20 // max amount of - maxGroupUnits = 20 -) - -// This is the validation configuration that acts as a hard limit -// on what the network accepts for deployments. This is never changed -// and is the same across all members of the network - -type Limits struct { - Memory uint64 - Storage uint64 - Price uint64 - CPU uint - GPU uint - Count uint -} - -type UnitLimits struct { - Max Limits - Min Limits -} - -type GroupLimit struct { - Limits - Units uint32 -} - -type GroupLimits struct { - Max GroupLimit -} - -type ValidationConfig struct { - Unit UnitLimits - Group GroupLimits - - // // MaxUnitCPU is the maximum number of milli (1/1000) cpu units a single instance may take - // MaxUnitCPU uint - // MaxUnitGPU uint - // // MaxUnitMemory is the maximum number of bytes of memory that a unit can consume - // MaxUnitMemory uint64 - // // MaxUnitStorage is the maximum number of bytes of storage that a unit can consume - // MaxUnitStorage uint64 - // // MaxUnitCount is the maximum number of replias of a service - // MaxUnitCount uint - // // MaxUnitPrice is the maximum price that a unit can have - // MaxUnitPrice uint64 - // - // MinUnitCPU uint - // MinUnitGPU uint - // MinUnitMemory uint64 - // MinUnitStorage uint64 - // MinUnitCount uint - // - // // MaxGroupCount is the maximum number of groups allowed per deployment - // MaxGroupCount int - // // MaxGroupUnits is the maximum number services per group - // MaxGroupUnits int - // - // // MaxGroupCPU is the maximum total amount of CPU requested per group - // MaxGroupCPU uint64 - // // MaxGroupGPU is the maximum total amount of GPU requested per group - // MaxGroupGPU uint64 - // // MaxGroupMemory is the maximum total amount of memory requested per group - // MaxGroupMemory uint64 - // // MaxGroupStorage is the maximum total amount of storage requested per group - // MaxGroupStorage uint64 -} - -var validationConfig = ValidationConfig{ - Unit: UnitLimits{ - Max: Limits{ - Memory: maxUnitMemory, - Storage: maxUnitStorage, - CPU: maxUnitCPU, - GPU: maxUnitGPU, - Count: maxUnitCount, - Price: maxUnitPrice, - }, - Min: Limits{ - Memory: unit.Mi, - Storage: 5 * unit.Mi, - CPU: 10, - GPU: 0, - Count: 1, - Price: 0, - }, - }, - Group: GroupLimits{ - Max: GroupLimit{ - Limits: Limits{ - Memory: maxUnitMemory * maxUnitCount, - Storage: maxUnitStorage * maxUnitCount, - CPU: maxUnitCPU * maxUnitCount, - GPU: maxUnitGPU * maxUnitCount, - Count: maxGroupCount, - Price: 0, - }, - Units: maxGroupUnits, - }, - }, -} - -func GetValidationConfig() ValidationConfig { - return validationConfig -} diff --git a/go/node/deployment/v1beta4/codec.go b/go/node/deployment/v1beta4/codec.go new file mode 100644 index 00000000..78b70018 --- /dev/null +++ b/go/node/deployment/v1beta4/codec.go @@ -0,0 +1,58 @@ +package v1beta4 + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" + "github.com/cosmos/cosmos-sdk/x/authz" + + v1 "pkg.akt.dev/go/node/deployment/v1" +) + +var ( + // ModuleCdc references the global x/deployment module codec. Note, the codec should + // ONLY be used in certain instances of tests and for JSON encoding as Amino is + // still used for that purpose. 
+ // + // The actual codec used for serialization should be provided to x/deployment and + // defined at the application level. + // + // Deprecated: ModuleCdc use is deprecated + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) + +// RegisterLegacyAminoCodec register concrete types on codec +// +// Deprecated: RegisterLegacyAminoCodec is deprecated +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + cdc.RegisterConcrete(&MsgCreateDeployment{}, "akash-sdk/x/"+v1.ModuleName+"/"+(&MsgCreateDeployment{}).Type(), nil) + cdc.RegisterConcrete(&MsgUpdateDeployment{}, "akash-sdk/x/"+v1.ModuleName+"/"+(&MsgUpdateDeployment{}).Type(), nil) + cdc.RegisterConcrete(&MsgCloseDeployment{}, "akash-sdk/x/"+v1.ModuleName+"/"+(&MsgCloseDeployment{}).Type(), nil) + cdc.RegisterConcrete(&MsgStartGroup{}, "akash-sdk/x/"+v1.ModuleName+"/"+(&MsgStartGroup{}).Type(), nil) + cdc.RegisterConcrete(&MsgPauseGroup{}, "akash-sdk/x/"+v1.ModuleName+"/"+(&MsgPauseGroup{}).Type(), nil) + cdc.RegisterConcrete(&MsgCloseGroup{}, "akash-sdk/x/"+v1.ModuleName+"/"+(&MsgCloseGroup{}).Type(), nil) + cdc.RegisterConcrete(&v1.MsgDepositDeployment{}, "akash-sdk/x/"+v1.ModuleName+"/"+(&v1.MsgDepositDeployment{}).Type(), nil) + cdc.RegisterConcrete(&MsgUpdateParams{}, "akash-sdk/x/"+v1.ModuleName+"/"+(&MsgUpdateParams{}).Type(), nil) +} + +// RegisterInterfaces registers the x/deployment interfaces types with the interface registry +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), + &v1.MsgDepositDeployment{}, + &MsgCreateDeployment{}, + &MsgUpdateDeployment{}, + &MsgCloseDeployment{}, + &MsgStartGroup{}, + &MsgPauseGroup{}, + &MsgCloseGroup{}, + &MsgUpdateParams{}, + ) + + registry.RegisterImplementations( + (*authz.Authorization)(nil), + &v1.DepositAuthorization{}, + ) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} diff --git a/go/node/deployment/v1beta4/deployment_validation_test.go b/go/node/deployment/v1beta4/deployment_validation_test.go new file mode 100644 index 00000000..96ad9743 --- /dev/null +++ b/go/node/deployment/v1beta4/deployment_validation_test.go @@ -0,0 +1,203 @@ +package v1beta4_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" + + v1 "pkg.akt.dev/go/node/deployment/v1" + types "pkg.akt.dev/go/node/deployment/v1beta4" + attr "pkg.akt.dev/go/node/types/attributes/v1" + akashtypes "pkg.akt.dev/go/node/types/resources/v1beta4" + tutil "pkg.akt.dev/go/testutil" +) + +const ( + regexInvalidUnitBoundaries = `^.*invalid unit count|CPU|GPU|memory|storage \(\d+ > 0 > \d+ fails\)$` +) + +func TestZeroValueGroupSpec(t *testing.T) { + did := tutil.DeploymentID(t) + + dgroup := tutil.DeploymentGroup(t, did, uint32(6)) + gspec := dgroup.GroupSpec + + t.Run("assert nominal test success", func(t *testing.T) { + err := gspec.ValidateBasic() + require.NoError(t, err) + }) +} + +func TestZeroValueGroupSpecs(t *testing.T) { + did := tutil.DeploymentID(t) + dgroups := tutil.DeploymentGroups(t, did, uint32(6)) + gspecs := make([]types.GroupSpec, 0) + for _, d := range dgroups { + gspecs = append(gspecs, d.GroupSpec) + } + + t.Run("assert nominal test success", func(t *testing.T) { + err := types.ValidateDeploymentGroups(gspecs) + require.NoError(t, err) + }) + + gspecZeroed := make([]types.GroupSpec, len(gspecs)) + gspecZeroed = append(gspecZeroed, gspecs...) 
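+	// Note: the first len(gspecs) entries of gspecZeroed are zero-valued GroupSpecs
+	// (empty name, no resources); they are expected to fail ValidateBasic, so the
+	// ValidateDeploymentGroups call below should return an error for the combined slice.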
+ t.Run("assert error for zero value bid duration", func(t *testing.T) { + err := types.ValidateDeploymentGroups(gspecZeroed) + require.Error(t, err) + }) +} + +func TestEmptyGroupSpecIsInvalid(t *testing.T) { + err := types.ValidateDeploymentGroups(make([]types.GroupSpec, 0)) + require.Equal(t, v1.ErrInvalidGroups, err) +} + +func validSimpleGroupSpec() types.GroupSpec { + resources := make(types.ResourceUnits, 1) + resources[0] = types.ResourceUnit{ + Resources: akashtypes.Resources{ + ID: 1, + CPU: &akashtypes.CPU{ + Units: akashtypes.ResourceValue{ + Val: sdk.NewInt(10), + }, + Attributes: nil, + }, + GPU: &akashtypes.GPU{ + Units: akashtypes.ResourceValue{ + Val: sdk.NewInt(0), + }, + Attributes: nil, + }, + Memory: &akashtypes.Memory{ + Quantity: akashtypes.ResourceValue{ + Val: sdk.NewIntFromUint64(types.GetValidationConfig().Unit.Min.Memory), + }, + Attributes: nil, + }, + Storage: akashtypes.Volumes{ + akashtypes.Storage{ + Quantity: akashtypes.ResourceValue{ + Val: sdk.NewIntFromUint64(types.GetValidationConfig().Unit.Min.Storage), + }, + Attributes: nil, + }, + }, + Endpoints: akashtypes.Endpoints{}, + }, + Count: 1, + Price: sdk.NewInt64DecCoin(tutil.CoinDenom, 1), + } + return types.GroupSpec{ + Name: "testGroup", + Requirements: attr.PlacementRequirements{}, + Resources: resources, + } +} + +func validSimpleGroupSpecs() []types.GroupSpec { + result := make([]types.GroupSpec, 1) + result[0] = validSimpleGroupSpec() + + return result +} + +func TestSimpleGroupSpecIsValid(t *testing.T) { + groups := validSimpleGroupSpecs() + err := types.ValidateDeploymentGroups(groups) + require.NoError(t, err) +} + +func TestDuplicateSimpleGroupSpecIsInvalid(t *testing.T) { + groups := validSimpleGroupSpecs() + groupsDuplicate := make([]types.GroupSpec, 2) + groupsDuplicate[0] = groups[0] + groupsDuplicate[1] = groups[0] + err := types.ValidateDeploymentGroups(groupsDuplicate) + require.Error(t, err) // TODO - specific error + require.Regexp(t, "^.*duplicate.*$", err) +} + +func TestGroupWithZeroCount(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].Count = 0 + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, regexInvalidUnitBoundaries, err) +} + +func TestGroupWithZeroCPU(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].CPU.Units.Val = sdk.NewInt(0) + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, regexInvalidUnitBoundaries, err) +} + +func TestGroupWithZeroMemory(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].Memory.Quantity.Val = sdk.NewInt(0) + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, regexInvalidUnitBoundaries, err) +} + +func TestGroupWithZeroStorage(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].Storage[0].Quantity.Val = sdk.NewInt(0) + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, regexInvalidUnitBoundaries, err) +} + +func TestGroupWithNilCPU(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].CPU = nil + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, "^.*invalid unit CPU.*$", err) +} + +func TestGroupWithNilGPU(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].GPU = nil + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, "^.*invalid unit GPU.*$", err) +} + +func TestGroupWithNilMemory(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].Memory = nil + err := group.ValidateBasic() + 
require.Error(t, err) + require.Regexp(t, "^.*invalid unit memory.*$", err) +} + +func TestGroupWithNilStorage(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].Storage = nil + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, "^.*invalid unit storage.*$", err) +} + +func TestGroupWithInvalidPrice(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].Price = sdk.DecCoin{} + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, "^.*invalid price object.*$", err) +} + +func TestGroupWithNegativePrice(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].Price.Amount = sdk.NewDec(-1) + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, "^.*invalid price object.*$", err) +} diff --git a/go/node/deployment/v1beta4/deploymentmsg.pb.go b/go/node/deployment/v1beta4/deploymentmsg.pb.go new file mode 100644 index 00000000..90413d31 --- /dev/null +++ b/go/node/deployment/v1beta4/deploymentmsg.pb.go @@ -0,0 +1,1327 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1beta4/deploymentmsg.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + v1 "pkg.akt.dev/go/node/deployment/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgCreateDeployment defines an SDK message for creating deployment +type MsgCreateDeployment struct { + ID v1.DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` + Groups GroupSpecs `protobuf:"bytes,2,rep,name=groups,proto3,castrepeated=GroupSpecs" json:"groups" yaml:"groups"` + Hash []byte `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash" yaml:"hash"` + Deposit types.Coin `protobuf:"bytes,4,opt,name=deposit,proto3" json:"deposit" yaml:"deposit"` + // Depositor pays for the deposit + Depositor string `protobuf:"bytes,5,opt,name=depositor,proto3" json:"depositor" yaml:"depositor"` +} + +func (m *MsgCreateDeployment) Reset() { *m = MsgCreateDeployment{} } +func (m *MsgCreateDeployment) String() string { return proto.CompactTextString(m) } +func (*MsgCreateDeployment) ProtoMessage() {} +func (*MsgCreateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_9b10e8e78e405ddf, []int{0} +} +func (m *MsgCreateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateDeployment.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateDeployment.Merge(m, src) +} +func (m *MsgCreateDeployment) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateDeployment proto.InternalMessageInfo + +func (m *MsgCreateDeployment) GetID() v1.DeploymentID { + if m != nil { + return m.ID + } + return v1.DeploymentID{} +} + +func (m *MsgCreateDeployment) GetGroups() GroupSpecs { + if m != nil { + return m.Groups + } + return nil +} + +func (m *MsgCreateDeployment) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *MsgCreateDeployment) GetDeposit() types.Coin { + if m != nil { + return m.Deposit + } + return types.Coin{} +} + +func (m *MsgCreateDeployment) GetDepositor() string { + if m != nil { + return m.Depositor + } + return "" +} + +// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. 
+type MsgCreateDeploymentResponse struct { +} + +func (m *MsgCreateDeploymentResponse) Reset() { *m = MsgCreateDeploymentResponse{} } +func (m *MsgCreateDeploymentResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateDeploymentResponse) ProtoMessage() {} +func (*MsgCreateDeploymentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9b10e8e78e405ddf, []int{1} +} +func (m *MsgCreateDeploymentResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateDeploymentResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateDeploymentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateDeploymentResponse.Merge(m, src) +} +func (m *MsgCreateDeploymentResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateDeploymentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateDeploymentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateDeploymentResponse proto.InternalMessageInfo + +// MsgUpdateDeployment defines an SDK message for updating deployment +type MsgUpdateDeployment struct { + ID v1.DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` + Hash []byte `protobuf:"bytes,3,opt,name=hash,proto3" json:"hash" yaml:"hash"` +} + +func (m *MsgUpdateDeployment) Reset() { *m = MsgUpdateDeployment{} } +func (m *MsgUpdateDeployment) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateDeployment) ProtoMessage() {} +func (*MsgUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_9b10e8e78e405ddf, []int{2} +} +func (m *MsgUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateDeployment.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateDeployment.Merge(m, src) +} +func (m *MsgUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateDeployment proto.InternalMessageInfo + +func (m *MsgUpdateDeployment) GetID() v1.DeploymentID { + if m != nil { + return m.ID + } + return v1.DeploymentID{} +} + +func (m *MsgUpdateDeployment) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +// MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. 
+type MsgUpdateDeploymentResponse struct { +} + +func (m *MsgUpdateDeploymentResponse) Reset() { *m = MsgUpdateDeploymentResponse{} } +func (m *MsgUpdateDeploymentResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateDeploymentResponse) ProtoMessage() {} +func (*MsgUpdateDeploymentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9b10e8e78e405ddf, []int{3} +} +func (m *MsgUpdateDeploymentResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateDeploymentResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateDeploymentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateDeploymentResponse.Merge(m, src) +} +func (m *MsgUpdateDeploymentResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateDeploymentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateDeploymentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateDeploymentResponse proto.InternalMessageInfo + +// MsgCloseDeployment defines an SDK message for closing deployment +type MsgCloseDeployment struct { + ID v1.DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *MsgCloseDeployment) Reset() { *m = MsgCloseDeployment{} } +func (m *MsgCloseDeployment) String() string { return proto.CompactTextString(m) } +func (*MsgCloseDeployment) ProtoMessage() {} +func (*MsgCloseDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_9b10e8e78e405ddf, []int{4} +} +func (m *MsgCloseDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCloseDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCloseDeployment.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCloseDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCloseDeployment.Merge(m, src) +} +func (m *MsgCloseDeployment) XXX_Size() int { + return m.Size() +} +func (m *MsgCloseDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCloseDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCloseDeployment proto.InternalMessageInfo + +func (m *MsgCloseDeployment) GetID() v1.DeploymentID { + if m != nil { + return m.ID + } + return v1.DeploymentID{} +} + +// MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. 
+type MsgCloseDeploymentResponse struct { +} + +func (m *MsgCloseDeploymentResponse) Reset() { *m = MsgCloseDeploymentResponse{} } +func (m *MsgCloseDeploymentResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCloseDeploymentResponse) ProtoMessage() {} +func (*MsgCloseDeploymentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9b10e8e78e405ddf, []int{5} +} +func (m *MsgCloseDeploymentResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCloseDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCloseDeploymentResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCloseDeploymentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCloseDeploymentResponse.Merge(m, src) +} +func (m *MsgCloseDeploymentResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCloseDeploymentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCloseDeploymentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCloseDeploymentResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgCreateDeployment)(nil), "akash.deployment.v1beta4.MsgCreateDeployment") + proto.RegisterType((*MsgCreateDeploymentResponse)(nil), "akash.deployment.v1beta4.MsgCreateDeploymentResponse") + proto.RegisterType((*MsgUpdateDeployment)(nil), "akash.deployment.v1beta4.MsgUpdateDeployment") + proto.RegisterType((*MsgUpdateDeploymentResponse)(nil), "akash.deployment.v1beta4.MsgUpdateDeploymentResponse") + proto.RegisterType((*MsgCloseDeployment)(nil), "akash.deployment.v1beta4.MsgCloseDeployment") + proto.RegisterType((*MsgCloseDeploymentResponse)(nil), "akash.deployment.v1beta4.MsgCloseDeploymentResponse") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/deploymentmsg.proto", fileDescriptor_9b10e8e78e405ddf) +} + +var fileDescriptor_9b10e8e78e405ddf = []byte{ + // 534 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x93, 0x31, 0x6f, 0xd3, 0x40, + 0x14, 0xc7, 0x6d, 0xa7, 0x14, 0x72, 0x01, 0x04, 0xa6, 0xa2, 0x6e, 0xa0, 0xbe, 0x60, 0x10, 0xb2, + 0x80, 0x9e, 0x95, 0xc2, 0x42, 0x27, 0x70, 0x23, 0xa1, 0x0a, 0x21, 0x21, 0x57, 0x08, 0x89, 0x05, + 0x39, 0xb9, 0x93, 0x63, 0x12, 0xfb, 0x2c, 0x9f, 0x09, 0xca, 0xca, 0x27, 0xe0, 0x23, 0xb0, 0x21, + 0x31, 0x31, 0xb0, 0xf0, 0x0d, 0x3a, 0x56, 0x4c, 0x4c, 0x07, 0x4a, 0x06, 0x50, 0x46, 0x7f, 0x02, + 0x64, 0xdf, 0x35, 0x29, 0xe0, 0x0c, 0x0c, 0xdd, 0xee, 0xdd, 0xfb, 0xbd, 0xf7, 0xfe, 0xff, 0x7b, + 0x3a, 0x70, 0xc7, 0x1f, 0xf8, 0xac, 0xef, 0x60, 0x92, 0x0c, 0xe9, 0x38, 0x22, 0x71, 0xe6, 0x8c, + 0xda, 0x5d, 0x92, 0xf9, 0xf7, 0x8e, 0x5d, 0x45, 0x2c, 0x40, 0x49, 0x4a, 0x33, 0xaa, 0x1b, 0x25, + 0x8d, 0x16, 0x29, 0x24, 0xe9, 0xe6, 0x5a, 0x40, 0x03, 0x5a, 0x42, 0x4e, 0x71, 0x12, 0x7c, 0x73, + 0xa3, 0x47, 0x59, 0x44, 0xd9, 0x4b, 0x91, 0x10, 0x81, 0x4c, 0xad, 0x8b, 0xc8, 0x89, 0x58, 0xe0, + 0x8c, 0xda, 0xce, 0x7c, 0x46, 0xd3, 0x94, 0x89, 0xae, 0xcf, 0x88, 0x14, 0xd3, 0x76, 0x7a, 0x34, + 0x8c, 0x65, 0xfe, 0x46, 0x85, 0xe2, 0x63, 0x91, 0xa4, 0xec, 0xa5, 0xbe, 0x82, 0x94, 0xbe, 0x4e, + 0x58, 0x42, 0x7a, 0x82, 0xb4, 0xbe, 0xd4, 0xc0, 0xa5, 0x27, 0x2c, 0xd8, 0x4d, 0x89, 0x9f, 0x91, + 0xce, 0x9c, 0xd7, 0x9f, 0x02, 0x2d, 0xc4, 0x86, 0xda, 0x52, 0xed, 0xc6, 0xf6, 0x35, 0x54, 0x61, + 0x1c, 0x2d, 0xe0, 0xbd, 0x8e, 0xbb, 0x79, 0xc0, 0xa1, 0x32, 0xe1, 0x50, 
0xdb, 0xeb, 0xcc, 0x38, + 0xd4, 0x42, 0x9c, 0x73, 0x58, 0x1f, 0xfb, 0xd1, 0x70, 0xc7, 0x0a, 0xb1, 0xe5, 0x69, 0x21, 0xd6, + 0x5f, 0x81, 0x55, 0x31, 0xdc, 0xd0, 0x5a, 0x35, 0xbb, 0xb1, 0x7d, 0x1d, 0x2d, 0x7b, 0x4e, 0xf4, + 0xa8, 0xe0, 0xf6, 0x13, 0xd2, 0x73, 0xb7, 0x8a, 0xbe, 0x33, 0x0e, 0x65, 0x69, 0xce, 0xe1, 0x39, + 0xd1, 0x55, 0xc4, 0xd6, 0xc7, 0xef, 0x10, 0xcc, 0x69, 0xe6, 0x49, 0x4c, 0xbf, 0x0d, 0x56, 0xfa, + 0x3e, 0xeb, 0x1b, 0xb5, 0x96, 0x6a, 0x9f, 0x75, 0xd7, 0x67, 0x1c, 0x96, 0x71, 0xce, 0x61, 0x43, + 0x94, 0x17, 0x91, 0xe5, 0x95, 0x97, 0xfa, 0x63, 0x70, 0x1a, 0x93, 0x84, 0xb2, 0x30, 0x33, 0x56, + 0x4a, 0xbf, 0x1b, 0x48, 0xee, 0xaa, 0x58, 0x82, 0x14, 0xd5, 0x46, 0xbb, 0x34, 0x8c, 0xdd, 0xcb, + 0x85, 0x9e, 0x9c, 0xc3, 0xf3, 0xa2, 0x8d, 0xac, 0xb3, 0xbc, 0xa3, 0x0e, 0xfa, 0x73, 0x50, 0x97, + 0x47, 0x9a, 0x1a, 0xa7, 0x5a, 0xaa, 0x5d, 0x77, 0xef, 0xcf, 0x38, 0x5c, 0x5c, 0xe6, 0x1c, 0x5e, + 0xf8, 0xa3, 0x98, 0xa6, 0xd6, 0xd7, 0xcf, 0x5b, 0x6b, 0x72, 0xe4, 0x43, 0x8c, 0x53, 0xc2, 0xd8, + 0x7e, 0x96, 0x86, 0x71, 0xe0, 0x2d, 0xca, 0x76, 0x2e, 0xfe, 0x7a, 0x0f, 0x95, 0xb7, 0x3f, 0x3f, + 0xdd, 0x3a, 0x13, 0x62, 0x44, 0xdf, 0xc4, 0x24, 0xb5, 0x36, 0xc1, 0x95, 0x8a, 0xd5, 0x79, 0x84, + 0x25, 0x34, 0x66, 0xc4, 0xfa, 0xa0, 0x96, 0xab, 0x7d, 0x96, 0xe0, 0x93, 0x5e, 0xed, 0xff, 0x3c, + 0xf7, 0x72, 0x23, 0x7f, 0x0b, 0x9d, 0x1b, 0x19, 0x03, 0xbd, 0xf0, 0x39, 0xa4, 0xec, 0x44, 0x6d, + 0x54, 0x29, 0xbb, 0x0a, 0x9a, 0xff, 0x8e, 0x3e, 0x12, 0xe6, 0x3e, 0x38, 0x98, 0x98, 0xea, 0xe1, + 0xc4, 0x54, 0x7f, 0x4c, 0x4c, 0xf5, 0xdd, 0xd4, 0x54, 0x0e, 0xa7, 0xa6, 0xf2, 0x6d, 0x6a, 0x2a, + 0x2f, 0x6e, 0x26, 0x83, 0x00, 0xf9, 0x83, 0x0c, 0x61, 0x32, 0x72, 0x02, 0xea, 0xc4, 0x14, 0x93, + 0x8a, 0xef, 0xd8, 0x5d, 0x2d, 0x7f, 0xe1, 0xdd, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x67, + 0x98, 0xc0, 0x89, 0x04, 0x00, 0x00, +} + +func (m *MsgCreateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Depositor) > 0 { + i -= len(m.Depositor) + copy(dAtA[i:], m.Depositor) + i = encodeVarintDeploymentmsg(dAtA, i, uint64(len(m.Depositor))) + i-- + dAtA[i] = 0x2a + } + { + size, err := m.Deposit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintDeploymentmsg(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x1a + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgCreateDeploymentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return 
nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintDeploymentmsg(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgUpdateDeploymentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgCloseDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCloseDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCloseDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgCloseDeploymentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCloseDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCloseDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintDeploymentmsg(dAtA []byte, offset int, v uint64) int { + offset -= sovDeploymentmsg(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgCreateDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovDeploymentmsg(uint64(l)) + if len(m.Groups) > 0 { + for _, e := range m.Groups { + l = e.Size() + n += 1 + l + sovDeploymentmsg(uint64(l)) + } 
+ } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovDeploymentmsg(uint64(l)) + } + l = m.Deposit.Size() + n += 1 + l + sovDeploymentmsg(uint64(l)) + l = len(m.Depositor) + if l > 0 { + n += 1 + l + sovDeploymentmsg(uint64(l)) + } + return n +} + +func (m *MsgCreateDeploymentResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovDeploymentmsg(uint64(l)) + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovDeploymentmsg(uint64(l)) + } + return n +} + +func (m *MsgUpdateDeploymentResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgCloseDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovDeploymentmsg(uint64(l)) + return n +} + +func (m *MsgCloseDeploymentResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovDeploymentmsg(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDeploymentmsg(x uint64) (n int) { + return sovDeploymentmsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgCreateDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, GroupSpec{}) + if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) + if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deposit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Deposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Depositor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Depositor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeploymentmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateDeploymentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateDeploymentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeploymentmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeploymentmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateDeploymentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateDeploymentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeploymentmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCloseDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCloseDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCloseDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeploymentmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCloseDeploymentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCloseDeploymentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCloseDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeploymentmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDeploymentmsg(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDeploymentmsg + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDeploymentmsg + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDeploymentmsg + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDeploymentmsg = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDeploymentmsg = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDeploymentmsg = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/escrow.go b/go/node/deployment/v1beta4/escrow.go new file mode 100644 index 00000000..45cca379 --- /dev/null +++ b/go/node/deployment/v1beta4/escrow.go @@ -0,0 +1,27 @@ +package v1beta4 + +import ( + v1 "pkg.akt.dev/go/node/deployment/v1" + etypes "pkg.akt.dev/go/node/escrow/v1" +) + +const ( + EscrowScope = "deployment" +) + +func EscrowAccountForDeployment(id v1.DeploymentID) etypes.AccountID { + return etypes.AccountID{ + Scope: EscrowScope, + XID: id.String(), + } +} + +func DeploymentIDFromEscrowAccount(id etypes.AccountID) (v1.DeploymentID, bool) { + if id.Scope != EscrowScope { + return v1.DeploymentID{}, false + } + + did, err := v1.ParseDeploymentID(id.XID) + + return did, err == nil +} diff --git a/go/node/deployment/v1beta4/filters.pb.go b/go/node/deployment/v1beta4/filters.pb.go new file mode 100644 index 00000000..0187144a --- /dev/null +++ b/go/node/deployment/v1beta4/filters.pb.go @@ -0,0 +1,709 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: akash/deployment/v1beta4/filters.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// DeploymentFilters defines filters used to filter deployments +type DeploymentFilters struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` + State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state" yaml:"state"` +} + +func (m *DeploymentFilters) Reset() { *m = DeploymentFilters{} } +func (m *DeploymentFilters) String() string { return proto.CompactTextString(m) } +func (*DeploymentFilters) ProtoMessage() {} +func (*DeploymentFilters) Descriptor() ([]byte, []int) { + return fileDescriptor_fb62db02ae97591f, []int{0} +} +func (m *DeploymentFilters) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeploymentFilters.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeploymentFilters) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentFilters.Merge(m, src) +} +func (m *DeploymentFilters) XXX_Size() int { + return m.Size() +} +func (m *DeploymentFilters) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentFilters.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentFilters proto.InternalMessageInfo + +func (m *DeploymentFilters) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *DeploymentFilters) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *DeploymentFilters) GetState() string { + if m != nil { + return m.State + } + return "" +} + +// GroupFilters defines filters used to filter groups +type GroupFilters struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` + GSeq uint64 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` + State string `protobuf:"bytes,4,opt,name=state,proto3" json:"state" yaml:"state"` +} + +func (m *GroupFilters) Reset() { *m = GroupFilters{} } +func (m *GroupFilters) String() string { return proto.CompactTextString(m) } +func (*GroupFilters) ProtoMessage() {} +func (*GroupFilters) Descriptor() ([]byte, []int) { + return fileDescriptor_fb62db02ae97591f, []int{1} +} +func (m *GroupFilters) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GroupFilters.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} 
+func (m *GroupFilters) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupFilters.Merge(m, src) +} +func (m *GroupFilters) XXX_Size() int { + return m.Size() +} +func (m *GroupFilters) XXX_DiscardUnknown() { + xxx_messageInfo_GroupFilters.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupFilters proto.InternalMessageInfo + +func (m *GroupFilters) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *GroupFilters) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *GroupFilters) GetGSeq() uint64 { + if m != nil { + return m.GSeq + } + return 0 +} + +func (m *GroupFilters) GetState() string { + if m != nil { + return m.State + } + return "" +} + +func init() { + proto.RegisterType((*DeploymentFilters)(nil), "akash.deployment.v1beta4.DeploymentFilters") + proto.RegisterType((*GroupFilters)(nil), "akash.deployment.v1beta4.GroupFilters") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/filters.proto", fileDescriptor_fb62db02ae97591f) +} + +var fileDescriptor_fb62db02ae97591f = []byte{ + // 338 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x92, 0xb1, 0x4e, 0x02, 0x31, + 0x1c, 0xc6, 0xaf, 0x72, 0x98, 0x58, 0x59, 0xbc, 0x30, 0x1c, 0x0c, 0x57, 0x72, 0x03, 0x61, 0xf1, + 0x1a, 0x82, 0x13, 0x93, 0x12, 0x22, 0x3b, 0x6c, 0x2e, 0xa6, 0xd8, 0x5a, 0x09, 0x70, 0x3d, 0xda, + 0x8a, 0xe1, 0x2d, 0x7c, 0x04, 0x1f, 0xc2, 0x87, 0x60, 0x24, 0x4e, 0x4e, 0x17, 0x73, 0x2c, 0x86, + 0x91, 0xc9, 0xd1, 0x5c, 0x4b, 0x24, 0x24, 0x2e, 0x6e, 0x6e, 0xfd, 0xbe, 0xfe, 0xbe, 0xff, 0x97, + 0x7f, 0xf2, 0x87, 0x75, 0x32, 0x26, 0xea, 0x01, 0x53, 0x96, 0x4c, 0xc4, 0x62, 0xca, 0x62, 0x8d, + 0xe7, 0xcd, 0x21, 0xd3, 0xe4, 0x02, 0xdf, 0x8f, 0x26, 0x9a, 0x49, 0x15, 0x25, 0x52, 0x68, 0xe1, + 0xf9, 0x86, 0x8b, 0xf6, 0x5c, 0xb4, 0xe3, 0xaa, 0x65, 0x2e, 0xb8, 0x30, 0x10, 0xce, 0x5f, 0x96, + 0xaf, 0x56, 0xee, 0x84, 0x9a, 0x0a, 0x75, 0x6b, 0x3f, 0xac, 0xb0, 0x5f, 0xe1, 0x12, 0xc0, 0xb3, + 0xee, 0xcf, 0x9c, 0x6b, 0x5b, 0xe3, 0xf5, 0x60, 0x51, 0x3c, 0xc5, 0x4c, 0xfa, 0xa0, 0x06, 0x1a, + 0x27, 0x9d, 0xe6, 0x26, 0x45, 0xd6, 0xd8, 0xa6, 0xa8, 0xb4, 0x20, 0xd3, 0x49, 0x3b, 0x34, 0x32, + 0x7c, 0x7b, 0x3d, 0x2f, 0xef, 0xe6, 0x5d, 0x51, 0x2a, 0x99, 0x52, 0x03, 0x2d, 0x47, 0x31, 0xef, + 0x5b, 0xdc, 0x6b, 0x41, 0x97, 0x2a, 0x36, 0xf3, 0x8f, 0x6a, 0xa0, 0xe1, 0x76, 0x50, 0x96, 0x22, + 0xb7, 0x3b, 0x60, 0xb3, 0x4d, 0x8a, 0x8c, 0xbf, 0x4d, 0xd1, 0xa9, 0x1d, 0x97, 0xab, 0xb0, 0x6f, + 0x4c, 0x0f, 0xc3, 0xa2, 0xd2, 0x44, 0x33, 0xbf, 0x60, 0xda, 0x2b, 0x79, 0xbb, 0x31, 0xf6, 0xed, + 0x46, 0x86, 0x7d, 0x6b, 0xb7, 0xdd, 0xcf, 0x17, 0xe4, 0x84, 0x5f, 0x00, 0x96, 0x7a, 0x52, 0x3c, + 0x26, 0xff, 0x63, 0x8b, 0x16, 0x74, 0x79, 0x1e, 0x2a, 0xec, 0x43, 0xbd, 0x5d, 0x88, 0x1f, 0x84, + 0xb8, 0x0d, 0xf1, 0x83, 0xd5, 0xdd, 0xbf, 0xac, 0xde, 0xb9, 0x5c, 0x66, 0x01, 0x58, 0x65, 0x01, + 0xf8, 0xc8, 0x02, 0xf0, 0xbc, 0x0e, 0x9c, 0xd5, 0x3a, 0x70, 0xde, 0xd7, 0x81, 0x73, 0x53, 0x4f, + 0xc6, 0x3c, 0x22, 0x63, 0x1d, 0x51, 0x36, 0xc7, 0x5c, 0xe0, 0x58, 0x50, 0xf6, 0xcb, 0x81, 0x0d, + 0x8f, 0xcd, 0x39, 0xb4, 0xbe, 0x03, 0x00, 0x00, 0xff, 0xff, 0x45, 0x11, 0x40, 0x8d, 0x83, 0x02, + 0x00, 0x00, +} + +func (m *DeploymentFilters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentFilters) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintFilters(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x1a + } + if m.DSeq != 0 { + i = encodeVarintFilters(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintFilters(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *GroupFilters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupFilters) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintFilters(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x22 + } + if m.GSeq != 0 { + i = encodeVarintFilters(dAtA, i, uint64(m.GSeq)) + i-- + dAtA[i] = 0x18 + } + if m.DSeq != 0 { + i = encodeVarintFilters(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintFilters(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintFilters(dAtA []byte, offset int, v uint64) int { + offset -= sovFilters(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DeploymentFilters) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovFilters(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovFilters(uint64(m.DSeq)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovFilters(uint64(l)) + } + return n +} + +func (m *GroupFilters) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovFilters(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovFilters(uint64(m.DSeq)) + } + if m.GSeq != 0 { + n += 1 + sovFilters(uint64(m.GSeq)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovFilters(uint64(l)) + } + return n +} + +func sovFilters(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozFilters(x uint64) (n int) { + return sovFilters(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *DeploymentFilters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentFilters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentFilters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFilters + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFilters + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFilters + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFilters + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFilters(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthFilters + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupFilters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupFilters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupFilters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFilters + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFilters + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) + } + m.GSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFilters + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFilters + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFilters(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthFilters + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFilters(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilters + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilters + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilters + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthFilters + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupFilters + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthFilters + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthFilters = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFilters = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupFilters = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/genesis.pb.go b/go/node/deployment/v1beta4/genesis.pb.go new file mode 100644 index 00000000..6774701c --- /dev/null +++ b/go/node/deployment/v1beta4/genesis.pb.go @@ -0,0 +1,630 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: akash/deployment/v1beta4/genesis.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + v1 "pkg.akt.dev/go/node/deployment/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisDeployment defines the basic genesis state used by deployment module +type GenesisDeployment struct { + Deployment v1.Deployment `protobuf:"bytes,1,opt,name=deployment,proto3" json:"deployment" yaml:"deployment"` + Groups Groups `protobuf:"bytes,2,rep,name=groups,proto3,castrepeated=Groups" json:"groups" yaml:"groups"` +} + +func (m *GenesisDeployment) Reset() { *m = GenesisDeployment{} } +func (m *GenesisDeployment) String() string { return proto.CompactTextString(m) } +func (*GenesisDeployment) ProtoMessage() {} +func (*GenesisDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_a4941a99faf6028f, []int{0} +} +func (m *GenesisDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisDeployment.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisDeployment.Merge(m, src) +} +func (m *GenesisDeployment) XXX_Size() int { + return m.Size() +} +func (m *GenesisDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisDeployment proto.InternalMessageInfo + +func (m *GenesisDeployment) GetDeployment() v1.Deployment { + if m != nil { + return m.Deployment + } + return v1.Deployment{} +} + +func (m *GenesisDeployment) GetGroups() Groups { + if m != nil { + return m.Groups + } + return nil +} + +// GenesisState stores slice of genesis deployment instance +type GenesisState struct { + Deployments []GenesisDeployment `protobuf:"bytes,1,rep,name=deployments,proto3" json:"deployments" yaml:"deployments"` + Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params" yaml:"params"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_a4941a99faf6028f, []int{1} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) 
XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetDeployments() []GenesisDeployment { + if m != nil { + return m.Deployments + } + return nil +} + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*GenesisDeployment)(nil), "akash.deployment.v1beta4.GenesisDeployment") + proto.RegisterType((*GenesisState)(nil), "akash.deployment.v1beta4.GenesisState") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/genesis.proto", fileDescriptor_a4941a99faf6028f) +} + +var fileDescriptor_a4941a99faf6028f = []byte{ + // 350 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xbd, 0x4e, 0xfb, 0x30, + 0x14, 0xc5, 0xe3, 0xfe, 0xa5, 0x0c, 0xee, 0x9f, 0xa1, 0x11, 0x43, 0xd4, 0x21, 0xae, 0x2c, 0x28, + 0x20, 0x24, 0x5b, 0x7c, 0x4c, 0x4c, 0x28, 0x42, 0xea, 0x8a, 0xc2, 0x04, 0x9b, 0xab, 0x5a, 0x29, + 0xea, 0x87, 0xa3, 0xda, 0x54, 0xf4, 0x2d, 0x78, 0x0e, 0x9e, 0xa4, 0x63, 0x47, 0xa6, 0x00, 0xcd, + 0xc6, 0xd8, 0x27, 0x40, 0xb5, 0x4d, 0x1d, 0x55, 0x4d, 0xb7, 0xde, 0xe6, 0x77, 0xee, 0x39, 0xe7, + 0xca, 0xb0, 0xcd, 0x06, 0x4c, 0xf6, 0x69, 0x8f, 0x67, 0x43, 0x31, 0x1b, 0xf1, 0xb1, 0xa2, 0xd3, + 0x8b, 0x2e, 0x57, 0xec, 0x9a, 0xa6, 0x7c, 0xcc, 0xe5, 0xb3, 0x24, 0xd9, 0x44, 0x28, 0x11, 0x84, + 0x9a, 0x23, 0x8e, 0x23, 0x96, 0x6b, 0x1e, 0xa6, 0x22, 0x15, 0x1a, 0xa2, 0xeb, 0x5f, 0x86, 0x6f, + 0x1e, 0xed, 0xd8, 0x5b, 0x9a, 0xf6, 0x50, 0xd6, 0x7d, 0x22, 0x5e, 0x32, 0x4b, 0x1d, 0x57, 0x52, + 0x19, 0x9b, 0xb0, 0x91, 0x8d, 0x88, 0xbf, 0x01, 0x6c, 0x74, 0x4c, 0xe8, 0xbb, 0x0d, 0x1a, 0xf4, + 0x21, 0x74, 0xc2, 0x10, 0xb4, 0xc0, 0x69, 0xfd, 0x12, 0x91, 0x1d, 0x6d, 0x88, 0x13, 0xc5, 0x27, + 0xf3, 0x1c, 0x79, 0x3f, 0x39, 0x2a, 0x49, 0x57, 0x39, 0x6a, 0xcc, 0xd8, 0x68, 0x78, 0x83, 0xdd, + 0x7f, 0x38, 0x29, 0x01, 0x41, 0x17, 0xfa, 0x3a, 0xb5, 0x0c, 0x6b, 0xad, 0x7f, 0x55, 0x2e, 0x3a, + 0x37, 0xe9, 0xac, 0xb9, 0xf8, 0xcc, 0xba, 0x58, 0xd9, 0x2a, 0x47, 0x07, 0xc6, 0xc1, 0xcc, 0xf8, + 0xfd, 0x13, 0xf9, 0x9a, 0x94, 0x89, 0x45, 0x70, 0x01, 0xe0, 0x7f, 0xdb, 0xf1, 0x41, 0x31, 0xc5, + 0x83, 0x57, 0x58, 0x77, 0xfb, 0x65, 0x08, 0xb4, 0xf3, 0xf9, 0x1e, 0xe7, 0xed, 0x03, 0x6d, 0x52, + 0x94, 0xf7, 0xac, 0x72, 0x14, 0x6c, 0x97, 0x95, 0x38, 0x29, 0x23, 0xc1, 0x23, 0xf4, 0xcd, 0xf9, + 0xc3, 0x9a, 0x3e, 0x6a, 0xab, 0xda, 0xf4, 0x5e, 0x73, 0x31, 0xfa, 0xeb, 0x6b, 0x74, 0xae, 0xaf, + 0x99, 0x71, 0x62, 0x3f, 0xc4, 0xb7, 0xf3, 0x65, 0x04, 0x16, 0xcb, 0x08, 0x7c, 0x2d, 0x23, 0xf0, + 0x56, 0x44, 0xde, 0xa2, 0x88, 0xbc, 0x8f, 0x22, 0xf2, 0x9e, 0xda, 0xd9, 0x20, 0x25, 0x6c, 0xa0, + 0x48, 0x8f, 0x4f, 0x69, 0x2a, 0xe8, 0x58, 0xf4, 0xf8, 0x8e, 0x87, 0xd1, 0xf5, 0xf5, 0x93, 0xb8, + 0xfa, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x8f, 0x45, 0x36, 0xdf, 0x02, 0x00, 0x00, +} + +func (m *GenesisDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Deployment.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Deployments) > 0 { + for iNdEx := len(m.Deployments) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Deployments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Deployment.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.Groups) > 0 { + for _, e := range m.Groups { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Deployments) > 0 { + for _, e := range m.Deployments { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deployment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Deployment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, Group{}) + if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deployments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Deployments = append(m.Deployments, GenesisDeployment{}) + if err := m.Deployments[len(m.Deployments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/group.go b/go/node/deployment/v1beta4/group.go new file mode 100644 index 00000000..f411da27 --- /dev/null +++ b/go/node/deployment/v1beta4/group.go @@ -0,0 +1,3 @@ +package v1beta4 + +type Groups []Group diff --git a/go/node/deployment/v1beta4/group.pb.go b/go/node/deployment/v1beta4/group.pb.go new file mode 100644 index 00000000..13772c30 --- /dev/null +++ b/go/node/deployment/v1beta4/group.pb.go @@ -0,0 +1,505 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1beta4/group.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + v1 "pkg.akt.dev/go/node/deployment/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// State is an enum which refers to state of group +type Group_State int32 + +const ( + // Prefix should start with 0 in enum. 
So declaring dummy state + GroupStateInvalid Group_State = 0 + // GroupOpen denotes state for group open + GroupOpen Group_State = 1 + // GroupOrdered denotes state for group ordered + GroupPaused Group_State = 2 + // GroupInsufficientFunds denotes state for group insufficient_funds + GroupInsufficientFunds Group_State = 3 + // GroupClosed denotes state for group closed + GroupClosed Group_State = 4 +) + +var Group_State_name = map[int32]string{ + 0: "invalid", + 1: "open", + 2: "paused", + 3: "insufficient_funds", + 4: "closed", +} + +var Group_State_value = map[string]int32{ + "invalid": 0, + "open": 1, + "paused": 2, + "insufficient_funds": 3, + "closed": 4, +} + +func (x Group_State) String() string { + return proto.EnumName(Group_State_name, int32(x)) +} + +func (Group_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_a45c04780ffee23e, []int{0, 0} +} + +// Group stores group id, state and specifications of group +type Group struct { + ID v1.GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` + State Group_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.deployment.v1beta4.Group_State" json:"state" yaml:"state"` + GroupSpec GroupSpec `protobuf:"bytes,3,opt,name=group_spec,json=groupSpec,proto3,castrepeated=GroupSpecs" json:"spec" yaml:"spec"` + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` +} + +func (m *Group) Reset() { *m = Group{} } +func (m *Group) String() string { return proto.CompactTextString(m) } +func (*Group) ProtoMessage() {} +func (*Group) Descriptor() ([]byte, []int) { + return fileDescriptor_a45c04780ffee23e, []int{0} +} +func (m *Group) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Group.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Group) XXX_Merge(src proto.Message) { + xxx_messageInfo_Group.Merge(m, src) +} +func (m *Group) XXX_Size() int { + return m.Size() +} +func (m *Group) XXX_DiscardUnknown() { + xxx_messageInfo_Group.DiscardUnknown(m) +} + +var xxx_messageInfo_Group proto.InternalMessageInfo + +func (m *Group) GetID() v1.GroupID { + if m != nil { + return m.ID + } + return v1.GroupID{} +} + +func (m *Group) GetState() Group_State { + if m != nil { + return m.State + } + return GroupStateInvalid +} + +func (m *Group) GetGroupSpec() GroupSpec { + if m != nil { + return m.GroupSpec + } + return GroupSpec{} +} + +func (m *Group) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func init() { + proto.RegisterEnum("akash.deployment.v1beta4.Group_State", Group_State_name, Group_State_value) + proto.RegisterType((*Group)(nil), "akash.deployment.v1beta4.Group") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/group.proto", fileDescriptor_a45c04780ffee23e) +} + +var fileDescriptor_a45c04780ffee23e = []byte{ + // 479 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x3d, 0x8f, 0xd3, 0x30, + 0x18, 0xc7, 0x93, 0x34, 0x3d, 0x54, 0x97, 0x97, 0x62, 0xf1, 0x12, 0x02, 0x17, 0x47, 0xe1, 0x45, + 0x65, 0x49, 0x44, 0x61, 0xba, 0x09, 0xca, 0x09, 0x54, 0x31, 0x80, 0x7a, 0x12, 0x03, 0x4b, 0xe5, + 0x8b, 0xdd, 0x60, 0xb5, 0x8d, 0xad, 0xc6, 0xad, 0x74, 0xdf, 0x00, 0x75, 0xe2, 0x0b, 0x54, 0x42, + 0x62, 0xe3, 0x13, 
0xf0, 0x11, 0x6e, 0xbc, 0x91, 0x29, 0xa0, 0x76, 0x41, 0x1d, 0xbb, 0xb2, 0x20, + 0xdb, 0x05, 0x3a, 0x14, 0xd8, 0xdc, 0xff, 0xff, 0xe7, 0x5f, 0x9f, 0x47, 0x31, 0xb8, 0x83, 0x07, + 0xb8, 0x78, 0x9b, 0x10, 0x2a, 0x86, 0xfc, 0x64, 0x44, 0x73, 0x99, 0x4c, 0x1f, 0x1c, 0x53, 0x89, + 0x1f, 0x25, 0xd9, 0x98, 0x4f, 0x44, 0x2c, 0xc6, 0x5c, 0x72, 0xe8, 0x69, 0x2a, 0xfe, 0x43, 0xc5, + 0x1b, 0xca, 0xbf, 0x92, 0xf1, 0x8c, 0x6b, 0x28, 0x51, 0x27, 0xc3, 0xfb, 0x68, 0x87, 0x75, 0x5b, + 0xe8, 0x37, 0xff, 0xfd, 0xb7, 0x85, 0xa0, 0xa9, 0x21, 0xa3, 0x1f, 0x15, 0x50, 0x7d, 0xae, 0x32, + 0xf8, 0x02, 0x38, 0x8c, 0x78, 0x76, 0x68, 0x37, 0xeb, 0xad, 0x5b, 0xf1, 0x8e, 0x89, 0x62, 0xcd, + 0x75, 0x0e, 0xdb, 0xfb, 0xa7, 0x25, 0xb2, 0x16, 0x25, 0x72, 0x3a, 0x87, 0xab, 0x12, 0x39, 0x8c, + 0xac, 0x4b, 0x54, 0x3b, 0xc1, 0xa3, 0xe1, 0x41, 0xc4, 0x48, 0xd4, 0x75, 0x18, 0x81, 0xaf, 0x41, + 0xb5, 0x90, 0x58, 0x52, 0xcf, 0x09, 0xed, 0xe6, 0xc5, 0xd6, 0xdd, 0xf8, 0x6f, 0x1b, 0x1a, 0x69, + 0x7c, 0xa4, 0xe0, 0xf6, 0x8d, 0x55, 0x89, 0xcc, 0xbd, 0x75, 0x89, 0xce, 0x1b, 0xa3, 0xfe, 0x19, + 0x75, 0x4d, 0x0c, 0x47, 0x00, 0xe8, 0x0d, 0x7a, 0x6a, 0x05, 0xaf, 0xa2, 0x87, 0xbd, 0xfd, 0x1f, + 0xf9, 0x91, 0xa0, 0x69, 0xfb, 0xbe, 0x9a, 0x79, 0x55, 0x22, 0x57, 0x5d, 0x5c, 0x97, 0xa8, 0xbe, + 0xb1, 0x0b, 0x9a, 0x46, 0x9f, 0xbe, 0x22, 0xf0, 0x9b, 0x2c, 0xba, 0xb5, 0xec, 0xd7, 0x19, 0xee, + 0x03, 0x90, 0x8e, 0x29, 0x96, 0x94, 0xf4, 0xb0, 0xf4, 0xdc, 0xd0, 0x6e, 0x56, 0xba, 0xb5, 0x4d, + 0xf2, 0x44, 0x46, 0x9f, 0x6d, 0x50, 0xd5, 0x93, 0xc3, 0x08, 0x9c, 0x63, 0xf9, 0x14, 0x0f, 0x19, + 0x69, 0x58, 0xfe, 0xd5, 0xd9, 0x3c, 0xbc, 0x6c, 0x84, 0xaa, 0xec, 0x98, 0x02, 0x5e, 0x07, 0x2e, + 0x17, 0x34, 0x6f, 0xd8, 0xfe, 0x85, 0xd9, 0x3c, 0xac, 0x69, 0xe0, 0xa5, 0xa0, 0x39, 0xbc, 0x09, + 0xf6, 0x04, 0x9e, 0x14, 0x94, 0x34, 0x1c, 0xff, 0xd2, 0x6c, 0x1e, 0xd6, 0x75, 0xf5, 0x4a, 0x47, + 0xb0, 0x05, 0x20, 0xcb, 0x8b, 0x49, 0xbf, 0xcf, 0x52, 0x46, 0x73, 0xd9, 0xeb, 0x4f, 0x72, 0x52, + 0x34, 0x2a, 0xbe, 0x3f, 0x9b, 0x87, 0xd7, 0xcc, 0x17, 0xd9, 0xaa, 0x9f, 0xa9, 0x56, 0x09, 0xd3, + 0x21, 0x57, 0x42, 0x77, 0x4b, 0xf8, 0x54, 0x47, 0xbe, 0xfb, 0xee, 0x63, 0x60, 0x1d, 0xb8, 0xdf, + 0x3f, 0x20, 0xab, 0xfd, 0xf8, 0x74, 0x11, 0xd8, 0x67, 0x8b, 0xc0, 0xfe, 0xb6, 0x08, 0xec, 0xf7, + 0xcb, 0xc0, 0x3a, 0x5b, 0x06, 0xd6, 0x97, 0x65, 0x60, 0xbd, 0xb9, 0x27, 0x06, 0x59, 0x8c, 0x07, + 0x32, 0x26, 0x74, 0x9a, 0x64, 0x3c, 0xc9, 0x39, 0xa1, 0x3b, 0xde, 0xd3, 0xf1, 0x9e, 0x7e, 0x46, + 0x0f, 0x7f, 0x06, 0x00, 0x00, 0xff, 0xff, 0x39, 0xf7, 0x33, 0xba, 0xe9, 0x02, 0x00, 0x00, +} + +func (m *Group) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Group) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Group) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CreatedAt != 0 { + i = encodeVarintGroup(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x20 + } + { + size, err := m.GroupSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGroup(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.State != 0 { + i = encodeVarintGroup(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGroup(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return 
len(dAtA) - i, nil +} + +func encodeVarintGroup(dAtA []byte, offset int, v uint64) int { + offset -= sovGroup(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Group) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovGroup(uint64(l)) + if m.State != 0 { + n += 1 + sovGroup(uint64(m.State)) + } + l = m.GroupSpec.Size() + n += 1 + l + sovGroup(uint64(l)) + if m.CreatedAt != 0 { + n += 1 + sovGroup(uint64(m.CreatedAt)) + } + return n +} + +func sovGroup(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGroup(x uint64) (n int) { + return sovGroup(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Group) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Group: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Group: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGroup + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= Group_State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGroup + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GroupSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := 
skipGroup(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroup + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGroup(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGroup + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGroup + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGroup = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGroup = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/group_validation.go b/go/node/deployment/v1beta4/group_validation.go new file mode 100644 index 00000000..7398678d --- /dev/null +++ b/go/node/deployment/v1beta4/group_validation.go @@ -0,0 +1,37 @@ +package v1beta4 + +import ( + "fmt" + + v1 "pkg.akt.dev/go/node/deployment/v1" +) + +// ValidateDeploymentGroups does validation for all deployment groups +func ValidateDeploymentGroups(gspecs []GroupSpec) error { + if len(gspecs) == 0 { + return v1.ErrInvalidGroups + } + + names := make(map[string]int, len(gspecs)) // Used as set + denom := "" + for idx, group := range gspecs { + // all must be same denomination + if idx == 0 { + denom = group.Price().Denom + } else if group.Price().Denom != denom { + return fmt.Errorf("inconsistent denomination: %v != %v", denom, group.Price().Denom) + } + + if err := group.ValidateBasic(); err != nil { + return err + } + + if _, exists := names[group.GetName()]; exists { + return fmt.Errorf("duplicate deployment group name %q", group.GetName()) + } + + names[group.GetName()] = 0 // Value stored does not matter + } + + return nil +} diff --git a/go/node/deployment/v1beta4/groupmsg.pb.go b/go/node/deployment/v1beta4/groupmsg.pb.go new file mode 100644 index 00000000..2281f778 --- /dev/null +++ b/go/node/deployment/v1beta4/groupmsg.pb.go @@ -0,0 +1,1037 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: akash/deployment/v1beta4/groupmsg.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + v1 "pkg.akt.dev/go/node/deployment/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgCloseGroup defines SDK message to close a single Group within a Deployment. +type MsgCloseGroup struct { + ID v1.GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *MsgCloseGroup) Reset() { *m = MsgCloseGroup{} } +func (m *MsgCloseGroup) String() string { return proto.CompactTextString(m) } +func (*MsgCloseGroup) ProtoMessage() {} +func (*MsgCloseGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_ec2e655b03e10552, []int{0} +} +func (m *MsgCloseGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCloseGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCloseGroup.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCloseGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCloseGroup.Merge(m, src) +} +func (m *MsgCloseGroup) XXX_Size() int { + return m.Size() +} +func (m *MsgCloseGroup) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCloseGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCloseGroup proto.InternalMessageInfo + +func (m *MsgCloseGroup) GetID() v1.GroupID { + if m != nil { + return m.ID + } + return v1.GroupID{} +} + +// MsgCloseGroupResponse defines the Msg/CloseGroup response type. +type MsgCloseGroupResponse struct { +} + +func (m *MsgCloseGroupResponse) Reset() { *m = MsgCloseGroupResponse{} } +func (m *MsgCloseGroupResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCloseGroupResponse) ProtoMessage() {} +func (*MsgCloseGroupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ec2e655b03e10552, []int{1} +} +func (m *MsgCloseGroupResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCloseGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCloseGroupResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCloseGroupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCloseGroupResponse.Merge(m, src) +} +func (m *MsgCloseGroupResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCloseGroupResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCloseGroupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCloseGroupResponse proto.InternalMessageInfo + +// MsgPauseGroup defines SDK message to close a single Group within a Deployment. 
+type MsgPauseGroup struct { + ID v1.GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *MsgPauseGroup) Reset() { *m = MsgPauseGroup{} } +func (m *MsgPauseGroup) String() string { return proto.CompactTextString(m) } +func (*MsgPauseGroup) ProtoMessage() {} +func (*MsgPauseGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_ec2e655b03e10552, []int{2} +} +func (m *MsgPauseGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgPauseGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgPauseGroup.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgPauseGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgPauseGroup.Merge(m, src) +} +func (m *MsgPauseGroup) XXX_Size() int { + return m.Size() +} +func (m *MsgPauseGroup) XXX_DiscardUnknown() { + xxx_messageInfo_MsgPauseGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgPauseGroup proto.InternalMessageInfo + +func (m *MsgPauseGroup) GetID() v1.GroupID { + if m != nil { + return m.ID + } + return v1.GroupID{} +} + +// MsgPauseGroupResponse defines the Msg/PauseGroup response type. +type MsgPauseGroupResponse struct { +} + +func (m *MsgPauseGroupResponse) Reset() { *m = MsgPauseGroupResponse{} } +func (m *MsgPauseGroupResponse) String() string { return proto.CompactTextString(m) } +func (*MsgPauseGroupResponse) ProtoMessage() {} +func (*MsgPauseGroupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ec2e655b03e10552, []int{3} +} +func (m *MsgPauseGroupResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgPauseGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgPauseGroupResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgPauseGroupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgPauseGroupResponse.Merge(m, src) +} +func (m *MsgPauseGroupResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgPauseGroupResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgPauseGroupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgPauseGroupResponse proto.InternalMessageInfo + +// MsgStartGroup defines SDK message to close a single Group within a Deployment. 
+type MsgStartGroup struct { + ID v1.GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *MsgStartGroup) Reset() { *m = MsgStartGroup{} } +func (m *MsgStartGroup) String() string { return proto.CompactTextString(m) } +func (*MsgStartGroup) ProtoMessage() {} +func (*MsgStartGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_ec2e655b03e10552, []int{4} +} +func (m *MsgStartGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgStartGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgStartGroup.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgStartGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgStartGroup.Merge(m, src) +} +func (m *MsgStartGroup) XXX_Size() int { + return m.Size() +} +func (m *MsgStartGroup) XXX_DiscardUnknown() { + xxx_messageInfo_MsgStartGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgStartGroup proto.InternalMessageInfo + +func (m *MsgStartGroup) GetID() v1.GroupID { + if m != nil { + return m.ID + } + return v1.GroupID{} +} + +// MsgStartGroupResponse defines the Msg/StartGroup response type. +type MsgStartGroupResponse struct { +} + +func (m *MsgStartGroupResponse) Reset() { *m = MsgStartGroupResponse{} } +func (m *MsgStartGroupResponse) String() string { return proto.CompactTextString(m) } +func (*MsgStartGroupResponse) ProtoMessage() {} +func (*MsgStartGroupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ec2e655b03e10552, []int{5} +} +func (m *MsgStartGroupResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgStartGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgStartGroupResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgStartGroupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgStartGroupResponse.Merge(m, src) +} +func (m *MsgStartGroupResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgStartGroupResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgStartGroupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgStartGroupResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgCloseGroup)(nil), "akash.deployment.v1beta4.MsgCloseGroup") + proto.RegisterType((*MsgCloseGroupResponse)(nil), "akash.deployment.v1beta4.MsgCloseGroupResponse") + proto.RegisterType((*MsgPauseGroup)(nil), "akash.deployment.v1beta4.MsgPauseGroup") + proto.RegisterType((*MsgPauseGroupResponse)(nil), "akash.deployment.v1beta4.MsgPauseGroupResponse") + proto.RegisterType((*MsgStartGroup)(nil), "akash.deployment.v1beta4.MsgStartGroup") + proto.RegisterType((*MsgStartGroupResponse)(nil), "akash.deployment.v1beta4.MsgStartGroupResponse") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/groupmsg.proto", fileDescriptor_ec2e655b03e10552) +} + +var fileDescriptor_ec2e655b03e10552 = []byte{ + // 303 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4f, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, + 0x4a, 0x2d, 0x49, 0x34, 0xd1, 0x4f, 0x2f, 0xca, 0x2f, 0x2d, 0xc8, 0x2d, 0x4e, 0xd7, 0x2b, 0x28, + 
0xca, 0x2f, 0xc9, 0x17, 0x92, 0x00, 0x2b, 0xd4, 0x43, 0x28, 0xd4, 0x83, 0x2a, 0x94, 0x12, 0x49, + 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 0xa5, 0xc4, 0x93, 0xf3, 0x8b, 0x73, + 0xf3, 0x8b, 0xf5, 0x73, 0x8b, 0xd3, 0xf5, 0xcb, 0x0c, 0xf5, 0xe1, 0x06, 0x49, 0xc9, 0x63, 0xb1, + 0x11, 0x62, 0x19, 0x44, 0x81, 0x52, 0x3e, 0x17, 0xaf, 0x6f, 0x71, 0xba, 0x73, 0x4e, 0x7e, 0x71, + 0xaa, 0x3b, 0x48, 0x58, 0xc8, 0x9b, 0x8b, 0x29, 0x33, 0x45, 0x82, 0x51, 0x81, 0x51, 0x83, 0xdb, + 0x48, 0x46, 0x0f, 0x8b, 0x3b, 0xf4, 0xc0, 0xea, 0x3c, 0x5d, 0x9c, 0x64, 0x4f, 0xdc, 0x93, 0x67, + 0x78, 0x74, 0x4f, 0x9e, 0xc9, 0xd3, 0xe5, 0xd5, 0x3d, 0x79, 0xa6, 0xcc, 0x94, 0x4f, 0xf7, 0xe4, + 0x39, 0x2b, 0x13, 0x73, 0x73, 0xac, 0x94, 0x32, 0x53, 0x94, 0x82, 0x98, 0x32, 0x53, 0xac, 0x04, + 0x5f, 0x2c, 0x90, 0x67, 0x68, 0x7a, 0xbe, 0x41, 0x8b, 0x23, 0x33, 0x45, 0x2f, 0xbf, 0x3c, 0x2f, + 0xb5, 0x48, 0x49, 0x9c, 0x4b, 0x14, 0xc5, 0xc2, 0xa0, 0xd4, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, 0x54, + 0xa8, 0x4b, 0x02, 0x12, 0x4b, 0xe9, 0xeb, 0x12, 0x84, 0x85, 0x68, 0x2e, 0x09, 0x2e, 0x49, 0x2c, + 0x2a, 0xa1, 0xa7, 0x4b, 0x10, 0x16, 0xc2, 0x5c, 0xe2, 0xe4, 0x70, 0xe2, 0x91, 0x1c, 0xe3, 0x85, + 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, + 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0x6a, 0x05, 0xd9, 0xe9, 0x7a, 0x89, 0xd9, 0x25, 0x7a, 0x29, 0xa9, + 0x65, 0xfa, 0xe9, 0xf9, 0xfa, 0x79, 0xf9, 0x29, 0xa9, 0x58, 0x12, 0x56, 0x12, 0x1b, 0x38, 0x9a, + 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x24, 0x00, 0x18, 0xec, 0x7b, 0x02, 0x00, 0x00, +} + +func (m *MsgCloseGroup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCloseGroup) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCloseGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGroupmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgCloseGroupResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCloseGroupResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCloseGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgPauseGroup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgPauseGroup) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgPauseGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGroupmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgPauseGroupResponse) Marshal() (dAtA []byte, err error) { + size 
:= m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgPauseGroupResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgPauseGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgStartGroup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgStartGroup) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgStartGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGroupmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgStartGroupResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgStartGroupResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgStartGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintGroupmsg(dAtA []byte, offset int, v uint64) int { + offset -= sovGroupmsg(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgCloseGroup) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovGroupmsg(uint64(l)) + return n +} + +func (m *MsgCloseGroupResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgPauseGroup) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovGroupmsg(uint64(l)) + return n +} + +func (m *MsgPauseGroupResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgStartGroup) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovGroupmsg(uint64(l)) + return n +} + +func (m *MsgStartGroupResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovGroupmsg(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGroupmsg(x uint64) (n int) { + return sovGroupmsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgCloseGroup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCloseGroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
MsgCloseGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGroupmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGroupmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGroupmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroupmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCloseGroupResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCloseGroupResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCloseGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGroupmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroupmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgPauseGroup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgPauseGroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgPauseGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGroupmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGroupmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGroupmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroupmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgPauseGroupResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgPauseGroupResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgPauseGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGroupmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroupmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgStartGroup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgStartGroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgStartGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGroupmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGroupmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGroupmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroupmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgStartGroupResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
MsgStartGroupResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgStartGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGroupmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroupmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGroupmsg(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGroupmsg + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGroupmsg + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGroupmsg + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGroupmsg = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGroupmsg = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGroupmsg = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/groupspec.go b/go/node/deployment/v1beta4/groupspec.go new file mode 100644 index 00000000..910d1f7e --- /dev/null +++ b/go/node/deployment/v1beta4/groupspec.go @@ -0,0 +1,196 @@ +package v1beta4 + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + + atypes "pkg.akt.dev/go/node/audit/v1" + attr "pkg.akt.dev/go/node/types/attributes/v1" +) + +type ResourceGroup interface { + GetName() string + GetResourceUnits() ResourceUnits +} + +var _ ResourceGroup = (*GroupSpec)(nil) + +type GroupSpecs []GroupSpec + +func (gspecs GroupSpecs) Dup() GroupSpecs { + res := make(GroupSpecs, 0, len(gspecs)) + + for _, gspec := range gspecs { + gs := gspec.Dup() + res = append(res, gs) + } + return res +} + +func (g GroupSpec) Dup() GroupSpec { + res := GroupSpec{ + Name: g.Name, + Requirements: g.Requirements.Dup(), + Resources: g.Resources.Dup(), + } + + return res +} + +// ValidateBasic asserts non-zero values +func (g GroupSpec) ValidateBasic() error { + return g.validate() +} + +// GetResourceUnits method returns resources list in group +func (g GroupSpec) GetResourceUnits() ResourceUnits { + resources := make(ResourceUnits, 0, len(g.Resources)) + + for _, r := range g.Resources { + resources = append(resources, r) + } + + return resources +} + +// GetName method returns group name +func (g GroupSpec) GetName() string { + 
return g.Name +} + +// Price method returns price of group +func (g GroupSpec) Price() sdk.DecCoin { + var price sdk.DecCoin + for idx, resource := range g.Resources { + if idx == 0 { + price = resource.FullPrice() + continue + } + price = price.Add(resource.FullPrice()) + } + return price +} + +// MatchResourcesRequirements checks if resource attributes match the provider's capabilities +func (g GroupSpec) MatchResourcesRequirements(pattr attr.Attributes) bool { + for _, rgroup := range g.GetResourceUnits() { + pgroup := pattr.GetCapabilitiesGroup("storage") + for _, storage := range rgroup.Storage { + if len(storage.Attributes) == 0 { + continue + } + + if !storage.Attributes.IN(pgroup) { + return false + } + } + if gpu := rgroup.GPU; gpu.Units.Val.Uint64() > 0 { + attrs := gpu.Attributes + if len(attrs) == 0 { + continue + } + + pgroup = pattr.GetCapabilitiesMap("gpu") + + if !gpu.Attributes.AnyIN(pgroup) { + return false + } + } + } + + return true +} + +// MatchRequirements method compares provided attributes with specific group attributes. +// The provider argument is a bit cumbersome: the first element is the attributes from the x/provider store, +// used when the tenant does not require signed attributes at all; +// the remaining elements (if any) are attributes signed by various auditors. +func (g GroupSpec) MatchRequirements(provider []atypes.AuditedProvider) bool { + if (len(g.Requirements.SignedBy.AnyOf) != 0) || (len(g.Requirements.SignedBy.AllOf) != 0) { + // we cannot match if there are no signed attributes + if len(provider) < 2 { + return false + } + + existingRequirements := make(attributesMatching) + + for _, existing := range provider[1:] { + existingRequirements[existing.Auditor] = existing.Attributes + } + + if len(g.Requirements.SignedBy.AllOf) != 0 { + for _, validator := range g.Requirements.SignedBy.AllOf { + // if at least one signature is missing or attributes do not match - requirements cannot match + if existingAttr, exists := existingRequirements[validator]; !exists || + !attr.AttributesSubsetOf(g.Requirements.Attributes, existingAttr) { + return false + } + } + } + + if len(g.Requirements.SignedBy.AnyOf) != 0 { + for _, validator := range g.Requirements.SignedBy.AnyOf { + if existingAttr, exists := existingRequirements[validator]; exists && + attr.AttributesSubsetOf(g.Requirements.Attributes, existingAttr) { + return true + } + } + + return false + } + + return true + } + + return attr.AttributesSubsetOf(g.Requirements.Attributes, provider[0].Attributes) +} + +// validate does validation for provided deployment group +func (g *GroupSpec) validate() error { + if g.Name == "" { + return fmt.Errorf("empty group spec name") + } + + if err := g.GetResourceUnits().Validate(); err != nil { + return err + } + + if err := g.validatePricing(); err != nil { + return err + } + + return nil +} + +func (g *GroupSpec) validatePricing() error { + var price sdk.DecCoin + + mem := sdk.NewInt(0) + + for idx, resource := range g.Resources { + if err := resource.validatePricing(); err != nil { + return fmt.Errorf("group %v: %w", g.GetName(), err) + } + + // all must be same denomination + if idx == 0 { + price = resource.FullPrice() + } else { + rprice := resource.FullPrice() + if rprice.Denom != price.Denom { + return fmt.Errorf("multi-denomination group: (%v == %v fails)", rprice.Denom, price.Denom) + } + price = price.Add(rprice) + } + + memCount := sdk.NewInt(0) + if u := resource.Memory; u != nil { + memCount = memCount.Add(sdk.NewIntFromUint64(u.Quantity.Value())) + } + + mem = 
mem.Add(memCount.Mul(sdk.NewIntFromUint64(uint64(resource.Count)))) + } + + return nil +} diff --git a/go/node/deployment/v1beta4/groupspec.pb.go b/go/node/deployment/v1beta4/groupspec.pb.go new file mode 100644 index 00000000..cc3753f1 --- /dev/null +++ b/go/node/deployment/v1beta4/groupspec.pb.go @@ -0,0 +1,426 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1beta4/groupspec.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + v1 "pkg.akt.dev/go/node/types/attributes/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Spec stores group specifications +type GroupSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` + Requirements v1.PlacementRequirements `protobuf:"bytes,2,opt,name=requirements,proto3" json:"requirements" yaml:"requirements"` + Resources ResourceUnits `protobuf:"bytes,3,rep,name=resources,proto3,castrepeated=ResourceUnits" json:"resources" yaml:"resources"` +} + +func (m *GroupSpec) Reset() { *m = GroupSpec{} } +func (m *GroupSpec) String() string { return proto.CompactTextString(m) } +func (*GroupSpec) ProtoMessage() {} +func (*GroupSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_bd2049f4b23a57e8, []int{0} +} +func (m *GroupSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GroupSpec.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GroupSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupSpec.Merge(m, src) +} +func (m *GroupSpec) XXX_Size() int { + return m.Size() +} +func (m *GroupSpec) XXX_DiscardUnknown() { + xxx_messageInfo_GroupSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*GroupSpec)(nil), "akash.deployment.v1beta4.GroupSpec") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/groupspec.proto", fileDescriptor_bd2049f4b23a57e8) +} + +var fileDescriptor_bd2049f4b23a57e8 = []byte{ + // 368 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x3f, 0x4b, 0xf3, 0x50, + 0x14, 0xc6, 0x93, 0xf6, 0xe5, 0xe5, 0x6d, 0xfa, 0x0a, 0x12, 0x05, 0x43, 0x87, 0xdc, 0x92, 0xa1, + 0x04, 0x0a, 0xf7, 0xd2, 0x2a, 0x0e, 0x9d, 0x24, 0x8b, 0xab, 0x44, 0x5c, 0xdc, 0x6e, 0xd2, 0x43, + 0x0c, 0x6d, 0x73, 0x63, 0xee, 0x4d, 0xa0, 0xe0, 0xe0, 0xe8, 0xe8, 0x47, 0xe8, 0xec, 0xd7, 0x70, + 0xe9, 0xd8, 0xd1, 0x29, 0x4a, 0xbb, 0x48, 0xc7, 0x7e, 0x02, 0xc9, 0x9f, 0x92, 0x16, 0xec, 0x76, + 0xcf, 0xe1, 0x77, 0x9e, 0xe7, 0x39, 0xf7, 0x28, 0x26, 0x1d, 0x51, 0xfe, 0x40, 0x86, 0x10, 0x8e, + 0xd9, 0x74, 0x02, 0x81, 0x20, 0x49, 0xcf, 0x01, 0x41, 0x2f, 0x88, 0x17, 0xb1, 0x38, 0xe4, 0x21, + 0xb8, 0x38, 0x8c, 0x98, 0x60, 0xaa, 0x96, 0x93, 0xb8, 0x22, 0x71, 
0x49, 0xb6, 0x4e, 0x3d, 0xe6, + 0xb1, 0x1c, 0x22, 0xd9, 0xab, 0xe0, 0x5b, 0xa5, 0xb2, 0x43, 0x39, 0x10, 0x2a, 0x44, 0xe4, 0x3b, + 0xb1, 0x00, 0x4e, 0x92, 0x5e, 0x55, 0x95, 0x64, 0xf7, 0x60, 0x86, 0x08, 0x38, 0x8b, 0x23, 0x17, + 0xe2, 0xc0, 0x17, 0x05, 0x6c, 0xbc, 0xd7, 0x94, 0xc6, 0x75, 0x16, 0xed, 0x36, 0x04, 0x57, 0xed, + 0x2a, 0x7f, 0x02, 0x3a, 0x01, 0x4d, 0x6e, 0xcb, 0x66, 0xc3, 0x3a, 0x5b, 0xa7, 0x28, 0xaf, 0x37, + 0x29, 0x6a, 0x4e, 0xe9, 0x64, 0x3c, 0x30, 0xb2, 0xca, 0xb0, 0xf3, 0xa6, 0xfa, 0x2c, 0x2b, 0xff, + 0x23, 0x78, 0x8c, 0xfd, 0x08, 0x32, 0x17, 0xae, 0xd5, 0xda, 0xb2, 0xd9, 0xec, 0x13, 0x5c, 0x6c, + 0x96, 0x25, 0xc5, 0x55, 0x52, 0x9c, 0xf4, 0xf0, 0xcd, 0x98, 0xba, 0x39, 0x6b, 0xef, 0x8c, 0x59, + 0xdd, 0x79, 0x8a, 0xa4, 0x75, 0x8a, 0xf6, 0xc4, 0x36, 0x29, 0x3a, 0x29, 0x2c, 0x77, 0xbb, 0x86, + 0xbd, 0x07, 0xa9, 0x4f, 0x4a, 0x63, 0xbb, 0x13, 0xd7, 0xea, 0xed, 0xba, 0xd9, 0xec, 0x77, 0xf0, + 0xa1, 0x8f, 0xc5, 0x76, 0x89, 0xde, 0x05, 0xbe, 0xb0, 0x2e, 0x4b, 0xd7, 0x4a, 0x60, 0x93, 0xa2, + 0xe3, 0xad, 0x65, 0xd9, 0x32, 0xde, 0x3e, 0xd1, 0xd1, 0xee, 0x18, 0xb7, 0x2b, 0x7e, 0xf0, 0xef, + 0x65, 0x86, 0xa4, 0xef, 0x19, 0x92, 0xac, 0xab, 0xf9, 0x52, 0x97, 0x17, 0x4b, 0x5d, 0xfe, 0x5a, + 0xea, 0xf2, 0xeb, 0x4a, 0x97, 0x16, 0x2b, 0x5d, 0xfa, 0x58, 0xe9, 0xd2, 0x7d, 0x27, 0x1c, 0x79, + 0x98, 0x8e, 0x04, 0x1e, 0x42, 0x42, 0x3c, 0x46, 0x02, 0x36, 0x84, 0x5f, 0x4e, 0xe3, 0xfc, 0xcd, + 0xcf, 0x71, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0xfe, 0x8f, 0x69, 0x14, 0x41, 0x02, 0x00, 0x00, +} + +func (m *GroupSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGroupspec(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.Requirements.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGroupspec(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGroupspec(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGroupspec(dAtA []byte, offset int, v uint64) int { + offset -= sovGroupspec(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GroupSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovGroupspec(uint64(l)) + } + l = m.Requirements.Size() + n += 1 + l + sovGroupspec(uint64(l)) + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGroupspec(uint64(l)) + } + } + return n +} + +func sovGroupspec(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGroupspec(x uint64) (n int) { + return sovGroupspec(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GroupSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupspec + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupspec + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGroupspec + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGroupspec + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupspec + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGroupspec + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGroupspec + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Requirements.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupspec + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGroupspec + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGroupspec + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, ResourceUnit{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGroupspec(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroupspec + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGroupspec(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroupspec + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowGroupspec + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroupspec + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGroupspec + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGroupspec + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGroupspec + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGroupspec = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGroupspec = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGroupspec = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/msgs.go b/go/node/deployment/v1beta4/msgs.go new file mode 100644 index 00000000..6b76ea02 --- /dev/null +++ b/go/node/deployment/v1beta4/msgs.go @@ -0,0 +1,368 @@ +package v1beta4 + +import ( + "reflect" + + cerrors "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + v1 "pkg.akt.dev/go/node/deployment/v1" +) + +var ( + _ sdk.Msg = &MsgCreateDeployment{} + _ sdk.Msg = &MsgUpdateDeployment{} + _ sdk.Msg = &MsgCloseDeployment{} + _ sdk.Msg = &MsgCloseGroup{} + _ sdk.Msg = &MsgPauseGroup{} + _ sdk.Msg = &MsgStartGroup{} + _ sdk.Msg = &MsgUpdateParams{} +) + +var ( + msgTypeCreateDeployment = "" + msgTypeUpdateDeployment = "" + msgTypeCloseDeployment = "" + msgTypeCloseGroup = "" + msgTypePauseGroup = "" + msgTypeStartGroup = "" + msgTypeUpdateParams = "" +) + +func init() { + msgTypeCreateDeployment = reflect.TypeOf(&MsgCreateDeployment{}).Elem().Name() + msgTypeUpdateDeployment = reflect.TypeOf(&MsgUpdateDeployment{}).Elem().Name() + msgTypeCloseDeployment = reflect.TypeOf(&MsgCloseDeployment{}).Elem().Name() + msgTypeCloseGroup = reflect.TypeOf(&MsgCloseGroup{}).Elem().Name() + msgTypePauseGroup = reflect.TypeOf(&MsgPauseGroup{}).Elem().Name() + msgTypeStartGroup = reflect.TypeOf(&MsgStartGroup{}).Elem().Name() + msgTypeUpdateParams = reflect.TypeOf(&MsgUpdateParams{}).Elem().Name() +} + +// NewMsgCreateDeployment creates a new MsgCreateDeployment instance +func NewMsgCreateDeployment(id v1.DeploymentID, groups []GroupSpec, hash []byte, + deposit sdk.Coin, depositor sdk.AccAddress) *MsgCreateDeployment { + return &MsgCreateDeployment{ + ID: id, + Groups: groups, + Hash: hash, + Deposit: deposit, + Depositor: depositor.String(), + } +} + +// Type implements the sdk.Msg interface +func (msg *MsgCreateDeployment) Type() string { return msgTypeCreateDeployment } + +// GetSigners defines whose signature is required +func (msg *MsgCreateDeployment) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// ValidateBasic does basic validation like check owner and groups length +func (msg *MsgCreateDeployment) ValidateBasic() error { + if err := msg.ID.Validate(); err != nil { + return err + } + if err := msg.Deposit.Validate(); err != nil { + return err + } + if len(msg.Groups) == 0 { + 
return v1.ErrInvalidGroups + } + + if len(msg.Hash) == 0 { + return v1.ErrEmptyHash + } + + if len(msg.Hash) != v1.HashLength { + return v1.ErrInvalidHash + } + + for _, gs := range msg.Groups { + err := gs.ValidateBasic() + if err != nil { + return err + } + + // deposit must be same denom as price + if !msg.Deposit.IsZero() { + if gdenom := gs.Price().Denom; gdenom != msg.Deposit.Denom { + return cerrors.Wrapf(v1.ErrInvalidDeposit, "Mismatched denominations (%v != %v)", msg.Deposit.Denom, gdenom) + } + } + } + + _, err := sdk.AccAddressFromBech32(msg.Depositor) + if err != nil { + return cerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreateDeployment: Invalid Depositor Address") + } + + return nil +} + +// NewMsgUpdateDeployment creates a new MsgUpdateDeployment instance +func NewMsgUpdateDeployment(id v1.DeploymentID, version []byte) *MsgUpdateDeployment { + return &MsgUpdateDeployment{ + ID: id, + Hash: version, + } +} + +// Type implements the sdk.Msg interface +func (msg *MsgUpdateDeployment) Type() string { return msgTypeUpdateDeployment } + +// ValidateBasic does basic validation +func (msg *MsgUpdateDeployment) ValidateBasic() error { + if err := msg.ID.Validate(); err != nil { + return err + } + + if len(msg.Hash) == 0 { + return v1.ErrEmptyHash + } + + if len(msg.Hash) != v1.HashLength { + return v1.ErrInvalidHash + } + + return nil +} + +// GetSigners defines whose signature is required +func (msg *MsgUpdateDeployment) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// NewMsgCloseDeployment creates a new MsgCloseDeployment instance +func NewMsgCloseDeployment(id v1.DeploymentID) *MsgCloseDeployment { + return &MsgCloseDeployment{ + ID: id, + } +} + +// Type implements the sdk.Msg interface +func (msg *MsgCloseDeployment) Type() string { return msgTypeCloseDeployment } + +// ValidateBasic does basic validation with deployment details +func (msg *MsgCloseDeployment) ValidateBasic() error { + if err := msg.ID.Validate(); err != nil { + return err + } + return nil +} + +// GetSigners defines whose signature is required +func (msg *MsgCloseDeployment) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// NewMsgCloseGroup creates a new MsgCloseGroup instance +func NewMsgCloseGroup(id v1.GroupID) *MsgCloseGroup { + return &MsgCloseGroup{ + ID: id, + } +} + +// Type implements the sdk.Msg interface exposing message type +func (msg *MsgCloseGroup) Type() string { return msgTypeCloseGroup } + +// ValidateBasic calls underlying GroupID.Validate() check and returns result +func (msg *MsgCloseGroup) ValidateBasic() error { + if err := msg.ID.Validate(); err != nil { + return err + } + return nil +} + +// GetSigners defines whose signature is required +func (msg *MsgCloseGroup) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// NewMsgPauseGroup creates a new MsgPauseGroup instance +func NewMsgPauseGroup(id v1.GroupID) *MsgPauseGroup { + return &MsgPauseGroup{ + ID: id, + } +} + +// Type implements the sdk.Msg interface exposing message type +func (msg *MsgPauseGroup) Type() string { return msgTypePauseGroup } + +// ValidateBasic calls underlying GroupID.Validate() check and returns result +func (msg *MsgPauseGroup) ValidateBasic() error { + if err := 
msg.ID.Validate(); err != nil { + return err + } + return nil +} + +// GetSigners defines whose signature is required +func (msg *MsgPauseGroup) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// NewMsgStartGroup creates a new MsgStartGroup instance +func NewMsgStartGroup(id v1.GroupID) *MsgStartGroup { + return &MsgStartGroup{ + ID: id, + } +} + +// Type implements the sdk.Msg interface exposing message type +func (msg *MsgStartGroup) Type() string { return msgTypeStartGroup } + +// ValidateBasic calls underlying GroupID.Validate() check and returns result +func (msg *MsgStartGroup) ValidateBasic() error { + if err := msg.ID.Validate(); err != nil { + return err + } + return nil +} + +// GetSigners defines whose signature is required +func (msg *MsgStartGroup) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// Type implements the sdk.Msg interface exposing message type +func (m *MsgUpdateParams) Type() string { + return msgTypeUpdateParams +} + +// GetSigners returns the expected signers for a MsgUpdateParams message. +func (m *MsgUpdateParams) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(m.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. +func (m *MsgUpdateParams) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(m.Authority); err != nil { + return cerrors.Wrap(err, "invalid authority address") + } + + if err := m.Params.Validate(); err != nil { + return err + } + + return nil +} + +// ============= GetSignBytes ============= +// ModuleCdc is defined in codec.go +// TODO @troian to check if we need them at all + +// GetSignBytes encodes the message for signing +// +// Deprecated: GetSignBytes is deprecated +func (msg *MsgCreateDeployment) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg)) +} + +// GetSignBytes encodes the message for signing +// +// Deprecated: GetSignBytes is deprecated +func (msg MsgUpdateDeployment) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSignBytes encodes the message for signing +// +// Deprecated: GetSignBytes is deprecated +func (msg *MsgCloseDeployment) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg)) +} + +// GetSignBytes encodes the message for signing +// +// Deprecated: GetSignBytes is deprecated +func (msg *MsgCloseGroup) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg)) +} + +// GetSignBytes encodes the message for signing +// +// Deprecated: GetSignBytes is deprecated +func (msg *MsgPauseGroup) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg)) +} + +// GetSignBytes encodes the message for signing +// +// Deprecated: GetSignBytes is deprecated +func (msg *MsgStartGroup) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg)) +} + +// GetSignBytes implements the LegacyMsg interface. +// +// Deprecated: GetSignBytes is deprecated +func (m *MsgUpdateParams) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(m)) +} + +// ============= Route ============= +// ModuleCdc is defined in codec.go +// TODO @troian to check if we need them at all since sdk.Msg does not have Route defined anymore + +// Route 
implements the sdk.Msg interface +// +// Deprecated: Route is deprecated +func (msg *MsgCreateDeployment) Route() string { return v1.RouterKey } + +// Route implements the sdk.Msg interface +// +// Deprecated: Route is deprecated +func (msg *MsgUpdateDeployment) Route() string { return v1.RouterKey } + +// Route implements the sdk.Msg interface +// +// Deprecated: Route is deprecated +func (msg *MsgCloseDeployment) Route() string { return v1.RouterKey } + +// Route implements the sdk.Msg interface for routing +// +// Deprecated: Route is deprecated +func (msg *MsgCloseGroup) Route() string { return v1.RouterKey } + +// Route implements the sdk.Msg interface for routing +// +// Deprecated: Route is deprecated +func (msg *MsgPauseGroup) Route() string { return v1.RouterKey } + +// Route implements the sdk.Msg interface for routing +// +// Deprecated: Route is deprecated +func (msg *MsgStartGroup) Route() string { return v1.RouterKey } diff --git a/go/node/deployment/v1beta4/msgs_test.go b/go/node/deployment/v1beta4/msgs_test.go new file mode 100644 index 00000000..ec16b1e1 --- /dev/null +++ b/go/node/deployment/v1beta4/msgs_test.go @@ -0,0 +1,84 @@ +package v1beta4_test + +import ( + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + + v1 "pkg.akt.dev/go/node/deployment/v1" + types "pkg.akt.dev/go/node/deployment/v1beta4" + tutil "pkg.akt.dev/go/testutil" + testutil "pkg.akt.dev/go/testutil/v1beta3" +) + +type testMsg struct { + msg sdk.Msg + err error +} + +func TestVersionValidation(t *testing.T) { + tests := []testMsg{ + { + msg: &types.MsgCreateDeployment{ + ID: tutil.DeploymentID(t), + Hash: tutil.DeploymentVersion(t), + Groups: types.GroupSpecs{ + tutil.GroupSpec(t), + }, + Depositor: tutil.AccAddress(t).String(), + Deposit: tutil.AkashCoin(t, 0), + }, + err: nil, + }, + { + msg: &types.MsgCreateDeployment{ + ID: testutil.DeploymentID(t), + Hash: []byte(""), + Groups: []types.GroupSpec{ + testutil.GroupSpec(t), + }, + Depositor: tutil.AccAddress(t).String(), + Deposit: tutil.AkashCoin(t, 0), + }, + err: v1.ErrEmptyHash, + }, + { + msg: &types.MsgCreateDeployment{ + ID: testutil.DeploymentID(t), + Hash: []byte("invalidversion"), + Groups: []types.GroupSpec{ + testutil.GroupSpec(t), + }, + Depositor: tutil.AccAddress(t).String(), + Deposit: tutil.AkashCoin(t, 0), + }, + err: v1.ErrInvalidHash, + }, + { + msg: &types.MsgUpdateDeployment{ + ID: testutil.DeploymentID(t), + Hash: testutil.DeploymentVersion(t), + }, + err: nil, + }, + { + msg: &types.MsgUpdateDeployment{ + ID: testutil.DeploymentID(t), + Hash: []byte(""), + }, + err: v1.ErrEmptyHash, + }, + { + msg: &types.MsgUpdateDeployment{ + ID: testutil.DeploymentID(t), + Hash: []byte("invalidversion"), + }, + err: v1.ErrInvalidHash, + }, + } + + for _, test := range tests { + require.Equal(t, test.err, test.msg.ValidateBasic()) + } +} diff --git a/go/node/deployment/v1beta4/params.go b/go/node/deployment/v1beta4/params.go new file mode 100644 index 00000000..647b3a3c --- /dev/null +++ b/go/node/deployment/v1beta4/params.go @@ -0,0 +1,93 @@ +package v1beta4 + +import ( + "fmt" + "math" + + sdk "github.com/cosmos/cosmos-sdk/types" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" + + v1 "pkg.akt.dev/go/node/deployment/v1" +) + +var _ paramtypes.ParamSet = (*Params)(nil) + +const ( + keyMinDeposits = "MinDeposits" +) + +func ParamKeyTable() paramtypes.KeyTable { + return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) +} + +func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs 
{ + return paramtypes.ParamSetPairs{ + paramtypes.NewParamSetPair([]byte(keyMinDeposits), &p.MinDeposits, validateMinDeposits), + } +} + +func DefaultParams() Params { + return Params{ + MinDeposits: sdk.Coins{ + sdk.NewCoin("uakt", sdk.NewInt(500000)), + }, + } +} + +func (p Params) Validate() error { + if err := validateMinDeposits(p.MinDeposits); err != nil { + return err + } + return nil +} + +func (p Params) ValidateDeposit(amt sdk.Coin) error { + min, err := p.MinDepositFor(amt.Denom) + + if err != nil { + return err + } + + if amt.IsGTE(min) { + return nil + } + + return fmt.Errorf("%w: Deposit too low - %v < %v", v1.ErrInvalidDeposit, amt.Amount, min) +} + +func (p Params) MinDepositFor(denom string) (sdk.Coin, error) { + for _, minDeposit := range p.MinDeposits { + if minDeposit.Denom == denom { + return sdk.NewCoin(minDeposit.Denom, minDeposit.Amount), nil + } + } + + return sdk.NewInt64Coin(denom, math.MaxInt64), fmt.Errorf("%w: Invalid deposit denomination %v", v1.ErrInvalidDeposit, denom) +} + +func validateMinDeposits(i interface{}) error { + vals, ok := i.(sdk.Coins) + if !ok { + return fmt.Errorf("%w: Min Deposits - invalid type: %T", v1.ErrInvalidParam, i) + } + + check := make(map[string]bool) + + for _, minDeposit := range vals { + if _, exists := check[minDeposit.Denom]; exists { + return fmt.Errorf("duplicate Min Deposit for denom (%#v)", minDeposit) + } + + check[minDeposit.Denom] = true + + if minDeposit.Amount.Uint64() >= math.MaxInt32 { + return fmt.Errorf("%w: Min Deposit (%v) - too large: %v", v1.ErrInvalidParam, minDeposit.Denom, minDeposit.Amount.Uint64()) + } + } + + if _, exists := check["uakt"]; !exists { + return fmt.Errorf("%w: Min Deposits - uakt not given: %#v", v1.ErrInvalidParam, vals) + } + + return nil +} diff --git a/go/node/deployment/v1beta4/params.pb.go b/go/node/deployment/v1beta4/params.pb.go new file mode 100644 index 00000000..800b78a9 --- /dev/null +++ b/go/node/deployment/v1beta4/params.pb.go @@ -0,0 +1,339 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1beta4/params.proto + +package v1beta4 + +import ( + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params defines the parameters for the x/deployment module +type Params struct { + MinDeposits github_com_cosmos_cosmos_sdk_types.Coins `protobuf:"bytes,1,rep,name=min_deposits,json=minDeposits,proto3,castrepeated=github.com/cosmos/cosmos-sdk/types.Coins" json:"min_deposits" yaml:"min_deposits"` +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_31b8da9fdb2b2cf0, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func (m *Params) GetMinDeposits() github_com_cosmos_cosmos_sdk_types.Coins { + if m != nil { + return m.MinDeposits + } + return nil +} + +func init() { + proto.RegisterType((*Params)(nil), "akash.deployment.v1beta4.Params") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/params.proto", fileDescriptor_31b8da9fdb2b2cf0) +} + +var fileDescriptor_31b8da9fdb2b2cf0 = []byte{ + // 286 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, + 0x4a, 0x2d, 0x49, 0x34, 0xd1, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x92, 0x00, 0x2b, 0xd3, 0x43, 0x28, 0xd3, 0x83, 0x2a, 0x93, 0x12, 0x49, 0xcf, 0x4f, + 0xcf, 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 0xa5, 0xe4, 0x92, 0xf3, 0x8b, 0x73, 0xf3, 0x8b, + 0xf5, 0x93, 0x12, 0x8b, 0x53, 0xa1, 0x26, 0x1a, 0xea, 0x27, 0xe7, 0x67, 0xe6, 0x41, 0xe4, 0x95, + 0xd6, 0x33, 0x72, 0xb1, 0x05, 0x80, 0x2d, 0x10, 0x5a, 0xc2, 0xc8, 0xc5, 0x93, 0x9b, 0x99, 0x17, + 0x9f, 0x92, 0x5a, 0x90, 0x5f, 0x9c, 0x59, 0x52, 0x2c, 0xc1, 0xa8, 0xc0, 0xac, 0xc1, 0x6d, 0x24, + 0xa9, 0x07, 0x31, 0x42, 0x0f, 0x64, 0x04, 0xd4, 0x36, 0x43, 0x3d, 0xe7, 0xfc, 0xcc, 0x3c, 0xa7, + 0xb4, 0x13, 0xf7, 0xe4, 0x19, 0x1e, 0xdd, 0x93, 0xe7, 0xf6, 0xcd, 0xcc, 0x73, 0x81, 0xea, 0x7a, + 0x75, 0x4f, 0x1e, 0xc5, 0x94, 0x4f, 0xf7, 0xe4, 0x85, 0x2b, 0x13, 0x73, 0x73, 0xac, 0x94, 0x90, + 0x45, 0x95, 0x56, 0xdd, 0x97, 0xd7, 0x48, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, + 0xd5, 0x87, 0xba, 0x12, 0x42, 0xe9, 0x16, 0xa7, 0x64, 0xeb, 0x97, 0x54, 0x16, 0xa4, 0x16, 0x83, + 0xad, 0x29, 0x0e, 0xe2, 0xce, 0x45, 0x98, 0xef, 0xe4, 0x70, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, + 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, + 0xc7, 0x72, 0x0c, 0x51, 0x6a, 0x05, 0xd9, 0xe9, 0x7a, 0x89, 0xd9, 0x25, 0x7a, 0x29, 0xa9, 0x65, + 0xfa, 0xe9, 0xf9, 0xfa, 0x79, 0xf9, 0x29, 0xa9, 0x58, 0x02, 0x34, 0x89, 0x0d, 0xec, 0x75, 0x63, + 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfb, 0xc8, 0x70, 0x7c, 0x73, 0x01, 0x00, 0x00, +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + 
size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MinDeposits) > 0 { + for iNdEx := len(m.MinDeposits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MinDeposits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MinDeposits) > 0 { + for _, e := range m.MinDeposits { + l = e.Size() + n += 1 + l + sovParams(uint64(l)) + } + } + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MinDeposits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MinDeposits = append(m.MinDeposits, types.Coin{}) + if err := m.MinDeposits[len(m.MinDeposits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/paramsmsg.pb.go b/go/node/deployment/v1beta4/paramsmsg.pb.go new file mode 100644 index 00000000..9cd0bbd8 --- /dev/null +++ b/go/node/deployment/v1beta4/paramsmsg.pb.go @@ -0,0 +1,511 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1beta4/paramsmsg.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgUpdateParams is the Msg/UpdateParams request type. +// +// Since: akash v1.0.0 +type MsgUpdateParams struct { + // authority is the address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // params defines the x/deployment parameters to update. + // + // NOTE: All parameters must be supplied. 
+ Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params"` +} + +func (m *MsgUpdateParams) Reset() { *m = MsgUpdateParams{} } +func (m *MsgUpdateParams) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParams) ProtoMessage() {} +func (*MsgUpdateParams) Descriptor() ([]byte, []int) { + return fileDescriptor_519d8e94f02fefe6, []int{0} +} +func (m *MsgUpdateParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParams.Merge(m, src) +} +func (m *MsgUpdateParams) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParams) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParams.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParams proto.InternalMessageInfo + +func (m *MsgUpdateParams) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgUpdateParams) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +// MsgUpdateParamsResponse defines the response structure for executing a +// MsgUpdateParams message. +// +// Since: akash v1.0.0 +type MsgUpdateParamsResponse struct { +} + +func (m *MsgUpdateParamsResponse) Reset() { *m = MsgUpdateParamsResponse{} } +func (m *MsgUpdateParamsResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParamsResponse) ProtoMessage() {} +func (*MsgUpdateParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_519d8e94f02fefe6, []int{1} +} +func (m *MsgUpdateParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParamsResponse.Merge(m, src) +} +func (m *MsgUpdateParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParamsResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgUpdateParams)(nil), "akash.deployment.v1beta4.MsgUpdateParams") + proto.RegisterType((*MsgUpdateParamsResponse)(nil), "akash.deployment.v1beta4.MsgUpdateParamsResponse") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/paramsmsg.proto", fileDescriptor_519d8e94f02fefe6) +} + +var fileDescriptor_519d8e94f02fefe6 = []byte{ + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x48, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, + 0x4a, 0x2d, 0x49, 0x34, 0xd1, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xce, 0x2d, 0x4e, 0xd7, 0x2b, + 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x00, 0xab, 0xd4, 0x43, 0xa8, 0xd4, 0x83, 0xaa, 0x94, 0x12, + 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 
0xa5, 0xc4, 0x93, 0xf3, 0x8b, + 0x73, 0xf3, 0x8b, 0xf5, 0x73, 0x8b, 0xd3, 0xf5, 0xcb, 0x0c, 0xf5, 0xe1, 0x06, 0x49, 0x49, 0x42, + 0x24, 0xe2, 0x21, 0x3a, 0x20, 0x1c, 0xa8, 0x94, 0x2a, 0x01, 0xd7, 0x40, 0x94, 0x29, 0xcd, 0x64, + 0xe4, 0xe2, 0xf7, 0x2d, 0x4e, 0x0f, 0x2d, 0x48, 0x49, 0x2c, 0x49, 0x0d, 0x00, 0xcb, 0x08, 0x99, + 0x71, 0x71, 0x26, 0x96, 0x96, 0x64, 0xe4, 0x17, 0x65, 0x96, 0x54, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x3a, 0x49, 0x5c, 0xda, 0xa2, 0x2b, 0x02, 0x35, 0xdf, 0x31, 0x25, 0xa5, 0x28, 0xb5, 0xb8, + 0x38, 0xb8, 0xa4, 0x28, 0x33, 0x2f, 0x3d, 0x08, 0xa1, 0x54, 0xc8, 0x8e, 0x8b, 0x0d, 0x62, 0xb6, + 0x04, 0x93, 0x02, 0xa3, 0x06, 0xb7, 0x91, 0x82, 0x1e, 0x2e, 0x7f, 0xea, 0x41, 0x6c, 0x72, 0x62, + 0x39, 0x71, 0x4f, 0x9e, 0x21, 0x08, 0xaa, 0xcb, 0x8a, 0xaf, 0xe9, 0xf9, 0x06, 0x2d, 0x84, 0x79, + 0x4a, 0x92, 0x5c, 0xe2, 0x68, 0x4e, 0x0b, 0x4a, 0x2d, 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x75, 0x72, + 0x38, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, + 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xb5, 0x82, 0xec, 0x74, 0xbd, + 0xc4, 0xec, 0x12, 0xbd, 0x94, 0xd4, 0x32, 0xfd, 0xf4, 0x7c, 0xfd, 0xbc, 0xfc, 0x94, 0x54, 0x2c, + 0xa1, 0x90, 0xc4, 0x06, 0xf6, 0xbf, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x56, 0x52, 0xf5, 0x5e, + 0xb6, 0x01, 0x00, 0x00, +} + +func (m *MsgUpdateParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParamsmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintParamsmsg(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintParamsmsg(dAtA []byte, offset int, v uint64) int { + offset -= sovParamsmsg(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgUpdateParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovParamsmsg(uint64(l)) + } + l = m.Params.Size() + n += 1 + l + sovParamsmsg(uint64(l)) + return n +} + +func (m *MsgUpdateParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovParamsmsg(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParamsmsg(x uint64) (n int) { + return sovParamsmsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m 
*MsgUpdateParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParamsmsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParamsmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParamsmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParamsmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParamsmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParamsmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipParamsmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParamsmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParamsmsg(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + 
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowParamsmsg
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowParamsmsg
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+		case 1:
+			iNdEx += 8
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowParamsmsg
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthParamsmsg
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupParamsmsg
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthParamsmsg
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthParamsmsg        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowParamsmsg          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupParamsmsg = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/go/node/deployment/v1beta4/path.go b/go/node/deployment/v1beta4/path.go
new file mode 100644
index 00000000..294345d7
--- /dev/null
+++ b/go/node/deployment/v1beta4/path.go
@@ -0,0 +1,60 @@
+package v1beta4
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+
+	types "pkg.akt.dev/go/node/deployment/v1"
+)
+
+const (
+	deploymentsPath = "deployments"
+	deploymentPath  = "deployment"
+	groupPath       = "group"
+)
+
+var (
+	ErrInvalidPath = errors.New("query: invalid path")
+)
+
+// getDeploymentsPath returns the deployments path for queries
+// nolint: unused
+func getDeploymentsPath(dfilters DeploymentFilters) string {
+	return fmt.Sprintf("%s/%s/%v", deploymentsPath, dfilters.Owner, dfilters.State)
+}
+
+// DeploymentPath returns the deployment path of the given deployment ID for queries
+func DeploymentPath(id types.DeploymentID) string {
+	return fmt.Sprintf("%s/%s", deploymentPath, deploymentParts(id))
+}
+
+// getGroupPath returns the group path of the given group ID for queries
+// nolint: unused
+func getGroupPath(id types.GroupID) string {
+	return fmt.Sprintf("%s/%s/%v/%v", groupPath, id.Owner, id.DSeq, id.GSeq)
+}
+
+// ParseGroupPath parses the given path parts into a GroupID, returning an
+// error if the path is malformed
+func ParseGroupPath(parts []string) (types.GroupID, error) {
+	if len(parts) < 3 {
+		return types.GroupID{}, ErrInvalidPath
+	}
+
+	did, err := types.ParseDeploymentPath(parts[0:2])
+	if err != nil {
+		return types.GroupID{}, err
+	}
+
+	gseq, err := strconv.ParseUint(parts[2], 10, 32)
+	if err != nil {
+		return types.GroupID{}, err
+	}
+
+	return types.MakeGroupID(did, uint32(gseq)), nil
+}
+
+func deploymentParts(id types.DeploymentID) string {
+	return fmt.Sprintf("%s/%v", id.Owner, id.DSeq)
+}
diff --git a/go/node/deployment/v1beta4/query.pb.go b/go/node/deployment/v1beta4/query.pb.go
new file mode 100644
index 00000000..cb3259c3
--- /dev/null
+++ b/go/node/deployment/v1beta4/query.pb.go
@@ -0,0 +1,1967 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: akash/deployment/v1beta4/query.proto + +package v1beta4 + +import ( + context "context" + fmt "fmt" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + v1 "pkg.akt.dev/go/node/deployment/v1" + v11 "pkg.akt.dev/go/node/escrow/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryDeploymentsRequest is request type for the Query/Deployments RPC method +type QueryDeploymentsRequest struct { + Filters DeploymentFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryDeploymentsRequest) Reset() { *m = QueryDeploymentsRequest{} } +func (m *QueryDeploymentsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDeploymentsRequest) ProtoMessage() {} +func (*QueryDeploymentsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_17bc7a7d7b435f28, []int{0} +} +func (m *QueryDeploymentsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDeploymentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDeploymentsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDeploymentsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDeploymentsRequest.Merge(m, src) +} +func (m *QueryDeploymentsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDeploymentsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDeploymentsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDeploymentsRequest proto.InternalMessageInfo + +func (m *QueryDeploymentsRequest) GetFilters() DeploymentFilters { + if m != nil { + return m.Filters + } + return DeploymentFilters{} +} + +func (m *QueryDeploymentsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryDeploymentsResponse is response type for the Query/Deployments RPC method +type QueryDeploymentsResponse struct { + Deployments DeploymentResponses `protobuf:"bytes,1,rep,name=deployments,proto3,castrepeated=DeploymentResponses" json:"deployments"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryDeploymentsResponse) Reset() { *m = QueryDeploymentsResponse{} } +func (m *QueryDeploymentsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryDeploymentsResponse) ProtoMessage() {} +func (*QueryDeploymentsResponse) Descriptor() ([]byte, []int) { + return 
fileDescriptor_17bc7a7d7b435f28, []int{1} +} +func (m *QueryDeploymentsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDeploymentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDeploymentsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDeploymentsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDeploymentsResponse.Merge(m, src) +} +func (m *QueryDeploymentsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryDeploymentsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDeploymentsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDeploymentsResponse proto.InternalMessageInfo + +func (m *QueryDeploymentsResponse) GetDeployments() DeploymentResponses { + if m != nil { + return m.Deployments + } + return nil +} + +func (m *QueryDeploymentsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryDeploymentRequest is request type for the Query/Deployment RPC method +type QueryDeploymentRequest struct { + ID v1.DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` +} + +func (m *QueryDeploymentRequest) Reset() { *m = QueryDeploymentRequest{} } +func (m *QueryDeploymentRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDeploymentRequest) ProtoMessage() {} +func (*QueryDeploymentRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_17bc7a7d7b435f28, []int{2} +} +func (m *QueryDeploymentRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDeploymentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDeploymentRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDeploymentRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDeploymentRequest.Merge(m, src) +} +func (m *QueryDeploymentRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDeploymentRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDeploymentRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDeploymentRequest proto.InternalMessageInfo + +func (m *QueryDeploymentRequest) GetID() v1.DeploymentID { + if m != nil { + return m.ID + } + return v1.DeploymentID{} +} + +// QueryDeploymentResponse is response type for the Query/Deployment RPC method +type QueryDeploymentResponse struct { + Deployment v1.Deployment `protobuf:"bytes,1,opt,name=deployment,proto3" json:"deployment" yaml:"deployment"` + Groups Groups `protobuf:"bytes,2,rep,name=groups,proto3,castrepeated=Groups" json:"groups" yaml:"groups"` + EscrowAccount v11.Account `protobuf:"bytes,3,opt,name=escrow_account,json=escrowAccount,proto3" json:"escrow_account"` +} + +func (m *QueryDeploymentResponse) Reset() { *m = QueryDeploymentResponse{} } +func (m *QueryDeploymentResponse) String() string { return proto.CompactTextString(m) } +func (*QueryDeploymentResponse) ProtoMessage() {} +func (*QueryDeploymentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_17bc7a7d7b435f28, []int{3} +} +func (m *QueryDeploymentResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDeploymentResponse) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDeploymentResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDeploymentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDeploymentResponse.Merge(m, src) +} +func (m *QueryDeploymentResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryDeploymentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDeploymentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDeploymentResponse proto.InternalMessageInfo + +func (m *QueryDeploymentResponse) GetDeployment() v1.Deployment { + if m != nil { + return m.Deployment + } + return v1.Deployment{} +} + +func (m *QueryDeploymentResponse) GetGroups() Groups { + if m != nil { + return m.Groups + } + return nil +} + +func (m *QueryDeploymentResponse) GetEscrowAccount() v11.Account { + if m != nil { + return m.EscrowAccount + } + return v11.Account{} +} + +// QueryGroupRequest is request type for the Query/Group RPC method +type QueryGroupRequest struct { + ID v1.GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` +} + +func (m *QueryGroupRequest) Reset() { *m = QueryGroupRequest{} } +func (m *QueryGroupRequest) String() string { return proto.CompactTextString(m) } +func (*QueryGroupRequest) ProtoMessage() {} +func (*QueryGroupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_17bc7a7d7b435f28, []int{4} +} +func (m *QueryGroupRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryGroupRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryGroupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryGroupRequest.Merge(m, src) +} +func (m *QueryGroupRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryGroupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryGroupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryGroupRequest proto.InternalMessageInfo + +func (m *QueryGroupRequest) GetID() v1.GroupID { + if m != nil { + return m.ID + } + return v1.GroupID{} +} + +// QueryGroupResponse is response type for the Query/Group RPC method +type QueryGroupResponse struct { + Group Group `protobuf:"bytes,1,opt,name=group,proto3" json:"group"` +} + +func (m *QueryGroupResponse) Reset() { *m = QueryGroupResponse{} } +func (m *QueryGroupResponse) String() string { return proto.CompactTextString(m) } +func (*QueryGroupResponse) ProtoMessage() {} +func (*QueryGroupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_17bc7a7d7b435f28, []int{5} +} +func (m *QueryGroupResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryGroupResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryGroupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryGroupResponse.Merge(m, src) +} +func (m *QueryGroupResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryGroupResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_QueryGroupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryGroupResponse proto.InternalMessageInfo + +func (m *QueryGroupResponse) GetGroup() Group { + if m != nil { + return m.Group + } + return Group{} +} + +// QueryParamsRequest is the request type for the Query/Params RPC method. +type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_17bc7a7d7b435f28, []int{6} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +// QueryParamsResponse is the response type for the Query/Params RPC method. +type QueryParamsResponse struct { + // params defines the parameters of the module. + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_17bc7a7d7b435f28, []int{7} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +func (m *QueryParamsResponse) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*QueryDeploymentsRequest)(nil), "akash.deployment.v1beta4.QueryDeploymentsRequest") + proto.RegisterType((*QueryDeploymentsResponse)(nil), "akash.deployment.v1beta4.QueryDeploymentsResponse") + proto.RegisterType((*QueryDeploymentRequest)(nil), "akash.deployment.v1beta4.QueryDeploymentRequest") + proto.RegisterType((*QueryDeploymentResponse)(nil), "akash.deployment.v1beta4.QueryDeploymentResponse") + proto.RegisterType((*QueryGroupRequest)(nil), "akash.deployment.v1beta4.QueryGroupRequest") + proto.RegisterType((*QueryGroupResponse)(nil), "akash.deployment.v1beta4.QueryGroupResponse") + 
proto.RegisterType((*QueryParamsRequest)(nil), "akash.deployment.v1beta4.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "akash.deployment.v1beta4.QueryParamsResponse") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/query.proto", fileDescriptor_17bc7a7d7b435f28) +} + +var fileDescriptor_17bc7a7d7b435f28 = []byte{ + // 768 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x4f, 0x6b, 0x13, 0x4d, + 0x1c, 0xce, 0xa6, 0x4d, 0x5e, 0xde, 0x09, 0x7d, 0xa1, 0xd3, 0xf2, 0x1a, 0x62, 0xcd, 0xd6, 0xc5, + 0xfe, 0xb1, 0x7f, 0x76, 0x6c, 0xf4, 0x20, 0x15, 0x41, 0x63, 0x6d, 0x29, 0x22, 0xb4, 0x7b, 0x11, + 0x7a, 0x91, 0x49, 0x32, 0xdd, 0x2e, 0x4d, 0x76, 0xb6, 0x99, 0x4d, 0xa5, 0x37, 0xf1, 0x0b, 0xa8, + 0xf8, 0x05, 0xbc, 0x08, 0xa5, 0x07, 0xf1, 0xe0, 0x87, 0xe8, 0xb1, 0x20, 0x82, 0xa7, 0x54, 0x52, + 0x41, 0xf1, 0xd8, 0x4f, 0x20, 0x3b, 0xf3, 0x4b, 0x77, 0x6b, 0xb2, 0x4d, 0x73, 0x09, 0xd9, 0x99, + 0xe7, 0xf7, 0x3c, 0xcf, 0xef, 0xf7, 0xcc, 0xce, 0xa2, 0x1b, 0x74, 0x9b, 0x8a, 0x2d, 0x52, 0x61, + 0x5e, 0x95, 0xef, 0xd5, 0x98, 0xeb, 0x93, 0xdd, 0x85, 0x12, 0xf3, 0xe9, 0x1d, 0xb2, 0xd3, 0x60, + 0xf5, 0x3d, 0xd3, 0xab, 0x73, 0x9f, 0xe3, 0xac, 0x44, 0x99, 0x21, 0xca, 0x04, 0x54, 0x6e, 0xd4, + 0xe6, 0x36, 0x97, 0x20, 0x12, 0xfc, 0x53, 0xf8, 0xdc, 0x30, 0xad, 0x39, 0x2e, 0x27, 0xf2, 0x17, + 0x96, 0xc6, 0x6c, 0xce, 0xed, 0x2a, 0x23, 0xd4, 0x73, 0x08, 0x75, 0x5d, 0xee, 0x53, 0xdf, 0xe1, + 0xae, 0x80, 0xdd, 0x99, 0x32, 0x17, 0x35, 0x2e, 0x48, 0x89, 0x0a, 0xa6, 0x94, 0xc1, 0xc7, 0x02, + 0xf1, 0xa8, 0xed, 0xb8, 0x12, 0x0c, 0xd8, 0xc9, 0x58, 0xcb, 0x9b, 0x4e, 0xd5, 0x67, 0xf5, 0x36, + 0x67, 0x7c, 0x6b, 0x76, 0x9d, 0x37, 0x3c, 0x40, 0x4d, 0xc4, 0xa2, 0x3c, 0x5a, 0xa7, 0xb5, 0x8b, + 0xc8, 0x22, 0x4f, 0x80, 0xd2, 0xbb, 0xa1, 0xa2, 0x6a, 0xd7, 0x14, 0x80, 0x89, 0x72, 0x9d, 0xbf, + 0x08, 0x36, 0x69, 0xb9, 0xcc, 0x1b, 0xed, 0x7a, 0xe3, 0xa3, 0x86, 0xae, 0xac, 0x07, 0xdd, 0x2f, + 0x9d, 0x31, 0x08, 0x8b, 0xed, 0x34, 0x98, 0xf0, 0xf1, 0x13, 0xf4, 0x0f, 0xf4, 0x97, 0xd5, 0xc6, + 0xb5, 0xe9, 0x4c, 0x61, 0xd6, 0x8c, 0x4b, 0xc5, 0x0c, 0xcb, 0x97, 0x55, 0x49, 0x71, 0xf0, 0xb0, + 0xa9, 0x27, 0xac, 0x36, 0x03, 0x5e, 0x46, 0x28, 0x9c, 0x6b, 0x36, 0x29, 0xf9, 0x26, 0x4d, 0x15, + 0x82, 0x19, 0x84, 0x60, 0xaa, 0xf8, 0x21, 0x04, 0x73, 0x8d, 0xda, 0x0c, 0x8c, 0x58, 0x91, 0x4a, + 0xe3, 0xab, 0x86, 0xb2, 0x9d, 0x86, 0x85, 0xc7, 0x5d, 0xc1, 0xb0, 0x87, 0x32, 0xa1, 0xb7, 0xc0, + 0xf5, 0xc0, 0x74, 0xa6, 0xb0, 0x10, 0xef, 0xfa, 0x2f, 0xa2, 0x36, 0x4f, 0xf1, 0x6a, 0xe0, 0xfd, + 0xe0, 0x58, 0x1f, 0xe9, 0xdc, 0x13, 0x56, 0x54, 0x02, 0xaf, 0x74, 0x69, 0x6b, 0xaa, 0x67, 0x5b, + 0x8a, 0xea, 0x5c, 0x5f, 0xcf, 0xd0, 0xff, 0x1d, 0x6e, 0x54, 0x0c, 0xf7, 0x51, 0xd2, 0xa9, 0x40, + 0x02, 0xd7, 0xbb, 0xf5, 0x12, 0x19, 0xfe, 0xea, 0x52, 0x11, 0x05, 0xde, 0x5b, 0x4d, 0x3d, 0xb9, + 0xba, 0x64, 0x25, 0x9d, 0x8a, 0xf1, 0x39, 0xd9, 0x91, 0xf0, 0xd9, 0xbc, 0xb6, 0x10, 0x0a, 0x99, + 0x40, 0x42, 0xef, 0x21, 0x51, 0x9c, 0x0a, 0x04, 0x7e, 0x37, 0xf5, 0x48, 0xe9, 0x69, 0x53, 0x1f, + 0xde, 0xa3, 0xb5, 0xea, 0xa2, 0x11, 0xae, 0x19, 0x56, 0x04, 0x80, 0x4b, 0x28, 0x2d, 0x4f, 0xa5, + 0xc8, 0x26, 0x65, 0x28, 0x7a, 0x7c, 0x28, 0x2b, 0x01, 0xae, 0x78, 0x13, 0x54, 0xa0, 0xec, 0xb4, + 0xa9, 0x0f, 0x29, 0x05, 0xf5, 0x6c, 0x1c, 0x1c, 0xeb, 0x69, 0x89, 0x14, 0x16, 0x40, 0xf0, 0x63, + 0xf4, 0x9f, 0x3a, 0xe6, 0xcf, 0xe1, 0x8c, 0x67, 0x07, 0x64, 0x47, 0x59, 0xd0, 0x52, 0x9b, 0x41, + 0x37, 0x0f, 0xd5, 0x3e, 0x9c, 0xd1, 0x21, 0xb5, 0x01, 0x8b, 0x8b, 0x83, 0xbf, 0xde, 0xeb, 0x09, + 0xe3, 
0x29, 0x1a, 0x96, 0x53, 0x93, 0x1a, 0xed, 0x28, 0xee, 0x46, 0xa2, 0x18, 0xeb, 0x3a, 0x27, + 0x09, 0xef, 0x92, 0xc2, 0x3a, 0xc2, 0x51, 0x3a, 0x98, 0xff, 0x3d, 0x94, 0x92, 0xde, 0x2f, 0x1a, + 0x7d, 0x74, 0x28, 0xca, 0xaf, 0xaa, 0x31, 0x46, 0x81, 0x72, 0x4d, 0xde, 0x1a, 0x60, 0xd1, 0xd8, + 0x40, 0x23, 0xe7, 0x56, 0x41, 0xe9, 0x11, 0x4a, 0xab, 0xdb, 0x05, 0xa4, 0xc6, 0xe3, 0xa5, 0x54, + 0x65, 0xf1, 0xdf, 0x40, 0x6b, 0xff, 0xe7, 0xa7, 0x19, 0xcd, 0x82, 0xd2, 0xc2, 0xcb, 0x14, 0x4a, + 0x49, 0x72, 0xbc, 0xaf, 0xa1, 0x4c, 0xe4, 0x05, 0xc4, 0x97, 0x7f, 0xc7, 0xda, 0x46, 0x73, 0x85, + 0x7e, 0x4a, 0x54, 0x17, 0x46, 0xe1, 0xd5, 0x97, 0x1f, 0xef, 0x92, 0x73, 0x78, 0x86, 0xc4, 0xde, + 0xa1, 0x91, 0x97, 0x93, 0x54, 0x1d, 0xe1, 0xe3, 0x0f, 0x1a, 0x42, 0x21, 0x17, 0xbe, 0xd5, 0xc7, + 0x6d, 0xa0, 0x8c, 0xf6, 0x7f, 0x7f, 0xf4, 0xeb, 0xd3, 0x71, 0x37, 0x39, 0x7e, 0xab, 0xa1, 0x94, + 0x4c, 0x19, 0xcf, 0xf6, 0x10, 0x8c, 0x1e, 0xc9, 0xdc, 0xdc, 0xe5, 0xc0, 0x60, 0x6c, 0x5e, 0x1a, + 0x9b, 0xc2, 0x13, 0xe4, 0xe2, 0x4f, 0x15, 0x78, 0x7a, 0xad, 0xa1, 0xb4, 0x3a, 0x0e, 0xb8, 0x97, + 0xce, 0xb9, 0x53, 0x98, 0x9b, 0xbf, 0x24, 0x1a, 0x6c, 0x4d, 0x4b, 0x5b, 0x06, 0x1e, 0x27, 0x3d, + 0xbe, 0x8d, 0xc5, 0x07, 0x87, 0xad, 0xbc, 0x76, 0xd4, 0xca, 0x6b, 0xdf, 0x5b, 0x79, 0xed, 0xcd, + 0x49, 0x3e, 0x71, 0x74, 0x92, 0x4f, 0x7c, 0x3b, 0xc9, 0x27, 0x36, 0x26, 0xbd, 0x6d, 0xdb, 0xa4, + 0xdb, 0xbe, 0x59, 0x61, 0xbb, 0xc4, 0xe6, 0xc4, 0xe5, 0x15, 0xd6, 0x85, 0xa8, 0x94, 0x96, 0x1f, + 0xbe, 0xdb, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe1, 0xd1, 0xd3, 0x1b, 0x88, 0x08, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Deployments queries deployments + Deployments(ctx context.Context, in *QueryDeploymentsRequest, opts ...grpc.CallOption) (*QueryDeploymentsResponse, error) + // Deployment queries deployment details + Deployment(ctx context.Context, in *QueryDeploymentRequest, opts ...grpc.CallOption) (*QueryDeploymentResponse, error) + // Group queries group details + Group(ctx context.Context, in *QueryGroupRequest, opts ...grpc.CallOption) (*QueryGroupResponse, error) + // Params returns the total set of minting parameters. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Deployments(ctx context.Context, in *QueryDeploymentsRequest, opts ...grpc.CallOption) (*QueryDeploymentsResponse, error) { + out := new(QueryDeploymentsResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Query/Deployments", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Deployment(ctx context.Context, in *QueryDeploymentRequest, opts ...grpc.CallOption) (*QueryDeploymentResponse, error) { + out := new(QueryDeploymentResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Query/Deployment", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Group(ctx context.Context, in *QueryGroupRequest, opts ...grpc.CallOption) (*QueryGroupResponse, error) { + out := new(QueryGroupResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Query/Group", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Query/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Deployments queries deployments + Deployments(context.Context, *QueryDeploymentsRequest) (*QueryDeploymentsResponse, error) + // Deployment queries deployment details + Deployment(context.Context, *QueryDeploymentRequest) (*QueryDeploymentResponse, error) + // Group queries group details + Group(context.Context, *QueryGroupRequest) (*QueryGroupResponse, error) + // Params returns the total set of minting parameters. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Deployments(ctx context.Context, req *QueryDeploymentsRequest) (*QueryDeploymentsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Deployments not implemented") +} +func (*UnimplementedQueryServer) Deployment(ctx context.Context, req *QueryDeploymentRequest) (*QueryDeploymentResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Deployment not implemented") +} +func (*UnimplementedQueryServer) Group(ctx context.Context, req *QueryGroupRequest) (*QueryGroupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Group not implemented") +} +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Deployments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryDeploymentsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Deployments(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Query/Deployments", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Deployments(ctx, req.(*QueryDeploymentsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Deployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryDeploymentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Deployment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Query/Deployment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { 
+ return srv.(QueryServer).Deployment(ctx, req.(*QueryDeploymentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Group_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Group(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Query/Group", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Group(ctx, req.(*QueryGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.deployment.v1beta4.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Deployments", + Handler: _Query_Deployments_Handler, + }, + { + MethodName: "Deployment", + Handler: _Query_Deployment_Handler, + }, + { + MethodName: "Group", + Handler: _Query_Group_Handler, + }, + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/deployment/v1beta4/query.proto", +} + +func (m *QueryDeploymentsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDeploymentsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDeploymentsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryDeploymentsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDeploymentsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDeploymentsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + 
i-- + dAtA[i] = 0x12 + } + if len(m.Deployments) > 0 { + for iNdEx := len(m.Deployments) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Deployments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryDeploymentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDeploymentRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDeploymentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryDeploymentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.EscrowAccount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Deployment.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryGroupRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryGroupRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryGroupRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryGroupResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryGroupResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryDeploymentsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Filters.Size() + n += 1 + l + sovQuery(uint64(l)) + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDeploymentsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Deployments) > 0 { + for _, e := range m.Deployments { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDeploymentRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryDeploymentResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Deployment.Size() + n += 1 + l + sovQuery(uint64(l)) + if len(m.Groups) > 0 { + for _, e := range m.Groups { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + l = m.EscrowAccount.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryGroupRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryGroupResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Group.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryDeploymentsRequest) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDeploymentsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDeploymentsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDeploymentsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDeploymentsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDeploymentsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deployments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Deployments = append(m.Deployments, QueryDeploymentResponse{}) + if err := m.Deployments[len(m.Deployments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDeploymentRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDeploymentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDeploymentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDeploymentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDeploymentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deployment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Deployment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, Group{}) + if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EscrowAccount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EscrowAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryGroupRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryGroupRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryGroupRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryGroupResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryGroupResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/query.pb.gw.go b/go/node/deployment/v1beta4/query.pb.gw.go new file mode 100644 index 00000000..faec0082 --- /dev/null +++ b/go/node/deployment/v1beta4/query.pb.gw.go @@ -0,0 +1,402 @@ +// Code generated by protoc-gen-grpc-gateway. 
DO NOT EDIT. +// source: akash/deployment/v1beta4/query.proto + +/* +Package v1beta4 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package v1beta4 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +var ( + filter_Query_Deployments_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Deployments_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDeploymentsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployments_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Deployments(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Deployments_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDeploymentsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployments_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Deployments(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Deployment_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Deployment_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDeploymentRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployment_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Deployment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Deployment_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDeploymentRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployment_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Deployment(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Group_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Group_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryGroupRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Group_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Group(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Group_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryGroupRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Group_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Group(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.Params(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
+func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Deployments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Deployments_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Deployments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Deployment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Deployment_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Deployment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Group_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Group_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Group_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Deployments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Deployments_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Deployments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Deployment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Deployment_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Deployment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Group_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Group_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Group_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Query_Deployments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "deployment", "v1beta4", "deployments", "list"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_Deployment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "deployment", "v1beta4", "deployments", "info"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_Group_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "deployment", "v1beta4", "groups", "info"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"akash", "deployment", "v1beta4", "params"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_Deployments_0 = runtime.ForwardResponseMessage + + forward_Query_Deployment_0 = runtime.ForwardResponseMessage + + forward_Query_Group_0 = runtime.ForwardResponseMessage + + forward_Query_Params_0 = runtime.ForwardResponseMessage +) diff --git a/go/node/deployment/v1beta4/resource_list_validation.go b/go/node/deployment/v1beta4/resource_list_validation.go new file mode 100644 index 00000000..89e965df --- /dev/null +++ b/go/node/deployment/v1beta4/resource_list_validation.go @@ -0,0 +1,187 @@ +package v1beta4 + +import ( + "errors" +) + +var ( + ErrNoGroupsPresent = errors.New("validation: no groups present") + ErrGroupEmptyName = errors.New("validation: group has empty name") +) + +// func ValidateResourceList(rlist GSpec) error { +// if rlist.GetName() == "" { +// return ErrGroupEmptyName +// } +// +// units := rlist.GetResources() +// +// if count := len(units); count > validationConfig.MaxGroupUnits { +// return fmt.Errorf("group %v: too many units (%v > %v)", rlist.GetName(), count, validationConfig.MaxGroupUnits) +// } +// +// if err := units.Validate(); err != nil { +// return fmt.Errorf("group %v: %w", rlist.GetName(), err) +// } +// +// limits := newLimits() +// +// for _, resource := range units { +// gLimits, err := validateGroupResource(resource) +// if err != nil { +// return fmt.Errorf("group %v: %w", rlist.GetName(), err) +// } +// +// limits.add(gLimits) +// } +// +// if limits.cpu.GT(sdk.NewIntFromUint64(validationConfig.MaxGroupCPU)) || limits.cpu.LTE(sdk.ZeroInt()) { +// return fmt.Errorf("group %v: invalid total CPU (%v > %v > %v fails)", +// rlist.GetName(), validationConfig.MaxGroupCPU, limits.cpu, 0) +// } +// +// if !limits.gpu.IsZero() && (limits.gpu.GT(sdk.NewIntFromUint64(validationConfig.MaxGroupGPU)) || limits.gpu.LTE(sdk.ZeroInt())) { +// return fmt.Errorf("group %v: invalid total GPU (%v > %v > %v fails)", +// rlist.GetName(), validationConfig.MaxGroupGPU, limits.gpu, 0) +// } +// +// if limits.memory.GT(sdk.NewIntFromUint64(validationConfig.MaxGroupMemory)) || limits.memory.LTE(sdk.ZeroInt()) { +// return fmt.Errorf("group %v: invalid total memory (%v > %v > %v fails)", +// rlist.GetName(), validationConfig.MaxGroupMemory, limits.memory, 0) +// } +// +// for i := range limits.storage { +// if limits.storage[i].GT(sdk.NewIntFromUint64(validationConfig.MaxGroupStorage)) || limits.storage[i].LTE(sdk.ZeroInt()) { +// return fmt.Errorf("group %v: invalid total storage (%v > %v > %v fails)", +// rlist.GetName(), validationConfig.MaxGroupStorage, limits.storage, 0) +// } +// } +// +// return nil +// } + +// func validateGroupResource(rg GroupResource) (resourceLimits, error) { 
+// limits, err := validateResourceUnits(rg.Resource.Units) +// if err != nil { +// return resourceLimits{}, err +// } +// +// if rg.Count > uint32(validationConfig.MaxUnitCount) || rg.Count < uint32(validationConfig.MinUnitCount) { +// return resourceLimits{}, fmt.Errorf("error: invalid unit count (%v > %v > %v fails)", +// validationConfig.MaxUnitCount, rg.Count, validationConfig.MinUnitCount) +// } +// +// limits.mul(rg.Count) +// +// return limits, nil +// } + +// func validateResourceUnits(units types.ResourceUnits) (resourceLimits, error) { +// limits := newLimits() +// +// val, err := validateCPU(units.CPU) +// if err != nil { +// return resourceLimits{}, err +// } +// limits.cpu = limits.cpu.Add(val) +// +// val, err = validateGPU(units.GPU) +// if err != nil { +// return resourceLimits{}, err +// } +// limits.gpu = limits.gpu.Add(val) +// +// val, err = validateMemory(units.Memory) +// if err != nil { +// return resourceLimits{}, err +// } +// limits.memory = limits.memory.Add(val) +// +// var storage []sdk.Int +// storage, err = validateStorage(units.Storage) +// if err != nil { +// return resourceLimits{}, err +// } +// +// // fixme this is not actually sum for storage usecase. +// // do we really need sum here? +// limits.storage = storage +// +// return limits, nil +// } +// +// func validateCPU(u *types.CPU) (sdk.Int, error) { +// if u == nil { +// return sdk.Int{}, fmt.Errorf("error: invalid unit CPU, cannot be nil") +// } +// if (u.Units.Value() > uint64(validationConfig.MaxUnitCPU)) || (u.Units.Value() < uint64(validationConfig.MinUnitCPU)) { +// return sdk.Int{}, fmt.Errorf("error: invalid unit CPU (%v > %v > %v fails)", +// validationConfig.MaxUnitCPU, u.Units.Value(), validationConfig.MinUnitCPU) +// } +// +// if err := u.Attributes.Validate(); err != nil { +// return sdk.Int{}, fmt.Errorf("error: invalid CPU attributes: %w", err) +// } +// +// return u.Units.Val, nil +// } +// +// func validateGPU(u *types.GPU) (sdk.Int, error) { +// if u == nil { +// return sdk.Int{}, fmt.Errorf("error: invalid unit GPU, cannot be nil") +// } +// +// if (u.Units.Value() > uint64(validationConfig.MaxUnitGPU)) || (u.Units.Value() < uint64(validationConfig.MinUnitGPU)) { +// return sdk.Int{}, fmt.Errorf("error: invalid unit GPU (%v > %v > %v fails)", +// validationConfig.MaxUnitGPU, u.Units.Value(), validationConfig.MinUnitGPU) +// } +// +// if u.Units.Value() == 0 && len(u.Attributes) > 0 { +// return sdk.Int{}, fmt.Errorf("error: invalid GPU state. 
attributes cannot be present if units == 0") +// } +// +// if err := u.Attributes.Validate(); err != nil { +// return sdk.Int{}, fmt.Errorf("error: invalid GPU attributes: %w", err) +// } +// +// return u.Units.Val, nil +// } +// +// func validateMemory(u *types.Memory) (sdk.Int, error) { +// if u == nil { +// return sdk.Int{}, fmt.Errorf("error: invalid unit memory, cannot be nil") +// } +// if (u.Quantity.Value() > validationConfig.MaxUnitMemory) || (u.Quantity.Value() < validationConfig.MinUnitMemory) { +// return sdk.Int{}, fmt.Errorf("error: invalid unit memory (%v > %v > %v fails)", +// validationConfig.MaxUnitMemory, u.Quantity.Value(), validationConfig.MinUnitMemory) +// } +// +// if err := u.Attributes.Validate(); err != nil { +// return sdk.Int{}, fmt.Errorf("error: invalid Memory attributes: %w", err) +// } +// +// return u.Quantity.Val, nil +// } +// +// func validateStorage(u types.Volumes) ([]sdk.Int, error) { +// if u == nil { +// return nil, fmt.Errorf("error: invalid unit storage, cannot be nil") +// } +// +// storage := make([]sdk.Int, 0, len(u)) +// +// for i := range u { +// if (u[i].Quantity.Value() > validationConfig.MaxUnitStorage) || (u[i].Quantity.Value() < validationConfig.MinUnitStorage) { +// return nil, fmt.Errorf("error: invalid unit storage (%v > %v > %v fails)", +// validationConfig.MaxUnitStorage, u[i].Quantity.Value(), validationConfig.MinUnitStorage) +// } +// +// if err := u[i].Attributes.Validate(); err != nil { +// return []sdk.Int{}, fmt.Errorf("error: invalid Storage attributes: %w", err) +// } +// +// storage = append(storage, u[i].Quantity.Val) +// } +// +// return storage, nil +// } diff --git a/go/node/deployment/v1beta3/resource_list_validation_test.go b/go/node/deployment/v1beta4/resource_list_validation_test.go similarity index 99% rename from go/node/deployment/v1beta3/resource_list_validation_test.go rename to go/node/deployment/v1beta4/resource_list_validation_test.go index df40e749..b7e91a00 100644 --- a/go/node/deployment/v1beta3/resource_list_validation_test.go +++ b/go/node/deployment/v1beta4/resource_list_validation_test.go @@ -1,4 +1,4 @@ -package v1beta3 +package v1beta4 // func TestValidateCPUNil(t *testing.T) { // _, err := validateCPU(nil) diff --git a/go/node/deployment/v1beta4/resourcelimits.go b/go/node/deployment/v1beta4/resourcelimits.go new file mode 100644 index 00000000..a5258025 --- /dev/null +++ b/go/node/deployment/v1beta4/resourcelimits.go @@ -0,0 +1,38 @@ +package v1beta4 + +import ( + sdkmath "cosmossdk.io/math" +) + +type resourceLimits struct { + cpu sdkmath.Int + gpu sdkmath.Int + memory sdkmath.Int + storage []sdkmath.Int +} + +func newLimits() resourceLimits { + return resourceLimits{ + cpu: sdkmath.ZeroInt(), + gpu: sdkmath.ZeroInt(), + memory: sdkmath.ZeroInt(), + } +} + +func (u *resourceLimits) add(rhs resourceLimits) { + u.cpu = u.cpu.Add(rhs.cpu) + u.gpu = u.gpu.Add(rhs.gpu) + u.memory = u.memory.Add(rhs.memory) + + // u.storage = u.storage.Add(rhs.storage) +} + +func (u *resourceLimits) mul(count uint32) { + u.cpu = u.cpu.MulRaw(int64(count)) + u.gpu = u.gpu.MulRaw(int64(count)) + u.memory = u.memory.MulRaw(int64(count)) + + for i := range u.storage { + u.storage[i] = u.storage[i].MulRaw(int64(count)) + } +} diff --git a/go/node/deployment/v1beta3/resourceunit.go b/go/node/deployment/v1beta4/resourceunit.go similarity index 96% rename from go/node/deployment/v1beta3/resourceunit.go rename to go/node/deployment/v1beta4/resourceunit.go index 620deee9..f1289812 100644 --- 
a/go/node/deployment/v1beta3/resourceunit.go +++ b/go/node/deployment/v1beta4/resourceunit.go @@ -1,11 +1,12 @@ -package v1beta3 +package v1beta4 import ( "fmt" + sdkmath "cosmossdk.io/math" sdk "github.com/cosmos/cosmos-sdk/types" - types "github.com/akash-network/akash-api/go/node/types/v1beta3" + types "pkg.akt.dev/go/node/types/resources/v1beta4" ) // FullPrice method returns full price of resource @@ -41,7 +42,7 @@ func (r *ResourceUnit) totalResources() resourceLimits { limits.gpu = limits.gpu.Add(r.GPU.Units.Val) limits.memory = limits.memory.Add(r.Memory.Quantity.Val) - storage := make([]sdk.Int, 0, len(r.Storage)) + storage := make([]sdkmath.Int, 0, len(r.Storage)) for _, vol := range r.Storage { storage = append(storage, vol.Quantity.Val) diff --git a/go/node/deployment/v1beta3/resourceunit.pb.go b/go/node/deployment/v1beta4/resourceunit.pb.go similarity index 78% rename from go/node/deployment/v1beta3/resourceunit.pb.go rename to go/node/deployment/v1beta4/resourceunit.pb.go index 39c5cdbc..179b0d33 100644 --- a/go/node/deployment/v1beta3/resourceunit.pb.go +++ b/go/node/deployment/v1beta4/resourceunit.pb.go @@ -1,17 +1,17 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/deployment/v1beta3/resourceunit.proto +// source: akash/deployment/v1beta4/resourceunit.proto -package v1beta3 +package v1beta4 import ( fmt "fmt" - v1beta3 "github.com/akash-network/akash-api/go/node/types/v1beta3" types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" + v1beta4 "pkg.akt.dev/go/node/types/resources/v1beta4" ) // Reference imports to suppress errors if they are not otherwise used. 
@@ -27,7 +27,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // ResourceUnit extends Resources and adds Count along with the Price type ResourceUnit struct { - v1beta3.Resources `protobuf:"bytes,1,opt,name=resource,proto3,embedded=resource" json:"resource" yaml:"resource"` + v1beta4.Resources `protobuf:"bytes,1,opt,name=resource,proto3,embedded=resource" json:"resource" yaml:"resource"` Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count" yaml:"count"` Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` } @@ -36,7 +36,7 @@ func (m *ResourceUnit) Reset() { *m = ResourceUnit{} } func (m *ResourceUnit) String() string { return proto.CompactTextString(m) } func (*ResourceUnit) ProtoMessage() {} func (*ResourceUnit) Descriptor() ([]byte, []int) { - return fileDescriptor_fb431767d5aa2e0f, []int{0} + return fileDescriptor_d48c54f3414ff9e1, []int{0} } func (m *ResourceUnit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -80,37 +80,37 @@ func (m *ResourceUnit) GetPrice() types.DecCoin { } func init() { - proto.RegisterType((*ResourceUnit)(nil), "akash.deployment.v1beta3.ResourceUnit") + proto.RegisterType((*ResourceUnit)(nil), "akash.deployment.v1beta4.ResourceUnit") } func init() { - proto.RegisterFile("akash/deployment/v1beta3/resourceunit.proto", fileDescriptor_fb431767d5aa2e0f) + proto.RegisterFile("akash/deployment/v1beta4/resourceunit.proto", fileDescriptor_d48c54f3414ff9e1) } -var fileDescriptor_fb431767d5aa2e0f = []byte{ - // 344 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xc1, 0x4a, 0xeb, 0x40, - 0x14, 0x86, 0x33, 0xf7, 0xde, 0x5e, 0x2e, 0xb9, 0x15, 0x21, 0xb8, 0x88, 0xc5, 0x26, 0x25, 0x1b, - 0x0b, 0xe2, 0x0c, 0xb5, 0xbb, 0x82, 0x9b, 0xe8, 0x0b, 0x18, 0x10, 0xc1, 0x5d, 0x92, 0x0e, 0xe9, - 0xd0, 0x66, 0x4e, 0xc8, 0x4c, 0x94, 0xbe, 0x85, 0x8f, 0xe0, 0xe3, 0x74, 0xd9, 0xa5, 0xab, 0x20, - 0xed, 0x46, 0xba, 0xec, 0xda, 0x85, 0x64, 0x26, 0x69, 0x0b, 0xba, 0xcb, 0xf9, 0xcf, 0xf7, 0xff, - 0xfc, 0x27, 0x63, 0x5e, 0x84, 0xd3, 0x50, 0x4c, 0xc8, 0x98, 0x66, 0x33, 0x98, 0xa7, 0x94, 0x4b, - 0xf2, 0x34, 0x88, 0xa8, 0x0c, 0x87, 0x24, 0xa7, 0x02, 0x8a, 0x3c, 0xa6, 0x05, 0x67, 0x12, 0x67, - 0x39, 0x48, 0xb0, 0x6c, 0x05, 0xe3, 0x3d, 0x8c, 0x6b, 0xb8, 0x73, 0x92, 0x40, 0x02, 0x0a, 0x22, - 0xd5, 0x97, 0xe6, 0x3b, 0x9e, 0x0e, 0x8f, 0x42, 0x41, 0xbf, 0xc5, 0x8a, 0x9a, 0x71, 0x62, 0x10, - 0x29, 0x88, 0x43, 0x68, 0x40, 0x62, 0x60, 0x5c, 0xef, 0xbd, 0x4f, 0x64, 0xb6, 0x83, 0xda, 0x73, - 0xcf, 0x99, 0xb4, 0x22, 0xf3, 0x5f, 0x93, 0x61, 0xa3, 0x1e, 0xea, 0xff, 0xbf, 0xea, 0x62, 0xdd, - 0xab, 0x8a, 0x68, 0x1a, 0xe1, 0xc6, 0x23, 0xfc, 0xf3, 0x45, 0xe9, 0x1a, 0xcb, 0xd2, 0x45, 0x9b, - 0xd2, 0xdd, 0x59, 0xb7, 0xa5, 0x7b, 0x3c, 0x0f, 0xd3, 0xd9, 0xc8, 0x6b, 0x14, 0x2f, 0xd8, 0x2d, - 0x2d, 0x62, 0xb6, 0x62, 0x28, 0xb8, 0xb4, 0x7f, 0xf5, 0x50, 0xff, 0xc8, 0x3f, 0xdd, 0x94, 0xae, - 0x16, 0xb6, 0xa5, 0xdb, 0xd6, 0x36, 0x35, 0x7a, 0x81, 0x96, 0xad, 0x3b, 0xb3, 0x95, 0xe5, 0x2c, - 0xa6, 0xf6, 0x6f, 0xd5, 0xe8, 0x0c, 0xeb, 0xab, 0x0e, 0x2b, 0x0d, 0xf0, 0x2d, 0x8d, 0x6f, 0x80, - 0x71, 0xbf, 0x5b, 0x15, 0xaa, 0x22, 0x95, 0x65, 0x1f, 0xa9, 0x46, 0x2f, 0xd0, 0xf2, 0xe8, 0xcf, - 0xc7, 0xab, 0x8b, 0xfc, 0x87, 0xc5, 0xca, 0x41, 0xcb, 0x95, 0x83, 0xde, 0x57, 0x0e, 0x7a, 0x59, - 0x3b, 0xc6, 0x72, 0xed, 0x18, 0x6f, 0x6b, 0xc7, 0x78, 0xbc, 0x4e, 0x98, 0x9c, 0x14, 0x11, 0x8e, - 0x21, 0x25, 0xea, 0xfe, 0x4b, 0x4e, 0xe5, 0x33, 0xe4, 0xd3, 0x7a, 0x0a, 0x33, 0x46, 0x12, 0x20, - 
0x1c, 0xc6, 0xf4, 0x87, 0xe7, 0x8d, 0xfe, 0xaa, 0xdf, 0x3b, 0xfc, 0x0a, 0x00, 0x00, 0xff, 0xff, - 0xaf, 0x0a, 0x29, 0xb9, 0x01, 0x02, 0x00, 0x00, +var fileDescriptor_d48c54f3414ff9e1 = []byte{ + // 340 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xb1, 0x4e, 0xf3, 0x30, + 0x14, 0x85, 0xe3, 0xfe, 0x7f, 0x11, 0x0a, 0x45, 0x48, 0x11, 0x43, 0xa8, 0xc0, 0xae, 0x32, 0xd0, + 0x4a, 0x20, 0x5b, 0x05, 0xa6, 0x4e, 0x28, 0xf0, 0x02, 0x44, 0x62, 0x61, 0x4b, 0x53, 0x2b, 0x44, + 0x6d, 0x7d, 0xa3, 0xd8, 0xad, 0xd4, 0x17, 0x60, 0xe6, 0x11, 0x78, 0x9c, 0x8e, 0x1d, 0x99, 0x22, + 0xd4, 0x2e, 0xa8, 0x63, 0x9f, 0x00, 0xc5, 0x4e, 0x9a, 0x01, 0xb6, 0xf8, 0xdc, 0xef, 0x9c, 0x7b, + 0x74, 0x63, 0x5f, 0x85, 0xe3, 0x50, 0xbe, 0xb2, 0x11, 0x4f, 0x27, 0xb0, 0x98, 0x72, 0xa1, 0xd8, + 0xbc, 0x3f, 0xe4, 0x2a, 0xbc, 0x63, 0x19, 0x97, 0x30, 0xcb, 0x22, 0x3e, 0x13, 0x89, 0xa2, 0x69, + 0x06, 0x0a, 0x1c, 0x57, 0xc3, 0xb4, 0x86, 0x69, 0x09, 0xb7, 0x4f, 0x63, 0x88, 0x41, 0x43, 0xac, + 0xf8, 0x32, 0x7c, 0x1b, 0x47, 0x20, 0xa7, 0x20, 0xd9, 0x30, 0x94, 0xbc, 0xcc, 0xed, 0xb3, 0x08, + 0x12, 0x51, 0xce, 0xaf, 0xcd, 0x72, 0x3d, 0xae, 0xd6, 0xc9, 0x5f, 0x05, 0xa4, 0xa1, 0xbd, 0xb7, + 0x86, 0xdd, 0x0a, 0x4a, 0xed, 0x59, 0x24, 0xca, 0x99, 0xd8, 0x87, 0x15, 0xe3, 0xa2, 0x0e, 0xea, + 0x1d, 0xdd, 0x74, 0xa9, 0x69, 0x58, 0x24, 0xd2, 0xda, 0x5f, 0x26, 0xd2, 0xca, 0x2d, 0xfd, 0xee, + 0x32, 0x27, 0xd6, 0x2a, 0x27, 0x68, 0x9b, 0x93, 0x7d, 0xc8, 0x2e, 0x27, 0x27, 0x8b, 0x70, 0x3a, + 0x19, 0x78, 0x95, 0xe2, 0x05, 0xfb, 0xa1, 0xc3, 0xec, 0x66, 0x04, 0x33, 0xa1, 0xdc, 0x46, 0x07, + 0xf5, 0x8e, 0xfd, 0xb3, 0x6d, 0x4e, 0x8c, 0xb0, 0xcb, 0x49, 0xcb, 0xd8, 0xf4, 0xd3, 0x0b, 0x8c, + 0xec, 0x3c, 0xd9, 0xcd, 0x34, 0x4b, 0x22, 0xee, 0xfe, 0xd3, 0xdd, 0xce, 0xa9, 0xb9, 0x86, 0x29, + 0x57, 0x5e, 0x83, 0x3e, 0xf2, 0xe8, 0x01, 0x12, 0xe1, 0x5f, 0x14, 0x85, 0x8a, 0x48, 0x6d, 0xa9, + 0x23, 0xf5, 0xd3, 0x0b, 0x8c, 0x3c, 0xf8, 0xff, 0xfd, 0x41, 0x90, 0x7f, 0xbf, 0x5c, 0x63, 0xb4, + 0x5a, 0x63, 0xf4, 0xb5, 0xc6, 0xe8, 0x7d, 0x83, 0xad, 0xd5, 0x06, 0x5b, 0x9f, 0x1b, 0x6c, 0xbd, + 0x5c, 0xa6, 0xe3, 0x98, 0x86, 0x63, 0x45, 0x47, 0x7c, 0xce, 0x62, 0x60, 0x02, 0x46, 0xfc, 0x8f, + 0x7f, 0x3b, 0x3c, 0xd0, 0x17, 0xbd, 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x6a, 0xd5, 0x42, + 0xfe, 0x01, 0x00, 0x00, } func (this *ResourceUnit) Equal(that interface{}) bool { diff --git a/go/node/deployment/v1beta3/resourceunits.go b/go/node/deployment/v1beta4/resourceunits.go similarity index 99% rename from go/node/deployment/v1beta3/resourceunits.go rename to go/node/deployment/v1beta4/resourceunits.go index 4c24b923..290ee4c0 100644 --- a/go/node/deployment/v1beta3/resourceunits.go +++ b/go/node/deployment/v1beta4/resourceunits.go @@ -1,4 +1,4 @@ -package v1beta3 +package v1beta4 import ( "fmt" diff --git a/go/node/deployment/v1beta4/service.pb.go b/go/node/deployment/v1beta4/service.pb.go new file mode 100644 index 00000000..6d6468e4 --- /dev/null +++ b/go/node/deployment/v1beta4/service.pb.go @@ -0,0 +1,413 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: akash/deployment/v1beta4/service.proto + +package v1beta4 + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" + v1 "pkg.akt.dev/go/node/deployment/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/service.proto", fileDescriptor_2013a754c1800268) +} + +var fileDescriptor_2013a754c1800268 = []byte{ + // 367 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0xd3, 0xc1, 0x4e, 0xfa, 0x30, + 0x1c, 0xc0, 0x71, 0x96, 0x7f, 0xfe, 0x9a, 0x34, 0x26, 0xe2, 0x2e, 0x9a, 0x26, 0xee, 0x08, 0x6a, + 0xb0, 0x0d, 0x8a, 0x77, 0x23, 0x24, 0x9e, 0x48, 0x88, 0xc6, 0x8b, 0xb7, 0xc2, 0xea, 0x24, 0x30, + 0x5a, 0xf7, 0x2b, 0x04, 0x6f, 0x3e, 0x02, 0x8f, 0xe2, 0x63, 0x78, 0xe4, 0xe8, 0xd1, 0xc0, 0xc1, + 0xd7, 0x30, 0x0c, 0x5d, 0xc7, 0xec, 0xdc, 0x76, 0x85, 0x0f, 0xbf, 0x6f, 0x4b, 0x5b, 0x54, 0x61, + 0x03, 0x06, 0x8f, 0xd4, 0xe5, 0x72, 0x28, 0x9e, 0x7d, 0x3e, 0x52, 0x74, 0x52, 0xef, 0x72, 0xc5, + 0x1a, 0x14, 0x78, 0x30, 0xe9, 0xf7, 0x38, 0x91, 0x81, 0x50, 0xc2, 0x3e, 0x08, 0x1d, 0xd1, 0x8e, + 0x7c, 0x3b, 0x7c, 0x68, 0x98, 0x40, 0x7d, 0xf0, 0xd6, 0x3f, 0xc4, 0xb5, 0xd4, 0x80, 0xfe, 0x48, + 0xeb, 0x6a, 0xaa, 0xf6, 0x02, 0x31, 0x96, 0x1a, 0x1e, 0xa5, 0x42, 0xc9, 0x02, 0xe6, 0x83, 0x96, + 0xfb, 0x3d, 0x01, 0xbe, 0x80, 0xd5, 0x92, 0x36, 0x56, 0x76, 0x36, 0xdb, 0x46, 0xff, 0xda, 0xe0, + 0xd9, 0x53, 0x54, 0x6e, 0x06, 0x9c, 0x29, 0xde, 0x8a, 0x86, 0xd9, 0xa7, 0x24, 0x6d, 0xbf, 0xa4, + 0x0d, 0x5e, 0x92, 0xe3, 0x8b, 0x42, 0xfc, 0x86, 0x83, 0x14, 0x23, 0xe0, 0xf6, 0x13, 0xda, 0x6b, + 0x71, 0x29, 0xa0, 0xaf, 0x62, 0xe9, 0x63, 0xd3, 0xac, 0xd5, 0x98, 0x5f, 0x14, 0xd7, 0x73, 0xd3, + 0x28, 0x39, 0x45, 0xe5, 0x3b, 0xe9, 0x16, 0xd9, 0x6c, 0x92, 0x67, 0x6c, 0x36, 0xc9, 0xa3, 0xf2, + 0x18, 0xed, 0x36, 0x87, 0x02, 0xe2, 0xe1, 0xda, 0xdf, 0x7f, 0xdb, 0xa6, 0xc6, 0x8d, 0x22, 0x3a, + 0xca, 0x3e, 0x20, 0x14, 0x7e, 0x75, 0xbd, 0xba, 0x3f, 0x76, 0x35, 0x7b, 0x46, 0x08, 0x31, 0xcd, + 0x09, 0xe3, 0x9d, 0x0e, 0x1b, 0xe7, 0xeb, 0x68, 0x98, 0xd1, 0xd1, 0x30, 0xde, 0xb9, 0x55, 0x2c, + 0x50, 0x79, 0x3a, 0x1a, 0x66, 0x74, 0x34, 0x8c, 0x3a, 0x43, 0xb4, 0xb3, 0x3e, 0xca, 0x4e, 0xf8, + 0x9e, 0xcc, 0xd7, 0x32, 0x71, 0xea, 0x6b, 0x6a, 0xbe, 0x96, 0x46, 0xfa, 0x53, 0xc3, 0xff, 0x5f, + 0x3e, 0x5f, 0x4f, 0xac, 0xab, 0xcb, 0xb7, 0x85, 0x63, 0xcd, 0x17, 0x8e, 0xf5, 0xb1, 0x70, 0xac, + 0xd9, 0xd2, 0x29, 0xcd, 0x97, 0x4e, 0xe9, 0x7d, 0xe9, 0x94, 0xee, 0x2b, 0x72, 0xe0, 0x11, 0x36, + 0x50, 0xc4, 0xe5, 0x13, 0xea, 0x09, 0x3a, 0x12, 0x2e, 0x37, 0xbc, 0xfe, 0xee, 0x56, 0xf8, 0xb6, + 0xcf, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x20, 0x93, 0x23, 0x4c, 0xd8, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // CreateDeployment defines a method to create new deployment given proper inputs. + CreateDeployment(ctx context.Context, in *MsgCreateDeployment, opts ...grpc.CallOption) (*MsgCreateDeploymentResponse, error) + // DepositDeployment deposits more funds into the deployment account + DepositDeployment(ctx context.Context, in *v1.MsgDepositDeployment, opts ...grpc.CallOption) (*v1.MsgDepositDeploymentResponse, error) + // UpdateDeployment defines a method to update a deployment given proper inputs. + UpdateDeployment(ctx context.Context, in *MsgUpdateDeployment, opts ...grpc.CallOption) (*MsgUpdateDeploymentResponse, error) + // CloseDeployment defines a method to close a deployment given proper inputs. + CloseDeployment(ctx context.Context, in *MsgCloseDeployment, opts ...grpc.CallOption) (*MsgCloseDeploymentResponse, error) + // CloseGroup defines a method to close a group of a deployment given proper inputs. + CloseGroup(ctx context.Context, in *MsgCloseGroup, opts ...grpc.CallOption) (*MsgCloseGroupResponse, error) + // PauseGroup defines a method to close a group of a deployment given proper inputs. + PauseGroup(ctx context.Context, in *MsgPauseGroup, opts ...grpc.CallOption) (*MsgPauseGroupResponse, error) + // StartGroup defines a method to close a group of a deployment given proper inputs. + StartGroup(ctx context.Context, in *MsgStartGroup, opts ...grpc.CallOption) (*MsgStartGroupResponse, error) + // UpdateParams defines a governance operation for updating the x/deployment module + // parameters. The authority is hard-coded to the x/gov module account. + // + // Since: akash v1.0.0 + UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) CreateDeployment(ctx context.Context, in *MsgCreateDeployment, opts ...grpc.CallOption) (*MsgCreateDeploymentResponse, error) { + out := new(MsgCreateDeploymentResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Msg/CreateDeployment", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) DepositDeployment(ctx context.Context, in *v1.MsgDepositDeployment, opts ...grpc.CallOption) (*v1.MsgDepositDeploymentResponse, error) { + out := new(v1.MsgDepositDeploymentResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Msg/DepositDeployment", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdateDeployment(ctx context.Context, in *MsgUpdateDeployment, opts ...grpc.CallOption) (*MsgUpdateDeploymentResponse, error) { + out := new(MsgUpdateDeploymentResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Msg/UpdateDeployment", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) CloseDeployment(ctx context.Context, in *MsgCloseDeployment, opts ...grpc.CallOption) (*MsgCloseDeploymentResponse, error) { + out := new(MsgCloseDeploymentResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Msg/CloseDeployment", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) CloseGroup(ctx context.Context, in *MsgCloseGroup, opts ...grpc.CallOption) (*MsgCloseGroupResponse, error) { + out := new(MsgCloseGroupResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Msg/CloseGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) PauseGroup(ctx context.Context, in *MsgPauseGroup, opts ...grpc.CallOption) (*MsgPauseGroupResponse, error) { + out := new(MsgPauseGroupResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Msg/PauseGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) StartGroup(ctx context.Context, in *MsgStartGroup, opts ...grpc.CallOption) (*MsgStartGroupResponse, error) { + out := new(MsgStartGroupResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Msg/StartGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) { + out := new(MsgUpdateParamsResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Msg/UpdateParams", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // CreateDeployment defines a method to create new deployment given proper inputs. + CreateDeployment(context.Context, *MsgCreateDeployment) (*MsgCreateDeploymentResponse, error) + // DepositDeployment deposits more funds into the deployment account + DepositDeployment(context.Context, *v1.MsgDepositDeployment) (*v1.MsgDepositDeploymentResponse, error) + // UpdateDeployment defines a method to update a deployment given proper inputs. + UpdateDeployment(context.Context, *MsgUpdateDeployment) (*MsgUpdateDeploymentResponse, error) + // CloseDeployment defines a method to close a deployment given proper inputs. + CloseDeployment(context.Context, *MsgCloseDeployment) (*MsgCloseDeploymentResponse, error) + // CloseGroup defines a method to close a group of a deployment given proper inputs. + CloseGroup(context.Context, *MsgCloseGroup) (*MsgCloseGroupResponse, error) + // PauseGroup defines a method to close a group of a deployment given proper inputs. + PauseGroup(context.Context, *MsgPauseGroup) (*MsgPauseGroupResponse, error) + // StartGroup defines a method to close a group of a deployment given proper inputs. + StartGroup(context.Context, *MsgStartGroup) (*MsgStartGroupResponse, error) + // UpdateParams defines a governance operation for updating the x/deployment module + // parameters. The authority is hard-coded to the x/gov module account. + // + // Since: akash v1.0.0 + UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
+type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) CreateDeployment(ctx context.Context, req *MsgCreateDeployment) (*MsgCreateDeploymentResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateDeployment not implemented") +} +func (*UnimplementedMsgServer) DepositDeployment(ctx context.Context, req *v1.MsgDepositDeployment) (*v1.MsgDepositDeploymentResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DepositDeployment not implemented") +} +func (*UnimplementedMsgServer) UpdateDeployment(ctx context.Context, req *MsgUpdateDeployment) (*MsgUpdateDeploymentResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateDeployment not implemented") +} +func (*UnimplementedMsgServer) CloseDeployment(ctx context.Context, req *MsgCloseDeployment) (*MsgCloseDeploymentResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CloseDeployment not implemented") +} +func (*UnimplementedMsgServer) CloseGroup(ctx context.Context, req *MsgCloseGroup) (*MsgCloseGroupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CloseGroup not implemented") +} +func (*UnimplementedMsgServer) PauseGroup(ctx context.Context, req *MsgPauseGroup) (*MsgPauseGroupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PauseGroup not implemented") +} +func (*UnimplementedMsgServer) StartGroup(ctx context.Context, req *MsgStartGroup) (*MsgStartGroupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method StartGroup not implemented") +} +func (*UnimplementedMsgServer) UpdateParams(ctx context.Context, req *MsgUpdateParams) (*MsgUpdateParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_CreateDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCreateDeployment) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CreateDeployment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Msg/CreateDeployment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CreateDeployment(ctx, req.(*MsgCreateDeployment)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_DepositDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(v1.MsgDepositDeployment) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).DepositDeployment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Msg/DepositDeployment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).DepositDeployment(ctx, req.(*v1.MsgDepositDeployment)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdateDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateDeployment) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(MsgServer).UpdateDeployment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Msg/UpdateDeployment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateDeployment(ctx, req.(*MsgUpdateDeployment)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_CloseDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCloseDeployment) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CloseDeployment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Msg/CloseDeployment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CloseDeployment(ctx, req.(*MsgCloseDeployment)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_CloseGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCloseGroup) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CloseGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Msg/CloseGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CloseGroup(ctx, req.(*MsgCloseGroup)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_PauseGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgPauseGroup) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).PauseGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Msg/PauseGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).PauseGroup(ctx, req.(*MsgPauseGroup)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_StartGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgStartGroup) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).StartGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Msg/StartGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).StartGroup(ctx, req.(*MsgStartGroup)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateParams) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateParams(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Msg/UpdateParams", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateParams(ctx, req.(*MsgUpdateParams)) + } + return interceptor(ctx, in, info, handler) +} + +var 
_Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.deployment.v1beta4.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateDeployment", + Handler: _Msg_CreateDeployment_Handler, + }, + { + MethodName: "DepositDeployment", + Handler: _Msg_DepositDeployment_Handler, + }, + { + MethodName: "UpdateDeployment", + Handler: _Msg_UpdateDeployment_Handler, + }, + { + MethodName: "CloseDeployment", + Handler: _Msg_CloseDeployment_Handler, + }, + { + MethodName: "CloseGroup", + Handler: _Msg_CloseGroup_Handler, + }, + { + MethodName: "PauseGroup", + Handler: _Msg_PauseGroup_Handler, + }, + { + MethodName: "StartGroup", + Handler: _Msg_StartGroup_Handler, + }, + { + MethodName: "UpdateParams", + Handler: _Msg_UpdateParams_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/deployment/v1beta4/service.proto", +} diff --git a/go/node/deployment/v1beta4/types.go b/go/node/deployment/v1beta4/types.go new file mode 100644 index 00000000..9589ff07 --- /dev/null +++ b/go/node/deployment/v1beta4/types.go @@ -0,0 +1,115 @@ +package v1beta4 + +import ( + "bytes" + + v1 "pkg.akt.dev/go/node/deployment/v1" + attr "pkg.akt.dev/go/node/types/attributes/v1" +) + +type attributesMatching map[string]attr.Attributes + +const ( + GatewayVersion = "v1beta4" + + // ManifestHashLength is the length of manifest hash + ManifestHashLength = 32 + + // DefaultOrderBiddingDuration is the default time limit for an Order being active. + // After the duration, the Order is automatically closed. + // ( 24(hr) * 3600(seconds per hour) ) / 7s-Block + DefaultOrderBiddingDuration = int64(12342) + + // MaxBiddingDuration is roughly 30 days of block height + MaxBiddingDuration = DefaultOrderBiddingDuration * int64(30) +) + +// MatchAttributes method compares provided attributes with specific group attributes +func (g *GroupSpec) MatchAttributes(at attr.Attributes) bool { + return attr.AttributesSubsetOf(g.Requirements.Attributes, at) +} + +// ValidateClosable provides error response if group is already closed, +// and thus should not be closed again, else nil. 
+func (g Group) ValidateClosable() error {
+	switch g.State {
+	case GroupClosed:
+		return v1.ErrGroupClosed
+	default:
+		return nil
+	}
+}
+
+// ValidatePausable provides error response if group is not pausable
+func (g Group) ValidatePausable() error {
+	switch g.State {
+	case GroupClosed:
+		return v1.ErrGroupClosed
+	case GroupPaused:
+		return v1.ErrGroupPaused
+	default:
+		return nil
+	}
+}
+
+// ValidateStartable provides error response if group is not startable
+func (g Group) ValidateStartable() error {
+	switch g.State {
+	case GroupClosed:
+		return v1.ErrGroupClosed
+	case GroupOpen:
+		return v1.ErrGroupOpen
+	default:
+		return nil
+	}
+}
+
+// GetName method returns group name
+func (g Group) GetName() string {
+	return g.GroupSpec.Name
+}
+
+// GetResourceUnits method returns resources list in group
+func (g Group) GetResourceUnits() ResourceUnits {
+	return g.GroupSpec.Resources
+}
+
+// DeploymentResponses is a collection of DeploymentResponse
+type DeploymentResponses []QueryDeploymentResponse
+
+func (ds DeploymentResponses) String() string {
+	var buf bytes.Buffer
+
+	const sep = "\n\n"
+
+	for _, d := range ds {
+		buf.WriteString(d.String())
+		buf.WriteString(sep)
+	}
+
+	if len(ds) > 0 {
+		buf.Truncate(buf.Len() - len(sep))
+	}
+
+	return buf.String()
+}
+
+// Accept returns whether the given deployment matches the filters and requested state
+func (filters DeploymentFilters) Accept(obj v1.Deployment, stateVal v1.Deployment_State) bool {
+	// Checking owner filter
+	if filters.Owner != "" && filters.Owner != obj.ID.Owner {
+		return false
+	}
+
+	// Checking dseq filter
+	if filters.DSeq != 0 && filters.DSeq != obj.ID.DSeq {
+		return false
+	}
+
+	// Checking state filter
+	if stateVal != 0 && stateVal != obj.State {
+		return false
+	}
+
+	return true
+}
diff --git a/go/node/deployment/v1beta4/types_test.go b/go/node/deployment/v1beta4/types_test.go
new file mode 100644
index 00000000..315413b3
--- /dev/null
+++ b/go/node/deployment/v1beta4/types_test.go
@@ -0,0 +1,461 @@
+package v1beta4_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+
+	v1 "pkg.akt.dev/go/node/deployment/v1"
+	attr "pkg.akt.dev/go/node/types/attributes/v1"
+	tutil "pkg.akt.dev/go/testutil"
+	testutil "pkg.akt.dev/go/testutil/v1beta3"
+
+	atypes "pkg.akt.dev/go/node/audit/v1"
+	types "pkg.akt.dev/go/node/deployment/v1beta4"
+)
+
+type gStateTest struct {
+	state               types.Group_State
+	expValidateClosable error
+}
+
+func TestGroupState(t *testing.T) {
+	tests := []gStateTest{
+		{
+			state: types.GroupOpen,
+		},
+		{
+			state: types.GroupOpen,
+		},
+		{
+			state: types.GroupInsufficientFunds,
+		},
+		{
+			state:               types.GroupClosed,
+			expValidateClosable: v1.ErrGroupClosed,
+		},
+		{
+			state: types.Group_State(99),
+		},
+	}
+
+	for _, test := range tests {
+		group := types.Group{
+			ID:    testutil.GroupID(t),
+			State: test.state,
+		}
+
+		assert.Equal(t, group.ValidateClosable(), test.expValidateClosable, group.State)
+	}
+}
+
+// func TestDeploymentVersionAttributeLifecycle(t *testing.T) {
+// 	d := testutil.Deployment(t)
+//
+// 	t.Run("deployment created", func(t *testing.T) {
+// 		edc := types.NewEventDeploymentCreated(d.GetID(), d.Hash)
+// 		sdkEvent := edc.ToSDKEvent()
+// 		strEvent := sdk.StringifyEvent(abci.Event(sdkEvent))
+//
+// 		ev, err := sdkutil.ParseEvent(strEvent)
+// 		require.NoError(t, err)
+//
+// 		hashString, err := types.ParseEVDeploymentHash(ev.Attributes)
+// 		require.NoError(t, err)
+// 		assert.Equal(t, d.Hash, hashString)
+// 	})
+// +// t.Run("deployment updated", func(t *testing.T) { +// edu := types.NewEventDeploymentUpdated(d.GetID(), d.GetHash()) +// +// sdkEvent := edu.ToSDKEvent() +// strEvent := sdk.StringifyEvent(abci.Event(sdkEvent)) +// +// ev, err := sdkutil.ParseEvent(strEvent) +// require.NoError(t, err) +// +// hashString, err := types.ParseEVDeploymentHash(ev.Attributes) +// require.NoError(t, err) +// assert.Equal(t, d.Hash, hashString) +// }) +// +// t.Run("deployment closed error", func(t *testing.T) { +// edc := types.NewEventDeploymentClosed(d.GetID()) +// +// sdkEvent := edc.ToSDKEvent() +// strEvent := sdk.StringifyEvent(abci.Event(sdkEvent)) +// +// ev, err := sdkutil.ParseEvent(strEvent) +// require.NoError(t, err) +// +// hashString, err := types.ParseEVDeploymentHash(ev.Attributes) +// require.Error(t, err) +// assert.NotEqual(t, d.Hash, hashString) +// }) +// } + +func TestGroupSpecValidation(t *testing.T) { + tests := []struct { + desc string + gspec types.GroupSpec + expErr error + }{ + { + desc: "groupspec requires name", + gspec: types.GroupSpec{ + Name: "", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + }, + expErr: v1.ErrInvalidGroups, + }, + { + desc: "groupspec valid", + gspec: types.GroupSpec{ + Name: "hihi", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + }, + expErr: nil, + }, + } + + for _, test := range tests { + err := test.gspec.ValidateBasic() + if test.expErr != nil { + assert.Error(t, err, test.desc) + continue + } + assert.Equal(t, test.expErr, err, test.desc) + } +} + +func TestGroupPlacementRequirementsNoSigners(t *testing.T) { + group := types.GroupSpec{ + Name: "spec", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + } + + providerAttr := []atypes.AuditedProvider{ + { + Owner: "test", + Attributes: group.Requirements.Attributes, + }, + } + + require.True(t, group.MatchRequirements(providerAttr)) +} + +func TestGroupPlacementRequirementsSignerAllOf(t *testing.T) { + group := types.GroupSpec{ + Name: "spec", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + } + + group.Requirements.SignedBy.AllOf = append(group.Requirements.SignedBy.AllOf, "auditor1") + group.Requirements.SignedBy.AllOf = append(group.Requirements.SignedBy.AllOf, "auditor2") + + providerAttr := []atypes.AuditedProvider{ + { + Owner: "test", + Attributes: group.Requirements.Attributes, + }, + } + + require.False(t, group.MatchRequirements(providerAttr)) + + providerAttr = append(providerAttr, atypes.AuditedProvider{ + Owner: "test", + Auditor: "auditor2", + Attributes: group.Requirements.Attributes, + }) + + require.False(t, group.MatchRequirements(providerAttr)) + + providerAttr = append(providerAttr, atypes.AuditedProvider{ + Owner: "test", + Auditor: "auditor1", + Attributes: group.Requirements.Attributes, + }) + + require.True(t, group.MatchRequirements(providerAttr)) +} + +func TestGroupPlacementRequirementsSignerAnyOf(t *testing.T) { + group := types.GroupSpec{ + Name: "spec", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + } + + group.Requirements.SignedBy.AnyOf = append(group.Requirements.SignedBy.AnyOf, "auditor1") + + providerAttr := []atypes.AuditedProvider{ + { + Owner: "test", + Attributes: group.Requirements.Attributes, + }, + } + + require.False(t, group.MatchRequirements(providerAttr)) + + providerAttr = append(providerAttr, atypes.AuditedProvider{ + 
Owner: "test", + Auditor: "auditor2", + Attributes: group.Requirements.Attributes, + }) + + require.False(t, group.MatchRequirements(providerAttr)) + + providerAttr = append(providerAttr, atypes.AuditedProvider{ + Owner: "test", + Auditor: "auditor1", + Attributes: group.Requirements.Attributes, + }) + + require.True(t, group.MatchRequirements(providerAttr)) +} + +func TestGroupPlacementRequirementsSignerAllOfAnyOf(t *testing.T) { + group := types.GroupSpec{ + Name: "spec", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + } + + group.Requirements.SignedBy.AllOf = append(group.Requirements.SignedBy.AllOf, "auditor1") + group.Requirements.SignedBy.AllOf = append(group.Requirements.SignedBy.AllOf, "auditor2") + + group.Requirements.SignedBy.AnyOf = append(group.Requirements.SignedBy.AnyOf, "auditor3") + group.Requirements.SignedBy.AnyOf = append(group.Requirements.SignedBy.AnyOf, "auditor4") + + providerAttr := []atypes.AuditedProvider{ + { + Owner: "test", + Attributes: group.Requirements.Attributes, + }, + { + Owner: "test", + Auditor: "auditor3", + Attributes: group.Requirements.Attributes, + }, + { + Owner: "test", + Auditor: "auditor4", + Attributes: group.Requirements.Attributes, + }, + } + + require.False(t, group.MatchRequirements(providerAttr)) + + providerAttr = append(providerAttr, atypes.AuditedProvider{ + Owner: "test", + Auditor: "auditor2", + Attributes: group.Requirements.Attributes, + }) + + require.False(t, group.MatchRequirements(providerAttr)) + + providerAttr = append(providerAttr, atypes.AuditedProvider{ + Owner: "test", + Auditor: "auditor1", + Attributes: group.Requirements.Attributes, + }) + + require.True(t, group.MatchRequirements(providerAttr)) +} + +func TestGroupSpec_MatchResourcesAttributes(t *testing.T) { + group := types.GroupSpec{ + Name: "spec", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + } + + group.Resources[0].Storage[0].Attributes = attr.Attributes{ + attr.Attribute{ + Key: "persistent", + Value: "true", + }, + attr.Attribute{ + Key: "class", + Value: "default", + }, + } + + provAttributes := attr.Attributes{ + attr.Attribute{ + Key: "capabilities/storage/1/class", + Value: "default", + }, + attr.Attribute{ + Key: "capabilities/storage/1/persistent", + Value: "true", + }, + } + + prov2Attributes := attr.Attributes{ + attr.Attribute{ + Key: "capabilities/storage/1/class", + Value: "default", + }, + } + + prov3Attributes := attr.Attributes{ + attr.Attribute{ + Key: "capabilities/storage/1/class", + Value: "beta2", + }, + } + + match := group.MatchResourcesRequirements(provAttributes) + require.True(t, match) + match = group.MatchResourcesRequirements(prov2Attributes) + require.False(t, match) + match = group.MatchResourcesRequirements(prov3Attributes) + require.False(t, match) +} + +func TestGroupSpec_MatchGPUAttributes(t *testing.T) { + group := types.GroupSpec{ + Name: "spec", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + } + + group.Resources[0].GPU.Attributes = attr.Attributes{ + attr.Attribute{ + Key: "vendor/nvidia/model/a100", + Value: "true", + }, + } + + provAttributes := attr.Attributes{ + attr.Attribute{ + Key: "capabilities/storage/1/class", + Value: "default", + }, + attr.Attribute{ + Key: "capabilities/storage/1/persistent", + Value: "true", + }, + attr.Attribute{ + Key: "capabilities/gpu/vendor/nvidia/model/a100", + Value: "true", + }, + } + + prov2Attributes := attr.Attributes{ + 
attr.Attribute{ + Key: "capabilities/storage/1/class", + Value: "default", + }, + } + + prov3Attributes := attr.Attributes{ + attr.Attribute{ + Key: "capabilities/storage/1/class", + Value: "beta2", + }, + } + + match := group.MatchResourcesRequirements(provAttributes) + require.True(t, match) + match = group.MatchResourcesRequirements(prov2Attributes) + require.False(t, match) + match = group.MatchResourcesRequirements(prov3Attributes) + require.False(t, match) +} + +func TestGroupSpec_MatchGPUAttributesWildcard(t *testing.T) { + group := types.GroupSpec{ + Name: "spec", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + } + + group.Resources[0].GPU.Attributes = attr.Attributes{ + attr.Attribute{ + Key: "vendor/nvidia/model/*", + Value: "true", + }, + } + + provAttributes := attr.Attributes{ + attr.Attribute{ + Key: "capabilities/storage/1/class", + Value: "default", + }, + attr.Attribute{ + Key: "capabilities/storage/1/persistent", + Value: "true", + }, + attr.Attribute{ + Key: "capabilities/gpu/vendor/nvidia/model/a100", + Value: "true", + }, + } + + prov2Attributes := attr.Attributes{ + attr.Attribute{ + Key: "capabilities/storage/1/class", + Value: "default", + }, + } + + prov3Attributes := attr.Attributes{ + attr.Attribute{ + Key: "capabilities/storage/1/class", + Value: "beta2", + }, + } + + match := group.MatchResourcesRequirements(provAttributes) + require.True(t, match) + match = group.MatchResourcesRequirements(prov2Attributes) + require.False(t, match) + match = group.MatchResourcesRequirements(prov3Attributes) + require.False(t, match) +} + +func TestDepositDeploymentAuthorization_Accept(t *testing.T) { + limit := sdk.NewInt64Coin(tutil.CoinDenom, 333) + dda := v1.NewDepositAuthorization(limit) + + // Send the wrong type of message, expect an error + var msg sdk.Msg + response, err := dda.Accept(sdk.Context{}, msg) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid type") + require.Zero(t, response) + + // Try to deposit too much coin, expect an error + msg = v1.NewMsgDepositDeployment(testutil.DeploymentID(t), limit.Add(sdk.NewInt64Coin(tutil.CoinDenom, 1)), testutil.AccAddress(t).String()) + response, err = dda.Accept(sdk.Context{}, msg) + require.Error(t, err) + require.Contains(t, err.Error(), "requested amount is more than spend limit") + require.Zero(t, response) + + // Deposit 1 less than the limit, expect an updated deposit + msg = v1.NewMsgDepositDeployment(testutil.DeploymentID(t), limit.Sub(sdk.NewInt64Coin(tutil.CoinDenom, 1)), testutil.AccAddress(t).String()) + response, err = dda.Accept(sdk.Context{}, msg) + require.NoError(t, err) + require.True(t, response.Accept) + require.False(t, response.Delete) + + ok := false + dda, ok = response.Updated.(*v1.DepositAuthorization) + require.True(t, ok) + + // Deposit the limit (now 1), expect that it is not to be deleted + msg = v1.NewMsgDepositDeployment(testutil.DeploymentID(t), sdk.NewInt64Coin(tutil.CoinDenom, 1), testutil.AccAddress(t).String()) + response, err = dda.Accept(sdk.Context{}, msg) + require.NoError(t, err) + require.True(t, response.Accept) + require.False(t, response.Delete) +} diff --git a/go/node/deployment/v1beta4/validation_config.go b/go/node/deployment/v1beta4/validation_config.go new file mode 100644 index 00000000..5f0e4d8c --- /dev/null +++ b/go/node/deployment/v1beta4/validation_config.go @@ -0,0 +1,118 @@ +package v1beta4 + +import ( + "pkg.akt.dev/go/node/types/unit" +) + +const ( + maxUnitCPU = 384 * 1000 // max amount of CPU 
units single replicate of service can request + maxUnitGPU = 24 + maxUnitMemory = 2 * unit.Ti + maxUnitStorage = 32 * unit.Ti + maxUnitCount = 50 // max amount of service replicas allowed + maxUnitPrice = 10000000 // 10akt + maxGroupCount = 20 // max amount of + maxGroupUnits = 20 +) + +// This is the validation configuration that acts as a hard limit +// on what the network accepts for deployments. This is never changed +// and is the same across all members of the network + +type Limits struct { + Memory uint64 + Storage uint64 + Price uint64 + CPU uint + GPU uint + Count uint +} + +type UnitLimits struct { + Max Limits + Min Limits +} + +type GroupLimit struct { + Limits + Units uint32 +} + +type GroupLimits struct { + Max GroupLimit +} + +type ValidationConfig struct { + Unit UnitLimits + Group GroupLimits + + // // MaxUnitCPU is the maximum number of milli (1/1000) cpu units a single instance may take + // MaxUnitCPU uint + // MaxUnitGPU uint + // // MaxUnitMemory is the maximum number of bytes of memory that a unit can consume + // MaxUnitMemory uint64 + // // MaxUnitStorage is the maximum number of bytes of storage that a unit can consume + // MaxUnitStorage uint64 + // // MaxUnitCount is the maximum number of replias of a service + // MaxUnitCount uint + // // MaxUnitPrice is the maximum price that a unit can have + // MaxUnitPrice uint64 + // + // MinUnitCPU uint + // MinUnitGPU uint + // MinUnitMemory uint64 + // MinUnitStorage uint64 + // MinUnitCount uint + // + // // MaxGroupCount is the maximum number of groups allowed per deployment + // MaxGroupCount int + // // MaxGroupUnits is the maximum number services per group + // MaxGroupUnits int + // + // // MaxGroupCPU is the maximum total amount of CPU requested per group + // MaxGroupCPU uint64 + // // MaxGroupGPU is the maximum total amount of GPU requested per group + // MaxGroupGPU uint64 + // // MaxGroupMemory is the maximum total amount of memory requested per group + // MaxGroupMemory uint64 + // // MaxGroupStorage is the maximum total amount of storage requested per group + // MaxGroupStorage uint64 +} + +var validationConfig = ValidationConfig{ + Unit: UnitLimits{ + Max: Limits{ + Memory: maxUnitMemory, + Storage: maxUnitStorage, + CPU: maxUnitCPU, + GPU: maxUnitGPU, + Count: maxUnitCount, + Price: maxUnitPrice, + }, + Min: Limits{ + Memory: unit.Mi, + Storage: 5 * unit.Mi, + CPU: 10, + GPU: 0, + Count: 1, + Price: 0, + }, + }, + Group: GroupLimits{ + Max: GroupLimit{ + Limits: Limits{ + Memory: maxUnitMemory * maxUnitCount, + Storage: maxUnitStorage * maxUnitCount, + CPU: maxUnitCPU * maxUnitCount, + GPU: maxUnitGPU * maxUnitCount, + Count: maxGroupCount, + Price: 0, + }, + Units: maxGroupUnits, + }, + }, +} + +func GetValidationConfig() ValidationConfig { + return validationConfig +} diff --git a/go/node/escrow/v1/account.go b/go/node/escrow/v1/account.go new file mode 100644 index 00000000..79ba7e3b --- /dev/null +++ b/go/node/escrow/v1/account.go @@ -0,0 +1,8 @@ +package v1 + +type Accounts []Account +type FractionalPayments []FractionalPayment + +func (m *Account) HasDepositor() bool { + return m.Owner != m.Depositor +} diff --git a/go/node/escrow/v1/account.pb.go b/go/node/escrow/v1/account.pb.go new file mode 100644 index 00000000..b683bbb6 --- /dev/null +++ b/go/node/escrow/v1/account.pb.go @@ -0,0 +1,727 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: akash/escrow/v1/account.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// State stores state for an escrow account +type Account_State int32 + +const ( + // AccountStateInvalid is an invalid state + AccountStateInvalid Account_State = 0 + // AccountOpen is the state when an account is open + AccountOpen Account_State = 1 + // AccountClosed is the state when an account is closed + AccountClosed Account_State = 2 + // AccountOverdrawn is the state when an account is overdrawn + AccountOverdrawn Account_State = 3 +) + +var Account_State_name = map[int32]string{ + 0: "invalid", + 1: "open", + 2: "closed", + 3: "overdrawn", +} + +var Account_State_value = map[string]int32{ + "invalid": 0, + "open": 1, + "closed": 2, + "overdrawn": 3, +} + +func (x Account_State) String() string { + return proto.EnumName(Account_State_name, int32(x)) +} + +func (Account_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_6835c04008686422, []int{0, 0} +} + +// Account stores state for an escrow account +type Account struct { + // unique identifier for this escrow account + ID AccountID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` + // bech32 encoded account address of the owner of this escrow account + Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner" yaml:"owner"` + // current state of this escrow account + State Account_State `protobuf:"varint,3,opt,name=state,proto3,enum=akash.escrow.v1.Account_State" json:"state" yaml:"state"` + // unspent coins received from the owner's wallet + Balance types.DecCoin `protobuf:"bytes,4,opt,name=balance,proto3" json:"balance" yaml:"balance"` + // total coins spent by this account + Transferred types.DecCoin `protobuf:"bytes,5,opt,name=transferred,proto3" json:"transferred" yaml:"transferred"` + // block height at which this account was last settled + SettledAt int64 `protobuf:"varint,6,opt,name=settled_at,json=settledAt,proto3" json:"settledAt" yaml:"settledAt"` + // bech32 encoded account address of the depositor. + // If depositor is same as the owner, then any incoming coins are added to the Balance. + // If depositor isn't same as the owner, then any incoming coins are added to the Funds. + Depositor string `protobuf:"bytes,7,opt,name=depositor,proto3" json:"depositor" yaml:"depositor"` + // Funds are unspent coins received from the (non-Owner) Depositor's wallet. + // If there are any funds, they should be spent before spending the Balance. 
+ Funds types.DecCoin `protobuf:"bytes,8,opt,name=funds,proto3" json:"funds" yaml:"funds"` +} + +func (m *Account) Reset() { *m = Account{} } +func (m *Account) String() string { return proto.CompactTextString(m) } +func (*Account) ProtoMessage() {} +func (*Account) Descriptor() ([]byte, []int) { + return fileDescriptor_6835c04008686422, []int{0} +} +func (m *Account) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Account) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Account.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Account) XXX_Merge(src proto.Message) { + xxx_messageInfo_Account.Merge(m, src) +} +func (m *Account) XXX_Size() int { + return m.Size() +} +func (m *Account) XXX_DiscardUnknown() { + xxx_messageInfo_Account.DiscardUnknown(m) +} + +var xxx_messageInfo_Account proto.InternalMessageInfo + +func (m *Account) GetID() AccountID { + if m != nil { + return m.ID + } + return AccountID{} +} + +func (m *Account) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *Account) GetState() Account_State { + if m != nil { + return m.State + } + return AccountStateInvalid +} + +func (m *Account) GetBalance() types.DecCoin { + if m != nil { + return m.Balance + } + return types.DecCoin{} +} + +func (m *Account) GetTransferred() types.DecCoin { + if m != nil { + return m.Transferred + } + return types.DecCoin{} +} + +func (m *Account) GetSettledAt() int64 { + if m != nil { + return m.SettledAt + } + return 0 +} + +func (m *Account) GetDepositor() string { + if m != nil { + return m.Depositor + } + return "" +} + +func (m *Account) GetFunds() types.DecCoin { + if m != nil { + return m.Funds + } + return types.DecCoin{} +} + +func init() { + proto.RegisterEnum("akash.escrow.v1.Account_State", Account_State_name, Account_State_value) + proto.RegisterType((*Account)(nil), "akash.escrow.v1.Account") +} + +func init() { proto.RegisterFile("akash/escrow/v1/account.proto", fileDescriptor_6835c04008686422) } + +var fileDescriptor_6835c04008686422 = []byte{ + // 607 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xb1, 0x6f, 0x13, 0x3f, + 0x14, 0xc7, 0x73, 0x69, 0x93, 0xfc, 0xe2, 0xfe, 0x68, 0x83, 0xa9, 0xc4, 0xf5, 0x44, 0xce, 0xc7, + 0xc1, 0x10, 0x06, 0xee, 0x94, 0x32, 0x20, 0xd8, 0x9a, 0x56, 0x42, 0x19, 0x50, 0xc5, 0x75, 0x40, + 0xb0, 0x54, 0xce, 0xd9, 0x0d, 0xa7, 0xa6, 0x76, 0x74, 0x76, 0x53, 0xf1, 0x1f, 0xa0, 0x4c, 0x88, + 0x3d, 0x13, 0xff, 0x02, 0x7f, 0x02, 0x43, 0xc7, 0x8a, 0x89, 0xc9, 0x42, 0xe9, 0x96, 0x31, 0x7f, + 0x01, 0x8a, 0xed, 0xf4, 0x2a, 0xa4, 0x48, 0xdd, 0xee, 0x7d, 0xbe, 0xdf, 0xf7, 0x9e, 0xfd, 0xde, + 0x19, 0x34, 0xf1, 0x29, 0x16, 0x9f, 0x62, 0x2a, 0xd2, 0x9c, 0x5f, 0xc4, 0xa3, 0x76, 0x8c, 0xd3, + 0x94, 0x9f, 0x33, 0x19, 0x0d, 0x73, 0x2e, 0x39, 0xdc, 0xd2, 0x72, 0x64, 0xe4, 0x68, 0xd4, 0xf6, + 0xb6, 0xfb, 0xbc, 0xcf, 0xb5, 0x16, 0x2f, 0xbe, 0x8c, 0xcd, 0xdb, 0x49, 0xb9, 0x38, 0xe3, 0xe2, + 0xd8, 0x08, 0x26, 0xb0, 0x92, 0x6f, 0xa2, 0xb8, 0x87, 0x05, 0x8d, 0x47, 0xed, 0x1e, 0x95, 0xb8, + 0x1d, 0xa7, 0x3c, 0x63, 0x56, 0x47, 0x2b, 0x0e, 0x90, 0x11, 0x63, 0x08, 0x7f, 0x56, 0x41, 0x6d, + 0xcf, 0x30, 0xd8, 0x05, 0xe5, 0x8c, 0xb8, 0x4e, 0xe0, 0xb4, 0x36, 0x76, 0xbd, 0xe8, 0x9f, 0xb3, + 0x45, 0xd6, 0xd5, 0x3d, 0xe8, 0x34, 0x2f, 0x15, 0x2a, 0x4d, 0x15, 0x2a, 0x77, 0x0f, 0x66, 0x0a, + 0x95, 0x33, 
0x32, 0x57, 0xa8, 0xfe, 0x19, 0x9f, 0x0d, 0x5e, 0x87, 0x19, 0x09, 0x93, 0x72, 0x46, + 0xe0, 0x1b, 0x50, 0xe1, 0x17, 0x8c, 0xe6, 0x6e, 0x39, 0x70, 0x5a, 0xf5, 0x4e, 0x7b, 0xa6, 0x90, + 0x01, 0x73, 0x85, 0xfe, 0x37, 0x56, 0x1d, 0x86, 0xbf, 0x7e, 0x3c, 0xdf, 0xb6, 0x37, 0xda, 0x23, + 0x24, 0xa7, 0x42, 0x1c, 0xc9, 0x3c, 0x63, 0xfd, 0xc4, 0xd8, 0xe1, 0x21, 0xa8, 0x08, 0x89, 0x25, + 0x75, 0xd7, 0x02, 0xa7, 0xb5, 0xb9, 0xeb, 0xaf, 0x3a, 0x56, 0x74, 0xb4, 0x70, 0x75, 0x76, 0x16, + 0x8d, 0x74, 0x42, 0xd1, 0x48, 0x87, 0x61, 0x62, 0x30, 0xfc, 0x00, 0x6a, 0x3d, 0x3c, 0xc0, 0x2c, + 0xa5, 0xee, 0xba, 0xbe, 0xe9, 0xa3, 0xc8, 0xf6, 0x5f, 0xcc, 0x30, 0xb2, 0x33, 0x8c, 0x0e, 0x68, + 0xba, 0xcf, 0x33, 0xd6, 0x79, 0xbc, 0xb8, 0xeb, 0x4c, 0xa1, 0x65, 0xd2, 0x5c, 0xa1, 0x4d, 0x53, + 0xd6, 0x82, 0x30, 0x59, 0x4a, 0x30, 0x03, 0x1b, 0x32, 0xc7, 0x4c, 0x9c, 0xd0, 0x3c, 0xa7, 0xc4, + 0xad, 0xdc, 0xa1, 0xfc, 0x33, 0x5b, 0xfe, 0x76, 0xe2, 0x5c, 0x21, 0x68, 0x5a, 0xdc, 0x82, 0x61, + 0x72, 0xdb, 0x02, 0xdf, 0x02, 0x20, 0xa8, 0x94, 0x03, 0x4a, 0x8e, 0xb1, 0x74, 0xab, 0x81, 0xd3, + 0x5a, 0xeb, 0x44, 0x53, 0x85, 0xea, 0x47, 0x86, 0xee, 0xc9, 0x99, 0x42, 0x75, 0xb1, 0x0c, 0xe6, + 0x0a, 0x35, 0xec, 0x30, 0x96, 0x28, 0x4c, 0x0a, 0x19, 0xbe, 0x07, 0x75, 0x42, 0x87, 0x5c, 0x64, + 0x92, 0xe7, 0x6e, 0x4d, 0xaf, 0xec, 0xd5, 0xa2, 0xc0, 0x0d, 0x2c, 0x0a, 0xdc, 0xa0, 0xd5, 0xab, + 0x2b, 0xd2, 0xe0, 0x3b, 0x50, 0x39, 0x39, 0x67, 0x44, 0xb8, 0xff, 0xdd, 0x61, 0x18, 0x4d, 0x3b, + 0x0c, 0x93, 0x52, 0x2c, 0x50, 0x87, 0x61, 0x62, 0x70, 0xf8, 0xcd, 0x01, 0x15, 0xbd, 0x6c, 0xf8, + 0x14, 0xd4, 0x32, 0x36, 0xc2, 0x83, 0x8c, 0x34, 0x4a, 0xde, 0xc3, 0xf1, 0x24, 0x78, 0x60, 0x7f, + 0x06, 0x2d, 0x77, 0x8d, 0x04, 0x77, 0xc0, 0x3a, 0x1f, 0x52, 0xd6, 0x70, 0xbc, 0xad, 0xf1, 0x24, + 0xd8, 0xb0, 0x96, 0xc3, 0x21, 0x65, 0xb0, 0x09, 0xaa, 0xe9, 0x80, 0x0b, 0x4a, 0x1a, 0x65, 0xef, + 0xfe, 0x78, 0x12, 0xdc, 0xb3, 0xe2, 0xbe, 0x86, 0xf0, 0x09, 0xa8, 0xf3, 0x11, 0xcd, 0x49, 0x8e, + 0x2f, 0x58, 0x63, 0xcd, 0xdb, 0x1e, 0x4f, 0x82, 0xc6, 0x32, 0x7d, 0xc9, 0xbd, 0xf5, 0x2f, 0xdf, + 0xfd, 0x52, 0xe7, 0xe5, 0xe5, 0xd4, 0x77, 0xae, 0xa6, 0xbe, 0xf3, 0x67, 0xea, 0x3b, 0x5f, 0xaf, + 0xfd, 0xd2, 0xd5, 0xb5, 0x5f, 0xfa, 0x7d, 0xed, 0x97, 0x3e, 0x36, 0x87, 0xa7, 0xfd, 0x08, 0x9f, + 0xca, 0x88, 0xd0, 0x51, 0xdc, 0xe7, 0x31, 0xe3, 0x84, 0x16, 0xef, 0xb1, 0x57, 0xd5, 0xcf, 0xf0, + 0xc5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x42, 0x2c, 0xb5, 0x60, 0x2a, 0x04, 0x00, 0x00, +} + +func (m *Account) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Account) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Account) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Funds.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAccount(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + if len(m.Depositor) > 0 { + i -= len(m.Depositor) + copy(dAtA[i:], m.Depositor) + i = encodeVarintAccount(dAtA, i, uint64(len(m.Depositor))) + i-- + dAtA[i] = 0x3a + } + if m.SettledAt != 0 { + i = encodeVarintAccount(dAtA, i, uint64(m.SettledAt)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.Transferred.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAccount(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + 
size, err := m.Balance.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAccount(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if m.State != 0 { + i = encodeVarintAccount(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x18 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintAccount(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAccount(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintAccount(dAtA []byte, offset int, v uint64) int { + offset -= sovAccount(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Account) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovAccount(uint64(l)) + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovAccount(uint64(l)) + } + if m.State != 0 { + n += 1 + sovAccount(uint64(m.State)) + } + l = m.Balance.Size() + n += 1 + l + sovAccount(uint64(l)) + l = m.Transferred.Size() + n += 1 + l + sovAccount(uint64(l)) + if m.SettledAt != 0 { + n += 1 + sovAccount(uint64(m.SettledAt)) + } + l = len(m.Depositor) + if l > 0 { + n += 1 + l + sovAccount(uint64(l)) + } + l = m.Funds.Size() + n += 1 + l + sovAccount(uint64(l)) + return n +} + +func sovAccount(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAccount(x uint64) (n int) { + return sovAccount(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Account) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Account: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Account: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= Account_State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Balance", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Balance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Transferred", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Transferred.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SettledAt", wireType) + } + m.SettledAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SettledAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Depositor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Depositor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Funds", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccount + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAccount + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAccount + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Funds.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAccount(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAccount + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAccount(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAccount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAccount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAccount + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAccount + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupAccount + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthAccount + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthAccount = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAccount = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAccount = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/escrow/v1/accountid.pb.go b/go/node/escrow/v1/accountid.pb.go new file mode 100644 index 00000000..2722a483 --- /dev/null +++ b/go/node/escrow/v1/accountid.pb.go @@ -0,0 +1,371 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/escrow/v1/accountid.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// AccountID is the account identifier +type AccountID struct { + Scope string `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope" yaml:"scope"` + XID string `protobuf:"bytes,2,opt,name=xid,proto3" json:"xid" yaml:"xid"` +} + +func (m *AccountID) Reset() { *m = AccountID{} } +func (m *AccountID) String() string { return proto.CompactTextString(m) } +func (*AccountID) ProtoMessage() {} +func (*AccountID) Descriptor() ([]byte, []int) { + return fileDescriptor_0d5f6bffbb48f285, []int{0} +} +func (m *AccountID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AccountID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AccountID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AccountID) XXX_Merge(src proto.Message) { + xxx_messageInfo_AccountID.Merge(m, src) +} +func (m *AccountID) XXX_Size() int { + return m.Size() +} +func (m *AccountID) XXX_DiscardUnknown() { + xxx_messageInfo_AccountID.DiscardUnknown(m) +} + +var xxx_messageInfo_AccountID proto.InternalMessageInfo + +func (m *AccountID) GetScope() string { + if m != nil { + return m.Scope + } + return "" +} + +func (m *AccountID) GetXID() string { + if m != nil { + return m.XID + } + return "" +} + +func init() { + proto.RegisterType((*AccountID)(nil), "akash.escrow.v1.AccountID") +} + +func init() { proto.RegisterFile("akash/escrow/v1/accountid.proto", fileDescriptor_0d5f6bffbb48f285) } + +var fileDescriptor_0d5f6bffbb48f285 = []byte{ + // 220 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x2d, 0x4e, 0x2e, 0xca, 0x2f, 0xd7, 0x2f, 0x33, 0xd4, 0x4f, 0x4c, 0x4e, 0xce, + 0x2f, 0xcd, 0x2b, 0xc9, 0x4c, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x07, 0x2b, 0xd0, + 0x83, 0x28, 0xd0, 0x2b, 0x33, 0x94, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xcb, 0xe9, 0x83, 0x58, + 0x10, 0x65, 0x4a, 0x39, 0x5c, 0x9c, 0x8e, 0x10, 0x9d, 0x9e, 0x2e, 0x42, 0xfa, 0x5c, 0xac, 0xc5, + 0xc9, 0xf9, 0x05, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x4e, 0x92, 0xaf, 0xee, 0xc9, 0x43, + 0x04, 0x3e, 0xdd, 0x93, 0xe7, 0xa9, 0x4c, 0xcc, 0xcd, 0xb1, 0x52, 0x02, 0x73, 0x95, 0x82, 0x20, + 0xc2, 0x42, 0x7a, 0x5c, 0xcc, 0x15, 0x99, 0x29, 0x12, 0x4c, 0x60, 0xe5, 0x32, 0x8f, 0xee, 0xc9, + 0x33, 0x47, 0x78, 0xba, 0xbc, 0xba, 0x27, 0x0f, 0x12, 0xfd, 0x74, 0x4f, 0x9e, 0x0b, 0xa2, 0xa7, + 0x22, 0x33, 0x45, 0x29, 0x08, 0x24, 0xe4, 0x64, 0x7e, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, + 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, + 0x72, 0x0c, 0x51, 0xb2, 0x05, 0xd9, 0xe9, 0x7a, 0x89, 0xd9, 0x25, 0x7a, 0x29, 0xa9, 0x65, 0xfa, + 0xe9, 0xf9, 0xfa, 0x79, 0xf9, 0x29, 0xa9, 0x08, 0xdf, 0x25, 0xb1, 0x81, 0x5d, 0x6b, 0x0c, 0x08, + 0x00, 0x00, 0xff, 0xff, 0x02, 0x00, 0xe3, 0x40, 0xf7, 0x00, 0x00, 0x00, +} + +func (m *AccountID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AccountID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AccountID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + 
_ = l + if len(m.XID) > 0 { + i -= len(m.XID) + copy(dAtA[i:], m.XID) + i = encodeVarintAccountid(dAtA, i, uint64(len(m.XID))) + i-- + dAtA[i] = 0x12 + } + if len(m.Scope) > 0 { + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = encodeVarintAccountid(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintAccountid(dAtA []byte, offset int, v uint64) int { + offset -= sovAccountid(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AccountID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Scope) + if l > 0 { + n += 1 + l + sovAccountid(uint64(l)) + } + l = len(m.XID) + if l > 0 { + n += 1 + l + sovAccountid(uint64(l)) + } + return n +} + +func sovAccountid(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAccountid(x uint64) (n int) { + return sovAccountid(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *AccountID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccountid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AccountID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AccountID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccountid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAccountid + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAccountid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scope = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field XID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAccountid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAccountid + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAccountid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.XID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAccountid(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAccountid + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAccountid(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAccountid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAccountid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAccountid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAccountid + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupAccountid + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthAccountid + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthAccountid = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAccountid = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAccountid = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/escrow/v1/codec.go b/go/node/escrow/v1/codec.go new file mode 100644 index 00000000..15822e50 --- /dev/null +++ b/go/node/escrow/v1/codec.go @@ -0,0 +1,16 @@ +package v1 + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// RegisterInterfaces registers the x/provider interfaces types with the interface registry +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil)) +} + +// RegisterLegacyAminoCodec register concrete types on codec +func RegisterLegacyAminoCodec(_ *codec.LegacyAmino) { +} diff --git a/go/node/escrow/v1/error.go b/go/node/escrow/v1/error.go new file mode 100644 index 00000000..72d04abf --- /dev/null +++ b/go/node/escrow/v1/error.go @@ -0,0 +1,39 @@ +package v1 + +import ( + cerrors "cosmossdk.io/errors" +) + +const ( + errAccountExists uint32 = iota + 1 + errAccountClosed + errAccountNotFound + errAccountOverdrawn + errInvalidDenomination + errPaymentExists + errPaymentClosed + errPaymentNotFound + errPaymentRateZero + errInvalidPayment + errInvalidSettlement + errInvalidAccountID + errInvalidAccount + errInvalidAccountDepositor +) + +var ( + ErrAccountExists = cerrors.Register(ModuleName, errAccountExists, "account exists") + ErrAccountClosed = cerrors.Register(ModuleName, errAccountClosed, "account closed") + ErrAccountNotFound = cerrors.Register(ModuleName, errAccountNotFound, "account not found") + ErrAccountOverdrawn = cerrors.Register(ModuleName, errAccountOverdrawn, "account overdrawn") + ErrInvalidDenomination = cerrors.Register(ModuleName, errInvalidDenomination, "invalid denomination") + ErrPaymentExists = cerrors.Register(ModuleName, errPaymentExists, "payment exists") + ErrPaymentClosed = cerrors.Register(ModuleName, errPaymentClosed, "payment closed") + ErrPaymentNotFound = cerrors.Register(ModuleName, errPaymentNotFound, "payment not found") + ErrPaymentRateZero = cerrors.Register(ModuleName, errPaymentRateZero, 
"payment rate zero") + ErrInvalidPayment = cerrors.Register(ModuleName, errInvalidPayment, "invalid payment") + ErrInvalidSettlement = cerrors.Register(ModuleName, errInvalidSettlement, "invalid settlement") + ErrInvalidAccountID = cerrors.Register(ModuleName, errInvalidAccountID, "invalid account ID") + ErrInvalidAccount = cerrors.Register(ModuleName, errInvalidAccount, "invalid account") + ErrInvalidAccountDepositor = cerrors.Register(ModuleName, errInvalidAccountDepositor, "invalid account depositor") +) diff --git a/go/node/escrow/v1/fractional_payment.pb.go b/go/node/escrow/v1/fractional_payment.pb.go new file mode 100644 index 00000000..d8e51b25 --- /dev/null +++ b/go/node/escrow/v1/fractional_payment.pb.go @@ -0,0 +1,682 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/escrow/v1/fractional_payment.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// State defines payment state +type FractionalPayment_State int32 + +const ( + // PaymentStateInvalid is the state when the payment is invalid + PaymentStateInvalid FractionalPayment_State = 0 + // PaymentStateOpen is the state when the payment is open + PaymentOpen FractionalPayment_State = 1 + // PaymentStateClosed is the state when the payment is closed + PaymentClosed FractionalPayment_State = 2 + // PaymentStateOverdrawn is the state when the payment is overdrawn + PaymentOverdrawn FractionalPayment_State = 3 +) + +var FractionalPayment_State_name = map[int32]string{ + 0: "invalid", + 1: "open", + 2: "closed", + 3: "overdrawn", +} + +var FractionalPayment_State_value = map[string]int32{ + "invalid": 0, + "open": 1, + "closed": 2, + "overdrawn": 3, +} + +func (x FractionalPayment_State) String() string { + return proto.EnumName(FractionalPayment_State_name, int32(x)) +} + +func (FractionalPayment_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_deaabcd18f9ef7ba, []int{0, 0} +} + +// Payment stores state for a payment +type FractionalPayment struct { + AccountID AccountID `protobuf:"bytes,1,opt,name=account_id,json=accountId,proto3" json:"accountID" yaml:"accountID"` + PaymentID string `protobuf:"bytes,2,opt,name=payment_id,json=paymentId,proto3" json:"paymentID" yaml:"paymentID"` + Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner" yaml:"owner"` + State FractionalPayment_State `protobuf:"varint,4,opt,name=state,proto3,enum=akash.escrow.v1.FractionalPayment_State" json:"state" yaml:"state"` + Rate types.DecCoin `protobuf:"bytes,5,opt,name=rate,proto3" json:"rate" yaml:"rate"` + Balance types.DecCoin `protobuf:"bytes,6,opt,name=balance,proto3" json:"balance" yaml:"balance"` + Withdrawn types.Coin `protobuf:"bytes,7,opt,name=withdrawn,proto3" json:"withdrawn" yaml:"withdrawn"` +} + +func (m *FractionalPayment) Reset() { *m = FractionalPayment{} } +func (m *FractionalPayment) String() string { return 
proto.CompactTextString(m) } +func (*FractionalPayment) ProtoMessage() {} +func (*FractionalPayment) Descriptor() ([]byte, []int) { + return fileDescriptor_deaabcd18f9ef7ba, []int{0} +} +func (m *FractionalPayment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FractionalPayment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FractionalPayment.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FractionalPayment) XXX_Merge(src proto.Message) { + xxx_messageInfo_FractionalPayment.Merge(m, src) +} +func (m *FractionalPayment) XXX_Size() int { + return m.Size() +} +func (m *FractionalPayment) XXX_DiscardUnknown() { + xxx_messageInfo_FractionalPayment.DiscardUnknown(m) +} + +var xxx_messageInfo_FractionalPayment proto.InternalMessageInfo + +func (m *FractionalPayment) GetAccountID() AccountID { + if m != nil { + return m.AccountID + } + return AccountID{} +} + +func (m *FractionalPayment) GetPaymentID() string { + if m != nil { + return m.PaymentID + } + return "" +} + +func (m *FractionalPayment) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *FractionalPayment) GetState() FractionalPayment_State { + if m != nil { + return m.State + } + return PaymentStateInvalid +} + +func (m *FractionalPayment) GetRate() types.DecCoin { + if m != nil { + return m.Rate + } + return types.DecCoin{} +} + +func (m *FractionalPayment) GetBalance() types.DecCoin { + if m != nil { + return m.Balance + } + return types.DecCoin{} +} + +func (m *FractionalPayment) GetWithdrawn() types.Coin { + if m != nil { + return m.Withdrawn + } + return types.Coin{} +} + +func init() { + proto.RegisterEnum("akash.escrow.v1.FractionalPayment_State", FractionalPayment_State_name, FractionalPayment_State_value) + proto.RegisterType((*FractionalPayment)(nil), "akash.escrow.v1.FractionalPayment") +} + +func init() { + proto.RegisterFile("akash/escrow/v1/fractional_payment.proto", fileDescriptor_deaabcd18f9ef7ba) +} + +var fileDescriptor_deaabcd18f9ef7ba = []byte{ + // 584 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x31, 0x6f, 0xd3, 0x40, + 0x14, 0xc7, 0xed, 0x36, 0x49, 0x95, 0x2b, 0xb4, 0xee, 0x51, 0x89, 0xc4, 0x50, 0xdb, 0x18, 0x90, + 0xb2, 0x70, 0x56, 0xca, 0x80, 0xc4, 0xd6, 0xb4, 0x02, 0x75, 0xa8, 0x40, 0xee, 0x54, 0x06, 0xa2, + 0x8b, 0x7d, 0xa4, 0x56, 0x92, 0x3b, 0xcb, 0x36, 0x8e, 0xfa, 0x0d, 0x50, 0x26, 0x84, 0x58, 0x33, + 0xf1, 0x15, 0xf8, 0x10, 0x1d, 0x2b, 0x26, 0x26, 0x0b, 0x25, 0x5b, 0xc6, 0x7c, 0x02, 0xe4, 0xbb, + 0xb3, 0x2d, 0xb5, 0x0c, 0x6c, 0x7e, 0xff, 0xff, 0xff, 0xfd, 0xde, 0xd3, 0x3b, 0x19, 0x74, 0xf0, + 0x08, 0xc7, 0x97, 0x0e, 0x89, 0xbd, 0x88, 0x4d, 0x9d, 0xb4, 0xeb, 0x7c, 0x8a, 0xb0, 0x97, 0x04, + 0x8c, 0xe2, 0x71, 0x3f, 0xc4, 0x57, 0x13, 0x42, 0x13, 0x14, 0x46, 0x2c, 0x61, 0x70, 0x97, 0x27, + 0x91, 0x48, 0xa2, 0xb4, 0xab, 0xef, 0x0f, 0xd9, 0x90, 0x71, 0xcf, 0xc9, 0xbf, 0x44, 0x4c, 0x6f, + 0x7b, 0x2c, 0x9e, 0xb0, 0xb8, 0x2f, 0x0c, 0x51, 0x48, 0xcb, 0x10, 0x95, 0x33, 0xc0, 0x31, 0x71, + 0xd2, 0xee, 0x80, 0x24, 0xb8, 0xeb, 0x78, 0x2c, 0xa0, 0xd2, 0x37, 0x6f, 0xef, 0x82, 0x3d, 0x8f, + 0x7d, 0xa6, 0x49, 0xe0, 0x8b, 0x80, 0xfd, 0xbd, 0x01, 0xf6, 0xde, 0x94, 0xfb, 0xbd, 0x17, 0xeb, + 0xc1, 0x09, 0x00, 0x32, 0xd8, 0x0f, 0xfc, 0x96, 0x6a, 0xa9, 0x9d, 0xed, 0x43, 0x1d, 0xdd, 0xda, + 0x16, 0x1d, 0x89, 0xc8, 0xe9, 0x49, 0xef, 
0xf0, 0x3a, 0x33, 0x95, 0x45, 0x66, 0x36, 0x4b, 0x69, + 0x95, 0x99, 0x4d, 0x5c, 0x14, 0xeb, 0xcc, 0xd4, 0xae, 0xf0, 0x64, 0xfc, 0xda, 0x2e, 0x25, 0xdb, + 0x2d, 0x6d, 0x1f, 0x9e, 0x01, 0x20, 0x0f, 0x93, 0x8f, 0xdb, 0xb0, 0xd4, 0x4e, 0xb3, 0x87, 0x72, + 0x9c, 0xdc, 0x47, 0xe0, 0xc2, 0xa2, 0xa8, 0x70, 0xa5, 0x64, 0xbb, 0xa5, 0xed, 0xc3, 0xb7, 0xa0, + 0xce, 0xa6, 0x94, 0x44, 0xad, 0x4d, 0x4e, 0xea, 0xae, 0x32, 0x53, 0x08, 0xeb, 0xcc, 0xbc, 0x27, + 0x1a, 0x79, 0x69, 0xff, 0xfa, 0xf9, 0x62, 0x5f, 0x9e, 0xf3, 0xc8, 0xf7, 0x23, 0x12, 0xc7, 0xe7, + 0x49, 0x14, 0xd0, 0xa1, 0x2b, 0xe2, 0xf0, 0x02, 0xd4, 0xe3, 0x04, 0x27, 0xa4, 0x55, 0xb3, 0xd4, + 0xce, 0xce, 0x61, 0xe7, 0xce, 0x05, 0xee, 0x5c, 0x0e, 0x9d, 0xe7, 0xf9, 0x5e, 0x3b, 0x1f, 0xc9, + 0x5b, 0xab, 0x91, 0xbc, 0xb4, 0x5d, 0x21, 0xc3, 0x33, 0x50, 0x8b, 0x72, 0x72, 0x9d, 0xdf, 0xf6, + 0x31, 0x92, 0x6b, 0xe4, 0xef, 0x88, 0xe4, 0x3b, 0xa2, 0x13, 0xe2, 0x1d, 0xb3, 0x80, 0xf6, 0x1e, + 0xe5, 0xd7, 0x5d, 0x65, 0x26, 0xef, 0x58, 0x67, 0xe6, 0xb6, 0x00, 0x46, 0x9c, 0xc7, 0x45, 0x78, + 0x01, 0xb6, 0x06, 0x78, 0x8c, 0xa9, 0x47, 0x5a, 0x8d, 0xff, 0x20, 0x3e, 0x91, 0xc4, 0xa2, 0x69, + 0x9d, 0x99, 0x3b, 0x02, 0x2a, 0x05, 0xdb, 0x2d, 0x2c, 0xf8, 0x11, 0x34, 0xa7, 0x41, 0x72, 0xe9, + 0x47, 0x78, 0x4a, 0x5b, 0x5b, 0x1c, 0xde, 0xfe, 0x27, 0x9c, 0x93, 0x9f, 0x4b, 0x72, 0xd5, 0x53, + 0xbd, 0x56, 0x29, 0xd9, 0x6e, 0x65, 0xdb, 0xdf, 0x54, 0x50, 0xe7, 0x57, 0x83, 0xcf, 0xc0, 0x56, + 0x40, 0x53, 0x3c, 0x0e, 0x7c, 0x4d, 0xd1, 0x1f, 0xce, 0xe6, 0xd6, 0x03, 0x79, 0x55, 0x6e, 0x9f, + 0x0a, 0x0b, 0xb6, 0x41, 0x8d, 0x85, 0x84, 0x6a, 0xaa, 0xbe, 0x3b, 0x9b, 0x5b, 0xdb, 0x32, 0xf2, + 0x2e, 0x24, 0x14, 0x1e, 0x80, 0x86, 0x37, 0x66, 0x31, 0xf1, 0xb5, 0x0d, 0x7d, 0x6f, 0x36, 0xb7, + 0xee, 0x4b, 0xf3, 0x98, 0x8b, 0xf0, 0x29, 0x68, 0xb2, 0x94, 0x44, 0x7c, 0xac, 0xb6, 0xa9, 0xef, + 0xcf, 0xe6, 0x96, 0x56, 0xb4, 0x17, 0xba, 0x5e, 0xfb, 0xf2, 0xc3, 0x50, 0x7a, 0xaf, 0xae, 0x17, + 0x86, 0x7a, 0xb3, 0x30, 0xd4, 0x3f, 0x0b, 0x43, 0xfd, 0xba, 0x34, 0x94, 0x9b, 0xa5, 0xa1, 0xfc, + 0x5e, 0x1a, 0xca, 0x87, 0x83, 0x70, 0x34, 0x44, 0x78, 0x94, 0x20, 0x9f, 0xa4, 0xce, 0x90, 0x39, + 0x94, 0xf9, 0xa4, 0xfa, 0xbf, 0x06, 0x0d, 0xfe, 0x5b, 0xbd, 0xfc, 0x1b, 0x00, 0x00, 0xff, 0xff, + 0x90, 0x55, 0x29, 0xd1, 0x05, 0x04, 0x00, 0x00, +} + +func (m *FractionalPayment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FractionalPayment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FractionalPayment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Withdrawn.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintFractionalPayment(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.Balance.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintFractionalPayment(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + { + size, err := m.Rate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintFractionalPayment(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if m.State != 0 { + i = encodeVarintFractionalPayment(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x20 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = 
encodeVarintFractionalPayment(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0x1a + } + if len(m.PaymentID) > 0 { + i -= len(m.PaymentID) + copy(dAtA[i:], m.PaymentID) + i = encodeVarintFractionalPayment(dAtA, i, uint64(len(m.PaymentID))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.AccountID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintFractionalPayment(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintFractionalPayment(dAtA []byte, offset int, v uint64) int { + offset -= sovFractionalPayment(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *FractionalPayment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.AccountID.Size() + n += 1 + l + sovFractionalPayment(uint64(l)) + l = len(m.PaymentID) + if l > 0 { + n += 1 + l + sovFractionalPayment(uint64(l)) + } + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovFractionalPayment(uint64(l)) + } + if m.State != 0 { + n += 1 + sovFractionalPayment(uint64(m.State)) + } + l = m.Rate.Size() + n += 1 + l + sovFractionalPayment(uint64(l)) + l = m.Balance.Size() + n += 1 + l + sovFractionalPayment(uint64(l)) + l = m.Withdrawn.Size() + n += 1 + l + sovFractionalPayment(uint64(l)) + return n +} + +func sovFractionalPayment(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozFractionalPayment(x uint64) (n int) { + return sovFractionalPayment(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *FractionalPayment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFractionalPayment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FractionalPayment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FractionalPayment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccountID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFractionalPayment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthFractionalPayment + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthFractionalPayment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.AccountID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PaymentID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFractionalPayment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFractionalPayment + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthFractionalPayment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PaymentID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFractionalPayment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFractionalPayment + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFractionalPayment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFractionalPayment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= FractionalPayment_State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFractionalPayment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthFractionalPayment + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthFractionalPayment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Rate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Balance", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFractionalPayment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthFractionalPayment + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthFractionalPayment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Balance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Withdrawn", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFractionalPayment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthFractionalPayment + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthFractionalPayment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Withdrawn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFractionalPayment(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + 
return ErrInvalidLengthFractionalPayment + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFractionalPayment(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFractionalPayment + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFractionalPayment + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFractionalPayment + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthFractionalPayment + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupFractionalPayment + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthFractionalPayment + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthFractionalPayment = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFractionalPayment = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupFractionalPayment = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/escrow/v1/genesis.pb.go b/go/node/escrow/v1/genesis.pb.go new file mode 100644 index 00000000..b19ca9dc --- /dev/null +++ b/go/node/escrow/v1/genesis.pb.go @@ -0,0 +1,397 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/escrow/v1/genesis.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the basic genesis state used by the escrow module +type GenesisState struct { + Accounts Accounts `protobuf:"bytes,1,rep,name=accounts,proto3,castrepeated=Accounts" json:"accounts" yaml:"accounts"` + Payments FractionalPayments `protobuf:"bytes,2,rep,name=payments,proto3,castrepeated=FractionalPayments" json:"payments" yaml:"payments"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_c65ae044e44df997, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetAccounts() Accounts { + if m != nil { + return m.Accounts + } + return nil +} + +func (m *GenesisState) GetPayments() FractionalPayments { + if m != nil { + return m.Payments + } + return nil +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "akash.escrow.v1.GenesisState") +} + +func init() { proto.RegisterFile("akash/escrow/v1/genesis.proto", fileDescriptor_c65ae044e44df997) } + +var fileDescriptor_c65ae044e44df997 = []byte{ + // 278 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4d, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x2d, 0x4e, 0x2e, 0xca, 0x2f, 0xd7, 0x2f, 0x33, 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, + 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x07, 0x4b, 0xeb, 0x41, 0xa4, + 0xf5, 0xca, 0x0c, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x72, 0xfa, 0x20, 0x16, 0x44, 0x99, + 0x14, 0x86, 0x29, 0x89, 0xc9, 0xc9, 0xf9, 0xa5, 0x79, 0x25, 0x50, 0x69, 0x0d, 0x74, 0xe9, 0xb4, + 0xa2, 0xc4, 0xe4, 0x92, 0xcc, 0xfc, 0xbc, 0xc4, 0x9c, 0xf8, 0x82, 0xc4, 0xca, 0xdc, 0x54, 0x98, + 0x4a, 0xa5, 0x0f, 0x8c, 0x5c, 0x3c, 0xee, 0x10, 0x17, 0x04, 0x97, 0x24, 0x96, 0xa4, 0x0a, 0xa5, + 0x72, 0x71, 0x40, 0xcd, 0x2a, 0x96, 0x60, 0x54, 0x60, 0xd6, 0xe0, 0x36, 0x92, 0xd0, 0x43, 0x73, + 0x93, 0x9e, 0x23, 0x44, 0x81, 0x93, 0xfe, 0x89, 0x7b, 0xf2, 0x0c, 0xaf, 0xee, 0xc9, 0xc3, 0x75, + 0x7c, 0xba, 0x27, 0xcf, 0x5f, 0x99, 0x98, 0x9b, 0x63, 0xa5, 0x04, 0x13, 0x51, 0x5a, 0x75, 0x5f, + 0x9e, 0x03, 0xaa, 0xbe, 0x38, 0x08, 0xae, 0x50, 0xa8, 0x92, 0x8b, 0x03, 0xea, 0x90, 0x62, 0x09, + 0x26, 0xb0, 0x35, 0x4a, 0x18, 0xd6, 0xb8, 0xc1, 0x1d, 0x1d, 0x00, 0x51, 0xea, 0x64, 0x09, 0xb3, + 0x10, 0xa6, 0x17, 0x61, 0x21, 0x4c, 0x04, 0x64, 0xa1, 0x10, 0x86, 0xce, 0xe2, 0x20, 0xb8, 0x16, + 0x27, 0xf3, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, + 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x92, 0x2d, 0xc8, 0x4e, + 0xd7, 0x4b, 0xcc, 0x2e, 0xd1, 0x4b, 0x49, 0x2d, 0xd3, 0x4f, 0xcf, 0xd7, 0xcf, 0xcb, 0x4f, 0x49, + 0x45, 0x04, 0x62, 
0x12, 0x1b, 0x38, 0xc8, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x06, 0x60, + 0xa1, 0x4c, 0xc3, 0x01, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Payments) > 0 { + for iNdEx := len(m.Payments) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Payments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Accounts) > 0 { + for iNdEx := len(m.Accounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Accounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Accounts) > 0 { + for _, e := range m.Accounts { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.Payments) > 0 { + for _, e := range m.Payments { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Accounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Accounts = append(m.Accounts, Account{}) + if err := m.Accounts[len(m.Accounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payments = append(m.Payments, FractionalPayment{}) + if err := m.Payments[len(m.Payments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/escrow/v1/key.go b/go/node/escrow/v1/key.go new file mode 100644 index 00000000..9c657ec3 --- /dev/null +++ b/go/node/escrow/v1/key.go @@ -0,0 +1,20 @@ +package v1 + +const ( + // ModuleName is the module name constant used in many places + ModuleName = "escrow" + + // StoreKey is the store key string for deployment + StoreKey = ModuleName + + // RouterKey is the message route for deployment + RouterKey = ModuleName +) + +func AccountKeyPrefix() []byte { + return []byte{0x01} +} + +func PaymentKeyPrefix() []byte { + return []byte{0x02} +} diff --git a/go/node/escrow/v1/query.pb.go b/go/node/escrow/v1/query.pb.go new file mode 100644 index 00000000..f3e81e6c --- /dev/null +++ b/go/node/escrow/v1/query.pb.go @@ -0,0 +1,1606 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: akash/escrow/v1/query.proto + +package v1 + +import ( + context "context" + fmt "fmt" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryAccountRequest is request type for the Query/Account RPC method +type QueryAccountsRequest struct { + Scope string `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"` + Xid string `protobuf:"bytes,2,opt,name=xid,proto3" json:"xid,omitempty"` + Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` + State string `protobuf:"bytes,4,opt,name=state,proto3" json:"state,omitempty"` + Pagination *query.PageRequest `protobuf:"bytes,5,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAccountsRequest) Reset() { *m = QueryAccountsRequest{} } +func (m *QueryAccountsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAccountsRequest) ProtoMessage() {} +func (*QueryAccountsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5a5573bbd1dc8788, []int{0} +} +func (m *QueryAccountsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAccountsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAccountsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAccountsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAccountsRequest.Merge(m, src) +} +func (m *QueryAccountsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAccountsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAccountsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAccountsRequest proto.InternalMessageInfo + +func (m *QueryAccountsRequest) GetScope() string { + if m != nil { + return m.Scope + } + return "" +} + +func (m *QueryAccountsRequest) GetXid() string { + if m != nil { + return m.Xid + } + return "" +} + +func (m *QueryAccountsRequest) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *QueryAccountsRequest) GetState() string { + if m != nil { + return m.State + } + return "" +} + +func (m *QueryAccountsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryProvidersResponse is response type for the Query/Providers RPC method +type QueryAccountsResponse struct { + Accounts Accounts `protobuf:"bytes,1,rep,name=accounts,proto3,castrepeated=Accounts" json:"accounts"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAccountsResponse) Reset() { *m = 
QueryAccountsResponse{} } +func (m *QueryAccountsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAccountsResponse) ProtoMessage() {} +func (*QueryAccountsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5a5573bbd1dc8788, []int{1} +} +func (m *QueryAccountsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAccountsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAccountsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAccountsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAccountsResponse.Merge(m, src) +} +func (m *QueryAccountsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAccountsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAccountsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAccountsResponse proto.InternalMessageInfo + +func (m *QueryAccountsResponse) GetAccounts() Accounts { + if m != nil { + return m.Accounts + } + return nil +} + +func (m *QueryAccountsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryPaymentRequest is request type for the Query/Payment RPC method +type QueryPaymentsRequest struct { + Scope string `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"` + Xid string `protobuf:"bytes,2,opt,name=xid,proto3" json:"xid,omitempty"` + Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"` + Owner string `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` + State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"` + Pagination *query.PageRequest `protobuf:"bytes,6,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryPaymentsRequest) Reset() { *m = QueryPaymentsRequest{} } +func (m *QueryPaymentsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryPaymentsRequest) ProtoMessage() {} +func (*QueryPaymentsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_5a5573bbd1dc8788, []int{2} +} +func (m *QueryPaymentsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPaymentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPaymentsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPaymentsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPaymentsRequest.Merge(m, src) +} +func (m *QueryPaymentsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryPaymentsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPaymentsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPaymentsRequest proto.InternalMessageInfo + +func (m *QueryPaymentsRequest) GetScope() string { + if m != nil { + return m.Scope + } + return "" +} + +func (m *QueryPaymentsRequest) GetXid() string { + if m != nil { + return m.Xid + } + return "" +} + +func (m *QueryPaymentsRequest) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *QueryPaymentsRequest) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *QueryPaymentsRequest) GetState() string { + if m != nil { + return m.State + } + return 
"" +} + +func (m *QueryPaymentsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryProvidersResponse is response type for the Query/Providers RPC method +type QueryPaymentsResponse struct { + Payments FractionalPayments `protobuf:"bytes,1,rep,name=payments,proto3,castrepeated=FractionalPayments" json:"payments"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryPaymentsResponse) Reset() { *m = QueryPaymentsResponse{} } +func (m *QueryPaymentsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryPaymentsResponse) ProtoMessage() {} +func (*QueryPaymentsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5a5573bbd1dc8788, []int{3} +} +func (m *QueryPaymentsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryPaymentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryPaymentsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryPaymentsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryPaymentsResponse.Merge(m, src) +} +func (m *QueryPaymentsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryPaymentsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryPaymentsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryPaymentsResponse proto.InternalMessageInfo + +func (m *QueryPaymentsResponse) GetPayments() FractionalPayments { + if m != nil { + return m.Payments + } + return nil +} + +func (m *QueryPaymentsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +func init() { + proto.RegisterType((*QueryAccountsRequest)(nil), "akash.escrow.v1.QueryAccountsRequest") + proto.RegisterType((*QueryAccountsResponse)(nil), "akash.escrow.v1.QueryAccountsResponse") + proto.RegisterType((*QueryPaymentsRequest)(nil), "akash.escrow.v1.QueryPaymentsRequest") + proto.RegisterType((*QueryPaymentsResponse)(nil), "akash.escrow.v1.QueryPaymentsResponse") +} + +func init() { proto.RegisterFile("akash/escrow/v1/query.proto", fileDescriptor_5a5573bbd1dc8788) } + +var fileDescriptor_5a5573bbd1dc8788 = []byte{ + // 524 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0x3f, 0x6f, 0x13, 0x31, + 0x14, 0xc0, 0xe3, 0x4b, 0x53, 0x05, 0x57, 0x82, 0xca, 0x0a, 0xd2, 0x29, 0xd0, 0x6b, 0x14, 0x41, + 0x88, 0x10, 0xb2, 0x95, 0x30, 0x30, 0x93, 0xa1, 0xac, 0x25, 0x63, 0x17, 0xe4, 0x5c, 0xcc, 0x71, + 0x4a, 0x7a, 0xef, 0x7a, 0x76, 0x52, 0xb2, 0xb2, 0xb0, 0x22, 0x31, 0xf2, 0x05, 0x10, 0x23, 0x33, + 0x13, 0x53, 0xc7, 0x4a, 0x2c, 0x4c, 0x80, 0x12, 0x3e, 0x08, 0x3a, 0xdb, 0x77, 0x69, 0x2f, 0x89, + 0x22, 0x10, 0xdb, 0x39, 0xef, 0x8f, 0x7f, 0xef, 0x67, 0x3b, 0xf8, 0x0e, 0x1f, 0x71, 0xf9, 0x8a, + 0x09, 0xe9, 0x27, 0x70, 0xce, 0xa6, 0x1d, 0x76, 0x36, 0x11, 0xc9, 0x8c, 0xc6, 0x09, 0x28, 0x20, + 0xb7, 0x74, 0x90, 0x9a, 0x20, 0x9d, 0x76, 0xea, 0xb5, 0x00, 0x02, 0xd0, 0x31, 0x96, 0x7e, 0x99, + 0xb4, 0xfa, 0xdd, 0x00, 0x20, 0x18, 0x0b, 0xc6, 0xe3, 0x90, 0xf1, 0x28, 0x02, 0xc5, 0x55, 0x08, + 0x91, 0xb4, 0xd1, 0x87, 0x3e, 0xc8, 0x53, 0x90, 0x6c, 0xc0, 0xa5, 0x30, 0xdd, 0xd9, 0xb4, 0x33, + 0x10, 0x8a, 0x77, 0x58, 0xcc, 0x83, 0x30, 0xd2, 0xc9, 0x36, 0xf7, 0xa0, 0x48, 0xc3, 0x7d, 0x1f, + 0x26, 0x91, 0xb2, 
0xe1, 0x76, 0x31, 0xfc, 0x32, 0xe1, 0x7e, 0x5a, 0xce, 0xc7, 0x2f, 0x62, 0x3e, + 0x3b, 0x15, 0x59, 0x66, 0xf3, 0x33, 0xc2, 0xb5, 0xe7, 0xe9, 0x5e, 0x4f, 0x4d, 0x03, 0xd9, 0x17, + 0x67, 0x13, 0x21, 0x15, 0xa9, 0xe1, 0x8a, 0xf4, 0x21, 0x16, 0x2e, 0x6a, 0xa0, 0xf6, 0x8d, 0xbe, + 0x59, 0x90, 0x7d, 0x5c, 0x7e, 0x1d, 0x0e, 0x5d, 0x47, 0xff, 0x96, 0x7e, 0xa6, 0x79, 0x70, 0x1e, + 0x89, 0xc4, 0x2d, 0x9b, 0x3c, 0xbd, 0xd0, 0xd5, 0x8a, 0x2b, 0xe1, 0xee, 0xd8, 0xea, 0x74, 0x41, + 0x8e, 0x30, 0x5e, 0x4e, 0xe2, 0x56, 0x1a, 0xa8, 0xbd, 0xd7, 0x6d, 0x51, 0x33, 0x36, 0x4d, 0xc7, + 0xa6, 0x46, 0xaa, 0x1d, 0x9b, 0x1e, 0xf3, 0x40, 0x58, 0x9e, 0xfe, 0x95, 0xca, 0xe6, 0x47, 0x84, + 0x6f, 0x17, 0xa0, 0x65, 0x0c, 0x91, 0x4c, 0x77, 0xa8, 0x5a, 0x13, 0xd2, 0x45, 0x8d, 0x72, 0x7b, + 0xaf, 0xeb, 0xd2, 0xc2, 0xd9, 0x50, 0x5b, 0xd4, 0xdb, 0xbf, 0xf8, 0x71, 0x58, 0xfa, 0xf4, 0xf3, + 0xb0, 0x9a, 0x77, 0xc9, 0x6b, 0xc9, 0xb3, 0x6b, 0xa4, 0x8e, 0x26, 0x7d, 0xb0, 0x95, 0xd4, 0x40, + 0x5c, 0x43, 0xfd, 0x9a, 0xf9, 0x3d, 0x36, 0xda, 0xff, 0xda, 0xef, 0x4d, 0xec, 0x84, 0x43, 0x2b, + 0xd7, 0xb9, 0xea, 0x7b, 0x67, 0xad, 0xef, 0xca, 0x66, 0xdf, 0xbb, 0xff, 0xec, 0xfb, 0x4b, 0xe6, + 0x7b, 0x39, 0x84, 0xf5, 0x7d, 0x82, 0xab, 0xf6, 0x3e, 0x65, 0xbe, 0x9b, 0x2b, 0xbe, 0x8f, 0xf2, + 0xbb, 0x67, 0xcb, 0x7b, 0x75, 0x6b, 0x9e, 0xac, 0x84, 0x64, 0x3f, 0xef, 0xf7, 0xdf, 0xce, 0xa0, + 0xfb, 0xc1, 0xc1, 0x15, 0x8d, 0x4f, 0xde, 0x22, 0x9c, 0x9f, 0x36, 0xb9, 0xbf, 0x42, 0xba, 0xee, + 0x21, 0xd4, 0x5b, 0xdb, 0xd2, 0xcc, 0x8e, 0xcd, 0x47, 0x6f, 0xbe, 0xfd, 0x7e, 0xef, 0xb4, 0xc8, + 0x3d, 0x56, 0x7c, 0x7c, 0x6a, 0x16, 0x0b, 0x99, 0xbd, 0x50, 0xc9, 0xc6, 0xa1, 0x54, 0x9a, 0x24, + 0x9b, 0x79, 0x13, 0x49, 0xe1, 0xca, 0x6c, 0x22, 0x29, 0x1e, 0xca, 0x56, 0x92, 0xcc, 0xb0, 0x26, + 0xe9, 0x3d, 0xb9, 0x98, 0x7b, 0xe8, 0x72, 0xee, 0xa1, 0x5f, 0x73, 0x0f, 0xbd, 0x5b, 0x78, 0xa5, + 0xcb, 0x85, 0x57, 0xfa, 0xbe, 0xf0, 0x4a, 0x27, 0x07, 0xf1, 0x28, 0xa0, 0x7c, 0xa4, 0xe8, 0x50, + 0x4c, 0x59, 0x00, 0x2c, 0x82, 0xa1, 0x58, 0x36, 0x1b, 0xec, 0xea, 0x7f, 0x90, 0xc7, 0x7f, 0x02, + 0x00, 0x00, 0xff, 0xff, 0x8d, 0xdb, 0xe0, 0xc8, 0x1a, 0x05, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + // Accounts queries all accounts + Accounts(ctx context.Context, in *QueryAccountsRequest, opts ...grpc.CallOption) (*QueryAccountsResponse, error) + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + // Payments queries all payments + Payments(ctx context.Context, in *QueryPaymentsRequest, opts ...grpc.CallOption) (*QueryPaymentsResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Accounts(ctx context.Context, in *QueryAccountsRequest, opts ...grpc.CallOption) (*QueryAccountsResponse, error) { + out := new(QueryAccountsResponse) + err := c.cc.Invoke(ctx, "/akash.escrow.v1.Query/Accounts", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Payments(ctx context.Context, in *QueryPaymentsRequest, opts ...grpc.CallOption) (*QueryPaymentsResponse, error) { + out := new(QueryPaymentsResponse) + err := c.cc.Invoke(ctx, "/akash.escrow.v1.Query/Payments", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + // Accounts queries all accounts + Accounts(context.Context, *QueryAccountsRequest) (*QueryAccountsResponse, error) + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + // Payments queries all payments + Payments(context.Context, *QueryPaymentsRequest) (*QueryPaymentsResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Accounts(ctx context.Context, req *QueryAccountsRequest) (*QueryAccountsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Accounts not implemented") +} +func (*UnimplementedQueryServer) Payments(ctx context.Context, req *QueryPaymentsRequest) (*QueryPaymentsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Payments not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Accounts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAccountsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Accounts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.escrow.v1.Query/Accounts", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Accounts(ctx, req.(*QueryAccountsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Payments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryPaymentsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Payments(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.escrow.v1.Query/Payments", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Payments(ctx, req.(*QueryPaymentsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.escrow.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Accounts", + Handler: _Query_Accounts_Handler, + }, + { + MethodName: "Payments", + Handler: _Query_Payments_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/escrow/v1/query.proto", +} + +func (m *QueryAccountsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAccountsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAccountsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintQuery(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x22 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0x1a + } + if len(m.Xid) > 0 { + i -= len(m.Xid) + copy(dAtA[i:], m.Xid) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Xid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Scope) > 0 { + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAccountsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAccountsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAccountsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Accounts) > 0 { + for iNdEx := len(m.Accounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Accounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryPaymentsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPaymentsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPaymentsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintQuery(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x2a + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0x22 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0x1a + } + if len(m.Xid) > 0 { + i -= len(m.Xid) + copy(dAtA[i:], m.Xid) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Xid))) + i-- + dAtA[i] = 0x12 + } + if len(m.Scope) > 0 { + i -= len(m.Scope) + copy(dAtA[i:], m.Scope) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Scope))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m 
*QueryPaymentsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryPaymentsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryPaymentsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Payments) > 0 { + for iNdEx := len(m.Payments) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Payments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryAccountsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Scope) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Xid) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryAccountsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Accounts) > 0 { + for _, e := range m.Accounts { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryPaymentsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Scope) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Xid) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Id) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryPaymentsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Payments) > 0 { + for _, e := range m.Payments { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryAccountsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) 
+ wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAccountsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAccountsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scope = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Xid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Xid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAccountsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAccountsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAccountsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Accounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Accounts = append(m.Accounts, Account{}) + if err := m.Accounts[len(m.Accounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPaymentsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPaymentsRequest: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPaymentsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scope = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Xid", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Xid = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryPaymentsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryPaymentsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryPaymentsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Payments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Payments = append(m.Payments, FractionalPayment{}) + if err := m.Payments[len(m.Payments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/escrow/v1/query.pb.gw.go b/go/node/escrow/v1/query.pb.gw.go new file mode 100644 index 00000000..cb96ef68 --- /dev/null +++ b/go/node/escrow/v1/query.pb.gw.go @@ -0,0 +1,254 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: akash/escrow/v1/query.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +var ( + filter_Query_Accounts_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Accounts_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAccountsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Accounts_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Accounts(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Accounts_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAccountsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Accounts_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Accounts(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Payments_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Payments_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPaymentsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Payments_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Payments(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Payments_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryPaymentsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Payments_0); err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Payments(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Accounts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Accounts_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Accounts_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Payments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Payments_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Payments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
+func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Accounts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Accounts_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Accounts_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Payments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Payments_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Payments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+
+	})
+
+	return nil
+}
+
+var (
+	pattern_Query_Accounts_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"akash", "escrow", "v1", "types", "accounts", "list"}, "", runtime.AssumeColonVerbOpt(false)))
+
+	pattern_Query_Payments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"akash", "escrow", "v1", "types", "payments", "list"}, "", runtime.AssumeColonVerbOpt(false)))
+)
+
+var (
+	forward_Query_Accounts_0 = runtime.ForwardResponseMessage
+
+	forward_Query_Payments_0 = runtime.ForwardResponseMessage
+)
diff --git a/go/node/escrow/v1/validate.go b/go/node/escrow/v1/validate.go
new file mode 100644
index 00000000..c79f72b8
--- /dev/null
+++ b/go/node/escrow/v1/validate.go
@@ -0,0 +1,54 @@
+package v1
+
+import (
+	sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+func (obj *AccountID) ValidateBasic() error {
+	if len(obj.Scope) == 0 {
+		return ErrInvalidAccountID.Wrap("empty scope")
+	}
+
+	if len(obj.XID) == 0 {
+		return ErrInvalidAccountID.Wrap("empty XID")
+	}
+
+	return nil
+}
+
+func (m *Account) ValidateBasic() error {
+	if err := m.ID.ValidateBasic(); err != nil {
+		return ErrInvalidAccount.Wrap(err.Error())
+	}
+	if _, err := sdk.AccAddressFromBech32(m.Owner); err != nil {
+		return ErrInvalidAccount.Wrap(err.Error())
+	}
+	if m.State == AccountStateInvalid {
+		return ErrInvalidAccount.Wrap("invalid state")
+	}
+	if _, err := sdk.AccAddressFromBech32(m.Depositor); err != nil {
+		return ErrInvalidAccount.Wrapf("invalid depositor")
+	}
+	return nil
+}
+
+func (obj *FractionalPayment) ValidateBasic() error {
+	if err := obj.AccountID.ValidateBasic(); err != nil {
+		return ErrInvalidPayment.Wrap(err.Error())
+	}
+	if len(obj.PaymentID) == 0 {
+		return ErrInvalidPayment.Wrap("empty payment id")
+	}
+	if obj.Rate.IsZero() {
+		return ErrInvalidPayment.Wrap("payment rate zero")
+	}
+	if obj.State == PaymentStateInvalid {
+		return ErrInvalidPayment.Wrap("invalid state")
+	}
+	return nil
+}
+
+// TotalBalance is the sum of Balance and Funds
+func (m *Account) TotalBalance() sdk.DecCoin {
+	return m.Balance.Add(m.Funds)
+}
diff --git a/go/node/escrow/v1beta1/codec.go b/go/node/escrow/v1beta1/codec.go
deleted file mode 100644
index 9076af5c..00000000
--- a/go/node/escrow/v1beta1/codec.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package v1beta1
-
-import (
-	"github.com/cosmos/cosmos-sdk/codec"
-	cdctypes "github.com/cosmos/cosmos-sdk/codec/types"
-	cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
-	sdk "github.com/cosmos/cosmos-sdk/types"
-)
-
-var (
-	amino = codec.NewLegacyAmino()
-
-	// ModuleCdc references the global x/provider module codec. Note, the codec should
-	// ONLY be used in certain instances of tests and for JSON encoding as Amino is
-	// still used for that purpose.
-	//
-	// The actual codec used for serialization should be provided to x/provider and
-	// defined at the application level.
-	ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry())
-)
-
-func init() {
-	RegisterLegacyAminoCodec(amino)
-	cryptocodec.RegisterCrypto(amino)
-	amino.Seal()
-}
-
-// RegisterLegacyAminoCodec register concrete types on codec
-func RegisterLegacyAminoCodec(_ *codec.LegacyAmino) {
-}
-
-// RegisterInterfaces registers the x/provider interfaces types with the interface registry
-func RegisterInterfaces(registry cdctypes.InterfaceRegistry) {
-	registry.RegisterImplementations((*sdk.Msg)(nil))
-}
diff --git a/go/node/escrow/v1beta1/error.go b/go/node/escrow/v1beta1/error.go
deleted file mode 100644
index 499e2055..00000000
--- a/go/node/escrow/v1beta1/error.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package v1beta1
-
-import (
-	"errors"
-)
-
-var (
-	ErrInvalidPayment = errors.New("invalid payment")
-	ErrInvalidAccountID = errors.New("invalid account ID")
-	ErrInvalidAccount = errors.New("invalid account")
-)
diff --git a/go/node/escrow/v1beta1/genesis.pb.go b/go/node/escrow/v1beta1/genesis.pb.go
deleted file mode 100644
index 83a1e8d7..00000000
--- a/go/node/escrow/v1beta1/genesis.pb.go
+++ /dev/null
@@ -1,398 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: akash/escrow/v1beta1/genesis.proto
-
-package v1beta1
-
-import (
-	fmt "fmt"
-	_ "github.com/gogo/protobuf/gogoproto"
-	proto "github.com/gogo/protobuf/proto"
-	io "io"
-	math "math"
-	math_bits "math/bits"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisState defines the basic genesis state used by escrow module -type GenesisState struct { - Accounts []Account `protobuf:"bytes,1,rep,name=accounts,proto3" json:"accounts" yaml:"accounts"` - Payments []Payment `protobuf:"bytes,2,rep,name=payments,proto3" json:"payments" yaml:"payments"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_97636f5fac6c1bea, []int{0} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetAccounts() []Account { - if m != nil { - return m.Accounts - } - return nil -} - -func (m *GenesisState) GetPayments() []Payment { - if m != nil { - return m.Payments - } - return nil -} - -func init() { - proto.RegisterType((*GenesisState)(nil), "akash.escrow.v1beta1.GenesisState") -} - -func init() { - proto.RegisterFile("akash/escrow/v1beta1/genesis.proto", fileDescriptor_97636f5fac6c1bea) -} - -var fileDescriptor_97636f5fac6c1bea = []byte{ - // 260 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4a, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x2d, 0x4e, 0x2e, 0xca, 0x2f, 0xd7, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, - 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, - 0x01, 0xab, 0xd1, 0x83, 0xa8, 0xd1, 0x83, 0xaa, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, - 0xd0, 0x07, 0xb1, 0x20, 0x6a, 0xa5, 0x14, 0xb0, 0x9a, 0x57, 0x52, 0x59, 0x90, 0x0a, 0x35, 0x4d, - 0xe9, 0x1c, 0x23, 0x17, 0x8f, 0x3b, 0xc4, 0xfc, 0xe0, 0x92, 0xc4, 0x92, 0x54, 0xa1, 0x38, 0x2e, - 0x8e, 0xc4, 0xe4, 0xe4, 0xfc, 0xd2, 0xbc, 0x92, 0x62, 0x09, 0x46, 0x05, 0x66, 0x0d, 0x6e, 0x23, - 0x59, 0x3d, 0x6c, 0x36, 0xea, 0x39, 0x42, 0x54, 0x39, 0x29, 0x9f, 0xb8, 0x27, 0xcf, 0xf0, 0xea, - 0x9e, 0x3c, 0x5c, 0xdb, 0xa7, 0x7b, 0xf2, 0xfc, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, 0x30, 0x11, - 0xa5, 0x20, 0xb8, 0x24, 0xc8, 0xfc, 0x82, 0xc4, 0xca, 0xdc, 0x54, 0x90, 0xf9, 0x4c, 0xf8, 0xcc, - 0x0f, 0x80, 0xa8, 0x42, 0x98, 0x0f, 0xd3, 0x86, 0x30, 0x1f, 0x26, 0xa2, 0x14, 0x04, 0x97, 0x74, - 0x0a, 0x3e, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, - 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xcb, 0xf4, 0xcc, 0x92, - 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0x8d, 0xba, 0x79, 0xa9, 0x25, 0xe5, 0xf9, - 0x45, 0xd9, 0x50, 0x5e, 0x62, 0x41, 0xa6, 0x7e, 0x7a, 0xbe, 0x7e, 0x5e, 0x7e, 0x4a, 0x2a, 0x5a, - 0x88, 0x25, 0xb1, 0x81, 0x03, 0xcb, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x75, 0xff, 0x04, 0xce, - 0xa0, 0x01, 0x00, 0x00, -} - -func (m *GenesisState) Marshal() (dAtA []byte, err 
error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Payments) > 0 { - for iNdEx := len(m.Payments) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Payments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Accounts) > 0 { - for iNdEx := len(m.Accounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Accounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Accounts) > 0 { - for _, e := range m.Accounts { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - if len(m.Payments) > 0 { - for _, e := range m.Payments { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Accounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Accounts = append(m.Accounts, Account{}) - if err := m.Accounts[len(m.Accounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 
0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Payments = append(m.Payments, Payment{}) - if err := m.Payments[len(m.Payments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/escrow/v1beta1/key.go b/go/node/escrow/v1beta1/key.go deleted file mode 100644 index 50220fa1..00000000 --- a/go/node/escrow/v1beta1/key.go +++ /dev/null @@ -1,20 +0,0 @@ -package v1beta1 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "escrow" - - // StoreKey is the store key string for deployment - StoreKey = ModuleName - - // RouterKey is the message route for deployment - RouterKey = ModuleName -) - -func AccountKeyPrefix() []byte { - return []byte{0x01} -} - -func PaymentKeyPrefix() []byte { - return []byte{0x02} -} diff --git a/go/node/escrow/v1beta1/query.pb.go b/go/node/escrow/v1beta1/query.pb.go deleted file mode 100644 index 320b3922..00000000 --- a/go/node/escrow/v1beta1/query.pb.go +++ /dev/null @@ -1,1604 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: akash/escrow/v1beta1/query.proto - -package v1beta1 - -import ( - context "context" - fmt "fmt" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryAccountRequest is request type for the Query/Account RPC method -type QueryAccountsRequest struct { - Scope string `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"` - Xid string `protobuf:"bytes,2,opt,name=xid,proto3" json:"xid,omitempty"` - Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` - State string `protobuf:"bytes,4,opt,name=state,proto3" json:"state,omitempty"` - Pagination *query.PageRequest `protobuf:"bytes,5,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryAccountsRequest) Reset() { *m = QueryAccountsRequest{} } -func (m *QueryAccountsRequest) String() string { return proto.CompactTextString(m) } -func (*QueryAccountsRequest) ProtoMessage() {} -func (*QueryAccountsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_70738d9a3b2d7124, []int{0} -} -func (m *QueryAccountsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryAccountsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryAccountsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryAccountsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryAccountsRequest.Merge(m, src) -} -func (m *QueryAccountsRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryAccountsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryAccountsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryAccountsRequest proto.InternalMessageInfo - -func (m *QueryAccountsRequest) GetScope() string { - if m != nil { - return m.Scope - } - return "" -} - -func (m *QueryAccountsRequest) GetXid() string { - if m != nil { - return m.Xid - } - return "" -} - -func (m *QueryAccountsRequest) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *QueryAccountsRequest) GetState() string { - if m != nil { - return m.State - } - return "" -} - -func (m *QueryAccountsRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -type QueryAccountsResponse struct { - Accounts []Account `protobuf:"bytes,1,rep,name=accounts,proto3" json:"accounts"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryAccountsResponse) Reset() { *m = 
QueryAccountsResponse{} } -func (m *QueryAccountsResponse) String() string { return proto.CompactTextString(m) } -func (*QueryAccountsResponse) ProtoMessage() {} -func (*QueryAccountsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_70738d9a3b2d7124, []int{1} -} -func (m *QueryAccountsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryAccountsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryAccountsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryAccountsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryAccountsResponse.Merge(m, src) -} -func (m *QueryAccountsResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryAccountsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryAccountsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryAccountsResponse proto.InternalMessageInfo - -func (m *QueryAccountsResponse) GetAccounts() []Account { - if m != nil { - return m.Accounts - } - return nil -} - -func (m *QueryAccountsResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryPaymentRequest is request type for the Query/Payment RPC method -type QueryPaymentsRequest struct { - Scope string `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"` - Xid string `protobuf:"bytes,2,opt,name=xid,proto3" json:"xid,omitempty"` - Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"` - Owner string `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` - State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"` - Pagination *query.PageRequest `protobuf:"bytes,6,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryPaymentsRequest) Reset() { *m = QueryPaymentsRequest{} } -func (m *QueryPaymentsRequest) String() string { return proto.CompactTextString(m) } -func (*QueryPaymentsRequest) ProtoMessage() {} -func (*QueryPaymentsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_70738d9a3b2d7124, []int{2} -} -func (m *QueryPaymentsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryPaymentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryPaymentsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryPaymentsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryPaymentsRequest.Merge(m, src) -} -func (m *QueryPaymentsRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryPaymentsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryPaymentsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryPaymentsRequest proto.InternalMessageInfo - -func (m *QueryPaymentsRequest) GetScope() string { - if m != nil { - return m.Scope - } - return "" -} - -func (m *QueryPaymentsRequest) GetXid() string { - if m != nil { - return m.Xid - } - return "" -} - -func (m *QueryPaymentsRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *QueryPaymentsRequest) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *QueryPaymentsRequest) GetState() string { - if m != nil { - return m.State - } - 
return "" -} - -func (m *QueryPaymentsRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -type QueryPaymentsResponse struct { - Payments []Payment `protobuf:"bytes,1,rep,name=payments,proto3" json:"payments"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryPaymentsResponse) Reset() { *m = QueryPaymentsResponse{} } -func (m *QueryPaymentsResponse) String() string { return proto.CompactTextString(m) } -func (*QueryPaymentsResponse) ProtoMessage() {} -func (*QueryPaymentsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_70738d9a3b2d7124, []int{3} -} -func (m *QueryPaymentsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryPaymentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryPaymentsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryPaymentsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryPaymentsResponse.Merge(m, src) -} -func (m *QueryPaymentsResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryPaymentsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryPaymentsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryPaymentsResponse proto.InternalMessageInfo - -func (m *QueryPaymentsResponse) GetPayments() []Payment { - if m != nil { - return m.Payments - } - return nil -} - -func (m *QueryPaymentsResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -func init() { - proto.RegisterType((*QueryAccountsRequest)(nil), "akash.escrow.v1beta1.QueryAccountsRequest") - proto.RegisterType((*QueryAccountsResponse)(nil), "akash.escrow.v1beta1.QueryAccountsResponse") - proto.RegisterType((*QueryPaymentsRequest)(nil), "akash.escrow.v1beta1.QueryPaymentsRequest") - proto.RegisterType((*QueryPaymentsResponse)(nil), "akash.escrow.v1beta1.QueryPaymentsResponse") -} - -func init() { proto.RegisterFile("akash/escrow/v1beta1/query.proto", fileDescriptor_70738d9a3b2d7124) } - -var fileDescriptor_70738d9a3b2d7124 = []byte{ - // 493 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0x31, 0x6f, 0x13, 0x31, - 0x14, 0xc7, 0xe3, 0x4b, 0x52, 0x15, 0x57, 0x42, 0xc8, 0x0a, 0xd2, 0x29, 0x82, 0x23, 0xca, 0x00, - 0xa1, 0x15, 0xb6, 0x12, 0x26, 0x26, 0x44, 0x07, 0x58, 0x4b, 0xd8, 0xd8, 0x9c, 0xcb, 0xd3, 0xf5, - 0xd4, 0xc6, 0xef, 0x7a, 0x76, 0x08, 0x59, 0xf9, 0x04, 0x48, 0x48, 0xec, 0x7c, 0x05, 0x36, 0x56, - 0xa6, 0x8e, 0x95, 0x58, 0x98, 0x10, 0x4a, 0xf8, 0x20, 0xe8, 0x6c, 0x5f, 0xd3, 0x46, 0xd7, 0x50, - 0x01, 0xdb, 0xd9, 0xf9, 0x3f, 0xbf, 0x9f, 0x7f, 0x79, 0x77, 0xb4, 0x23, 0x8f, 0xa4, 0x3e, 0x14, - 0xa0, 0xe3, 0x1c, 0x67, 0xe2, 0x4d, 0x7f, 0x04, 0x46, 0xf6, 0xc5, 0xc9, 0x14, 0xf2, 0x39, 0xcf, - 0x72, 0x34, 0xc8, 0x5a, 0x36, 0xc1, 0x5d, 0x82, 0xfb, 0x44, 0xbb, 0x95, 0x60, 0x82, 0x36, 0x20, - 0x8a, 0x27, 0x97, 0x6d, 0xdf, 0x49, 0x10, 0x93, 0x63, 0x10, 0x32, 0x4b, 0x85, 0x54, 0x0a, 0x8d, - 0x34, 0x29, 0x2a, 0xed, 0x7f, 0xdd, 0x8d, 0x51, 0x4f, 0x50, 0x8b, 0x91, 0xd4, 0xe0, 0x5a, 0x9c, - 0x37, 0xcc, 0x64, 0x92, 0x2a, 0x1b, 0xf6, 0xd9, 0x6a, 0x2e, 0x33, 0xcf, 0xc0, 0x9f, 0xd6, 0xfd, - 0x4c, 0x68, 0xeb, 0x65, 0x71, 0xc8, 
0xb3, 0x38, 0xc6, 0xa9, 0x32, 0x7a, 0x08, 0x27, 0x53, 0xd0, - 0x86, 0xb5, 0x68, 0x53, 0xc7, 0x98, 0x41, 0x48, 0x3a, 0xa4, 0x77, 0x63, 0xe8, 0x16, 0xec, 0x16, - 0xad, 0xbf, 0x4d, 0xc7, 0x61, 0x60, 0xf7, 0x8a, 0xc7, 0x22, 0x87, 0x33, 0x05, 0x79, 0x58, 0x77, - 0x39, 0xbb, 0xb0, 0xd5, 0x46, 0x1a, 0x08, 0x1b, 0xbe, 0xba, 0x58, 0xb0, 0xe7, 0x94, 0xae, 0x10, - 0xc3, 0x66, 0x87, 0xf4, 0x76, 0x06, 0xf7, 0xb9, 0xbb, 0x0f, 0x2f, 0xee, 0xc3, 0x9d, 0x32, 0x0f, - 0xca, 0x0f, 0x64, 0x02, 0x9e, 0x67, 0x78, 0xa1, 0xb2, 0xfb, 0x89, 0xd0, 0xdb, 0x6b, 0xd0, 0x3a, - 0x43, 0xa5, 0x81, 0x3d, 0xa5, 0xdb, 0xd2, 0xef, 0x85, 0xa4, 0x53, 0xef, 0xed, 0x0c, 0xee, 0xf2, - 0x2a, 0xf3, 0xdc, 0x57, 0xee, 0x37, 0x4e, 0x7f, 0xdc, 0xab, 0x0d, 0xcf, 0x8b, 0xd8, 0x8b, 0x4b, - 0x88, 0x81, 0x45, 0x7c, 0xf0, 0x47, 0x44, 0xd7, 0xfd, 0x12, 0xe3, 0xd7, 0x52, 0xec, 0x81, 0x9c, - 0x4f, 0xe0, 0x2f, 0xc4, 0xde, 0xa4, 0x41, 0x3a, 0xf6, 0x56, 0x83, 0x8b, 0xa2, 0x1b, 0x95, 0xa2, - 0x9b, 0x57, 0x8b, 0xde, 0xfa, 0x77, 0xd1, 0xab, 0x4b, 0xac, 0x44, 0x67, 0x7e, 0x6f, 0xb3, 0x68, - 0x5f, 0x59, 0x8a, 0x2e, 0x8b, 0xfe, 0x9b, 0xe8, 0xc1, 0x97, 0x80, 0x36, 0x2d, 0x23, 0xfb, 0x48, - 0xe8, 0x76, 0x39, 0x11, 0x6c, 0xb7, 0x1a, 0xa7, 0x6a, 0xd6, 0xdb, 0x7b, 0xd7, 0xca, 0xba, 0xde, - 0xdd, 0xfe, 0xbb, 0x6f, 0xbf, 0x3e, 0x04, 0x7b, 0xec, 0xa1, 0xb8, 0xfa, 0xe5, 0x12, 0xe5, 0x3c, - 0x89, 0xe3, 0x54, 0x1b, 0x0b, 0x56, 0x1a, 0xdc, 0x08, 0xb6, 0x36, 0x2b, 0x1b, 0xc1, 0xd6, 0xff, - 0x92, 0xeb, 0x81, 0x95, 0xfe, 0x2d, 0xd8, 0xfe, 0xab, 0xd3, 0x45, 0x44, 0xce, 0x16, 0x11, 0xf9, - 0xb9, 0x88, 0xc8, 0xfb, 0x65, 0x54, 0x3b, 0x5b, 0x46, 0xb5, 0xef, 0xcb, 0xa8, 0xf6, 0xfa, 0x49, - 0x92, 0x9a, 0xc3, 0xe9, 0x88, 0xc7, 0x38, 0x71, 0xc7, 0x3d, 0x52, 0x60, 0x66, 0x98, 0x1f, 0xf9, - 0x55, 0xf1, 0x6d, 0x4a, 0x50, 0x28, 0x1c, 0xc3, 0x5a, 0xa3, 0xd1, 0x96, 0xfd, 0xb2, 0x3c, 0xfe, - 0x1d, 0x00, 0x00, 0xff, 0xff, 0x6d, 0xb5, 0x17, 0x9a, 0x15, 0x05, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Accounts queries all accounts - Accounts(ctx context.Context, in *QueryAccountsRequest, opts ...grpc.CallOption) (*QueryAccountsResponse, error) - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Payments queries all payments - Payments(ctx context.Context, in *QueryPaymentsRequest, opts ...grpc.CallOption) (*QueryPaymentsResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Accounts(ctx context.Context, in *QueryAccountsRequest, opts ...grpc.CallOption) (*QueryAccountsResponse, error) { - out := new(QueryAccountsResponse) - err := c.cc.Invoke(ctx, "/akash.escrow.v1beta1.Query/Accounts", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Payments(ctx context.Context, in *QueryPaymentsRequest, opts ...grpc.CallOption) (*QueryPaymentsResponse, error) { - out := new(QueryPaymentsResponse) - err := c.cc.Invoke(ctx, "/akash.escrow.v1beta1.Query/Payments", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Accounts queries all accounts - Accounts(context.Context, *QueryAccountsRequest) (*QueryAccountsResponse, error) - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Payments queries all payments - Payments(context.Context, *QueryPaymentsRequest) (*QueryPaymentsResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. -type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Accounts(ctx context.Context, req *QueryAccountsRequest) (*QueryAccountsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Accounts not implemented") -} -func (*UnimplementedQueryServer) Payments(ctx context.Context, req *QueryPaymentsRequest) (*QueryPaymentsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Payments not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Accounts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryAccountsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Accounts(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.escrow.v1beta1.Query/Accounts", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Accounts(ctx, req.(*QueryAccountsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Payments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryPaymentsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Payments(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.escrow.v1beta1.Query/Payments", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Payments(ctx, req.(*QueryPaymentsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.escrow.v1beta1.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Accounts", - Handler: _Query_Accounts_Handler, - }, - { - MethodName: "Payments", - Handler: _Query_Payments_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/escrow/v1beta1/query.proto", -} - -func (m *QueryAccountsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryAccountsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - 
return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryAccountsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintQuery(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x22 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x1a - } - if len(m.Xid) > 0 { - i -= len(m.Xid) - copy(dAtA[i:], m.Xid) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Xid))) - i-- - dAtA[i] = 0x12 - } - if len(m.Scope) > 0 { - i -= len(m.Scope) - copy(dAtA[i:], m.Scope) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Scope))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryAccountsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryAccountsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryAccountsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Accounts) > 0 { - for iNdEx := len(m.Accounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Accounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryPaymentsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryPaymentsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryPaymentsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintQuery(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x2a - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x22 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0x1a - } - if len(m.Xid) > 0 { - i -= len(m.Xid) - copy(dAtA[i:], m.Xid) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Xid))) - i-- - dAtA[i] = 0x12 - } - if len(m.Scope) > 0 { - i -= len(m.Scope) - copy(dAtA[i:], m.Scope) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Scope))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m 
*QueryPaymentsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryPaymentsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryPaymentsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Payments) > 0 { - for iNdEx := len(m.Payments) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Payments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryAccountsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Scope) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Xid) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryAccountsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Accounts) > 0 { - for _, e := range m.Accounts { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryPaymentsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Scope) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Xid) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Id) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryPaymentsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Payments) > 0 { - for _, e := range m.Payments { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryAccountsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) 
- wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryAccountsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryAccountsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scope = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Xid", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Xid = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if 
m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryAccountsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryAccountsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryAccountsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Accounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Accounts = append(m.Accounts, Account{}) - if err := m.Accounts[len(m.Accounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryPaymentsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryPaymentsRequest: wiretype end 
group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryPaymentsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scope = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Xid", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Xid = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryPaymentsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryPaymentsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryPaymentsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Payments = append(m.Payments, Payment{}) - if err := m.Payments[len(m.Payments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/escrow/v1beta1/query.pb.gw.go b/go/node/escrow/v1beta1/query.pb.gw.go deleted file mode 100644 index 62fb3f75..00000000 --- a/go/node/escrow/v1beta1/query.pb.gw.go +++ /dev/null @@ -1,254 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/escrow/v1beta1/query.proto - -/* -Package v1beta1 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package v1beta1 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_Accounts_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Accounts_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAccountsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Accounts_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Accounts(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Accounts_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAccountsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Accounts_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Accounts(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Payments_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Payments_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryPaymentsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Payments_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Payments(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Payments_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryPaymentsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Payments_0); err != nil { - return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Payments(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. -func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_Accounts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Accounts_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Accounts_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Payments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Payments_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Payments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
-func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_Accounts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Accounts_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Accounts_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Payments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Payments_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Payments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Query_Accounts_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"akash", "escrow", "v1beta1", "types", "accounts", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Payments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"akash", "escrow", "v1beta1", "types", "payments", "list"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_Accounts_0 = runtime.ForwardResponseMessage - - forward_Query_Payments_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/escrow/v1beta1/types.pb.go b/go/node/escrow/v1beta1/types.pb.go deleted file mode 100644 index db23b2d4..00000000 --- a/go/node/escrow/v1beta1/types.pb.go +++ /dev/null @@ -1,1353 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/escrow/v1beta1/types.proto - -package v1beta1 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State stores state for an escrow account -type Account_State int32 - -const ( - // AccountStateInvalid is an invalid state - AccountStateInvalid Account_State = 0 - // AccountOpen is the state when an account is open - AccountOpen Account_State = 1 - // AccountClosed is the state when an account is closed - AccountClosed Account_State = 2 - // AccountOverdrawn is the state when an account is overdrawn - AccountOverdrawn Account_State = 3 -) - -var Account_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "closed", - 3: "overdrawn", -} - -var Account_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "closed": 2, - "overdrawn": 3, -} - -func (x Account_State) String() string { - return proto.EnumName(Account_State_name, int32(x)) -} - -func (Account_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_3d89eca75409f317, []int{1, 0} -} - -// Payment State -type Payment_State int32 - -const ( - // PaymentStateInvalid is the state when the payment is invalid - PaymentStateInvalid Payment_State = 0 - // PaymentStateOpen is the state when the payment is open - PaymentOpen Payment_State = 1 - // PaymentStateClosed is the state when the payment is closed - PaymentClosed Payment_State = 2 - // PaymentStateOverdrawn is the state when the payment is overdrawn - PaymentOverdrawn Payment_State = 3 -) - -var Payment_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "closed", - 3: "overdrawn", -} - -var Payment_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "closed": 2, - "overdrawn": 3, -} - -func (x Payment_State) String() string { - return proto.EnumName(Payment_State_name, int32(x)) -} - -func (Payment_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_3d89eca75409f317, []int{2, 0} -} - -// AccountID is the account identifier -type AccountID struct { - Scope string `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope" yaml:"scope"` - XID string `protobuf:"bytes,2,opt,name=xid,proto3" json:"xid" yaml:"xid"` -} - -func (m *AccountID) Reset() { *m = AccountID{} } -func (m *AccountID) String() string { return proto.CompactTextString(m) } -func (*AccountID) ProtoMessage() {} -func (*AccountID) Descriptor() ([]byte, []int) { - return fileDescriptor_3d89eca75409f317, []int{0} -} -func (m *AccountID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AccountID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AccountID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AccountID) XXX_Merge(src proto.Message) { - xxx_messageInfo_AccountID.Merge(m, src) -} -func (m *AccountID) XXX_Size() int { - return m.Size() -} -func (m *AccountID) XXX_DiscardUnknown() { - xxx_messageInfo_AccountID.DiscardUnknown(m) -} - -var xxx_messageInfo_AccountID proto.InternalMessageInfo - -func (m *AccountID) GetScope() string { - if m != nil { - return m.Scope - } - return "" -} - -func (m *AccountID) GetXID() string { - if m != nil { - return m.XID - } - return "" 
-} - -// Account stores state for an escrow account -type Account struct { - // unique identifier for this escrow account - ID AccountID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` - // bech32 encoded account address of the owner of this escrow account - Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner" yaml:"owner"` - // current state of this escrow account - State Account_State `protobuf:"varint,3,opt,name=state,proto3,enum=akash.escrow.v1beta1.Account_State" json:"state" yaml:"state"` - // unspent coins received from the owner's wallet - Balance types.Coin `protobuf:"bytes,4,opt,name=balance,proto3" json:"balance" yaml:"balance"` - // total coins spent by this account - Transferred types.Coin `protobuf:"bytes,5,opt,name=transferred,proto3" json:"transferred" yaml:"transferred"` - // block height at which this account was last settled - SettledAt int64 `protobuf:"varint,6,opt,name=settled_at,json=settledAt,proto3" json:"settledAt" yaml:"settledAt"` -} - -func (m *Account) Reset() { *m = Account{} } -func (m *Account) String() string { return proto.CompactTextString(m) } -func (*Account) ProtoMessage() {} -func (*Account) Descriptor() ([]byte, []int) { - return fileDescriptor_3d89eca75409f317, []int{1} -} -func (m *Account) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Account) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Account.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Account) XXX_Merge(src proto.Message) { - xxx_messageInfo_Account.Merge(m, src) -} -func (m *Account) XXX_Size() int { - return m.Size() -} -func (m *Account) XXX_DiscardUnknown() { - xxx_messageInfo_Account.DiscardUnknown(m) -} - -var xxx_messageInfo_Account proto.InternalMessageInfo - -func (m *Account) GetID() AccountID { - if m != nil { - return m.ID - } - return AccountID{} -} - -func (m *Account) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *Account) GetState() Account_State { - if m != nil { - return m.State - } - return AccountStateInvalid -} - -func (m *Account) GetBalance() types.Coin { - if m != nil { - return m.Balance - } - return types.Coin{} -} - -func (m *Account) GetTransferred() types.Coin { - if m != nil { - return m.Transferred - } - return types.Coin{} -} - -func (m *Account) GetSettledAt() int64 { - if m != nil { - return m.SettledAt - } - return 0 -} - -// Payment stores state for a payment -type Payment struct { - AccountID AccountID `protobuf:"bytes,1,opt,name=account_id,json=accountId,proto3" json:"accountID" yaml:"accountID"` - PaymentID string `protobuf:"bytes,2,opt,name=payment_id,json=paymentId,proto3" json:"paymentID" yaml:"paymentID"` - Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner" yaml:"owner"` - State Payment_State `protobuf:"varint,4,opt,name=state,proto3,enum=akash.escrow.v1beta1.Payment_State" json:"state" yaml:"state"` - Rate types.Coin `protobuf:"bytes,5,opt,name=rate,proto3" json:"rate" yaml:"rate"` - Balance types.Coin `protobuf:"bytes,6,opt,name=balance,proto3" json:"balance" yaml:"balance"` - Withdrawn types.Coin `protobuf:"bytes,7,opt,name=withdrawn,proto3" json:"withdrawn" yaml:"withdrawn"` -} - -func (m *Payment) Reset() { *m = Payment{} } -func (m *Payment) String() string { return proto.CompactTextString(m) } -func (*Payment) ProtoMessage() {} -func (*Payment) Descriptor() 
([]byte, []int) { - return fileDescriptor_3d89eca75409f317, []int{2} -} -func (m *Payment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Payment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Payment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Payment) XXX_Merge(src proto.Message) { - xxx_messageInfo_Payment.Merge(m, src) -} -func (m *Payment) XXX_Size() int { - return m.Size() -} -func (m *Payment) XXX_DiscardUnknown() { - xxx_messageInfo_Payment.DiscardUnknown(m) -} - -var xxx_messageInfo_Payment proto.InternalMessageInfo - -func (m *Payment) GetAccountID() AccountID { - if m != nil { - return m.AccountID - } - return AccountID{} -} - -func (m *Payment) GetPaymentID() string { - if m != nil { - return m.PaymentID - } - return "" -} - -func (m *Payment) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *Payment) GetState() Payment_State { - if m != nil { - return m.State - } - return PaymentStateInvalid -} - -func (m *Payment) GetRate() types.Coin { - if m != nil { - return m.Rate - } - return types.Coin{} -} - -func (m *Payment) GetBalance() types.Coin { - if m != nil { - return m.Balance - } - return types.Coin{} -} - -func (m *Payment) GetWithdrawn() types.Coin { - if m != nil { - return m.Withdrawn - } - return types.Coin{} -} - -func init() { - proto.RegisterEnum("akash.escrow.v1beta1.Account_State", Account_State_name, Account_State_value) - proto.RegisterEnum("akash.escrow.v1beta1.Payment_State", Payment_State_name, Payment_State_value) - proto.RegisterType((*AccountID)(nil), "akash.escrow.v1beta1.AccountID") - proto.RegisterType((*Account)(nil), "akash.escrow.v1beta1.Account") - proto.RegisterType((*Payment)(nil), "akash.escrow.v1beta1.Payment") -} - -func init() { proto.RegisterFile("akash/escrow/v1beta1/types.proto", fileDescriptor_3d89eca75409f317) } - -var fileDescriptor_3d89eca75409f317 = []byte{ - // 735 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4d, 0x6f, 0xd3, 0x4a, - 0x14, 0x8d, 0xf3, 0xa9, 0x4c, 0xde, 0xeb, 0xcb, 0xf3, 0xab, 0xf4, 0xd2, 0x40, 0x3d, 0xc6, 0x05, - 0xa9, 0x2c, 0xb0, 0xd5, 0xb2, 0xa2, 0xbb, 0xa6, 0xdd, 0x14, 0x89, 0x0f, 0xb9, 0x08, 0x21, 0x16, - 0x54, 0x13, 0x7b, 0x9a, 0x5a, 0x4d, 0x3c, 0x96, 0x3d, 0x6d, 0xda, 0x3d, 0x0b, 0x94, 0x15, 0x62, - 0xc5, 0x26, 0x12, 0x12, 0x7f, 0xa6, 0xcb, 0x2e, 0x59, 0x8d, 0x50, 0xba, 0xcb, 0x32, 0xbf, 0x00, - 0xcd, 0x87, 0xed, 0x80, 0xaa, 0xb4, 0x48, 0xac, 0x92, 0x7b, 0xee, 0xb9, 0xc7, 0xd7, 0x67, 0xce, - 0xc8, 0xc0, 0x44, 0xc7, 0x28, 0x39, 0x72, 0x70, 0xe2, 0xc5, 0x64, 0xe8, 0x9c, 0x6e, 0x74, 0x31, - 0x45, 0x1b, 0x0e, 0x3d, 0x8f, 0x70, 0x62, 0x47, 0x31, 0xa1, 0x44, 0x5f, 0x16, 0x0c, 0x5b, 0x32, - 0x6c, 0xc5, 0x68, 0x2f, 0xf7, 0x48, 0x8f, 0x08, 0x82, 0xc3, 0xff, 0x49, 0x6e, 0xdb, 0xf0, 0x48, - 0x32, 0x20, 0x89, 0xd3, 0x45, 0x09, 0xce, 0xc4, 0x3c, 0x12, 0x84, 0xb2, 0x6f, 0xf5, 0x41, 0x7d, - 0xdb, 0xf3, 0xc8, 0x49, 0x48, 0xf7, 0x76, 0x75, 0x07, 0x54, 0x12, 0x8f, 0x44, 0xb8, 0xa5, 0x99, - 0xda, 0x7a, 0xbd, 0xb3, 0x32, 0x65, 0x50, 0x02, 0x33, 0x06, 0xff, 0x3a, 0x47, 0x83, 0xfe, 0x96, - 0x25, 0x4a, 0xcb, 0x95, 0xb0, 0x6e, 0x83, 0xd2, 0x59, 0xe0, 0xb7, 0x8a, 0x82, 0x7e, 0x77, 0xc2, - 0x60, 0xe9, 0xcd, 0xde, 0xee, 0x94, 0x41, 0x8e, 0xce, 0x18, 0x04, 0x72, 0xe6, 0x2c, 0xf0, 0x2d, - 0x97, 0x43, 0xd6, 0xfb, 0x0a, 0xa8, 0xa9, 
0xc7, 0xe9, 0xcf, 0x41, 0x31, 0xf0, 0xc5, 0x93, 0x1a, - 0x9b, 0xd0, 0xbe, 0xee, 0x95, 0xec, 0x6c, 0xb3, 0xce, 0xea, 0x05, 0x83, 0x85, 0x09, 0x83, 0x45, - 0x21, 0x5f, 0x14, 0xea, 0x75, 0xa9, 0xce, 0xc5, 0x8b, 0x81, 0xcf, 0x97, 0x27, 0xc3, 0x10, 0xc7, - 0x6a, 0x1b, 0xb1, 0xbc, 0x00, 0xf2, 0xe5, 0x45, 0x69, 0xb9, 0x12, 0xd6, 0x5f, 0x81, 0x4a, 0x42, - 0x11, 0xc5, 0xad, 0x92, 0xa9, 0xad, 0x2f, 0x6d, 0xae, 0x2d, 0xdc, 0xc1, 0xde, 0xe7, 0x54, 0x65, - 0x09, 0xff, 0x3b, 0x67, 0x09, 0x2f, 0xb9, 0x25, 0xfc, 0x57, 0x7f, 0x0d, 0x6a, 0x5d, 0xd4, 0x47, - 0xa1, 0x87, 0x5b, 0x65, 0xf1, 0x6e, 0x2b, 0xb6, 0x3c, 0x02, 0x9b, 0x1f, 0x41, 0x26, 0xbb, 0x43, - 0x82, 0xb0, 0x73, 0x8f, 0xbf, 0xd5, 0x94, 0xc1, 0x74, 0x62, 0xc6, 0xe0, 0x92, 0xd4, 0x54, 0x80, - 0xe5, 0xa6, 0x2d, 0xfd, 0x10, 0x34, 0x68, 0x8c, 0xc2, 0xe4, 0x10, 0xc7, 0x31, 0xf6, 0x5b, 0x95, - 0x9b, 0xb4, 0x1f, 0x2a, 0xed, 0xf9, 0xa9, 0x19, 0x83, 0xba, 0xd4, 0x9f, 0x03, 0x2d, 0x77, 0x9e, - 0xa2, 0x3f, 0x03, 0x20, 0xc1, 0x94, 0xf6, 0xb1, 0x7f, 0x80, 0x68, 0xab, 0x6a, 0x6a, 0xeb, 0xa5, - 0x8e, 0x3d, 0x61, 0xb0, 0xbe, 0x2f, 0xd1, 0x6d, 0x3a, 0x65, 0xb0, 0x9e, 0xa4, 0xc5, 0x8c, 0xc1, - 0xa6, 0xb2, 0x21, 0x85, 0x2c, 0x37, 0x6f, 0x5b, 0x9f, 0x34, 0x50, 0x11, 0xd6, 0xe9, 0xf7, 0x41, - 0x2d, 0x08, 0x4f, 0x51, 0x3f, 0xf0, 0x9b, 0x85, 0xf6, 0xff, 0xa3, 0xb1, 0xf9, 0x9f, 0xb2, 0x56, - 0xb4, 0xf7, 0x64, 0x4b, 0x5f, 0x01, 0x65, 0x12, 0xe1, 0xb0, 0xa9, 0xb5, 0xff, 0x19, 0x8d, 0xcd, - 0x86, 0xa2, 0xbc, 0x88, 0x70, 0xa8, 0xaf, 0x82, 0xaa, 0xd7, 0x27, 0x09, 0xf6, 0x9b, 0xc5, 0xf6, - 0xbf, 0xa3, 0xb1, 0xf9, 0xb7, 0x6a, 0xee, 0x08, 0x50, 0x5f, 0x03, 0x75, 0x72, 0x8a, 0x63, 0x3f, - 0x46, 0xc3, 0xb0, 0x59, 0x6a, 0x2f, 0x8f, 0xc6, 0x66, 0x33, 0x1d, 0x4f, 0xf1, 0x76, 0xf9, 0xc3, - 0x57, 0xa3, 0x60, 0xcd, 0x2a, 0xa0, 0xf6, 0x12, 0x9d, 0x0f, 0x70, 0x48, 0xf5, 0x18, 0x00, 0x24, - 0x59, 0x07, 0xb7, 0x8f, 0xe3, 0xa6, 0x8a, 0x63, 0x7e, 0x77, 0xb8, 0x29, 0x28, 0x2d, 0x72, 0x53, - 0x32, 0xc8, 0x72, 0xb3, 0xb6, 0xf0, 0x38, 0x92, 0x8f, 0x3f, 0xc8, 0x6e, 0x8f, 0xf0, 0x58, 0x2d, - 0x25, 0xe5, 0xa2, 0xb4, 0xc8, 0xe5, 0x32, 0xc8, 0x72, 0xb3, 0xf6, 0x5c, 0xf2, 0x4b, 0xbf, 0x9b, - 0xfc, 0xf2, 0xa2, 0xe4, 0xab, 0x65, 0x6e, 0x9d, 0xfc, 0xa7, 0xa0, 0x1c, 0x73, 0xd1, 0x1b, 0xa3, - 0x79, 0x47, 0x45, 0x53, 0xd0, 0x67, 0x0c, 0x36, 0xa4, 0x5a, 0x2c, 0xc4, 0x04, 0x38, 0x7f, 0x8b, - 0xaa, 0x7f, 0xf2, 0x16, 0xbd, 0x03, 0xf5, 0x61, 0x40, 0x8f, 0x44, 0x18, 0x5a, 0xb5, 0x9b, 0x94, - 0x1f, 0x28, 0xe5, 0x7c, 0x26, 0x3f, 0x8a, 0x0c, 0xb2, 0xdc, 0xbc, 0xbd, 0x30, 0xee, 0xca, 0xcf, - 0x45, 0x71, 0x57, 0x94, 0xeb, 0xe3, 0xae, 0x9a, 0x0b, 0xe2, 0x9e, 0x8e, 0xff, 0x1c, 0xf7, 0xad, - 0xf2, 0xe7, 0x2f, 0x50, 0xeb, 0xec, 0x5f, 0x4c, 0x0c, 0xed, 0x72, 0x62, 0x68, 0xdf, 0x27, 0x86, - 0xf6, 0xf1, 0xca, 0x28, 0x5c, 0x5e, 0x19, 0x85, 0x6f, 0x57, 0x46, 0xe1, 0xed, 0x93, 0x5e, 0x40, - 0x8f, 0x4e, 0xba, 0xb6, 0x47, 0x06, 0x8e, 0x48, 0xc2, 0xa3, 0x10, 0xd3, 0x21, 0x89, 0x8f, 0x55, - 0x85, 0xa2, 0xc0, 0xe9, 0x11, 0x27, 0x24, 0x3e, 0xfe, 0xe5, 0xb3, 0xd4, 0xad, 0x8a, 0xaf, 0xc8, - 0xe3, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x88, 0xe1, 0xca, 0xb5, 0x06, 0x00, 0x00, -} - -func (m *AccountID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AccountID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AccountID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l 
- if len(m.XID) > 0 { - i -= len(m.XID) - copy(dAtA[i:], m.XID) - i = encodeVarintTypes(dAtA, i, uint64(len(m.XID))) - i-- - dAtA[i] = 0x12 - } - if len(m.Scope) > 0 { - i -= len(m.Scope) - copy(dAtA[i:], m.Scope) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Scope))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Account) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Account) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Account) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.SettledAt != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.SettledAt)) - i-- - dAtA[i] = 0x30 - } - { - size, err := m.Transferred.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - { - size, err := m.Balance.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if m.State != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x18 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x12 - } - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Payment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Payment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Payment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Withdrawn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - { - size, err := m.Balance.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - { - size, err := m.Rate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - if m.State != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x20 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x1a - } - if len(m.PaymentID) > 0 { - i -= len(m.PaymentID) - copy(dAtA[i:], m.PaymentID) - i = encodeVarintTypes(dAtA, i, uint64(len(m.PaymentID))) - i-- - dAtA[i] = 0x12 - } - { - size, err := m.AccountID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } 
- dAtA[offset] = uint8(v) - return base -} -func (m *AccountID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Scope) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.XID) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - -func (m *Account) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovTypes(uint64(l)) - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.State != 0 { - n += 1 + sovTypes(uint64(m.State)) - } - l = m.Balance.Size() - n += 1 + l + sovTypes(uint64(l)) - l = m.Transferred.Size() - n += 1 + l + sovTypes(uint64(l)) - if m.SettledAt != 0 { - n += 1 + sovTypes(uint64(m.SettledAt)) - } - return n -} - -func (m *Payment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.AccountID.Size() - n += 1 + l + sovTypes(uint64(l)) - l = len(m.PaymentID) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.State != 0 { - n += 1 + sovTypes(uint64(m.State)) - } - l = m.Rate.Size() - n += 1 + l + sovTypes(uint64(l)) - l = m.Balance.Size() - n += 1 + l + sovTypes(uint64(l)) - l = m.Withdrawn.Size() - n += 1 + l + sovTypes(uint64(l)) - return n -} - -func sovTypes(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTypes(x uint64) (n int) { - return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *AccountID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AccountID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AccountID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scope = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field XID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.XID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Account) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Account: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Account: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Account_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Balance", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Balance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Transferred", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Transferred.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SettledAt", wireType) - } - m.SettledAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SettledAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Payment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Payment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Payment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccountID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.AccountID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PaymentID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PaymentID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Payment_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Rate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Balance", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Balance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Withdrawn", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Withdrawn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTypes(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - 
return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTypes - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTypes - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTypes - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/escrow/v1beta1/validate.go b/go/node/escrow/v1beta1/validate.go deleted file mode 100644 index 5488f965..00000000 --- a/go/node/escrow/v1beta1/validate.go +++ /dev/null @@ -1,45 +0,0 @@ -package v1beta1 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" -) - -func (obj *AccountID) ValidateBasic() error { - if len(obj.Scope) == 0 { - return errors.Wrap(ErrInvalidAccountID, "empty scope") - } - if len(obj.XID) == 0 { - return errors.Wrap(ErrInvalidAccountID, "empty scope") - } - return nil -} - -func (obj *Account) ValidateBasic() error { - if err := obj.ID.ValidateBasic(); err != nil { - return errors.Wrapf(ErrInvalidAccount, "invalid account: id - %s", err) - } - if _, err := sdk.AccAddressFromBech32(obj.Owner); err != nil { - return errors.Wrapf(ErrInvalidAccount, "invalid account: owner - %s", err) - } - if obj.State == AccountStateInvalid { - return errors.Wrapf(ErrInvalidAccount, "invalid account: state - %s", obj.State) - } - return nil -} - -func (obj *Payment) ValidateBasic() error { - if err := obj.AccountID.ValidateBasic(); err != nil { - return errors.Wrapf(ErrInvalidPayment, "invalid account id: %s", err) - } - if len(obj.PaymentID) == 0 { - return errors.Wrap(ErrInvalidPayment, "empty payment id") - } - if obj.Rate.IsZero() { - return errors.Wrap(ErrInvalidPayment, "payment rate zero") - } - if obj.State == PaymentStateInvalid { - return errors.Wrap(ErrInvalidPayment, "invalid state") - } - return nil -} diff --git a/go/node/escrow/v1beta2/codec.go b/go/node/escrow/v1beta2/codec.go deleted file mode 100644 index a2ddd516..00000000 --- a/go/node/escrow/v1beta2/codec.go +++ /dev/null @@ -1,35 +0,0 @@ -package v1beta2 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/provider module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/provider and - // defined at the application level. 
- ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterLegacyAminoCodec register concrete types on codec -func RegisterLegacyAminoCodec(_ *codec.LegacyAmino) { -} - -// RegisterInterfaces registers the x/provider interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil)) -} diff --git a/go/node/escrow/v1beta2/error.go b/go/node/escrow/v1beta2/error.go deleted file mode 100644 index 0de3bf0c..00000000 --- a/go/node/escrow/v1beta2/error.go +++ /dev/null @@ -1,21 +0,0 @@ -package v1beta2 - -import ( - "errors" -) - -var ( - ErrAccountExists = errors.New("account exists") - ErrAccountClosed = errors.New("account closed") - ErrAccountNotFound = errors.New("account not found") - ErrAccountOverdrawn = errors.New("account overdrawn") - ErrInvalidDenomination = errors.New("invalid denomination") - ErrPaymentExists = errors.New("payment exists") - ErrPaymentClosed = errors.New("payment closed") - ErrPaymentNotFound = errors.New("payment not found") - ErrPaymentRateZero = errors.New("payment rate zero") - ErrInvalidPayment = errors.New("invalid payment") - ErrInvalidSettlement = errors.New("invalid settlement") - ErrInvalidAccountID = errors.New("invalid account ID") - ErrInvalidAccount = errors.New("invalid account") -) diff --git a/go/node/escrow/v1beta2/genesis.pb.go b/go/node/escrow/v1beta2/genesis.pb.go deleted file mode 100644 index dde023ad..00000000 --- a/go/node/escrow/v1beta2/genesis.pb.go +++ /dev/null @@ -1,399 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/escrow/v1beta2/genesis.proto - -package v1beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisState defines the basic genesis state used by escrow module -type GenesisState struct { - Accounts []Account `protobuf:"bytes,1,rep,name=accounts,proto3" json:"accounts" yaml:"accounts"` - Payments []FractionalPayment `protobuf:"bytes,2,rep,name=payments,proto3" json:"payments" yaml:"payments"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_707a683aff806dd0, []int{0} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetAccounts() []Account { - if m != nil { - return m.Accounts - } - return nil -} - -func (m *GenesisState) GetPayments() []FractionalPayment { - if m != nil { - return m.Payments - } - return nil -} - -func init() { - proto.RegisterType((*GenesisState)(nil), "akash.escrow.v1beta2.GenesisState") -} - -func init() { - proto.RegisterFile("akash/escrow/v1beta2/genesis.proto", fileDescriptor_707a683aff806dd0) -} - -var fileDescriptor_707a683aff806dd0 = []byte{ - // 275 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4a, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x2d, 0x4e, 0x2e, 0xca, 0x2f, 0xd7, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, - 0xd2, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, - 0x01, 0xab, 0xd1, 0x83, 0xa8, 0xd1, 0x83, 0xaa, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, - 0xd0, 0x07, 0xb1, 0x20, 0x6a, 0xa5, 0x14, 0xb0, 0x9a, 0x57, 0x52, 0x59, 0x90, 0x0a, 0x35, 0x4d, - 0xe9, 0x06, 0x23, 0x17, 0x8f, 0x3b, 0xc4, 0xfc, 0xe0, 0x92, 0xc4, 0x92, 0x54, 0xa1, 0x38, 0x2e, - 0x8e, 0xc4, 0xe4, 0xe4, 0xfc, 0xd2, 0xbc, 0x92, 0x62, 0x09, 0x46, 0x05, 0x66, 0x0d, 0x6e, 0x23, - 0x59, 0x3d, 0x6c, 0x36, 0xea, 0x39, 0x42, 0x54, 0x39, 0x29, 0x9f, 0xb8, 0x27, 0xcf, 0xf0, 0xea, - 0x9e, 0x3c, 0x5c, 0xdb, 0xa7, 0x7b, 0xf2, 0xfc, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, 0x30, 0x11, - 0xa5, 0x20, 0xb8, 0xa4, 0x50, 0x06, 0x17, 0x47, 0x41, 0x62, 0x65, 0x6e, 0x2a, 0xc8, 0x7c, 0x26, - 0xb0, 0xf9, 0xea, 0xd8, 0xcd, 0x77, 0x2b, 0x4a, 0x4c, 0x2e, 0xc9, 0xcc, 0xcf, 0x4b, 0xcc, 0x09, - 0x80, 0xa8, 0x47, 0xd8, 0x04, 0x33, 0x00, 0x61, 0x13, 0x4c, 0x44, 0x29, 0x08, 0x2e, 0xe9, 0x14, - 0x7c, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, - 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0x96, 0xe9, 0x99, 0x25, 0x19, - 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x60, 0xbb, 0x75, 0xf3, 0x52, 0x4b, 0xca, 0xf3, 0x8b, - 0xb2, 0xa1, 0xbc, 0xc4, 0x82, 0x4c, 0xfd, 0xf4, 0x7c, 0xfd, 0xbc, 0xfc, 0x94, 0x54, 0xb4, 0xb0, - 0x4b, 0x62, 0x03, 0x07, 0x9b, 0x31, 0x20, 0x00, 0x00, 0xff, 
0xff, 0x57, 0x84, 0x55, 0x06, 0xaa, - 0x01, 0x00, 0x00, -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Payments) > 0 { - for iNdEx := len(m.Payments) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Payments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Accounts) > 0 { - for iNdEx := len(m.Accounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Accounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Accounts) > 0 { - for _, e := range m.Accounts { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - if len(m.Payments) > 0 { - for _, e := range m.Payments { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Accounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Accounts = append(m.Accounts, Account{}) - if err := m.Accounts[len(m.Accounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Payments = append(m.Payments, FractionalPayment{}) - if err := m.Payments[len(m.Payments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/escrow/v1beta2/key.go b/go/node/escrow/v1beta2/key.go deleted file mode 100644 index c8662ad5..00000000 --- a/go/node/escrow/v1beta2/key.go +++ /dev/null @@ -1,20 +0,0 @@ -package v1beta2 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "escrow" - - // StoreKey is the store key string for deployment - StoreKey = ModuleName - - // RouterKey is the message route for deployment - RouterKey = ModuleName -) - -func AccountKeyPrefix() []byte { - return []byte{0x01} -} - -func PaymentKeyPrefix() []byte { - return []byte{0x02} -} diff --git a/go/node/escrow/v1beta2/query.pb.go b/go/node/escrow/v1beta2/query.pb.go deleted file mode 100644 index 4aef731a..00000000 --- a/go/node/escrow/v1beta2/query.pb.go +++ /dev/null @@ -1,1605 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: akash/escrow/v1beta2/query.proto - -package v1beta2 - -import ( - context "context" - fmt "fmt" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryAccountRequest is request type for the Query/Account RPC method -type QueryAccountsRequest struct { - Scope string `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"` - Xid string `protobuf:"bytes,2,opt,name=xid,proto3" json:"xid,omitempty"` - Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` - State string `protobuf:"bytes,4,opt,name=state,proto3" json:"state,omitempty"` - Pagination *query.PageRequest `protobuf:"bytes,5,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryAccountsRequest) Reset() { *m = QueryAccountsRequest{} } -func (m *QueryAccountsRequest) String() string { return proto.CompactTextString(m) } -func (*QueryAccountsRequest) ProtoMessage() {} -func (*QueryAccountsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2bfcec870f18514a, []int{0} -} -func (m *QueryAccountsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryAccountsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryAccountsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryAccountsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryAccountsRequest.Merge(m, src) -} -func (m *QueryAccountsRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryAccountsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryAccountsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryAccountsRequest proto.InternalMessageInfo - -func (m *QueryAccountsRequest) GetScope() string { - if m != nil { - return m.Scope - } - return "" -} - -func (m *QueryAccountsRequest) GetXid() string { - if m != nil { - return m.Xid - } - return "" -} - -func (m *QueryAccountsRequest) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *QueryAccountsRequest) GetState() string { - if m != nil { - return m.State - } - return "" -} - -func (m *QueryAccountsRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -type QueryAccountsResponse struct { - Accounts []Account `protobuf:"bytes,1,rep,name=accounts,proto3" json:"accounts"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryAccountsResponse) Reset() { *m = 
QueryAccountsResponse{} } -func (m *QueryAccountsResponse) String() string { return proto.CompactTextString(m) } -func (*QueryAccountsResponse) ProtoMessage() {} -func (*QueryAccountsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2bfcec870f18514a, []int{1} -} -func (m *QueryAccountsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryAccountsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryAccountsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryAccountsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryAccountsResponse.Merge(m, src) -} -func (m *QueryAccountsResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryAccountsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryAccountsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryAccountsResponse proto.InternalMessageInfo - -func (m *QueryAccountsResponse) GetAccounts() []Account { - if m != nil { - return m.Accounts - } - return nil -} - -func (m *QueryAccountsResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryPaymentRequest is request type for the Query/Payment RPC method -type QueryPaymentsRequest struct { - Scope string `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"` - Xid string `protobuf:"bytes,2,opt,name=xid,proto3" json:"xid,omitempty"` - Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"` - Owner string `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` - State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"` - Pagination *query.PageRequest `protobuf:"bytes,6,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryPaymentsRequest) Reset() { *m = QueryPaymentsRequest{} } -func (m *QueryPaymentsRequest) String() string { return proto.CompactTextString(m) } -func (*QueryPaymentsRequest) ProtoMessage() {} -func (*QueryPaymentsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2bfcec870f18514a, []int{2} -} -func (m *QueryPaymentsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryPaymentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryPaymentsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryPaymentsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryPaymentsRequest.Merge(m, src) -} -func (m *QueryPaymentsRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryPaymentsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryPaymentsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryPaymentsRequest proto.InternalMessageInfo - -func (m *QueryPaymentsRequest) GetScope() string { - if m != nil { - return m.Scope - } - return "" -} - -func (m *QueryPaymentsRequest) GetXid() string { - if m != nil { - return m.Xid - } - return "" -} - -func (m *QueryPaymentsRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *QueryPaymentsRequest) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *QueryPaymentsRequest) GetState() string { - if m != nil { - return m.State - } - 
return "" -} - -func (m *QueryPaymentsRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -type QueryPaymentsResponse struct { - Payments []FractionalPayment `protobuf:"bytes,1,rep,name=payments,proto3" json:"payments"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryPaymentsResponse) Reset() { *m = QueryPaymentsResponse{} } -func (m *QueryPaymentsResponse) String() string { return proto.CompactTextString(m) } -func (*QueryPaymentsResponse) ProtoMessage() {} -func (*QueryPaymentsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2bfcec870f18514a, []int{3} -} -func (m *QueryPaymentsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryPaymentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryPaymentsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryPaymentsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryPaymentsResponse.Merge(m, src) -} -func (m *QueryPaymentsResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryPaymentsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryPaymentsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryPaymentsResponse proto.InternalMessageInfo - -func (m *QueryPaymentsResponse) GetPayments() []FractionalPayment { - if m != nil { - return m.Payments - } - return nil -} - -func (m *QueryPaymentsResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -func init() { - proto.RegisterType((*QueryAccountsRequest)(nil), "akash.escrow.v1beta2.QueryAccountsRequest") - proto.RegisterType((*QueryAccountsResponse)(nil), "akash.escrow.v1beta2.QueryAccountsResponse") - proto.RegisterType((*QueryPaymentsRequest)(nil), "akash.escrow.v1beta2.QueryPaymentsRequest") - proto.RegisterType((*QueryPaymentsResponse)(nil), "akash.escrow.v1beta2.QueryPaymentsResponse") -} - -func init() { proto.RegisterFile("akash/escrow/v1beta2/query.proto", fileDescriptor_2bfcec870f18514a) } - -var fileDescriptor_2bfcec870f18514a = []byte{ - // 511 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xbf, 0x6f, 0x13, 0x31, - 0x14, 0xc7, 0xe3, 0x4b, 0x52, 0x05, 0x57, 0x42, 0xc8, 0x0a, 0xd2, 0x29, 0x82, 0x23, 0xca, 0x40, - 0x43, 0x2b, 0x6c, 0x25, 0x4c, 0x4c, 0x88, 0x0e, 0x45, 0x6c, 0x25, 0x6c, 0x6c, 0xce, 0xc5, 0xba, - 0x9e, 0x9a, 0xf8, 0x5d, 0xcf, 0x0e, 0x21, 0x2b, 0x7f, 0x01, 0x12, 0x12, 0x3b, 0x33, 0x1b, 0x1b, - 0x2b, 0x53, 0xc7, 0x4a, 0x2c, 0x4c, 0x08, 0x25, 0xfc, 0x21, 0xe8, 0xfc, 0x83, 0xb6, 0xa7, 0x6b, - 0xa9, 0xaa, 0x6e, 0x7e, 0xce, 0xf7, 0xbd, 0xf7, 0x7d, 0x9f, 0xbc, 0x33, 0xee, 0xf2, 0x43, 0xae, - 0x0e, 0x98, 0x50, 0x71, 0x0e, 0x0b, 0xf6, 0x76, 0x30, 0x16, 0x9a, 0x0f, 0xd9, 0xd1, 0x5c, 0xe4, - 0x4b, 0x9a, 0xe5, 0xa0, 0x81, 0xb4, 0x8d, 0x82, 0x5a, 0x05, 0x75, 0x8a, 0x4e, 0x3b, 0x81, 0x04, - 0x8c, 0x80, 0x15, 0x27, 0xab, 0xed, 0xdc, 0x4b, 0x00, 0x92, 0xa9, 0x60, 0x3c, 0x4b, 0x19, 0x97, - 0x12, 0x34, 0xd7, 0x29, 0x48, 0xe5, 0x7e, 0xdd, 0x8e, 0x41, 0xcd, 0x40, 0xb1, 0x31, 0x57, 0xc2, - 0xb6, 0x70, 0x0d, 0x07, 0x2c, 0xe3, 0x49, 0x2a, 0x8d, 0xd8, 0x69, 0xab, 0x7d, 0xe9, 0x65, 0x26, - 0x5c, 0xb5, 
0xde, 0x57, 0x84, 0xdb, 0xaf, 0x8a, 0x22, 0xcf, 0xe3, 0x18, 0xe6, 0x52, 0xab, 0x91, - 0x38, 0x9a, 0x0b, 0xa5, 0x49, 0x1b, 0x37, 0x55, 0x0c, 0x99, 0x08, 0x51, 0x17, 0xf5, 0x6f, 0x8d, - 0x6c, 0x40, 0xee, 0xe0, 0xfa, 0xbb, 0x74, 0x12, 0x06, 0xe6, 0xae, 0x38, 0x16, 0x3a, 0x58, 0x48, - 0x91, 0x87, 0x75, 0xab, 0x33, 0x81, 0xc9, 0xd6, 0x5c, 0x8b, 0xb0, 0xe1, 0xb2, 0x8b, 0x80, 0xec, - 0x61, 0x7c, 0x6a, 0x31, 0x6c, 0x76, 0x51, 0x7f, 0x73, 0xf8, 0x90, 0xda, 0x79, 0x68, 0x31, 0x0f, - 0xb5, 0xc8, 0xdc, 0x3c, 0x74, 0x9f, 0x27, 0xc2, 0xf9, 0x19, 0x9d, 0xc9, 0xec, 0x7d, 0x46, 0xf8, - 0x6e, 0xc9, 0xb4, 0xca, 0x40, 0x2a, 0x41, 0x9e, 0xe1, 0x16, 0x77, 0x77, 0x21, 0xea, 0xd6, 0xfb, - 0x9b, 0xc3, 0xfb, 0xb4, 0x8a, 0x3c, 0x75, 0x99, 0xbb, 0x8d, 0xe3, 0x5f, 0x0f, 0x6a, 0xa3, 0x7f, - 0x49, 0xe4, 0xc5, 0x39, 0x8b, 0x81, 0xb1, 0xb8, 0xf5, 0x5f, 0x8b, 0xb6, 0xfb, 0x39, 0x8f, 0xdf, - 0x3d, 0xd8, 0x7d, 0xbe, 0x9c, 0x89, 0x6b, 0x80, 0xbd, 0x8d, 0x83, 0x74, 0xe2, 0xa8, 0x06, 0x67, - 0x41, 0x37, 0x2a, 0x41, 0x37, 0x2f, 0x06, 0xbd, 0x71, 0x6d, 0xd0, 0x5f, 0x3c, 0xe8, 0xd3, 0x21, - 0x1c, 0xe8, 0x97, 0xb8, 0x95, 0xb9, 0x3b, 0x07, 0x7a, 0xab, 0x1a, 0xf4, 0x5e, 0xce, 0xe3, 0xa2, - 0x16, 0x9f, 0xba, 0x1a, 0x1e, 0xb9, 0x4f, 0xbf, 0x31, 0xe4, 0xc3, 0x6f, 0x01, 0x6e, 0x1a, 0xb7, - 0xe4, 0x13, 0xc2, 0x2d, 0xbf, 0x1b, 0x64, 0xbb, 0xda, 0x58, 0xd5, 0xd6, 0x77, 0x76, 0xae, 0xa4, - 0xb5, 0xbd, 0x7b, 0x83, 0xf7, 0x3f, 0xfe, 0x7c, 0x0c, 0x76, 0xc8, 0x23, 0x76, 0xf1, 0x67, 0xc6, - 0xfc, 0x66, 0xb1, 0x69, 0xaa, 0xb4, 0x31, 0xe6, 0x59, 0x5e, 0x6a, 0xac, 0xb4, 0x35, 0x97, 0x1a, - 0x2b, 0xff, 0x39, 0x57, 0x33, 0xe6, 0xf9, 0x1b, 0x63, 0xbb, 0xaf, 0x8f, 0x57, 0x11, 0x3a, 0x59, - 0x45, 0xe8, 0xf7, 0x2a, 0x42, 0x1f, 0xd6, 0x51, 0xed, 0x64, 0x1d, 0xd5, 0x7e, 0xae, 0xa3, 0xda, - 0x9b, 0xa7, 0x49, 0xaa, 0x0f, 0xe6, 0x63, 0x1a, 0xc3, 0xcc, 0x96, 0x7b, 0x2c, 0x85, 0x5e, 0x40, - 0x7e, 0xe8, 0xa2, 0xe2, 0x95, 0x4a, 0x80, 0x49, 0x98, 0x88, 0x52, 0xa3, 0xf1, 0x86, 0x79, 0x63, - 0x9e, 0xfc, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x52, 0x61, 0x53, 0xc3, 0x1f, 0x05, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Accounts queries all accounts - Accounts(ctx context.Context, in *QueryAccountsRequest, opts ...grpc.CallOption) (*QueryAccountsResponse, error) - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Payments queries all payments - Payments(ctx context.Context, in *QueryPaymentsRequest, opts ...grpc.CallOption) (*QueryPaymentsResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Accounts(ctx context.Context, in *QueryAccountsRequest, opts ...grpc.CallOption) (*QueryAccountsResponse, error) { - out := new(QueryAccountsResponse) - err := c.cc.Invoke(ctx, "/akash.escrow.v1beta2.Query/Accounts", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Payments(ctx context.Context, in *QueryPaymentsRequest, opts ...grpc.CallOption) (*QueryPaymentsResponse, error) { - out := new(QueryPaymentsResponse) - err := c.cc.Invoke(ctx, "/akash.escrow.v1beta2.Query/Payments", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Accounts queries all accounts - Accounts(context.Context, *QueryAccountsRequest) (*QueryAccountsResponse, error) - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Payments queries all payments - Payments(context.Context, *QueryPaymentsRequest) (*QueryPaymentsResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. -type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Accounts(ctx context.Context, req *QueryAccountsRequest) (*QueryAccountsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Accounts not implemented") -} -func (*UnimplementedQueryServer) Payments(ctx context.Context, req *QueryPaymentsRequest) (*QueryPaymentsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Payments not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Accounts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryAccountsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Accounts(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.escrow.v1beta2.Query/Accounts", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Accounts(ctx, req.(*QueryAccountsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Payments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryPaymentsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Payments(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.escrow.v1beta2.Query/Payments", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Payments(ctx, req.(*QueryPaymentsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.escrow.v1beta2.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Accounts", - Handler: _Query_Accounts_Handler, - }, - { - MethodName: "Payments", - Handler: _Query_Payments_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/escrow/v1beta2/query.proto", -} - -func (m *QueryAccountsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryAccountsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - 
return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryAccountsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintQuery(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x22 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x1a - } - if len(m.Xid) > 0 { - i -= len(m.Xid) - copy(dAtA[i:], m.Xid) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Xid))) - i-- - dAtA[i] = 0x12 - } - if len(m.Scope) > 0 { - i -= len(m.Scope) - copy(dAtA[i:], m.Scope) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Scope))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryAccountsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryAccountsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryAccountsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Accounts) > 0 { - for iNdEx := len(m.Accounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Accounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryPaymentsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryPaymentsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryPaymentsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintQuery(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x2a - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x22 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0x1a - } - if len(m.Xid) > 0 { - i -= len(m.Xid) - copy(dAtA[i:], m.Xid) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Xid))) - i-- - dAtA[i] = 0x12 - } - if len(m.Scope) > 0 { - i -= len(m.Scope) - copy(dAtA[i:], m.Scope) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Scope))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m 
*QueryPaymentsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryPaymentsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryPaymentsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Payments) > 0 { - for iNdEx := len(m.Payments) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Payments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryAccountsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Scope) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Xid) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryAccountsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Accounts) > 0 { - for _, e := range m.Accounts { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryPaymentsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Scope) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Xid) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Id) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryPaymentsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Payments) > 0 { - for _, e := range m.Payments { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryAccountsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) 
- wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryAccountsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryAccountsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scope = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Xid", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Xid = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if 
m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryAccountsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryAccountsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryAccountsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Accounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Accounts = append(m.Accounts, Account{}) - if err := m.Accounts[len(m.Accounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryPaymentsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryPaymentsRequest: wiretype end 
group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryPaymentsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scope = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Xid", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Xid = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryPaymentsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryPaymentsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryPaymentsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Payments = append(m.Payments, FractionalPayment{}) - if err := m.Payments[len(m.Payments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/escrow/v1beta2/query.pb.gw.go b/go/node/escrow/v1beta2/query.pb.gw.go deleted file mode 100644 index a22f74ef..00000000 --- a/go/node/escrow/v1beta2/query.pb.gw.go +++ /dev/null @@ -1,254 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/escrow/v1beta2/query.proto - -/* -Package v1beta2 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package v1beta2 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_Accounts_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Accounts_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAccountsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Accounts_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Accounts(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Accounts_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAccountsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Accounts_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Accounts(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Payments_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Payments_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryPaymentsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Payments_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Payments(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Payments_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryPaymentsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Payments_0); err != nil { - return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Payments(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. -func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_Accounts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Accounts_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Accounts_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Payments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Payments_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Payments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". 
-func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_Accounts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Accounts_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Accounts_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Payments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Payments_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Payments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Query_Accounts_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"akash", "escrow", "v1beta2", "types", "accounts", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Payments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"akash", "escrow", "v1beta2", "types", "payments", "list"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_Accounts_0 = runtime.ForwardResponseMessage - - forward_Query_Payments_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/escrow/v1beta2/types.pb.go b/go/node/escrow/v1beta2/types.pb.go deleted file mode 100644 index 8f618371..00000000 --- a/go/node/escrow/v1beta2/types.pb.go +++ /dev/null @@ -1,1467 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/escrow/v1beta2/types.proto - -package v1beta2 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State stores state for an escrow account -type Account_State int32 - -const ( - // AccountStateInvalid is an invalid state - AccountStateInvalid Account_State = 0 - // AccountOpen is the state when an account is open - AccountOpen Account_State = 1 - // AccountClosed is the state when an account is closed - AccountClosed Account_State = 2 - // AccountOverdrawn is the state when an account is overdrawn - AccountOverdrawn Account_State = 3 -) - -var Account_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "closed", - 3: "overdrawn", -} - -var Account_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "closed": 2, - "overdrawn": 3, -} - -func (x Account_State) String() string { - return proto.EnumName(Account_State_name, int32(x)) -} - -func (Account_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5b25ee303c78038d, []int{1, 0} -} - -// Payment State -type FractionalPayment_State int32 - -const ( - // PaymentStateInvalid is the state when the payment is invalid - PaymentStateInvalid FractionalPayment_State = 0 - // PaymentStateOpen is the state when the payment is open - PaymentOpen FractionalPayment_State = 1 - // PaymentStateClosed is the state when the payment is closed - PaymentClosed FractionalPayment_State = 2 - // PaymentStateOverdrawn is the state when the payment is overdrawn - PaymentOverdrawn FractionalPayment_State = 3 -) - -var FractionalPayment_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "closed", - 3: "overdrawn", -} - -var FractionalPayment_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "closed": 2, - "overdrawn": 3, -} - -func (x FractionalPayment_State) String() string { - return proto.EnumName(FractionalPayment_State_name, int32(x)) -} - -func (FractionalPayment_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_5b25ee303c78038d, []int{2, 0} -} - -// AccountID is the account identifier -type AccountID struct { - Scope string `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope" yaml:"scope"` - XID string `protobuf:"bytes,2,opt,name=xid,proto3" json:"xid" yaml:"xid"` -} - -func (m *AccountID) Reset() { *m = AccountID{} } -func (m *AccountID) String() string { return proto.CompactTextString(m) } -func (*AccountID) ProtoMessage() {} -func (*AccountID) Descriptor() ([]byte, []int) { - return fileDescriptor_5b25ee303c78038d, []int{0} -} -func (m *AccountID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AccountID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AccountID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AccountID) XXX_Merge(src proto.Message) { - xxx_messageInfo_AccountID.Merge(m, src) -} -func (m *AccountID) XXX_Size() int { - return m.Size() -} -func (m *AccountID) XXX_DiscardUnknown() { - xxx_messageInfo_AccountID.DiscardUnknown(m) -} - -var xxx_messageInfo_AccountID proto.InternalMessageInfo - -func (m *AccountID) GetScope() string { - if m != nil { - return m.Scope - } - 
return "" -} - -func (m *AccountID) GetXID() string { - if m != nil { - return m.XID - } - return "" -} - -// Account stores state for an escrow account -type Account struct { - // unique identifier for this escrow account - ID AccountID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` - // bech32 encoded account address of the owner of this escrow account - Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner" yaml:"owner"` - // current state of this escrow account - State Account_State `protobuf:"varint,3,opt,name=state,proto3,enum=akash.escrow.v1beta2.Account_State" json:"state" yaml:"state"` - // unspent coins received from the owner's wallet - Balance types.DecCoin `protobuf:"bytes,4,opt,name=balance,proto3" json:"balance" yaml:"balance"` - // total coins spent by this account - Transferred types.DecCoin `protobuf:"bytes,5,opt,name=transferred,proto3" json:"transferred" yaml:"transferred"` - // block height at which this account was last settled - SettledAt int64 `protobuf:"varint,6,opt,name=settled_at,json=settledAt,proto3" json:"settledAt" yaml:"settledAt"` - // bech32 encoded account address of the depositor. - // If depositor is same as the owner, then any incoming coins are added to the Balance. - // If depositor isn't same as the owner, then any incoming coins are added to the Funds. - Depositor string `protobuf:"bytes,7,opt,name=depositor,proto3" json:"depositor" yaml:"depositor"` - // Funds are unspent coins received from the (non-Owner) Depositor's wallet. - // If there are any funds, they should be spent before spending the Balance. - Funds types.DecCoin `protobuf:"bytes,8,opt,name=funds,proto3" json:"funds" yaml:"funds"` -} - -func (m *Account) Reset() { *m = Account{} } -func (m *Account) String() string { return proto.CompactTextString(m) } -func (*Account) ProtoMessage() {} -func (*Account) Descriptor() ([]byte, []int) { - return fileDescriptor_5b25ee303c78038d, []int{1} -} -func (m *Account) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Account) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Account.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Account) XXX_Merge(src proto.Message) { - xxx_messageInfo_Account.Merge(m, src) -} -func (m *Account) XXX_Size() int { - return m.Size() -} -func (m *Account) XXX_DiscardUnknown() { - xxx_messageInfo_Account.DiscardUnknown(m) -} - -var xxx_messageInfo_Account proto.InternalMessageInfo - -func (m *Account) GetID() AccountID { - if m != nil { - return m.ID - } - return AccountID{} -} - -func (m *Account) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *Account) GetState() Account_State { - if m != nil { - return m.State - } - return AccountStateInvalid -} - -func (m *Account) GetBalance() types.DecCoin { - if m != nil { - return m.Balance - } - return types.DecCoin{} -} - -func (m *Account) GetTransferred() types.DecCoin { - if m != nil { - return m.Transferred - } - return types.DecCoin{} -} - -func (m *Account) GetSettledAt() int64 { - if m != nil { - return m.SettledAt - } - return 0 -} - -func (m *Account) GetDepositor() string { - if m != nil { - return m.Depositor - } - return "" -} - -func (m *Account) GetFunds() types.DecCoin { - if m != nil { - return m.Funds - } - return types.DecCoin{} -} - -// Payment stores state for a payment -type FractionalPayment 
struct { - AccountID AccountID `protobuf:"bytes,1,opt,name=account_id,json=accountId,proto3" json:"accountID" yaml:"accountID"` - PaymentID string `protobuf:"bytes,2,opt,name=payment_id,json=paymentId,proto3" json:"paymentID" yaml:"paymentID"` - Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner" yaml:"owner"` - State FractionalPayment_State `protobuf:"varint,4,opt,name=state,proto3,enum=akash.escrow.v1beta2.FractionalPayment_State" json:"state" yaml:"state"` - Rate types.DecCoin `protobuf:"bytes,5,opt,name=rate,proto3" json:"rate" yaml:"rate"` - Balance types.DecCoin `protobuf:"bytes,6,opt,name=balance,proto3" json:"balance" yaml:"balance"` - Withdrawn types.Coin `protobuf:"bytes,7,opt,name=withdrawn,proto3" json:"withdrawn" yaml:"withdrawn"` -} - -func (m *FractionalPayment) Reset() { *m = FractionalPayment{} } -func (m *FractionalPayment) String() string { return proto.CompactTextString(m) } -func (*FractionalPayment) ProtoMessage() {} -func (*FractionalPayment) Descriptor() ([]byte, []int) { - return fileDescriptor_5b25ee303c78038d, []int{2} -} -func (m *FractionalPayment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FractionalPayment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_FractionalPayment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *FractionalPayment) XXX_Merge(src proto.Message) { - xxx_messageInfo_FractionalPayment.Merge(m, src) -} -func (m *FractionalPayment) XXX_Size() int { - return m.Size() -} -func (m *FractionalPayment) XXX_DiscardUnknown() { - xxx_messageInfo_FractionalPayment.DiscardUnknown(m) -} - -var xxx_messageInfo_FractionalPayment proto.InternalMessageInfo - -func (m *FractionalPayment) GetAccountID() AccountID { - if m != nil { - return m.AccountID - } - return AccountID{} -} - -func (m *FractionalPayment) GetPaymentID() string { - if m != nil { - return m.PaymentID - } - return "" -} - -func (m *FractionalPayment) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *FractionalPayment) GetState() FractionalPayment_State { - if m != nil { - return m.State - } - return PaymentStateInvalid -} - -func (m *FractionalPayment) GetRate() types.DecCoin { - if m != nil { - return m.Rate - } - return types.DecCoin{} -} - -func (m *FractionalPayment) GetBalance() types.DecCoin { - if m != nil { - return m.Balance - } - return types.DecCoin{} -} - -func (m *FractionalPayment) GetWithdrawn() types.Coin { - if m != nil { - return m.Withdrawn - } - return types.Coin{} -} - -func init() { - proto.RegisterEnum("akash.escrow.v1beta2.Account_State", Account_State_name, Account_State_value) - proto.RegisterEnum("akash.escrow.v1beta2.FractionalPayment_State", FractionalPayment_State_name, FractionalPayment_State_value) - proto.RegisterType((*AccountID)(nil), "akash.escrow.v1beta2.AccountID") - proto.RegisterType((*Account)(nil), "akash.escrow.v1beta2.Account") - proto.RegisterType((*FractionalPayment)(nil), "akash.escrow.v1beta2.FractionalPayment") -} - -func init() { proto.RegisterFile("akash/escrow/v1beta2/types.proto", fileDescriptor_5b25ee303c78038d) } - -var fileDescriptor_5b25ee303c78038d = []byte{ - // 810 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4d, 0x6f, 0xeb, 0x44, - 0x14, 0x8d, 0xf3, 0x89, 0x27, 0xf0, 0xc8, 0x1b, 0x2a, 0xe1, 0x86, 0x57, 0x8f, 
0xdf, 0x3c, 0x90, - 0xca, 0xe2, 0xd9, 0x6a, 0x58, 0xf1, 0x36, 0xa8, 0x69, 0x85, 0xd4, 0x45, 0xf9, 0x70, 0x59, 0x00, - 0x8b, 0x56, 0x13, 0x7b, 0x9a, 0x5a, 0x4d, 0x3c, 0x96, 0x3d, 0x6d, 0xda, 0x7f, 0x80, 0x22, 0x16, - 0x88, 0x15, 0x9b, 0x20, 0x24, 0xfe, 0x4c, 0x97, 0x5d, 0xb2, 0xb2, 0x50, 0xba, 0xcb, 0x32, 0xbf, - 0x00, 0x79, 0x66, 0x6c, 0x07, 0x88, 0xa2, 0x2c, 0xde, 0x2a, 0xb9, 0xe7, 0x9e, 0x7b, 0xe6, 0xce, - 0xbd, 0x73, 0x64, 0x60, 0x91, 0x6b, 0x92, 0x5c, 0x39, 0x34, 0xf1, 0x62, 0x36, 0x71, 0x6e, 0x0f, - 0x06, 0x94, 0x93, 0x9e, 0xc3, 0xef, 0x23, 0x9a, 0xd8, 0x51, 0xcc, 0x38, 0x83, 0x3b, 0x82, 0x61, - 0x4b, 0x86, 0xad, 0x18, 0xdd, 0x9d, 0x21, 0x1b, 0x32, 0x41, 0x70, 0xb2, 0x7f, 0x92, 0xdb, 0x35, - 0x3d, 0x96, 0x8c, 0x59, 0xe2, 0x0c, 0x48, 0x42, 0x95, 0xd8, 0x81, 0xe3, 0xb1, 0x20, 0x94, 0x79, - 0x3c, 0x02, 0xfa, 0xa1, 0xe7, 0xb1, 0x9b, 0x90, 0x9f, 0x1c, 0x43, 0x07, 0x34, 0x12, 0x8f, 0x45, - 0xd4, 0xd0, 0x2c, 0x6d, 0x5f, 0xef, 0xef, 0x2e, 0x52, 0x24, 0x81, 0x65, 0x8a, 0xde, 0xbd, 0x27, - 0xe3, 0xd1, 0x1b, 0x2c, 0x42, 0xec, 0x4a, 0x18, 0xda, 0xa0, 0x76, 0x17, 0xf8, 0x46, 0x55, 0xd0, - 0x5f, 0xcc, 0x53, 0x54, 0xfb, 0xfe, 0xe4, 0x78, 0x91, 0xa2, 0x0c, 0x5d, 0xa6, 0x08, 0xc8, 0x9a, - 0xbb, 0xc0, 0xc7, 0x6e, 0x06, 0xe1, 0xdf, 0x9b, 0xa0, 0xa5, 0x8e, 0x83, 0x5f, 0x81, 0x6a, 0xe0, - 0x8b, 0x93, 0xda, 0x3d, 0x64, 0xaf, 0xbb, 0x92, 0x5d, 0x74, 0xd6, 0xdf, 0x7b, 0x48, 0x51, 0x65, - 0x9e, 0xa2, 0xaa, 0x90, 0xaf, 0x0a, 0x75, 0x5d, 0xaa, 0x67, 0xe2, 0xd5, 0xc0, 0xcf, 0x9a, 0x67, - 0x93, 0x90, 0xc6, 0xaa, 0x1b, 0xd1, 0xbc, 0x00, 0xca, 0xe6, 0x45, 0x88, 0x5d, 0x09, 0xc3, 0xef, - 0x40, 0x23, 0xe1, 0x84, 0x53, 0xa3, 0x66, 0x69, 0xfb, 0xcf, 0x7a, 0xaf, 0x36, 0xf6, 0x60, 0x9f, - 0x65, 0x54, 0x35, 0x92, 0xec, 0xef, 0xca, 0x48, 0xb2, 0x30, 0x1b, 0x49, 0xf6, 0x0b, 0x7f, 0x00, - 0xad, 0x01, 0x19, 0x91, 0xd0, 0xa3, 0x46, 0x5d, 0xdc, 0xed, 0x85, 0x2d, 0x57, 0x60, 0x67, 0x2b, - 0x50, 0xb2, 0x07, 0xf6, 0x31, 0xf5, 0x8e, 0x58, 0x10, 0xf6, 0x5f, 0x66, 0x17, 0x5b, 0xa4, 0x28, - 0x2f, 0x5a, 0xa6, 0xe8, 0x99, 0x94, 0x55, 0x00, 0x76, 0xf3, 0x14, 0x0c, 0x40, 0x9b, 0xc7, 0x24, - 0x4c, 0x2e, 0x69, 0x1c, 0x53, 0xdf, 0x68, 0x6c, 0x21, 0xff, 0xa9, 0x92, 0x5f, 0x2d, 0x5c, 0xa6, - 0x08, 0xca, 0x23, 0x56, 0x40, 0xec, 0xae, 0x52, 0xe0, 0x29, 0x00, 0x09, 0xe5, 0x7c, 0x44, 0xfd, - 0x0b, 0xc2, 0x8d, 0xa6, 0xa5, 0xed, 0xd7, 0xfa, 0xf6, 0x3c, 0x45, 0xfa, 0x99, 0x44, 0x0f, 0xf9, - 0x22, 0x45, 0x7a, 0x92, 0x07, 0xcb, 0x14, 0x75, 0xd4, 0x30, 0x72, 0x08, 0xbb, 0x65, 0x1a, 0x7e, - 0x01, 0x74, 0x9f, 0x46, 0x2c, 0x09, 0x38, 0x8b, 0x8d, 0x96, 0xd8, 0xcf, 0xcb, 0x4c, 0xa0, 0x00, - 0x4b, 0x81, 0x02, 0xc2, 0x6e, 0x99, 0x86, 0xdf, 0x82, 0xc6, 0xe5, 0x4d, 0xe8, 0x27, 0xc6, 0x3b, - 0x5b, 0x5c, 0x7a, 0x4f, 0x5d, 0x5a, 0x96, 0x94, 0x8b, 0x12, 0x21, 0x76, 0x25, 0x8c, 0x7f, 0xd5, - 0x40, 0x43, 0x2c, 0x15, 0x7e, 0x0c, 0x5a, 0x41, 0x78, 0x4b, 0x46, 0x81, 0xdf, 0xa9, 0x74, 0x3f, - 0x9c, 0xce, 0xac, 0x0f, 0xd4, 0xd2, 0x45, 0xfa, 0x44, 0xa6, 0xe0, 0x2e, 0xa8, 0xb3, 0x88, 0x86, - 0x1d, 0xad, 0xfb, 0xfe, 0x74, 0x66, 0xb5, 0x15, 0xe5, 0xeb, 0x88, 0x86, 0x70, 0x0f, 0x34, 0xbd, - 0x11, 0x4b, 0xa8, 0xdf, 0xa9, 0x76, 0x9f, 0x4f, 0x67, 0xd6, 0x7b, 0x2a, 0x79, 0x24, 0x40, 0xf8, - 0x0a, 0xe8, 0xec, 0x96, 0xc6, 0x7e, 0x4c, 0x26, 0x61, 0xa7, 0xd6, 0xdd, 0x99, 0xce, 0xac, 0x4e, - 0x5e, 0x9e, 0xe3, 0xdd, 0xfa, 0x4f, 0x7f, 0x9a, 0x15, 0xfc, 0x73, 0x13, 0x3c, 0xff, 0x32, 0x26, - 0x1e, 0x0f, 0x58, 0x48, 0x46, 0xdf, 0x90, 0xfb, 0x31, 0x0d, 0x39, 0x8c, 0x01, 0x20, 0x92, 0x7f, - 0xb1, 0xbd, 0x65, 0x7a, 0xca, 0x32, 0xa5, 0xbf, 0xb3, 0x89, 0x93, 0x3c, 0x28, 0x27, 0x5e, 0x40, - 0xd8, 
0x2d, 0xd2, 0xe2, 0x05, 0x44, 0xf2, 0xf8, 0x8b, 0xc2, 0xe1, 0xe2, 0x05, 0xa8, 0xa6, 0xa4, - 0x5c, 0x94, 0x07, 0xa5, 0x5c, 0x01, 0x61, 0xb7, 0x48, 0xaf, 0xb8, 0xb3, 0xb6, 0xa5, 0x3b, 0xcf, - 0x73, 0x77, 0xd6, 0x85, 0x3b, 0x5f, 0xaf, 0xbf, 0xee, 0xff, 0x66, 0xb5, 0xb5, 0x4f, 0x4f, 0x41, - 0x3d, 0xce, 0xe4, 0xb7, 0x71, 0xd1, 0x47, 0xea, 0x41, 0x89, 0x8a, 0x65, 0x8a, 0xda, 0x52, 0x30, - 0x16, 0x7a, 0x02, 0x5c, 0xb5, 0x7d, 0xf3, 0x2d, 0xdb, 0xfe, 0x1c, 0xe8, 0x93, 0x80, 0x5f, 0x89, - 0x67, 0x22, 0xcc, 0xd3, 0xee, 0xed, 0xae, 0x15, 0x17, 0xca, 0x9f, 0x28, 0xe5, 0xb2, 0xa6, 0x5c, - 0x4d, 0x01, 0x61, 0xb7, 0x4c, 0x6f, 0x34, 0x82, 0x9a, 0xea, 0x26, 0x23, 0x28, 0xca, 0x7a, 0x23, - 0xa8, 0xe4, 0x06, 0x23, 0xe4, 0xe5, 0xff, 0x36, 0xc2, 0x9b, 0xfa, 0x6f, 0x7f, 0x20, 0xad, 0x7f, - 0xf6, 0x30, 0x37, 0xb5, 0xc7, 0xb9, 0xa9, 0xfd, 0x3d, 0x37, 0xb5, 0x5f, 0x9e, 0xcc, 0xca, 0xe3, - 0x93, 0x59, 0xf9, 0xeb, 0xc9, 0xac, 0xfc, 0xf8, 0xf9, 0x30, 0xe0, 0x57, 0x37, 0x03, 0xdb, 0x63, - 0x63, 0x47, 0xbc, 0x8c, 0xd7, 0x21, 0xe5, 0x13, 0x16, 0x5f, 0xab, 0x88, 0x44, 0x81, 0x33, 0x64, - 0x4e, 0xc8, 0x7c, 0xfa, 0x9f, 0x4f, 0xe9, 0xa0, 0x29, 0xbe, 0x7c, 0x9f, 0xfd, 0x13, 0x00, 0x00, - 0xff, 0xff, 0xef, 0x4c, 0x01, 0x25, 0x69, 0x07, 0x00, 0x00, -} - -func (m *AccountID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AccountID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AccountID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.XID) > 0 { - i -= len(m.XID) - copy(dAtA[i:], m.XID) - i = encodeVarintTypes(dAtA, i, uint64(len(m.XID))) - i-- - dAtA[i] = 0x12 - } - if len(m.Scope) > 0 { - i -= len(m.Scope) - copy(dAtA[i:], m.Scope) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Scope))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Account) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Account) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Account) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Funds.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - if len(m.Depositor) > 0 { - i -= len(m.Depositor) - copy(dAtA[i:], m.Depositor) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Depositor))) - i-- - dAtA[i] = 0x3a - } - if m.SettledAt != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.SettledAt)) - i-- - dAtA[i] = 0x30 - } - { - size, err := m.Transferred.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - { - size, err := m.Balance.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if m.State != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x18 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 
0x12 - } - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *FractionalPayment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FractionalPayment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FractionalPayment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Withdrawn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - { - size, err := m.Balance.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - { - size, err := m.Rate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - if m.State != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x20 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x1a - } - if len(m.PaymentID) > 0 { - i -= len(m.PaymentID) - copy(dAtA[i:], m.PaymentID) - i = encodeVarintTypes(dAtA, i, uint64(len(m.PaymentID))) - i-- - dAtA[i] = 0x12 - } - { - size, err := m.AccountID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AccountID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Scope) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.XID) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - -func (m *Account) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovTypes(uint64(l)) - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.State != 0 { - n += 1 + sovTypes(uint64(m.State)) - } - l = m.Balance.Size() - n += 1 + l + sovTypes(uint64(l)) - l = m.Transferred.Size() - n += 1 + l + sovTypes(uint64(l)) - if m.SettledAt != 0 { - n += 1 + sovTypes(uint64(m.SettledAt)) - } - l = len(m.Depositor) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = m.Funds.Size() - n += 1 + l + sovTypes(uint64(l)) - return n -} - -func (m *FractionalPayment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.AccountID.Size() - n += 1 + l + sovTypes(uint64(l)) - l = len(m.PaymentID) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.State != 0 { - n += 1 + sovTypes(uint64(m.State)) - } - l = m.Rate.Size() - n += 1 + l + sovTypes(uint64(l)) - l = m.Balance.Size() - n += 1 + l + sovTypes(uint64(l)) - l = m.Withdrawn.Size() - n += 1 + l + sovTypes(uint64(l)) - return n -} - -func sovTypes(x uint64) 
(n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTypes(x uint64) (n int) { - return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *AccountID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AccountID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AccountID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scope = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field XID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.XID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Account) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Account: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Account: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - 
return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Account_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Balance", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Balance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Transferred", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Transferred.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SettledAt", wireType) - } - m.SettledAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SettledAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Depositor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if 
postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Depositor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Funds", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Funds.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FractionalPayment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FractionalPayment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FractionalPayment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccountID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.AccountID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PaymentID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PaymentID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= FractionalPayment_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Rate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Balance", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Balance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Withdrawn", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Withdrawn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTypes(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTypes - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTypes - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTypes - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/escrow/v1beta2/validate.go b/go/node/escrow/v1beta2/validate.go deleted file mode 100644 index 7c2ebdfd..00000000 --- a/go/node/escrow/v1beta2/validate.go +++ /dev/null @@ -1,53 +0,0 @@ -package v1beta2 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" -) - -func (obj *AccountID) ValidateBasic() error { - if len(obj.Scope) == 0 { - return errors.Wrap(ErrInvalidAccountID, "empty scope") - } - if len(obj.XID) == 0 { - return errors.Wrap(ErrInvalidAccountID, "empty scope") - } - return nil -} - -func (obj *Account) ValidateBasic() error { - if err := obj.ID.ValidateBasic(); err != nil { - return errors.Wrapf(ErrInvalidAccount, "invalid account: id - %s", err) - } - if _, err := sdk.AccAddressFromBech32(obj.Owner); err != nil { - return errors.Wrapf(ErrInvalidAccount, "invalid account: owner - %s", err) - } - if obj.State == AccountStateInvalid { - return errors.Wrapf(ErrInvalidAccount, "invalid account: state - %s", obj.State) - } - if _, err := sdk.AccAddressFromBech32(obj.Depositor); err != nil { - return errors.Wrapf(ErrInvalidAccount, "invalid account: depositor - %s", err) - } - return nil -} - -func (obj *FractionalPayment) ValidateBasic() error { - if err := obj.AccountID.ValidateBasic(); err != nil { - return errors.Wrapf(ErrInvalidPayment, "invalid account id: %s", err) - } - if len(obj.PaymentID) == 0 { - return errors.Wrap(ErrInvalidPayment, "empty payment id") - } - if obj.Rate.IsZero() { - return errors.Wrap(ErrInvalidPayment, "payment rate zero") - } - if obj.State == PaymentStateInvalid { - return errors.Wrap(ErrInvalidPayment, "invalid state") - } - return nil -} - -// TotalBalance is the sum of Balance and Funds -func (obj *Account) TotalBalance() sdk.DecCoin { - return obj.Balance.Add(obj.Funds) -} diff --git a/go/node/escrow/v1beta3/account.go b/go/node/escrow/v1beta3/account.go deleted file mode 100644 index 53dbbd0d..00000000 --- a/go/node/escrow/v1beta3/account.go +++ /dev/null @@ -1,5 +0,0 @@ -package v1beta3 - -func (m *Account) HasDepositor() bool { - return m.Owner != m.Depositor -} diff --git a/go/node/escrow/v1beta3/codec.go b/go/node/escrow/v1beta3/codec.go deleted file mode 100644 index 6cd32bfa..00000000 --- a/go/node/escrow/v1beta3/codec.go +++ /dev/null @@ -1,35 +0,0 @@ -package v1beta3 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec 
"github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/provider module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/provider and - // defined at the application level. - ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterLegacyAminoCodec register concrete types on codec -func RegisterLegacyAminoCodec(_ *codec.LegacyAmino) { -} - -// RegisterInterfaces registers the x/provider interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil)) -} diff --git a/go/node/escrow/v1beta3/error.go b/go/node/escrow/v1beta3/error.go deleted file mode 100644 index aa924e95..00000000 --- a/go/node/escrow/v1beta3/error.go +++ /dev/null @@ -1,39 +0,0 @@ -package v1beta3 - -import ( - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - errAccountExists uint32 = iota + 1 - errAccountClosed - errAccountNotFound - errAccountOverdrawn - errInvalidDenomination - errPaymentExists - errPaymentClosed - errPaymentNotFound - errPaymentRateZero - errInvalidPayment - errInvalidSettlement - errInvalidAccountID - errInvalidAccount - errInvalidAcountDepositor -) - -var ( - ErrAccountExists = sdkerrors.Register(ModuleName, errAccountExists, "account exists") - ErrAccountClosed = sdkerrors.Register(ModuleName, errAccountClosed, "account closed") - ErrAccountNotFound = sdkerrors.Register(ModuleName, errAccountNotFound, "account not found") - ErrAccountOverdrawn = sdkerrors.Register(ModuleName, errAccountOverdrawn, "account overdrawn") - ErrInvalidDenomination = sdkerrors.Register(ModuleName, errInvalidDenomination, "invalid denomination") - ErrPaymentExists = sdkerrors.Register(ModuleName, errPaymentExists, "payment exists") - ErrPaymentClosed = sdkerrors.Register(ModuleName, errPaymentClosed, "payment closed") - ErrPaymentNotFound = sdkerrors.Register(ModuleName, errPaymentNotFound, "payment not found") - ErrPaymentRateZero = sdkerrors.Register(ModuleName, errPaymentRateZero, "payment rate zero") - ErrInvalidPayment = sdkerrors.Register(ModuleName, errInvalidPayment, "invalid payment") - ErrInvalidSettlement = sdkerrors.Register(ModuleName, errInvalidSettlement, "invalid settlement") - ErrInvalidAccountID = sdkerrors.Register(ModuleName, errInvalidAccountID, "invalid account ID") - ErrInvalidAccount = sdkerrors.Register(ModuleName, errInvalidAccount, "invalid account") - ErrInvalidAccountDepositor = sdkerrors.Register(ModuleName, errInvalidAcountDepositor, "invalid account depositor") -) diff --git a/go/node/escrow/v1beta3/genesis.pb.go b/go/node/escrow/v1beta3/genesis.pb.go deleted file mode 100644 index 35b9f4cf..00000000 --- a/go/node/escrow/v1beta3/genesis.pb.go +++ /dev/null @@ -1,399 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/escrow/v1beta3/genesis.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisState defines the basic genesis state used by escrow module -type GenesisState struct { - Accounts []Account `protobuf:"bytes,1,rep,name=accounts,proto3" json:"accounts" yaml:"accounts"` - Payments []FractionalPayment `protobuf:"bytes,2,rep,name=payments,proto3" json:"payments" yaml:"payments"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_d2dbb3d041f88a4c, []int{0} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetAccounts() []Account { - if m != nil { - return m.Accounts - } - return nil -} - -func (m *GenesisState) GetPayments() []FractionalPayment { - if m != nil { - return m.Payments - } - return nil -} - -func init() { - proto.RegisterType((*GenesisState)(nil), "akash.escrow.v1beta3.GenesisState") -} - -func init() { - proto.RegisterFile("akash/escrow/v1beta3/genesis.proto", fileDescriptor_d2dbb3d041f88a4c) -} - -var fileDescriptor_d2dbb3d041f88a4c = []byte{ - // 275 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4a, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x2d, 0x4e, 0x2e, 0xca, 0x2f, 0xd7, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, - 0xd6, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, - 0x01, 0xab, 0xd1, 0x83, 0xa8, 0xd1, 0x83, 0xaa, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, - 0xd0, 0x07, 0xb1, 0x20, 0x6a, 0xa5, 0x14, 0xb0, 0x9a, 0x57, 0x52, 0x59, 0x90, 0x0a, 0x35, 0x4d, - 0xe9, 0x06, 0x23, 0x17, 0x8f, 0x3b, 0xc4, 0xfc, 0xe0, 0x92, 0xc4, 0x92, 0x54, 0xa1, 0x38, 0x2e, - 0x8e, 0xc4, 0xe4, 0xe4, 0xfc, 0xd2, 0xbc, 0x92, 0x62, 0x09, 0x46, 0x05, 0x66, 0x0d, 0x6e, 0x23, - 0x59, 0x3d, 0x6c, 0x36, 0xea, 0x39, 0x42, 0x54, 0x39, 0x29, 0x9f, 0xb8, 0x27, 0xcf, 0xf0, 0xea, - 0x9e, 0x3c, 0x5c, 0xdb, 0xa7, 0x7b, 0xf2, 0xfc, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, 0x30, 0x11, - 0xa5, 0x20, 0xb8, 0xa4, 0x50, 0x06, 0x17, 0x47, 0x41, 0x62, 0x65, 0x6e, 0x2a, 0xc8, 0x7c, 0x26, - 0xb0, 0xf9, 0xea, 0xd8, 0xcd, 0x77, 0x2b, 0x4a, 0x4c, 0x2e, 0xc9, 0xcc, 0xcf, 0x4b, 0xcc, 0x09, - 0x80, 0xa8, 0x47, 0xd8, 0x04, 0x33, 0x00, 0x61, 0x13, 0x4c, 0x44, 0x29, 0x08, 0x2e, 0xe9, 0x14, - 0x7c, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, - 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 
0xc7, 0x72, 0x0c, 0x51, 0x96, 0xe9, 0x99, 0x25, 0x19, - 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x60, 0xbb, 0x75, 0xf3, 0x52, 0x4b, 0xca, 0xf3, 0x8b, - 0xb2, 0xa1, 0xbc, 0xc4, 0x82, 0x4c, 0xfd, 0xf4, 0x7c, 0xfd, 0xbc, 0xfc, 0x94, 0x54, 0xb4, 0xb0, - 0x4b, 0x62, 0x03, 0x07, 0x9b, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xbd, 0x18, 0x84, 0x9a, 0xaa, - 0x01, 0x00, 0x00, -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Payments) > 0 { - for iNdEx := len(m.Payments) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Payments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Accounts) > 0 { - for iNdEx := len(m.Accounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Accounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Accounts) > 0 { - for _, e := range m.Accounts { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - if len(m.Payments) > 0 { - for _, e := range m.Payments { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Accounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Accounts = append(m.Accounts, Account{}) - if err := 
m.Accounts[len(m.Accounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Payments = append(m.Payments, FractionalPayment{}) - if err := m.Payments[len(m.Payments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/escrow/v1beta3/key.go b/go/node/escrow/v1beta3/key.go deleted file mode 100644 index 53bb7f9a..00000000 --- a/go/node/escrow/v1beta3/key.go +++ /dev/null @@ -1,20 +0,0 @@ -package v1beta3 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "escrow" - - // StoreKey is the store key string for deployment - StoreKey = ModuleName - - // RouterKey is the message route for deployment - RouterKey = ModuleName -) - -func AccountKeyPrefix() []byte { - return []byte{0x01} -} - -func PaymentKeyPrefix() []byte { - return []byte{0x02} -} diff --git a/go/node/escrow/v1beta3/query.pb.go b/go/node/escrow/v1beta3/query.pb.go deleted file mode 100644 index ef0ed242..00000000 --- 
a/go/node/escrow/v1beta3/query.pb.go +++ /dev/null @@ -1,1605 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/escrow/v1beta3/query.proto - -package v1beta3 - -import ( - context "context" - fmt "fmt" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryAccountRequest is request type for the Query/Account RPC method -type QueryAccountsRequest struct { - Scope string `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"` - Xid string `protobuf:"bytes,2,opt,name=xid,proto3" json:"xid,omitempty"` - Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` - State string `protobuf:"bytes,4,opt,name=state,proto3" json:"state,omitempty"` - Pagination *query.PageRequest `protobuf:"bytes,5,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryAccountsRequest) Reset() { *m = QueryAccountsRequest{} } -func (m *QueryAccountsRequest) String() string { return proto.CompactTextString(m) } -func (*QueryAccountsRequest) ProtoMessage() {} -func (*QueryAccountsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_84bc7a3aed491eed, []int{0} -} -func (m *QueryAccountsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryAccountsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryAccountsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryAccountsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryAccountsRequest.Merge(m, src) -} -func (m *QueryAccountsRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryAccountsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryAccountsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryAccountsRequest proto.InternalMessageInfo - -func (m *QueryAccountsRequest) GetScope() string { - if m != nil { - return m.Scope - } - return "" -} - -func (m *QueryAccountsRequest) GetXid() string { - if m != nil { - return m.Xid - } - return "" -} - -func (m *QueryAccountsRequest) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *QueryAccountsRequest) GetState() string { - if m != nil { - return m.State - } - return "" -} - -func (m *QueryAccountsRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -type QueryAccountsResponse struct { - Accounts []Account `protobuf:"bytes,1,rep,name=accounts,proto3" json:"accounts"` - Pagination *query.PageResponse 
`protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryAccountsResponse) Reset() { *m = QueryAccountsResponse{} } -func (m *QueryAccountsResponse) String() string { return proto.CompactTextString(m) } -func (*QueryAccountsResponse) ProtoMessage() {} -func (*QueryAccountsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_84bc7a3aed491eed, []int{1} -} -func (m *QueryAccountsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryAccountsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryAccountsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryAccountsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryAccountsResponse.Merge(m, src) -} -func (m *QueryAccountsResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryAccountsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryAccountsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryAccountsResponse proto.InternalMessageInfo - -func (m *QueryAccountsResponse) GetAccounts() []Account { - if m != nil { - return m.Accounts - } - return nil -} - -func (m *QueryAccountsResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryPaymentRequest is request type for the Query/Payment RPC method -type QueryPaymentsRequest struct { - Scope string `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope,omitempty"` - Xid string `protobuf:"bytes,2,opt,name=xid,proto3" json:"xid,omitempty"` - Id string `protobuf:"bytes,3,opt,name=id,proto3" json:"id,omitempty"` - Owner string `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` - State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state,omitempty"` - Pagination *query.PageRequest `protobuf:"bytes,6,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryPaymentsRequest) Reset() { *m = QueryPaymentsRequest{} } -func (m *QueryPaymentsRequest) String() string { return proto.CompactTextString(m) } -func (*QueryPaymentsRequest) ProtoMessage() {} -func (*QueryPaymentsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_84bc7a3aed491eed, []int{2} -} -func (m *QueryPaymentsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryPaymentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryPaymentsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryPaymentsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryPaymentsRequest.Merge(m, src) -} -func (m *QueryPaymentsRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryPaymentsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryPaymentsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryPaymentsRequest proto.InternalMessageInfo - -func (m *QueryPaymentsRequest) GetScope() string { - if m != nil { - return m.Scope - } - return "" -} - -func (m *QueryPaymentsRequest) GetXid() string { - if m != nil { - return m.Xid - } - return "" -} - -func (m *QueryPaymentsRequest) GetId() string { - if m != nil { - return m.Id - } - return "" -} - -func (m *QueryPaymentsRequest) GetOwner() string { - if m != nil { - 
return m.Owner - } - return "" -} - -func (m *QueryPaymentsRequest) GetState() string { - if m != nil { - return m.State - } - return "" -} - -func (m *QueryPaymentsRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -type QueryPaymentsResponse struct { - Payments []FractionalPayment `protobuf:"bytes,1,rep,name=payments,proto3" json:"payments"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryPaymentsResponse) Reset() { *m = QueryPaymentsResponse{} } -func (m *QueryPaymentsResponse) String() string { return proto.CompactTextString(m) } -func (*QueryPaymentsResponse) ProtoMessage() {} -func (*QueryPaymentsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_84bc7a3aed491eed, []int{3} -} -func (m *QueryPaymentsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryPaymentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryPaymentsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryPaymentsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryPaymentsResponse.Merge(m, src) -} -func (m *QueryPaymentsResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryPaymentsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryPaymentsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryPaymentsResponse proto.InternalMessageInfo - -func (m *QueryPaymentsResponse) GetPayments() []FractionalPayment { - if m != nil { - return m.Payments - } - return nil -} - -func (m *QueryPaymentsResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -func init() { - proto.RegisterType((*QueryAccountsRequest)(nil), "akash.escrow.v1beta3.QueryAccountsRequest") - proto.RegisterType((*QueryAccountsResponse)(nil), "akash.escrow.v1beta3.QueryAccountsResponse") - proto.RegisterType((*QueryPaymentsRequest)(nil), "akash.escrow.v1beta3.QueryPaymentsRequest") - proto.RegisterType((*QueryPaymentsResponse)(nil), "akash.escrow.v1beta3.QueryPaymentsResponse") -} - -func init() { proto.RegisterFile("akash/escrow/v1beta3/query.proto", fileDescriptor_84bc7a3aed491eed) } - -var fileDescriptor_84bc7a3aed491eed = []byte{ - // 511 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xbf, 0x6f, 0x13, 0x31, - 0x14, 0xc7, 0xe3, 0x4b, 0x52, 0x05, 0x57, 0x42, 0xc8, 0x0a, 0xd2, 0x29, 0x82, 0x23, 0xca, 0x40, - 0x43, 0x2b, 0x6c, 0x25, 0x9d, 0x98, 0x10, 0x1d, 0x8a, 0xd8, 0x4a, 0xd8, 0xd8, 0x9c, 0x8b, 0x75, - 0x3d, 0x35, 0xf1, 0xbb, 0x9e, 0x1d, 0x42, 0x56, 0xfe, 0x02, 0x24, 0x24, 0x76, 0x66, 0x36, 0x36, - 0x56, 0xa6, 0x8e, 0x95, 0x58, 0x98, 0x10, 0x4a, 0xf8, 0x43, 0xd0, 0xf9, 0x07, 0x6d, 0x4f, 0xd7, - 0x52, 0x55, 0x6c, 0x7e, 0xce, 0xf7, 0xbd, 0xf7, 0x7d, 0x9f, 0xbc, 0x33, 0xee, 0xf2, 0x23, 0xae, - 0x0e, 0x99, 0x50, 0x71, 0x0e, 0x0b, 0xf6, 0x66, 0x30, 0x16, 0x9a, 0xef, 0xb2, 0xe3, 0xb9, 0xc8, - 0x97, 0x34, 0xcb, 0x41, 0x03, 0x69, 0x1b, 0x05, 0xb5, 0x0a, 0xea, 0x14, 0x9d, 0x76, 0x02, 0x09, - 0x18, 0x01, 0x2b, 0x4e, 0x56, 0xdb, 0xb9, 0x97, 0x00, 0x24, 0x53, 0xc1, 0x78, 0x96, 0x32, 0x2e, - 0x25, 0x68, 0xae, 0x53, 0x90, 0xca, 0xfd, 0xba, 0x1d, 0x83, 0x9a, 0x81, 0x62, 0x63, 
0xae, 0x84, - 0x6d, 0xe1, 0x1a, 0x0e, 0x58, 0xc6, 0x93, 0x54, 0x1a, 0xb1, 0xd3, 0x56, 0xfb, 0xd2, 0xcb, 0x4c, - 0xb8, 0x6a, 0xbd, 0x2f, 0x08, 0xb7, 0x5f, 0x16, 0x45, 0x9e, 0xc5, 0x31, 0xcc, 0xa5, 0x56, 0x23, - 0x71, 0x3c, 0x17, 0x4a, 0x93, 0x36, 0x6e, 0xaa, 0x18, 0x32, 0x11, 0xa2, 0x2e, 0xea, 0xdf, 0x1a, - 0xd9, 0x80, 0xdc, 0xc1, 0xf5, 0xb7, 0xe9, 0x24, 0x0c, 0xcc, 0x5d, 0x71, 0x2c, 0x74, 0xb0, 0x90, - 0x22, 0x0f, 0xeb, 0x56, 0x67, 0x02, 0x93, 0xad, 0xb9, 0x16, 0x61, 0xc3, 0x65, 0x17, 0x01, 0xd9, - 0xc7, 0xf8, 0xcc, 0x62, 0xd8, 0xec, 0xa2, 0xfe, 0xe6, 0xf0, 0x21, 0xb5, 0xf3, 0xd0, 0x62, 0x1e, - 0x6a, 0x91, 0xb9, 0x79, 0xe8, 0x01, 0x4f, 0x84, 0xf3, 0x33, 0x3a, 0x97, 0xd9, 0xfb, 0x84, 0xf0, - 0xdd, 0x92, 0x69, 0x95, 0x81, 0x54, 0x82, 0x3c, 0xc5, 0x2d, 0xee, 0xee, 0x42, 0xd4, 0xad, 0xf7, - 0x37, 0x87, 0xf7, 0x69, 0x15, 0x79, 0xea, 0x32, 0xf7, 0x1a, 0x27, 0x3f, 0x1f, 0xd4, 0x46, 0x7f, - 0x93, 0xc8, 0xf3, 0x0b, 0x16, 0x03, 0x63, 0x71, 0xeb, 0x9f, 0x16, 0x6d, 0xf7, 0x0b, 0x1e, 0xbf, - 0x79, 0xb0, 0x07, 0x7c, 0x39, 0x13, 0x37, 0x00, 0x7b, 0x1b, 0x07, 0xe9, 0xc4, 0x51, 0x0d, 0xce, - 0x83, 0x6e, 0x54, 0x82, 0x6e, 0x5e, 0x0e, 0x7a, 0xe3, 0xc6, 0xa0, 0x3f, 0x7b, 0xd0, 0x67, 0x43, - 0x38, 0xd0, 0x2f, 0x70, 0x2b, 0x73, 0x77, 0x0e, 0xf4, 0x56, 0x35, 0xe8, 0xfd, 0x9c, 0xc7, 0x45, - 0x2d, 0x3e, 0x75, 0x35, 0x3c, 0x72, 0x9f, 0xfe, 0xdf, 0x90, 0x0f, 0xbf, 0x06, 0xb8, 0x69, 0xdc, - 0x92, 0x8f, 0x08, 0xb7, 0xfc, 0x6e, 0x90, 0xed, 0x6a, 0x63, 0x55, 0x5b, 0xdf, 0xd9, 0xb9, 0x96, - 0xd6, 0xf6, 0xee, 0x0d, 0xde, 0x7d, 0xff, 0xfd, 0x21, 0xd8, 0x21, 0x8f, 0xd8, 0xe5, 0x9f, 0x19, - 0xf3, 0x9b, 0xc5, 0xa6, 0xa9, 0xd2, 0xc6, 0x98, 0x67, 0x79, 0xa5, 0xb1, 0xd2, 0xd6, 0x5c, 0x69, - 0xac, 0xfc, 0xe7, 0x5c, 0xcf, 0x98, 0xe7, 0x6f, 0x8c, 0xed, 0xbd, 0x3a, 0x59, 0x45, 0xe8, 0x74, - 0x15, 0xa1, 0x5f, 0xab, 0x08, 0xbd, 0x5f, 0x47, 0xb5, 0xd3, 0x75, 0x54, 0xfb, 0xb1, 0x8e, 0x6a, - 0xaf, 0x9f, 0x24, 0xa9, 0x3e, 0x9c, 0x8f, 0x69, 0x0c, 0x33, 0x5b, 0xee, 0xb1, 0x14, 0x7a, 0x01, - 0xf9, 0x91, 0x8b, 0x8a, 0x57, 0x2a, 0x01, 0x26, 0x61, 0x22, 0x4a, 0x8d, 0xc6, 0x1b, 0xe6, 0x8d, - 0xd9, 0xfd, 0x13, 0x00, 0x00, 0xff, 0xff, 0xa3, 0x13, 0xf3, 0xcb, 0x1f, 0x05, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type QueryClient interface { - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Accounts queries all accounts - Accounts(ctx context.Context, in *QueryAccountsRequest, opts ...grpc.CallOption) (*QueryAccountsResponse, error) - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Payments queries all payments - Payments(ctx context.Context, in *QueryPaymentsRequest, opts ...grpc.CallOption) (*QueryPaymentsResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Accounts(ctx context.Context, in *QueryAccountsRequest, opts ...grpc.CallOption) (*QueryAccountsResponse, error) { - out := new(QueryAccountsResponse) - err := c.cc.Invoke(ctx, "/akash.escrow.v1beta3.Query/Accounts", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Payments(ctx context.Context, in *QueryPaymentsRequest, opts ...grpc.CallOption) (*QueryPaymentsResponse, error) { - out := new(QueryPaymentsResponse) - err := c.cc.Invoke(ctx, "/akash.escrow.v1beta3.Query/Payments", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Accounts queries all accounts - Accounts(context.Context, *QueryAccountsRequest) (*QueryAccountsResponse, error) - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Payments queries all payments - Payments(context.Context, *QueryPaymentsRequest) (*QueryPaymentsResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
-type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Accounts(ctx context.Context, req *QueryAccountsRequest) (*QueryAccountsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Accounts not implemented") -} -func (*UnimplementedQueryServer) Payments(ctx context.Context, req *QueryPaymentsRequest) (*QueryPaymentsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Payments not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Accounts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryAccountsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Accounts(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.escrow.v1beta3.Query/Accounts", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Accounts(ctx, req.(*QueryAccountsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Payments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryPaymentsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Payments(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.escrow.v1beta3.Query/Payments", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Payments(ctx, req.(*QueryPaymentsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.escrow.v1beta3.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Accounts", - Handler: _Query_Accounts_Handler, - }, - { - MethodName: "Payments", - Handler: _Query_Payments_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/escrow/v1beta3/query.proto", -} - -func (m *QueryAccountsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryAccountsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryAccountsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintQuery(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x22 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x1a - } - if len(m.Xid) > 0 { - i -= len(m.Xid) - copy(dAtA[i:], m.Xid) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Xid))) - i-- - dAtA[i] = 0x12 - } - if len(m.Scope) > 0 { - i -= len(m.Scope) - copy(dAtA[i:], m.Scope) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Scope))) - i-- - dAtA[i] = 0xa - } 
- return len(dAtA) - i, nil -} - -func (m *QueryAccountsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryAccountsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryAccountsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Accounts) > 0 { - for iNdEx := len(m.Accounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Accounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryPaymentsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryPaymentsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryPaymentsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - } - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintQuery(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x2a - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x22 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0x1a - } - if len(m.Xid) > 0 { - i -= len(m.Xid) - copy(dAtA[i:], m.Xid) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Xid))) - i-- - dAtA[i] = 0x12 - } - if len(m.Scope) > 0 { - i -= len(m.Scope) - copy(dAtA[i:], m.Scope) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Scope))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryPaymentsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryPaymentsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryPaymentsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Payments) > 0 { - for iNdEx := len(m.Payments) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Payments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - 
} - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryAccountsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Scope) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Xid) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryAccountsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Accounts) > 0 { - for _, e := range m.Accounts { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryPaymentsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Scope) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Xid) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Id) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryPaymentsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Payments) > 0 { - for _, e := range m.Payments { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryAccountsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryAccountsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryAccountsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scope = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Xid", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Xid = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryAccountsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: QueryAccountsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryAccountsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Accounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Accounts = append(m.Accounts, Account{}) - if err := m.Accounts[len(m.Accounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryPaymentsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryPaymentsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryPaymentsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scope = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Xid", wireType) - } - var stringLen uint64 - for shift 
:= uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Xid = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - 
} - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryPaymentsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryPaymentsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryPaymentsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Payments", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Payments = append(m.Payments, FractionalPayment{}) - if err := m.Payments[len(m.Payments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - 
return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/escrow/v1beta3/query.pb.gw.go b/go/node/escrow/v1beta3/query.pb.gw.go deleted file mode 100644 index 2f3ae5ac..00000000 --- a/go/node/escrow/v1beta3/query.pb.gw.go +++ /dev/null @@ -1,254 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/escrow/v1beta3/query.proto - -/* -Package v1beta3 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package v1beta3 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_Accounts_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Accounts_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAccountsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Accounts_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Accounts(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Accounts_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryAccountsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Accounts_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Accounts(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Payments_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Payments_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq 
QueryPaymentsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Payments_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Payments(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Payments_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryPaymentsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Payments_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Payments(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. -func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_Accounts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Accounts_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Accounts_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_Payments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Payments_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Payments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) - if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_Accounts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Accounts_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Accounts_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_Payments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Payments_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Payments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -var ( - pattern_Query_Accounts_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"akash", "escrow", "v1beta3", "types", "accounts", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Payments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4, 2, 5}, []string{"akash", "escrow", "v1beta3", "types", "payments", "list"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_Accounts_0 = runtime.ForwardResponseMessage - - forward_Query_Payments_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/escrow/v1beta3/types.pb.go b/go/node/escrow/v1beta3/types.pb.go deleted file mode 100644 index 4b544f72..00000000 --- a/go/node/escrow/v1beta3/types.pb.go +++ /dev/null @@ -1,1467 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/escrow/v1beta3/types.proto - -package v1beta3 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State stores state for an escrow account -type Account_State int32 - -const ( - // AccountStateInvalid is an invalid state - AccountStateInvalid Account_State = 0 - // AccountOpen is the state when an account is open - AccountOpen Account_State = 1 - // AccountClosed is the state when an account is closed - AccountClosed Account_State = 2 - // AccountOverdrawn is the state when an account is overdrawn - AccountOverdrawn Account_State = 3 -) - -var Account_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "closed", - 3: "overdrawn", -} - -var Account_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "closed": 2, - "overdrawn": 3, -} - -func (x Account_State) String() string { - return proto.EnumName(Account_State_name, int32(x)) -} - -func (Account_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_c4875611bf6739ee, []int{1, 0} -} - -// Payment State -type FractionalPayment_State int32 - -const ( - // PaymentStateInvalid is the state when the payment is invalid - PaymentStateInvalid FractionalPayment_State = 0 - // PaymentStateOpen is the state when the payment is open - PaymentOpen FractionalPayment_State = 1 - // PaymentStateClosed is the state when the payment is closed - PaymentClosed FractionalPayment_State = 2 - // PaymentStateOverdrawn is the state when the payment is overdrawn - PaymentOverdrawn FractionalPayment_State = 3 -) - -var FractionalPayment_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "closed", - 3: "overdrawn", -} - -var FractionalPayment_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "closed": 2, - "overdrawn": 3, -} - -func (x FractionalPayment_State) String() string { - return proto.EnumName(FractionalPayment_State_name, int32(x)) -} - -func (FractionalPayment_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_c4875611bf6739ee, []int{2, 0} -} - -// AccountID is the account identifier -type AccountID struct { - Scope string `protobuf:"bytes,1,opt,name=scope,proto3" json:"scope" yaml:"scope"` - XID string `protobuf:"bytes,2,opt,name=xid,proto3" json:"xid" yaml:"xid"` -} - -func (m *AccountID) Reset() { *m = AccountID{} } -func (m *AccountID) String() string { return proto.CompactTextString(m) } -func (*AccountID) ProtoMessage() {} -func (*AccountID) Descriptor() ([]byte, []int) { - return fileDescriptor_c4875611bf6739ee, []int{0} -} -func (m *AccountID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AccountID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_AccountID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *AccountID) XXX_Merge(src proto.Message) { - xxx_messageInfo_AccountID.Merge(m, src) -} -func (m *AccountID) XXX_Size() int { - return m.Size() -} -func (m *AccountID) XXX_DiscardUnknown() { - xxx_messageInfo_AccountID.DiscardUnknown(m) -} - -var xxx_messageInfo_AccountID proto.InternalMessageInfo - -func (m *AccountID) GetScope() string { - if m != nil { - return m.Scope - } - return "" -} - -func (m *AccountID) GetXID() string { - if m != nil { - return m.XID - } - return "" -} - -// Account stores state for an escrow account -type Account struct { - // unique identifier for this escrow account - ID AccountID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` - // bech32 
encoded account address of the owner of this escrow account - Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner" yaml:"owner"` - // current state of this escrow account - State Account_State `protobuf:"varint,3,opt,name=state,proto3,enum=akash.escrow.v1beta3.Account_State" json:"state" yaml:"state"` - // unspent coins received from the owner's wallet - Balance types.DecCoin `protobuf:"bytes,4,opt,name=balance,proto3" json:"balance" yaml:"balance"` - // total coins spent by this account - Transferred types.DecCoin `protobuf:"bytes,5,opt,name=transferred,proto3" json:"transferred" yaml:"transferred"` - // block height at which this account was last settled - SettledAt int64 `protobuf:"varint,6,opt,name=settled_at,json=settledAt,proto3" json:"settledAt" yaml:"settledAt"` - // bech32 encoded account address of the depositor. - // If depositor is same as the owner, then any incoming coins are added to the Balance. - // If depositor isn't same as the owner, then any incoming coins are added to the Funds. - Depositor string `protobuf:"bytes,7,opt,name=depositor,proto3" json:"depositor" yaml:"depositor"` - // Funds are unspent coins received from the (non-Owner) Depositor's wallet. - // If there are any funds, they should be spent before spending the Balance. - Funds types.DecCoin `protobuf:"bytes,8,opt,name=funds,proto3" json:"funds" yaml:"funds"` -} - -func (m *Account) Reset() { *m = Account{} } -func (m *Account) String() string { return proto.CompactTextString(m) } -func (*Account) ProtoMessage() {} -func (*Account) Descriptor() ([]byte, []int) { - return fileDescriptor_c4875611bf6739ee, []int{1} -} -func (m *Account) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Account) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Account.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Account) XXX_Merge(src proto.Message) { - xxx_messageInfo_Account.Merge(m, src) -} -func (m *Account) XXX_Size() int { - return m.Size() -} -func (m *Account) XXX_DiscardUnknown() { - xxx_messageInfo_Account.DiscardUnknown(m) -} - -var xxx_messageInfo_Account proto.InternalMessageInfo - -func (m *Account) GetID() AccountID { - if m != nil { - return m.ID - } - return AccountID{} -} - -func (m *Account) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *Account) GetState() Account_State { - if m != nil { - return m.State - } - return AccountStateInvalid -} - -func (m *Account) GetBalance() types.DecCoin { - if m != nil { - return m.Balance - } - return types.DecCoin{} -} - -func (m *Account) GetTransferred() types.DecCoin { - if m != nil { - return m.Transferred - } - return types.DecCoin{} -} - -func (m *Account) GetSettledAt() int64 { - if m != nil { - return m.SettledAt - } - return 0 -} - -func (m *Account) GetDepositor() string { - if m != nil { - return m.Depositor - } - return "" -} - -func (m *Account) GetFunds() types.DecCoin { - if m != nil { - return m.Funds - } - return types.DecCoin{} -} - -// Payment stores state for a payment -type FractionalPayment struct { - AccountID AccountID `protobuf:"bytes,1,opt,name=account_id,json=accountId,proto3" json:"accountID" yaml:"accountID"` - PaymentID string `protobuf:"bytes,2,opt,name=payment_id,json=paymentId,proto3" json:"paymentID" yaml:"paymentID"` - Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner" 
yaml:"owner"` - State FractionalPayment_State `protobuf:"varint,4,opt,name=state,proto3,enum=akash.escrow.v1beta3.FractionalPayment_State" json:"state" yaml:"state"` - Rate types.DecCoin `protobuf:"bytes,5,opt,name=rate,proto3" json:"rate" yaml:"rate"` - Balance types.DecCoin `protobuf:"bytes,6,opt,name=balance,proto3" json:"balance" yaml:"balance"` - Withdrawn types.Coin `protobuf:"bytes,7,opt,name=withdrawn,proto3" json:"withdrawn" yaml:"withdrawn"` -} - -func (m *FractionalPayment) Reset() { *m = FractionalPayment{} } -func (m *FractionalPayment) String() string { return proto.CompactTextString(m) } -func (*FractionalPayment) ProtoMessage() {} -func (*FractionalPayment) Descriptor() ([]byte, []int) { - return fileDescriptor_c4875611bf6739ee, []int{2} -} -func (m *FractionalPayment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FractionalPayment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_FractionalPayment.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *FractionalPayment) XXX_Merge(src proto.Message) { - xxx_messageInfo_FractionalPayment.Merge(m, src) -} -func (m *FractionalPayment) XXX_Size() int { - return m.Size() -} -func (m *FractionalPayment) XXX_DiscardUnknown() { - xxx_messageInfo_FractionalPayment.DiscardUnknown(m) -} - -var xxx_messageInfo_FractionalPayment proto.InternalMessageInfo - -func (m *FractionalPayment) GetAccountID() AccountID { - if m != nil { - return m.AccountID - } - return AccountID{} -} - -func (m *FractionalPayment) GetPaymentID() string { - if m != nil { - return m.PaymentID - } - return "" -} - -func (m *FractionalPayment) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *FractionalPayment) GetState() FractionalPayment_State { - if m != nil { - return m.State - } - return PaymentStateInvalid -} - -func (m *FractionalPayment) GetRate() types.DecCoin { - if m != nil { - return m.Rate - } - return types.DecCoin{} -} - -func (m *FractionalPayment) GetBalance() types.DecCoin { - if m != nil { - return m.Balance - } - return types.DecCoin{} -} - -func (m *FractionalPayment) GetWithdrawn() types.Coin { - if m != nil { - return m.Withdrawn - } - return types.Coin{} -} - -func init() { - proto.RegisterEnum("akash.escrow.v1beta3.Account_State", Account_State_name, Account_State_value) - proto.RegisterEnum("akash.escrow.v1beta3.FractionalPayment_State", FractionalPayment_State_name, FractionalPayment_State_value) - proto.RegisterType((*AccountID)(nil), "akash.escrow.v1beta3.AccountID") - proto.RegisterType((*Account)(nil), "akash.escrow.v1beta3.Account") - proto.RegisterType((*FractionalPayment)(nil), "akash.escrow.v1beta3.FractionalPayment") -} - -func init() { proto.RegisterFile("akash/escrow/v1beta3/types.proto", fileDescriptor_c4875611bf6739ee) } - -var fileDescriptor_c4875611bf6739ee = []byte{ - // 811 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x55, 0x4d, 0x6f, 0xeb, 0x44, - 0x14, 0x8d, 0xf3, 0x89, 0x27, 0xf0, 0xc8, 0x1b, 0x2a, 0xe1, 0x86, 0x57, 0x8f, 0xdf, 0x3c, 0x90, - 0xca, 0xe2, 0xd9, 0x6a, 0xde, 0x8a, 0xb7, 0x41, 0x4d, 0x2b, 0xa4, 0x2e, 0xca, 0x87, 0xcb, 0x02, - 0x58, 0xb4, 0x9a, 0xd8, 0xd3, 0xd4, 0x6a, 0xe2, 0xb1, 0xec, 0x69, 0xd3, 0xfe, 0x03, 0x14, 0xb1, - 0x40, 0xac, 0xd8, 0x04, 0x21, 0xf1, 0x67, 0xba, 0xec, 0x92, 0x95, 0x85, 0xd2, 0x5d, 0x96, 0xf9, - 
0x05, 0xc8, 0x33, 0x63, 0x3b, 0x40, 0x14, 0x65, 0xc1, 0x2a, 0xb9, 0xe7, 0x9e, 0x7b, 0xe6, 0xce, - 0xbd, 0x73, 0x64, 0x60, 0x91, 0x6b, 0x92, 0x5c, 0x39, 0x34, 0xf1, 0x62, 0x36, 0x71, 0x6e, 0x0f, - 0x06, 0x94, 0x93, 0x37, 0x0e, 0xbf, 0x8f, 0x68, 0x62, 0x47, 0x31, 0xe3, 0x0c, 0xee, 0x08, 0x86, - 0x2d, 0x19, 0xb6, 0x62, 0x74, 0x77, 0x86, 0x6c, 0xc8, 0x04, 0xc1, 0xc9, 0xfe, 0x49, 0x6e, 0xd7, - 0xf4, 0x58, 0x32, 0x66, 0x89, 0x33, 0x20, 0x09, 0x55, 0x62, 0x07, 0x8e, 0xc7, 0x82, 0x50, 0xe6, - 0xf1, 0x08, 0xe8, 0x87, 0x9e, 0xc7, 0x6e, 0x42, 0x7e, 0x72, 0x0c, 0x1d, 0xd0, 0x48, 0x3c, 0x16, - 0x51, 0x43, 0xb3, 0xb4, 0x7d, 0xbd, 0xbf, 0xbb, 0x48, 0x91, 0x04, 0x96, 0x29, 0x7a, 0xf7, 0x9e, - 0x8c, 0x47, 0x6f, 0xb1, 0x08, 0xb1, 0x2b, 0x61, 0x68, 0x83, 0xda, 0x5d, 0xe0, 0x1b, 0x55, 0x41, - 0x7f, 0x31, 0x4f, 0x51, 0xed, 0xbb, 0x93, 0xe3, 0x45, 0x8a, 0x32, 0x74, 0x99, 0x22, 0x20, 0x6b, - 0xee, 0x02, 0x1f, 0xbb, 0x19, 0x84, 0x7f, 0x6b, 0x82, 0x96, 0x3a, 0x0e, 0x7e, 0x09, 0xaa, 0x81, - 0x2f, 0x4e, 0x6a, 0xf7, 0x90, 0xbd, 0xee, 0x4a, 0x76, 0xd1, 0x59, 0x7f, 0xef, 0x21, 0x45, 0x95, - 0x79, 0x8a, 0xaa, 0x42, 0xbe, 0x2a, 0xd4, 0x75, 0xa9, 0x9e, 0x89, 0x57, 0x03, 0x3f, 0x6b, 0x9e, - 0x4d, 0x42, 0x1a, 0xab, 0x6e, 0x44, 0xf3, 0x02, 0x28, 0x9b, 0x17, 0x21, 0x76, 0x25, 0x0c, 0xbf, - 0x05, 0x8d, 0x84, 0x13, 0x4e, 0x8d, 0x9a, 0xa5, 0xed, 0x3f, 0xeb, 0xbd, 0xda, 0xd8, 0x83, 0x7d, - 0x96, 0x51, 0xd5, 0x48, 0xb2, 0xbf, 0x2b, 0x23, 0xc9, 0xc2, 0x6c, 0x24, 0xd9, 0x2f, 0xfc, 0x1e, - 0xb4, 0x06, 0x64, 0x44, 0x42, 0x8f, 0x1a, 0x75, 0x71, 0xb7, 0x17, 0xb6, 0x5c, 0x81, 0x9d, 0xad, - 0x40, 0xc9, 0x1e, 0xd8, 0xc7, 0xd4, 0x3b, 0x62, 0x41, 0xd8, 0x7f, 0x99, 0x5d, 0x6c, 0x91, 0xa2, - 0xbc, 0x68, 0x99, 0xa2, 0x67, 0x52, 0x56, 0x01, 0xd8, 0xcd, 0x53, 0x30, 0x00, 0x6d, 0x1e, 0x93, - 0x30, 0xb9, 0xa4, 0x71, 0x4c, 0x7d, 0xa3, 0xb1, 0x85, 0xfc, 0xa7, 0x4a, 0x7e, 0xb5, 0x70, 0x99, - 0x22, 0x28, 0x8f, 0x58, 0x01, 0xb1, 0xbb, 0x4a, 0x81, 0xa7, 0x00, 0x24, 0x94, 0xf3, 0x11, 0xf5, - 0x2f, 0x08, 0x37, 0x9a, 0x96, 0xb6, 0x5f, 0xeb, 0xdb, 0xf3, 0x14, 0xe9, 0x67, 0x12, 0x3d, 0xe4, - 0x8b, 0x14, 0xe9, 0x49, 0x1e, 0x2c, 0x53, 0xd4, 0x51, 0xc3, 0xc8, 0x21, 0xec, 0x96, 0x69, 0xf8, - 0x39, 0xd0, 0x7d, 0x1a, 0xb1, 0x24, 0xe0, 0x2c, 0x36, 0x5a, 0x62, 0x3f, 0x2f, 0x33, 0x81, 0x02, - 0x2c, 0x05, 0x0a, 0x08, 0xbb, 0x65, 0x1a, 0x7e, 0x03, 0x1a, 0x97, 0x37, 0xa1, 0x9f, 0x18, 0xef, - 0x6c, 0x71, 0xe9, 0x3d, 0x75, 0x69, 0x59, 0x52, 0x2e, 0x4a, 0x84, 0xd8, 0x95, 0x30, 0xfe, 0x45, - 0x03, 0x0d, 0xb1, 0x54, 0xf8, 0x31, 0x68, 0x05, 0xe1, 0x2d, 0x19, 0x05, 0x7e, 0xa7, 0xd2, 0xfd, - 0x70, 0x3a, 0xb3, 0x3e, 0x50, 0x4b, 0x17, 0xe9, 0x13, 0x99, 0x82, 0xbb, 0xa0, 0xce, 0x22, 0x1a, - 0x76, 0xb4, 0xee, 0xfb, 0xd3, 0x99, 0xd5, 0x56, 0x94, 0xaf, 0x22, 0x1a, 0xc2, 0x3d, 0xd0, 0xf4, - 0x46, 0x2c, 0xa1, 0x7e, 0xa7, 0xda, 0x7d, 0x3e, 0x9d, 0x59, 0xef, 0xa9, 0xe4, 0x91, 0x00, 0xe1, - 0x2b, 0xa0, 0xb3, 0x5b, 0x1a, 0xfb, 0x31, 0x99, 0x84, 0x9d, 0x5a, 0x77, 0x67, 0x3a, 0xb3, 0x3a, - 0x79, 0x79, 0x8e, 0x77, 0xeb, 0x3f, 0xfe, 0x61, 0x56, 0xf0, 0x4f, 0x4d, 0xf0, 0xfc, 0x8b, 0x98, - 0x78, 0x3c, 0x60, 0x21, 0x19, 0x7d, 0x4d, 0xee, 0xc7, 0x34, 0xe4, 0x30, 0x06, 0x80, 0x48, 0xfe, - 0xc5, 0xf6, 0x96, 0xe9, 0x29, 0xcb, 0x94, 0xfe, 0xce, 0x26, 0x4e, 0xf2, 0xa0, 0x9c, 0x78, 0x01, - 0x61, 0xb7, 0x48, 0x8b, 0x17, 0x10, 0xc9, 0xe3, 0x2f, 0x0a, 0x87, 0x8b, 0x17, 0xa0, 0x9a, 0x92, - 0x72, 0x51, 0x1e, 0x94, 0x72, 0x05, 0x84, 0xdd, 0x22, 0xbd, 0xe2, 0xce, 0xda, 0x96, 0xee, 0x3c, - 0xcf, 0xdd, 0x59, 0x17, 0xee, 0x7c, 0xbd, 0xfe, 0xba, 0xff, 0x99, 0xd5, 0xd6, 0x3e, 0x3d, 0x05, - 0xf5, 0x38, 0x93, 0xdf, 
0xc6, 0x45, 0x1f, 0xa9, 0x07, 0x25, 0x2a, 0x96, 0x29, 0x6a, 0x4b, 0xc1, - 0x58, 0xe8, 0x09, 0x70, 0xd5, 0xf6, 0xcd, 0xff, 0xd9, 0xf6, 0xe7, 0x40, 0x9f, 0x04, 0xfc, 0x4a, - 0x3c, 0x13, 0x61, 0x9e, 0x76, 0x6f, 0x77, 0xad, 0xb8, 0x50, 0xfe, 0x44, 0x29, 0x97, 0x35, 0xe5, - 0x6a, 0x0a, 0x08, 0xbb, 0x65, 0x7a, 0xa3, 0x11, 0xd4, 0x54, 0x37, 0x19, 0x41, 0x51, 0xd6, 0x1b, - 0x41, 0x25, 0x37, 0x18, 0x21, 0x2f, 0xff, 0xa7, 0x11, 0xde, 0xd6, 0x7f, 0xfd, 0x1d, 0x69, 0xfd, - 0xb3, 0x87, 0xb9, 0xa9, 0x3d, 0xce, 0x4d, 0xed, 0xaf, 0xb9, 0xa9, 0xfd, 0xfc, 0x64, 0x56, 0x1e, - 0x9f, 0xcc, 0xca, 0x9f, 0x4f, 0x66, 0xe5, 0x87, 0xcf, 0x86, 0x01, 0xbf, 0xba, 0x19, 0xd8, 0x1e, - 0x1b, 0x3b, 0xe2, 0x65, 0xbc, 0x0e, 0x29, 0x9f, 0xb0, 0xf8, 0x5a, 0x45, 0x24, 0x0a, 0x9c, 0x21, - 0x73, 0x42, 0xe6, 0xd3, 0x7f, 0x7d, 0x4a, 0x07, 0x4d, 0xf1, 0xe5, 0x7b, 0xf3, 0x77, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x5a, 0x62, 0xe1, 0x3e, 0x69, 0x07, 0x00, 0x00, -} - -func (m *AccountID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AccountID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AccountID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.XID) > 0 { - i -= len(m.XID) - copy(dAtA[i:], m.XID) - i = encodeVarintTypes(dAtA, i, uint64(len(m.XID))) - i-- - dAtA[i] = 0x12 - } - if len(m.Scope) > 0 { - i -= len(m.Scope) - copy(dAtA[i:], m.Scope) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Scope))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Account) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Account) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Account) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Funds.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - if len(m.Depositor) > 0 { - i -= len(m.Depositor) - copy(dAtA[i:], m.Depositor) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Depositor))) - i-- - dAtA[i] = 0x3a - } - if m.SettledAt != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.SettledAt)) - i-- - dAtA[i] = 0x30 - } - { - size, err := m.Transferred.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - { - size, err := m.Balance.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if m.State != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x18 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x12 - } - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *FractionalPayment) Marshal() (dAtA []byte, err error) { - size := m.Size() - 
dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FractionalPayment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FractionalPayment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Withdrawn.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - { - size, err := m.Balance.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - { - size, err := m.Rate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - if m.State != 0 { - i = encodeVarintTypes(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x20 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0x1a - } - if len(m.PaymentID) > 0 { - i -= len(m.PaymentID) - copy(dAtA[i:], m.PaymentID) - i = encodeVarintTypes(dAtA, i, uint64(len(m.PaymentID))) - i-- - dAtA[i] = 0x12 - } - { - size, err := m.AccountID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AccountID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Scope) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.XID) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - -func (m *Account) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovTypes(uint64(l)) - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.State != 0 { - n += 1 + sovTypes(uint64(m.State)) - } - l = m.Balance.Size() - n += 1 + l + sovTypes(uint64(l)) - l = m.Transferred.Size() - n += 1 + l + sovTypes(uint64(l)) - if m.SettledAt != 0 { - n += 1 + sovTypes(uint64(m.SettledAt)) - } - l = len(m.Depositor) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = m.Funds.Size() - n += 1 + l + sovTypes(uint64(l)) - return n -} - -func (m *FractionalPayment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.AccountID.Size() - n += 1 + l + sovTypes(uint64(l)) - l = len(m.PaymentID) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - if m.State != 0 { - n += 1 + sovTypes(uint64(m.State)) - } - l = m.Rate.Size() - n += 1 + l + sovTypes(uint64(l)) - l = m.Balance.Size() - n += 1 + l + sovTypes(uint64(l)) - l = m.Withdrawn.Size() - n += 1 + l + sovTypes(uint64(l)) - return n -} - -func sovTypes(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTypes(x uint64) (n int) { - return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *AccountID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AccountID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AccountID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scope = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field XID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.XID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Account) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Account: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Account: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Account_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Balance", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Balance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Transferred", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Transferred.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SettledAt", wireType) - } - m.SettledAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SettledAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Depositor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Depositor = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Funds", wireType) - } - var msglen int - for shift 
:= uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Funds.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FractionalPayment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FractionalPayment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FractionalPayment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccountID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.AccountID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PaymentID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PaymentID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= FractionalPayment_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Rate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Balance", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Balance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Withdrawn", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Withdrawn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTypes(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if 
iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTypes - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTypes - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTypes - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/escrow/v1beta3/validate.go b/go/node/escrow/v1beta3/validate.go deleted file mode 100644 index 8b29e7d1..00000000 --- a/go/node/escrow/v1beta3/validate.go +++ /dev/null @@ -1,53 +0,0 @@ -package v1beta3 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" -) - -func (obj *AccountID) ValidateBasic() error { - if len(obj.Scope) == 0 { - return errors.Wrap(ErrInvalidAccountID, "empty scope") - } - if len(obj.XID) == 0 { - return errors.Wrap(ErrInvalidAccountID, "empty scope") - } - return nil -} - -func (obj *Account) ValidateBasic() error { - if err := obj.ID.ValidateBasic(); err != nil { - return errors.Wrapf(ErrInvalidAccount, "invalid account: id - %s", err) - } - if _, err := sdk.AccAddressFromBech32(obj.Owner); err != nil { - return errors.Wrapf(ErrInvalidAccount, "invalid account: owner - %s", err) - } - if obj.State == AccountStateInvalid { - return errors.Wrapf(ErrInvalidAccount, "invalid account: state - %s", obj.State) - } - if _, err := sdk.AccAddressFromBech32(obj.Depositor); err != nil { - return errors.Wrapf(ErrInvalidAccount, "invalid account: depositor - %s", err) - } - return nil -} - -func (obj *FractionalPayment) ValidateBasic() error { - if err := obj.AccountID.ValidateBasic(); err != nil { - return errors.Wrapf(ErrInvalidPayment, "invalid account id: %s", err) - } - if len(obj.PaymentID) == 0 { - return errors.Wrap(ErrInvalidPayment, "empty payment id") - } - if obj.Rate.IsZero() { - return errors.Wrap(ErrInvalidPayment, "payment rate zero") - } - if obj.State == PaymentStateInvalid { - return errors.Wrap(ErrInvalidPayment, "invalid state") - } - return nil -} - -// TotalBalance is the sum of Balance and Funds -func (obj *Account) TotalBalance() sdk.DecCoin { - return obj.Balance.Add(obj.Funds) -} diff --git a/go/node/gov/v1beta3/codec.go b/go/node/gov/v1beta3/codec.go deleted file mode 100644 index cb3f6e09..00000000 --- a/go/node/gov/v1beta3/codec.go +++ /dev/null @@ -1,24 +0,0 @@ -package v1beta3 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/provider module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/provider and - // defined at the application level. 
- ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} diff --git a/go/node/gov/v1beta3/genesis.pb.go b/go/node/gov/v1beta3/genesis.pb.go index b1255113..d9bd0229 100644 --- a/go/node/gov/v1beta3/genesis.pb.go +++ b/go/node/gov/v1beta3/genesis.pb.go @@ -5,8 +5,8 @@ package v1beta3 import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -75,7 +75,7 @@ func init() { func init() { proto.RegisterFile("akash/gov/v1beta3/genesis.proto", fileDescriptor_2cb71729a1a68e2a) } var fileDescriptor_2cb71729a1a68e2a = []byte{ - // 238 bytes of a gzipped FileDescriptorProto + // 223 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcc, 0x4e, 0x2c, 0xce, 0xd0, 0x4f, 0xcf, 0x2f, 0xd3, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd6, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x04, 0x2b, 0xd0, @@ -85,12 +85,11 @@ var fileDescriptor_2cb71729a1a68e2a = []byte{ 0x92, 0x5a, 0x90, 0x5f, 0x9c, 0x59, 0x12, 0x0f, 0x51, 0x28, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x6d, 0xa4, 0xa0, 0x87, 0x61, 0xa5, 0x9e, 0x0b, 0x44, 0x61, 0x00, 0x58, 0x9d, 0x93, 0xfe, 0x89, 0x7b, 0xf2, 0x0c, 0xaf, 0xee, 0xc9, 0xa3, 0xe9, 0xff, 0x74, 0x4f, 0x5e, 0xb4, 0x32, 0x31, 0x37, 0xc7, - 0x4a, 0x09, 0x55, 0x5c, 0x29, 0x88, 0x37, 0x05, 0x45, 0x7f, 0xc0, 0x89, 0x47, 0x72, 0x8c, 0x17, + 0x4a, 0x09, 0x55, 0x5c, 0x29, 0x88, 0x37, 0x05, 0x45, 0xbf, 0xe5, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, - 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x99, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, - 0xea, 0x83, 0x5d, 0xa1, 0x9b, 0x97, 0x5a, 0x52, 0x9e, 0x5f, 0x94, 0x0d, 0xe5, 0x25, 0x16, 0x64, - 0xea, 0xa7, 0xe7, 0xeb, 0xe7, 0xe5, 0xa7, 0xa4, 0x22, 0xfb, 0x34, 0x89, 0x0d, 0xec, 0x47, 0x63, - 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xec, 0xb2, 0x6b, 0xbd, 0x4f, 0x01, 0x00, 0x00, + 0x37, 0x1e, 0xcb, 0x31, 0x44, 0xc9, 0x17, 0x64, 0xa7, 0xeb, 0x25, 0x66, 0x97, 0xe8, 0xa5, 0xa4, + 0x96, 0xe9, 0xa7, 0xe7, 0xeb, 0xe7, 0xe5, 0xa7, 0xa4, 0x22, 0x7b, 0x29, 0x89, 0x0d, 0xec, 0x19, + 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbb, 0xfa, 0xf8, 0xbd, 0x38, 0x01, 0x00, 0x00, } func (m *GenesisState) Marshal() (dAtA []byte, err error) { diff --git a/go/node/gov/v1beta3/key.go b/go/node/gov/v1beta3/key.go index f2cce01e..3b7e46e1 100644 --- a/go/node/gov/v1beta3/key.go +++ b/go/node/gov/v1beta3/key.go @@ -1,15 +1,12 @@ package v1beta3 const ( - // ModuleName is the name of the module + // ModuleName is the module name constant used in many places ModuleName = "agov" - // StoreKey is the store key string for gov + // StoreKey is the store key string for deployment StoreKey = ModuleName - // RouterKey is the message route for gov + // RouterKey is the message route for deployment RouterKey = ModuleName - - // QuerierRoute is the querier route for gov - QuerierRoute = ModuleName ) diff --git a/go/node/gov/v1beta3/params.go b/go/node/gov/v1beta3/params.go index 8f9d085e..824b87f7 100644 --- a/go/node/gov/v1beta3/params.go +++ b/go/node/gov/v1beta3/params.go @@ -17,6 +17,8 @@ var ( KeyDepositParams = []byte("depositparams") ) +// ParamKeyTable for agov module +// Deprecated: now params can be accessed via cosmos-sdk gov store func 
ParamKeyTable() paramtypes.KeyTable { return paramtypes.NewKeyTable( paramtypes.NewParamSetPair(KeyDepositParams, DepositParams{}, validateDepositParams), diff --git a/go/node/gov/v1beta3/params.pb.go b/go/node/gov/v1beta3/params.pb.go index 560c4f33..89a12037 100644 --- a/go/node/gov/v1beta3/params.pb.go +++ b/go/node/gov/v1beta3/params.pb.go @@ -6,8 +6,8 @@ package v1beta3 import ( fmt "fmt" github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -71,7 +71,7 @@ func init() { func init() { proto.RegisterFile("akash/gov/v1beta3/params.proto", fileDescriptor_ff7c87bcce7fe71f) } var fileDescriptor_ff7c87bcce7fe71f = []byte{ - // 274 bytes of a gzipped FileDescriptorProto + // 267 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcc, 0x4e, 0x2c, 0xce, 0xd0, 0x4f, 0xcf, 0x2f, 0xd3, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd6, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x04, 0xcb, 0xeb, 0xa5, @@ -84,12 +84,11 @@ var fileDescriptor_ff7c87bcce7fe71f = []byte{ 0x53, 0xb2, 0xf5, 0x4b, 0x2a, 0x0b, 0x52, 0x8b, 0xf5, 0x5c, 0x52, 0x93, 0x1f, 0xdd, 0x93, 0x17, 0xf5, 0xcd, 0xcc, 0xf3, 0x84, 0x98, 0x08, 0xb5, 0x2e, 0x28, 0xb1, 0x24, 0xf5, 0xd5, 0x3d, 0x79, 0x9c, 0x76, 0x7d, 0xba, 0x27, 0x2f, 0x5f, 0x99, 0x98, 0x9b, 0x63, 0xa5, 0x84, 0x4b, 0x85, 0x52, - 0x90, 0x68, 0x2e, 0x36, 0x53, 0x9d, 0x02, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, + 0x90, 0x68, 0x2e, 0x36, 0x53, 0x9d, 0x2c, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, - 0x21, 0xca, 0x0c, 0xc9, 0xc1, 0xe0, 0x20, 0xd2, 0xcd, 0x4b, 0x2d, 0x29, 0xcf, 0x2f, 0xca, 0x86, - 0xf2, 0x12, 0x0b, 0x32, 0xf5, 0xd3, 0xf3, 0xf5, 0xf3, 0xf2, 0x53, 0x52, 0x91, 0x03, 0x37, 0x89, - 0x0d, 0x1c, 0x5a, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x88, 0x50, 0xf2, 0xd6, 0x78, 0x01, - 0x00, 0x00, + 0x21, 0x4a, 0xbe, 0x20, 0x3b, 0x5d, 0x2f, 0x31, 0xbb, 0x44, 0x2f, 0x25, 0xb5, 0x4c, 0x3f, 0x3d, + 0x5f, 0x3f, 0x2f, 0x3f, 0x25, 0x15, 0x39, 0x14, 0x93, 0xd8, 0xc0, 0xc1, 0x62, 0x0c, 0x08, 0x00, + 0x00, 0xff, 0xff, 0x41, 0x12, 0x21, 0x60, 0x61, 0x01, 0x00, 0x00, } func (m *DepositParams) Marshal() (dAtA []byte, err error) { diff --git a/go/node/inflation/types/v1beta2/genesis.pb.go b/go/node/inflation/types/v1beta2/genesis.pb.go index 37242296..6c2fd704 100644 --- a/go/node/inflation/types/v1beta2/genesis.pb.go +++ b/go/node/inflation/types/v1beta2/genesis.pb.go @@ -5,8 +5,8 @@ package v1beta2 import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -77,7 +77,7 @@ func init() { } var fileDescriptor_912221706d9e5bb6 = []byte{ - // 236 bytes of a gzipped FileDescriptorProto + // 221 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xcc, 0x4e, 0x2c, 0xce, 0xd0, 0xcf, 0xcc, 0x4b, 0xcb, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd2, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, @@ -87,12 +87,11 @@ var fileDescriptor_912221706d9e5bb6 = 
[]byte{ 0x50, 0x04, 0x17, 0x1b, 0x44, 0x5e, 0x82, 0x51, 0x81, 0x51, 0x83, 0xdb, 0x48, 0x5e, 0x0f, 0x87, 0xad, 0x7a, 0x01, 0x60, 0x65, 0x4e, 0xf2, 0x27, 0xee, 0xc9, 0x33, 0xbc, 0xba, 0x27, 0x0f, 0xd5, 0xf6, 0xe9, 0x9e, 0x3c, 0x6f, 0x65, 0x62, 0x6e, 0x8e, 0x95, 0x12, 0x84, 0xaf, 0x14, 0x04, 0x95, - 0x70, 0x8a, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, - 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xa7, 0xf4, 0xcc, - 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0x6d, 0xba, 0x79, 0xa9, 0x25, 0xe5, - 0xf9, 0x45, 0xd9, 0x50, 0x5e, 0x62, 0x41, 0xa6, 0x7e, 0x7a, 0xbe, 0x7e, 0x5e, 0x7e, 0x4a, 0x2a, - 0x92, 0x77, 0x4a, 0x2a, 0x0b, 0x52, 0x8b, 0x61, 0x9e, 0x4a, 0x62, 0x03, 0x7b, 0xc7, 0x18, 0x10, - 0x00, 0x00, 0xff, 0xff, 0x59, 0xd1, 0x90, 0x65, 0x4c, 0x01, 0x00, 0x00, + 0x70, 0x72, 0x3d, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, + 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xed, 0x82, 0xec, + 0x74, 0xbd, 0xc4, 0xec, 0x12, 0xbd, 0x94, 0xd4, 0x32, 0xfd, 0xf4, 0x7c, 0xfd, 0xbc, 0xfc, 0x94, + 0x54, 0x24, 0x77, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xc3, 0x5c, 0x9f, 0xc4, 0x06, 0x76, 0xb7, 0x31, + 0x20, 0x00, 0x00, 0xff, 0xff, 0x3a, 0xb0, 0xb6, 0x32, 0x35, 0x01, 0x00, 0x00, } func (m *GenesisState) Marshal() (dAtA []byte, err error) { diff --git a/go/node/inflation/types/v1beta2/params.pb.go b/go/node/inflation/types/v1beta2/params.pb.go index 885990e5..df246402 100644 --- a/go/node/inflation/types/v1beta2/params.pb.go +++ b/go/node/inflation/types/v1beta2/params.pb.go @@ -6,8 +6,8 @@ package v1beta2 import ( fmt "fmt" github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -78,30 +78,29 @@ func init() { } var fileDescriptor_fea313162cb1e23f = []byte{ - // 358 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0x3f, 0x4b, 0xc3, 0x40, - 0x18, 0xc6, 0x73, 0x16, 0x4a, 0xcd, 0x62, 0x0d, 0x45, 0x83, 0x60, 0x4e, 0x82, 0x88, 0x4b, 0x73, - 0xa8, 0x9b, 0x63, 0x29, 0x42, 0x37, 0xe9, 0x20, 0x28, 0x42, 0x79, 0x9b, 0xa6, 0xe9, 0xd1, 0x26, - 0x17, 0x72, 0xd7, 0x4a, 0x47, 0xbf, 0x81, 0x1f, 0xc1, 0x8f, 0xe1, 0x47, 0xe8, 0xd8, 0x51, 0x04, - 0x0f, 0x49, 0xb7, 0x8e, 0xfd, 0x04, 0xd2, 0x6b, 0x12, 0x4a, 0xad, 0x43, 0xa7, 0xfc, 0x79, 0x9f, - 0xf7, 0xf7, 0xfe, 0x86, 0x47, 0x3f, 0x87, 0x3e, 0xf0, 0x1e, 0xa1, 0x61, 0x77, 0x00, 0x82, 0xb2, - 0x90, 0x8c, 0xae, 0xda, 0x9e, 0x80, 0x6b, 0x12, 0x41, 0x0c, 0x01, 0x77, 0xa2, 0x98, 0x09, 0x66, - 0x1c, 0xab, 0x94, 0x93, 0xa7, 0x9c, 0x34, 0x75, 0x52, 0xf1, 0x99, 0xcf, 0x54, 0x86, 0x2c, 0xdf, - 0x56, 0x71, 0xfb, 0xbb, 0xa0, 0x17, 0xef, 0xd5, 0xbe, 0xf1, 0x81, 0xf4, 0xa3, 0x7c, 0xad, 0xd5, - 0xf1, 0x5c, 0x18, 0xb7, 0xba, 0xe0, 0x0a, 0x16, 0x9b, 0xe8, 0x0c, 0x5d, 0xee, 0xd7, 0x5e, 0xd1, - 0x44, 0x62, 0xed, 0x4b, 0xe2, 0x0b, 0x9f, 0x8a, 0xde, 0xb0, 0xed, 0xb8, 0x2c, 0x20, 0x2e, 0xe3, - 0x01, 0xe3, 0xe9, 0xa3, 0xca, 0x3b, 0x7d, 0x22, 0xc6, 0x91, 0xc7, 0x9d, 0xba, 0xe7, 0x26, 0x12, - 0x57, 0x1a, 0x19, 0xb0, 0xbe, 0xe4, 0xdd, 0x29, 0xdc, 0x5c, 0xe2, 0x7f, 0x0e, 0x2d, 0x24, 0x3e, - 0x1d, 0x43, 0x30, 0xb8, 0xb5, 0xb7, 0xcf, 0xed, 0x66, 0x85, 0x6e, 0x01, 0x1a, 0xef, 0x48, 0x3f, - 0xa4, 0x21, 0x15, 0x14, 0x06, 0xad, 0x3c, 0x60, 0xee, 0x29, 0x6b, 0xbe, 0xb3, 
0x74, 0xb9, 0xb1, - 0x42, 0xe5, 0xee, 0x73, 0x89, 0xff, 0xe2, 0x17, 0x12, 0x9b, 0x99, 0xeb, 0xc6, 0xc8, 0x6e, 0x96, - 0xe9, 0x06, 0xc2, 0x18, 0xea, 0xa5, 0x11, 0xc4, 0x14, 0x42, 0xd7, 0x33, 0x0b, 0x4a, 0xec, 0x71, - 0x67, 0xb1, 0xd2, 0x43, 0x4a, 0x98, 0x4b, 0x9c, 0xd3, 0x16, 0x12, 0x1f, 0xac, 0x3c, 0xb2, 0x3f, - 0x76, 0x33, 0x1f, 0xd6, 0x9e, 0x27, 0x89, 0x85, 0xa6, 0x89, 0x85, 0x7e, 0x12, 0x0b, 0xbd, 0xcd, - 0x2c, 0x6d, 0x3a, 0xb3, 0xb4, 0xcf, 0x99, 0xa5, 0x3d, 0xd5, 0xd6, 0xce, 0xaa, 0xce, 0x54, 0x43, - 0x4f, 0xbc, 0xb0, 0xb8, 0x9f, 0x7e, 0x41, 0x44, 0x89, 0xcf, 0x48, 0xc8, 0x3a, 0xde, 0x5a, 0xe7, - 0x94, 0x4e, 0xd6, 0xbc, 0x76, 0x51, 0x95, 0xe8, 0xe6, 0x37, 0x00, 0x00, 0xff, 0xff, 0x9a, 0xe8, - 0x7c, 0xfd, 0x9b, 0x02, 0x00, 0x00, + // 352 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xb1, 0x4a, 0xc3, 0x40, + 0x18, 0xc7, 0x73, 0x16, 0x4a, 0xcd, 0x62, 0x0d, 0x45, 0x83, 0x60, 0x4e, 0x82, 0x88, 0x20, 0xde, + 0xa1, 0x6e, 0x8e, 0xa5, 0x0a, 0xdd, 0xa4, 0x83, 0xa0, 0x4b, 0xb9, 0x26, 0xd7, 0xf4, 0x48, 0x93, + 0x0b, 0xb9, 0x6b, 0xa1, 0xa3, 0x6f, 0xe0, 0x23, 0xf8, 0x18, 0x3e, 0x42, 0xc7, 0x8e, 0x22, 0x78, + 0x48, 0xba, 0x75, 0xec, 0x13, 0x48, 0xaf, 0x4d, 0x90, 0x5a, 0x87, 0x4e, 0x09, 0xf7, 0xfd, 0xbf, + 0xdf, 0xf7, 0x1b, 0xfe, 0xe6, 0x29, 0x09, 0x89, 0xe8, 0x61, 0x16, 0x77, 0xfb, 0x44, 0x32, 0x1e, + 0xe3, 0xe1, 0x55, 0x87, 0x4a, 0x72, 0x8d, 0x13, 0x92, 0x92, 0x48, 0xa0, 0x24, 0xe5, 0x92, 0x5b, + 0x87, 0x3a, 0x85, 0x8a, 0x14, 0x5a, 0xa5, 0x8e, 0x6a, 0x01, 0x0f, 0xb8, 0xce, 0xe0, 0xc5, 0xdf, + 0x32, 0xee, 0x7e, 0x95, 0xcc, 0xf2, 0x83, 0xde, 0xb7, 0xde, 0x81, 0x79, 0x50, 0xac, 0xb5, 0x7d, + 0xea, 0x91, 0x51, 0xbb, 0x4b, 0x3c, 0xc9, 0x53, 0x1b, 0x9c, 0x80, 0xf3, 0xdd, 0xfa, 0x0b, 0x18, + 0x2b, 0x68, 0x7c, 0x2a, 0x78, 0x16, 0x30, 0xd9, 0x1b, 0x74, 0x90, 0xc7, 0x23, 0xec, 0x71, 0x11, + 0x71, 0xb1, 0xfa, 0x5c, 0x0a, 0x3f, 0xc4, 0x72, 0x94, 0x50, 0x81, 0x1a, 0xd4, 0xcb, 0x14, 0xac, + 0x35, 0x73, 0x60, 0x63, 0xc1, 0xbb, 0xd7, 0xb8, 0x99, 0x82, 0xff, 0x1c, 0x9a, 0x2b, 0x78, 0x3c, + 0x22, 0x51, 0xff, 0xd6, 0xdd, 0x3c, 0x77, 0x5b, 0x35, 0xb6, 0x01, 0x68, 0xbd, 0x01, 0x73, 0x9f, + 0xc5, 0x4c, 0x32, 0xd2, 0x6f, 0x17, 0x01, 0x7b, 0x47, 0x5b, 0x8b, 0xad, 0xa5, 0xab, 0xcd, 0x25, + 0xaa, 0x70, 0x9f, 0x29, 0xf8, 0x17, 0x3f, 0x57, 0xd0, 0xce, 0x5d, 0xd7, 0x46, 0x6e, 0xab, 0xca, + 0xd6, 0x10, 0xd6, 0xc0, 0xac, 0x0c, 0x49, 0xca, 0x48, 0xec, 0x51, 0xbb, 0xa4, 0xc5, 0x9e, 0xb6, + 0x16, 0xab, 0x3c, 0xae, 0x08, 0x33, 0x05, 0x0b, 0xda, 0x5c, 0xc1, 0xbd, 0xa5, 0x47, 0xfe, 0xe2, + 0xb6, 0x8a, 0x61, 0xfd, 0x6e, 0x9c, 0x39, 0x60, 0x92, 0x39, 0xe0, 0x3b, 0x73, 0xc0, 0xeb, 0xd4, + 0x31, 0x26, 0x53, 0xc7, 0xf8, 0x98, 0x3a, 0xc6, 0xf3, 0x45, 0x12, 0x06, 0x88, 0x84, 0x12, 0xf9, + 0x74, 0x88, 0x03, 0x8e, 0x63, 0xee, 0xd3, 0x5f, 0xe5, 0xd2, 0x77, 0xf3, 0x8a, 0x75, 0xca, 0xba, + 0x2d, 0x37, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x4a, 0xd4, 0xaf, 0x92, 0x84, 0x02, 0x00, 0x00, } func (m *Params) Marshal() (dAtA []byte, err error) { diff --git a/go/node/inflation/v1beta2/errors.go b/go/node/inflation/v1beta2/errors.go deleted file mode 100644 index d0b1f9bd..00000000 --- a/go/node/inflation/v1beta2/errors.go +++ /dev/null @@ -1,12 +0,0 @@ -package v1beta2 - -import "errors" - -var ( - // ErrInvalidParam indicates an invalid chain parameter - ErrInvalidParam = errors.New("parameter invalid") - // ErrInvalidInitialInflation indicates an invalid initial_inflation parameter - ErrInvalidInitialInflation = errors.New("initial inflation parameter is invalid") - // 
ErrInvalidVariance indicates an invalid variance parameter - ErrInvalidVariance = errors.New("variance parameter is invalid") -) diff --git a/go/node/inflation/v1beta2/genesis.pb.go b/go/node/inflation/v1beta2/genesis.pb.go deleted file mode 100644 index f0f1c2a2..00000000 --- a/go/node/inflation/v1beta2/genesis.pb.go +++ /dev/null @@ -1,325 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/inflation/v1beta2/genesis.proto - -package v1beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisState stores slice of genesis deployment instance -type GenesisState struct { - Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params" yaml:"params"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_912221706d9e5bb6, []int{0} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetParams() Params { - if m != nil { - return m.Params - } - return Params{} -} - -func init() { - proto.RegisterType((*GenesisState)(nil), "akash.inflation.v1beta2.GenesisState") -} - -func init() { - proto.RegisterFile("akash/inflation/v1beta2/genesis.proto", fileDescriptor_912221706d9e5bb6) -} - -var fileDescriptor_912221706d9e5bb6 = []byte{ - // 231 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0xcf, 0xcc, 0x4b, 0xcb, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x2f, 0x33, 0x4c, 0x4a, - 0x2d, 0x49, 0x34, 0xd2, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, - 0xc9, 0x17, 0x12, 0x07, 0x2b, 0xd3, 0x83, 0x2b, 0xd3, 0x83, 0x2a, 0x93, 0x12, 0x49, 0xcf, 0x4f, - 0xcf, 0x07, 0xab, 0xd1, 0x07, 0xb1, 0x20, 0xca, 0xa5, 0x54, 0x70, 0x99, 0x5a, 0x90, 0x58, 0x94, - 0x98, 0x0b, 0x35, 0x54, 0x29, 0x83, 0x8b, 0xc7, 0x1d, 0x62, 0x4b, 0x70, 0x49, 0x62, 0x49, 0xaa, - 0x50, 0x04, 0x17, 0x1b, 0x44, 0x5e, 0x82, 0x51, 0x81, 0x51, 0x83, 0xdb, 0x48, 0x5e, 0x0f, 0x87, - 0xad, 0x7a, 0x01, 0x60, 0x65, 0x4e, 0xf2, 0x27, 0xee, 0xc9, 0x33, 0xbc, 0xba, 0x27, 0x0f, 0xd5, - 0xf6, 0xe9, 0x9e, 0x3c, 0x6f, 0x65, 0x62, 0x6e, 0x8e, 0x95, 0x12, 0x84, 
0xaf, 0x14, 0x04, 0x95, - 0x70, 0x0a, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, - 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xf3, 0xf4, 0xcc, - 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0x6d, 0xba, 0x79, 0xa9, 0x25, 0xe5, - 0xf9, 0x45, 0xd9, 0xfa, 0x79, 0xf9, 0x29, 0xa9, 0xfa, 0x15, 0x48, 0x7e, 0x28, 0xa9, 0x2c, 0x48, - 0x2d, 0x86, 0xf9, 0x24, 0x89, 0x0d, 0xec, 0x07, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3f, - 0x22, 0xf7, 0xd9, 0x41, 0x01, 0x00, 0x00, -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Params.Size() - n += 1 + l + sovGenesis(uint64(l)) - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA 
[]byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/inflation/v1beta2/inflation_calculator.go b/go/node/inflation/v1beta2/inflation_calculator.go deleted file mode 100644 index 27048080..00000000 --- a/go/node/inflation/v1beta2/inflation_calculator.go +++ /dev/null @@ -1,102 +0,0 @@ -package v1beta2 - -// todo akash-network/support#4 -// import ( -// "fmt" -// "time" -// -// sdk "github.com/cosmos/cosmos-sdk/types" -// minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" -// paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" -// "github.com/ericlagergren/decimal" -// ) -// -// func GetInflationCalculator( -// genesisTime time.Time, -// inflationParamSubspace paramstypes.Subspace, -// ) minttypes.InflationCalculationFn { -// return func(ctx sdk.Context, minter minttypes.Minter, params minttypes.Params, bondedRatio sdk.Dec) sdk.Dec { -// var inflationParams Params -// inflationParamSubspace.GetParamSet(ctx, &inflationParams) -// -// return inflationCalculator(ctx.BlockTime(), genesisTime, minter, params, inflationParams, bondedRatio) -// } -// } -// -// // inflationCalculator calculate current inflation value -// // - btime - block time from sdk.Context -// // - gtime - genesis time -// func inflationCalculator(btime, gtime time.Time, minter minttypes.Minter, mparams minttypes.Params, iparams Params, bondedRatio sdk.Dec) sdk.Dec { -// inflationDecayFactor := new(decimal.Big) -// if _, valid := inflationDecayFactor.SetString(iparams.InflationDecayFactor.String()); !valid { -// panic(fmt.Sprintf("InflationDecayFactor contains invalid value [%s]. expected integer/float", iparams.InflationDecayFactor.String())) -// } -// -// // years passed since genesis = seconds passed since genesis / number of seconds per year -// // can be a fraction, eg: 0.5 -// yearsPassed := decimal.WithPrecision(sdk.Precision). -// Quo( -// // seconds since genesis -// decimal.WithPrecision(sdk.Precision). 
-// Sub( -// decimal.New(btime.Unix(), 0), -// decimal.New(gtime.Unix(), 0), -// ), -// // Number of hours in an year = 8766 (averaging the leap year hours) -// // Number of minutes in an hour = 60 -// // Number of seconds in a minute = 60 -// // => Number of seconds per year = 60 * 60 * 8766 = 31557600 -// decimal.New(31557600, 0), -// ) -// // 2^(-t/tHalf) -// inflationCoefDec := decimal.WithPrecision(sdk.Precision) -// inflationCoefDec = inflationCoefDec.Context. -// Pow( -// inflationCoefDec, -// decimal.New(2, 0), -// decimal.WithPrecision(sdk.Precision). -// Mul( -// decimal.New(-1, 0), -// decimal.WithPrecision(sdk.Precision). -// Quo(yearsPassed, inflationDecayFactor), -// ), -// ) -// // convert inflationCoefDec to sdk.Dec with a 6 unit precision: sdk.Decimal(big.Int(pow * 10^6)) / 10^6 -// inflationCoef := sdk.NewDecFromBigInt( -// decimal.WithPrecision(sdk.Precision). -// Mul(inflationCoefDec, decimal.New(1000000, 0)). -// Int(nil), -// ).QuoInt64(1000000) -// -// idealInflation := iparams.InitialInflation.Mul(inflationCoef) -// -// // (1 - bondedRatio/GoalBonded) * InflationRateChange -// inflationRateChangePerYear := sdk.OneDec(). -// Sub(bondedRatio.Quo(mparams.GoalBonded)). -// Mul(mparams.InflationRateChange) -// -// inflationRateChange := inflationRateChangePerYear.Quo(sdk.NewDecFromInt(sdk.NewIntFromUint64(mparams.BlocksPerYear))) -// -// sdk.NewDecFromInt(sdk.NewIntFromUint64(mparams.BlocksPerYear)) -// -// // note inflationRateChange may be negative -// currentInflation := minter.Inflation.Add(inflationRateChange) -// -// // min, max currentInflation based on a defined range parameter 'r' -// // currentInflation range = [I(t) - I(t) * R, I(t) + I(t) * R] -// // R is from iparams.Variance -// minInflation := idealInflation.Sub(idealInflation.Mul(iparams.Variance)) -// maxInflation := idealInflation.Add(idealInflation.Mul(iparams.Variance)) -// -// // the lowest possible value of minInflation is set for 0 -// // tho it can be set to higher value in the future -// minInflation = sdk.MaxDec(sdk.ZeroDec(), minInflation) -// -// if currentInflation.LT(minInflation) { -// currentInflation = minInflation -// } else if currentInflation.GT(maxInflation) { -// currentInflation = maxInflation -// } -// -// return currentInflation -// } diff --git a/go/node/inflation/v1beta2/inflation_calculator_test.go b/go/node/inflation/v1beta2/inflation_calculator_test.go deleted file mode 100644 index 981a25cd..00000000 --- a/go/node/inflation/v1beta2/inflation_calculator_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package v1beta2 - -// todo akash-network/support#4 -// import ( -// "testing" -// "time" -// -// sdk "github.com/cosmos/cosmos-sdk/types" -// minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" -// "github.com/stretchr/testify/suite" -// ) -// -// const ( -// blocksPerYear = 5256000 -// ) -// -// type InflationCalculatorTestSuite struct { -// suite.Suite -// params Params -// genesistime time.Time -// } -// -// func (s *InflationCalculatorTestSuite) SetupSuite() { -// var err error -// s.genesistime, err = time.Parse(time.RFC3339, "2021-03-08T15:00:00Z") -// s.Require().NoError(err) -// -// s.params.InflationDecayFactor, err = sdk.NewDecFromStr("2.10306569") -// s.Require().NoError(err) -// -// s.params.InitialInflation, err = sdk.NewDecFromStr("48.546257") -// s.Require().NoError(err) -// -// s.params.Variance, err = sdk.NewDecFromStr("0.05") -// s.Require().NoError(err) -// } -// -// func TestIntegrationTestSuite(t *testing.T) { -// suite.Run(t, new(InflationCalculatorTestSuite)) -// 
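The commented-out calculator deleted above computes an ideal inflation of InitialInflation * 2^(-yearsPassed/InflationDecayFactor), where yearsPassed = secondsSinceGenesis / 31557600, and then clamps the minter's current inflation to within ±Variance of that ideal, flooring the lower bound at zero. A rough float64 sketch of that curve, illustrative only: the deleted code uses fixed-point sdk.Dec and decimal.Big arithmetic, and the function names and sample values below are hypothetical:

package main

import (
	"fmt"
	"math"
)

const secondsPerYear = 31557600 // 60 * 60 * 8766, averaging leap-year hours

// idealInflation halves every decayFactor years: initial * 2^(-t/decayFactor).
func idealInflation(initial, decayFactor, secondsSinceGenesis float64) float64 {
	years := secondsSinceGenesis / secondsPerYear
	return initial * math.Pow(2, -years/decayFactor)
}

// clampToVariance keeps the current inflation within ±variance of the ideal
// value, never letting the lower bound drop below zero.
func clampToVariance(current, ideal, variance float64) float64 {
	minInfl := math.Max(0, ideal*(1-variance))
	maxInfl := ideal * (1 + variance)
	return math.Min(math.Max(current, minInfl), maxInfl)
}

func main() {
	// Roughly 1.11 years after genesis, with the decay factor and initial
	// inflation used in the deleted test fixtures.
	ideal := idealInflation(48.546257, 2.10306569, 1.11*secondsPerYear)
	fmt.Printf("ideal: %.6f clamped: %.6f\n", ideal, clampToVariance(0.230326, ideal, 0.05))
}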
} -// -// func (s *InflationCalculatorTestSuite) TestInflationCalculatorInvalidDecayFactor() { -// testFn := func() { -// inflationCalculator( -// time.Time{}, -// time.Time{}, -// minttypes.Minter{}, -// minttypes.Params{}, -// Params{}, -// sdk.Dec{}) -// } -// -// s.Panics(testFn) -// } -// -// func (s *InflationCalculatorTestSuite) TestInflationCalculator1() { -// goalBonded, err := sdk.NewDecFromStr("0.67") -// s.Require().NoError(err) -// -// currBonded, err := sdk.NewDecFromStr("0.7324") -// s.Require().NoError(err) -// -// currInflation, err := sdk.NewDecFromStr("0.230326319830867266") -// s.Require().NoError(err) -// -// blockTime, _ := time.Parse(time.RFC3339, "2022-04-18T18:28:26+00:00") -// -// res := inflationCalculator( -// blockTime, -// s.genesistime, -// minttypes.Minter{ -// Inflation: currInflation, -// }, -// minttypes.Params{ -// BlocksPerYear: blocksPerYear, -// GoalBonded: goalBonded, -// InflationRateChange: s.params.Variance, -// }, -// s.params, -// currBonded) -// -// s.Require().Equal("31.967899564902300000", res.String()) -// } diff --git a/go/node/inflation/v1beta2/key.go b/go/node/inflation/v1beta2/key.go deleted file mode 100644 index 4083fff8..00000000 --- a/go/node/inflation/v1beta2/key.go +++ /dev/null @@ -1,12 +0,0 @@ -package v1beta2 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "inflation" - - // StoreKey is the store key string for deployment - StoreKey = ModuleName - - // RouterKey is the message route for deployment - RouterKey = ModuleName -) diff --git a/go/node/inflation/v1beta2/params.go b/go/node/inflation/v1beta2/params.go deleted file mode 100644 index e5aaa85c..00000000 --- a/go/node/inflation/v1beta2/params.go +++ /dev/null @@ -1,94 +0,0 @@ -package v1beta2 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" - "github.com/pkg/errors" -) - -var _ paramtypes.ParamSet = (*Params)(nil) - -const ( - keyInflationDecayFactor = "InflationDecayFactor" - keyInitialInflation = "InitialInflation" - keyVariance = "Variance" -) - -func DefaultInflationDecayFactor() sdk.Dec { return sdk.NewDec(2) } // years - -func DefaultInitialInflation() sdk.Dec { return sdk.NewDec(100) } -func DefaultVarince() sdk.Dec { return sdk.MustNewDecFromStr("0.05") } - -func MaxInitialInflation() sdk.Dec { return sdk.NewDec(100) } -func MinInitialInflation() sdk.Dec { return sdk.ZeroDec() } - -func MaxVariance() sdk.Dec { return sdk.NewDec(1) } -func MinVariance() sdk.Dec { return sdk.ZeroDec() } - -func ParamKeyTable() paramtypes.KeyTable { - return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) -} - -func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { - return paramtypes.ParamSetPairs{ - paramtypes.NewParamSetPair([]byte(keyInflationDecayFactor), &p.InflationDecayFactor, validateInflationDecayFactor), - paramtypes.NewParamSetPair([]byte(keyInitialInflation), &p.InitialInflation, validateInitialInflation), - paramtypes.NewParamSetPair([]byte(keyVariance), &p.Variance, validateVariance), - } -} - -func DefaultParams() Params { - return Params{ - InflationDecayFactor: DefaultInflationDecayFactor(), - InitialInflation: DefaultInitialInflation(), - Variance: DefaultVarince(), - } -} - -func (p Params) Validate() error { - if err := validateInflationDecayFactor(p.InflationDecayFactor); err != nil { - return err - } - if err := validateInitialInflation(p.InitialInflation); err != nil { - return err - } - if err := validateVariance(p.Variance); err != nil { - 
return err - } - - return nil -} - -func validateInflationDecayFactor(i interface{}) error { - v, ok := i.(sdk.Dec) - if !ok || v.LT(sdk.NewDec(1)) { - return errors.Wrapf(ErrInvalidParam, "%T", i) - } - - return nil -} - -func validateInitialInflation(i interface{}) error { - v, ok := i.(sdk.Dec) - if !ok { - return errors.Wrapf(ErrInvalidParam, "%T", i) - } - if v.GT(MaxInitialInflation()) || v.LT(MinInitialInflation()) { - return errors.Wrapf(ErrInvalidInitialInflation, "%v", v) - } - - return nil -} - -func validateVariance(i interface{}) error { - v, ok := i.(sdk.Dec) - if !ok { - return errors.Wrapf(ErrInvalidParam, "%T", i) - } - if v.GT(MaxVariance()) || v.LT(MinVariance()) { - return errors.Wrapf(ErrInvalidVariance, "%v", v) - } - - return nil -} diff --git a/go/node/inflation/v1beta2/params.pb.go b/go/node/inflation/v1beta2/params.pb.go deleted file mode 100644 index 73b2569d..00000000 --- a/go/node/inflation/v1beta2/params.pb.go +++ /dev/null @@ -1,427 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/inflation/v1beta2/params.proto - -package v1beta2 - -import ( - fmt "fmt" - github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Params defines the parameters for the x/deployment package -type Params struct { - // InflationDecayFactor is the number of years it takes inflation to halve. - InflationDecayFactor github_com_cosmos_cosmos_sdk_types.Dec `protobuf:"bytes,1,opt,name=inflation_decay_factor,json=inflationDecayFactor,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Dec" json:"inflation_decay_factor" yaml:"inflation_decay_factor"` - // InitialInflation is the rate at which inflation starts at genesis. - // It is a decimal value in the range [0.0, 100.0]. - InitialInflation github_com_cosmos_cosmos_sdk_types.Dec `protobuf:"bytes,2,opt,name=initial_inflation,json=initialInflation,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Dec" json:"initial_inflation" yaml:"initial_inflation"` - // Variance defines the fraction by which inflation can vary from ideal inflation in a block. - // It is a decimal value in the range [0.0, 1.0]. 
- Variance github_com_cosmos_cosmos_sdk_types.Dec `protobuf:"bytes,3,opt,name=variance,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Dec" json:"variance" yaml:"variance"` -} - -func (m *Params) Reset() { *m = Params{} } -func (m *Params) String() string { return proto.CompactTextString(m) } -func (*Params) ProtoMessage() {} -func (*Params) Descriptor() ([]byte, []int) { - return fileDescriptor_fea313162cb1e23f, []int{0} -} -func (m *Params) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Params.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Params) XXX_Merge(src proto.Message) { - xxx_messageInfo_Params.Merge(m, src) -} -func (m *Params) XXX_Size() int { - return m.Size() -} -func (m *Params) XXX_DiscardUnknown() { - xxx_messageInfo_Params.DiscardUnknown(m) -} - -var xxx_messageInfo_Params proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Params)(nil), "akash.inflation.v1beta2.Params") -} - -func init() { - proto.RegisterFile("akash/inflation/v1beta2/params.proto", fileDescriptor_fea313162cb1e23f) -} - -var fileDescriptor_fea313162cb1e23f = []byte{ - // 355 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0xcf, 0xcc, 0x4b, 0xcb, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x2f, 0x33, 0x4c, 0x4a, - 0x2d, 0x49, 0x34, 0xd2, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, - 0x17, 0x12, 0x07, 0xab, 0xd2, 0x83, 0xab, 0xd2, 0x83, 0xaa, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, - 0x07, 0xab, 0xd1, 0x07, 0xb1, 0x20, 0xca, 0x95, 0xee, 0x32, 0x73, 0xb1, 0x05, 0x80, 0xf5, 0x0b, - 0xed, 0x60, 0xe4, 0x12, 0x83, 0x6b, 0x8b, 0x4f, 0x49, 0x4d, 0x4e, 0xac, 0x8c, 0x4f, 0x4b, 0x4c, - 0x2e, 0xc9, 0x2f, 0x92, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x74, 0x6a, 0x64, 0x3c, 0x71, 0x4f, 0x9e, - 0xe1, 0xd6, 0x3d, 0x79, 0xb5, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0xfd, - 0xe4, 0xfc, 0xe2, 0xdc, 0xfc, 0x62, 0x28, 0xa5, 0x5b, 0x9c, 0x92, 0xad, 0x5f, 0x52, 0x59, 0x90, - 0x5a, 0xac, 0xe7, 0x92, 0x9a, 0xfc, 0xe8, 0x9e, 0xbc, 0x88, 0x27, 0xcc, 0x40, 0x17, 0x90, 0x79, - 0x6e, 0x60, 0xe3, 0x5e, 0xdd, 0x93, 0xc7, 0x61, 0xd1, 0xa7, 0x7b, 0xf2, 0xb2, 0x95, 0x89, 0xb9, - 0x39, 0x56, 0x4a, 0xd8, 0xe5, 0x95, 0x82, 0x44, 0x32, 0xb1, 0x18, 0x28, 0xb4, 0x80, 0x91, 0x4b, - 0x30, 0x33, 0x2f, 0xb3, 0x24, 0x33, 0x31, 0x27, 0x1e, 0xae, 0x40, 0x82, 0x09, 0xec, 0xea, 0x62, - 0x92, 0x1d, 0x2d, 0xe0, 0x09, 0x31, 0x0a, 0xee, 0xf6, 0x57, 0xf7, 0xe4, 0x31, 0x8d, 0xff, 0x74, - 0x4f, 0x5e, 0x02, 0xe6, 0x56, 0x34, 0x29, 0xa5, 0x20, 0x81, 0x4c, 0x34, 0x23, 0x84, 0x4a, 0xb9, - 0x38, 0xca, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x92, 0x53, 0x25, 0x98, 0xc1, 0x0e, 0x8b, 0x24, 0xd9, - 0x61, 0x1c, 0x61, 0x50, 0x13, 0x5e, 0xdd, 0x93, 0x87, 0x9b, 0xf6, 0xe9, 0x9e, 0x3c, 0x3f, 0xc4, - 0x1d, 0x30, 0x11, 0xa5, 0x20, 0xb8, 0xa4, 0x53, 0xe0, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, - 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, - 0xcb, 0x31, 0x44, 0x99, 0x23, 0x59, 0x0b, 0x4e, 0x33, 0xba, 0x79, 0xa9, 0x25, 0xe5, 0xf9, 0x45, - 0xd9, 0xfa, 0x79, 0xf9, 0x29, 0xa9, 0xfa, 0x15, 0x48, 0x09, 0x0d, 0xec, 0x06, 0x58, 0x72, 0x4b, - 0x62, 0x03, 0xa7, 0x1c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbf, 0x6e, 0xaf, 0x22, 0x90, - 
0x02, 0x00, 0x00, -} - -func (m *Params) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Params) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size := m.Variance.Size() - i -= size - if _, err := m.Variance.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintParams(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size := m.InitialInflation.Size() - i -= size - if _, err := m.InitialInflation.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintParams(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size := m.InflationDecayFactor.Size() - i -= size - if _, err := m.InflationDecayFactor.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintParams(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintParams(dAtA []byte, offset int, v uint64) int { - offset -= sovParams(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Params) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.InflationDecayFactor.Size() - n += 1 + l + sovParams(uint64(l)) - l = m.InitialInflation.Size() - n += 1 + l + sovParams(uint64(l)) - l = m.Variance.Size() - n += 1 + l + sovParams(uint64(l)) - return n -} - -func sovParams(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozParams(x uint64) (n int) { - return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Params) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Params: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InflationDecayFactor", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthParams - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthParams - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.InflationDecayFactor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitialInflation", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthParams - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthParams - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.InitialInflation.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Variance", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthParams - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthParams - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Variance.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipParams(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipParams(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthParams - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupParams - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthParams - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/inflation/v1beta3/errors.go b/go/node/inflation/v1beta3/errors.go deleted file mode 100644 index 71679b09..00000000 --- a/go/node/inflation/v1beta3/errors.go +++ /dev/null @@ -1,12 +0,0 @@ -package v1beta3 - -import "errors" - -var ( - // ErrInvalidParam indicates an invalid chain parameter - ErrInvalidParam = errors.New("parameter invalid") - // ErrInvalidInitialInflation indicates an 
invalid initial_inflation parameter - ErrInvalidInitialInflation = errors.New("initial inflation parameter is invalid") - // ErrInvalidVariance indicates an invalid variance parameter - ErrInvalidVariance = errors.New("variance parameter is invalid") -) diff --git a/go/node/inflation/v1beta3/genesis.pb.go b/go/node/inflation/v1beta3/genesis.pb.go index 4150c2b8..dfe67572 100644 --- a/go/node/inflation/v1beta3/genesis.pb.go +++ b/go/node/inflation/v1beta3/genesis.pb.go @@ -5,8 +5,8 @@ package v1beta3 import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -77,7 +77,7 @@ func init() { } var fileDescriptor_9715c8a93ab9af0b = []byte{ - // 229 bytes of a gzipped FileDescriptorProto + // 214 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xcc, 0x4e, 0x2c, 0xce, 0xd0, 0xcf, 0xcc, 0x4b, 0xcb, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd6, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, @@ -87,12 +87,11 @@ var fileDescriptor_9715c8a93ab9af0b = []byte{ 0x50, 0x04, 0x17, 0x1b, 0x44, 0x5e, 0x82, 0x51, 0x81, 0x51, 0x83, 0xdb, 0x48, 0x5e, 0x0f, 0x87, 0xad, 0x7a, 0x01, 0x60, 0x65, 0x4e, 0xf2, 0x27, 0xee, 0xc9, 0x33, 0xbc, 0xba, 0x27, 0x0f, 0xd5, 0xf6, 0xe9, 0x9e, 0x3c, 0x6f, 0x65, 0x62, 0x6e, 0x8e, 0x95, 0x12, 0x84, 0xaf, 0x14, 0x04, 0x95, - 0x70, 0x0a, 0x3b, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, - 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0x9b, 0xf4, 0xcc, - 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0x6d, 0xba, 0x79, 0xa9, 0x25, 0xe5, - 0xf9, 0x45, 0xd9, 0x50, 0x5e, 0x62, 0x41, 0xa6, 0x7e, 0x7a, 0xbe, 0x7e, 0x5e, 0x7e, 0x4a, 0x2a, - 0xa6, 0x77, 0x92, 0xd8, 0xc0, 0x1e, 0x31, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x75, 0xa3, 0xaf, - 0xd7, 0x46, 0x01, 0x00, 0x00, + 0x70, 0xb2, 0x3f, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, + 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xd5, 0x82, 0xec, + 0x74, 0xbd, 0xc4, 0xec, 0x12, 0xbd, 0x94, 0xd4, 0x32, 0xfd, 0xf4, 0x7c, 0xfd, 0xbc, 0xfc, 0x94, + 0x54, 0x4c, 0x77, 0x27, 0xb1, 0x81, 0x5d, 0x6c, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xab, 0x4e, + 0xc6, 0xae, 0x2f, 0x01, 0x00, 0x00, } func (m *GenesisState) Marshal() (dAtA []byte, err error) { diff --git a/go/node/inflation/v1beta3/inflation_calculator.go b/go/node/inflation/v1beta3/inflation_calculator.go deleted file mode 100644 index 78782d25..00000000 --- a/go/node/inflation/v1beta3/inflation_calculator.go +++ /dev/null @@ -1,102 +0,0 @@ -package v1beta3 - -// todo akash-network/support#4 -// import ( -// "fmt" -// "time" -// -// sdk "github.com/cosmos/cosmos-sdk/types" -// minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" -// paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" -// "github.com/ericlagergren/decimal" -// ) -// -// func GetInflationCalculator( -// genesisTime time.Time, -// inflationParamSubspace paramstypes.Subspace, -// ) minttypes.InflationCalculationFn { -// return func(ctx sdk.Context, minter minttypes.Minter, params minttypes.Params, bondedRatio sdk.Dec) sdk.Dec { -// var inflationParams Params -// inflationParamSubspace.GetParamSet(ctx, &inflationParams) -// -// return inflationCalculator(ctx.BlockTime(), genesisTime, minter, 
params, inflationParams, bondedRatio) -// } -// } -// -// // inflationCalculator calculate current inflation value -// // - btime - block time from sdk.Context -// // - gtime - genesis time -// func inflationCalculator(btime, gtime time.Time, minter minttypes.Minter, mparams minttypes.Params, iparams Params, bondedRatio sdk.Dec) sdk.Dec { -// inflationDecayFactor := new(decimal.Big) -// if _, valid := inflationDecayFactor.SetString(iparams.InflationDecayFactor.String()); !valid { -// panic(fmt.Sprintf("InflationDecayFactor contains invalid value [%s]. expected integer/float", iparams.InflationDecayFactor.String())) -// } -// -// // years passed since genesis = seconds passed since genesis / number of seconds per year -// // can be a fraction, eg: 0.5 -// yearsPassed := decimal.WithPrecision(sdk.Precision). -// Quo( -// // seconds since genesis -// decimal.WithPrecision(sdk.Precision). -// Sub( -// decimal.New(btime.Unix(), 0), -// decimal.New(gtime.Unix(), 0), -// ), -// // Number of hours in an year = 8766 (averaging the leap year hours) -// // Number of minutes in an hour = 60 -// // Number of seconds in a minute = 60 -// // => Number of seconds per year = 60 * 60 * 8766 = 31557600 -// decimal.New(31557600, 0), -// ) -// // 2^(-t/tHalf) -// inflationCoefDec := decimal.WithPrecision(sdk.Precision) -// inflationCoefDec = inflationCoefDec.Context. -// Pow( -// inflationCoefDec, -// decimal.New(2, 0), -// decimal.WithPrecision(sdk.Precision). -// Mul( -// decimal.New(-1, 0), -// decimal.WithPrecision(sdk.Precision). -// Quo(yearsPassed, inflationDecayFactor), -// ), -// ) -// // convert inflationCoefDec to sdk.Dec with a 6 unit precision: sdk.Decimal(big.Int(pow * 10^6)) / 10^6 -// inflationCoef := sdk.NewDecFromBigInt( -// decimal.WithPrecision(sdk.Precision). -// Mul(inflationCoefDec, decimal.New(1000000, 0)). -// Int(nil), -// ).QuoInt64(1000000) -// -// idealInflation := iparams.InitialInflation.Mul(inflationCoef) -// -// // (1 - bondedRatio/GoalBonded) * InflationRateChange -// inflationRateChangePerYear := sdk.OneDec(). -// Sub(bondedRatio.Quo(mparams.GoalBonded)). 
-// Mul(mparams.InflationRateChange) -// -// inflationRateChange := inflationRateChangePerYear.Quo(sdk.NewDecFromInt(sdk.NewIntFromUint64(mparams.BlocksPerYear))) -// -// sdk.NewDecFromInt(sdk.NewIntFromUint64(mparams.BlocksPerYear)) -// -// // note inflationRateChange may be negative -// currentInflation := minter.Inflation.Add(inflationRateChange) -// -// // min, max currentInflation based on a defined range parameter 'r' -// // currentInflation range = [I(t) - I(t) * R, I(t) + I(t) * R] -// // R is from iparams.Variance -// minInflation := idealInflation.Sub(idealInflation.Mul(iparams.Variance)) -// maxInflation := idealInflation.Add(idealInflation.Mul(iparams.Variance)) -// -// // the lowest possible value of minInflation is set for 0 -// // tho it can be set to higher value in the future -// minInflation = sdk.MaxDec(sdk.ZeroDec(), minInflation) -// -// if currentInflation.LT(minInflation) { -// currentInflation = minInflation -// } else if currentInflation.GT(maxInflation) { -// currentInflation = maxInflation -// } -// -// return currentInflation -// } diff --git a/go/node/inflation/v1beta3/inflation_calculator_test.go b/go/node/inflation/v1beta3/inflation_calculator_test.go deleted file mode 100644 index 0a9dd17f..00000000 --- a/go/node/inflation/v1beta3/inflation_calculator_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package v1beta3 - -// todo akash-network/support#4 -// import ( -// "testing" -// "time" -// -// sdk "github.com/cosmos/cosmos-sdk/types" -// minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" -// "github.com/stretchr/testify/suite" -// ) -// -// const ( -// blocksPerYear = 5256000 -// ) -// -// type InflationCalculatorTestSuite struct { -// suite.Suite -// params Params -// genesistime time.Time -// } -// -// func (s *InflationCalculatorTestSuite) SetupSuite() { -// var err error -// s.genesistime, err = time.Parse(time.RFC3339, "2021-03-08T15:00:00Z") -// s.Require().NoError(err) -// -// s.params.InflationDecayFactor, err = sdk.NewDecFromStr("2.10306569") -// s.Require().NoError(err) -// -// s.params.InitialInflation, err = sdk.NewDecFromStr("48.546257") -// s.Require().NoError(err) -// -// s.params.Variance, err = sdk.NewDecFromStr("0.05") -// s.Require().NoError(err) -// } -// -// func TestIntegrationTestSuite(t *testing.T) { -// suite.Run(t, new(InflationCalculatorTestSuite)) -// } -// -// func (s *InflationCalculatorTestSuite) TestInflationCalculatorInvalidDecayFactor() { -// testFn := func() { -// inflationCalculator( -// time.Time{}, -// time.Time{}, -// minttypes.Minter{}, -// minttypes.Params{}, -// Params{}, -// sdk.Dec{}) -// } -// -// s.Panics(testFn) -// } -// -// func (s *InflationCalculatorTestSuite) TestInflationCalculator1() { -// goalBonded, err := sdk.NewDecFromStr("0.67") -// s.Require().NoError(err) -// -// currBonded, err := sdk.NewDecFromStr("0.7324") -// s.Require().NoError(err) -// -// currInflation, err := sdk.NewDecFromStr("0.230326319830867266") -// s.Require().NoError(err) -// -// blockTime, _ := time.Parse(time.RFC3339, "2022-04-18T18:28:26+00:00") -// -// res := inflationCalculator( -// blockTime, -// s.genesistime, -// minttypes.Minter{ -// Inflation: currInflation, -// }, -// minttypes.Params{ -// BlocksPerYear: blocksPerYear, -// GoalBonded: goalBonded, -// InflationRateChange: s.params.Variance, -// }, -// s.params, -// currBonded) -// -// s.Require().Equal("31.967899564902300000", res.String()) -// } diff --git a/go/node/inflation/v1beta3/params.go b/go/node/inflation/v1beta3/params.go deleted file mode 100644 index 
c09e26c1..00000000 --- a/go/node/inflation/v1beta3/params.go +++ /dev/null @@ -1,94 +0,0 @@ -package v1beta3 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" - "github.com/pkg/errors" -) - -var _ paramtypes.ParamSet = (*Params)(nil) - -const ( - keyInflationDecayFactor = "InflationDecayFactor" - keyInitialInflation = "InitialInflation" - keyVariance = "Variance" -) - -func DefaultInflationDecayFactor() sdk.Dec { return sdk.NewDec(2) } // years - -func DefaultInitialInflation() sdk.Dec { return sdk.NewDec(100) } -func DefaultVarince() sdk.Dec { return sdk.MustNewDecFromStr("0.05") } - -func MaxInitialInflation() sdk.Dec { return sdk.NewDec(100) } -func MinInitialInflation() sdk.Dec { return sdk.ZeroDec() } - -func MaxVariance() sdk.Dec { return sdk.NewDec(1) } -func MinVariance() sdk.Dec { return sdk.ZeroDec() } - -func ParamKeyTable() paramtypes.KeyTable { - return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) -} - -func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { - return paramtypes.ParamSetPairs{ - paramtypes.NewParamSetPair([]byte(keyInflationDecayFactor), &p.InflationDecayFactor, validateInflationDecayFactor), - paramtypes.NewParamSetPair([]byte(keyInitialInflation), &p.InitialInflation, validateInitialInflation), - paramtypes.NewParamSetPair([]byte(keyVariance), &p.Variance, validateVariance), - } -} - -func DefaultParams() Params { - return Params{ - InflationDecayFactor: DefaultInflationDecayFactor(), - InitialInflation: DefaultInitialInflation(), - Variance: DefaultVarince(), - } -} - -func (p Params) Validate() error { - if err := validateInflationDecayFactor(p.InflationDecayFactor); err != nil { - return err - } - if err := validateInitialInflation(p.InitialInflation); err != nil { - return err - } - if err := validateVariance(p.Variance); err != nil { - return err - } - - return nil -} - -func validateInflationDecayFactor(i interface{}) error { - v, ok := i.(sdk.Dec) - if !ok || v.LT(sdk.NewDec(1)) { - return errors.Wrapf(ErrInvalidParam, "%T", i) - } - - return nil -} - -func validateInitialInflation(i interface{}) error { - v, ok := i.(sdk.Dec) - if !ok { - return errors.Wrapf(ErrInvalidParam, "%T", i) - } - if v.GT(MaxInitialInflation()) || v.LT(MinInitialInflation()) { - return errors.Wrapf(ErrInvalidInitialInflation, "%v", v) - } - - return nil -} - -func validateVariance(i interface{}) error { - v, ok := i.(sdk.Dec) - if !ok { - return errors.Wrapf(ErrInvalidParam, "%T", i) - } - if v.GT(MaxVariance()) || v.LT(MinVariance()) { - return errors.Wrapf(ErrInvalidVariance, "%v", v) - } - - return nil -} diff --git a/go/node/inflation/v1beta3/params.pb.go b/go/node/inflation/v1beta3/params.pb.go index 06f332d9..678be612 100644 --- a/go/node/inflation/v1beta3/params.pb.go +++ b/go/node/inflation/v1beta3/params.pb.go @@ -6,8 +6,8 @@ package v1beta3 import ( fmt "fmt" github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -78,30 +78,29 @@ func init() { } var fileDescriptor_07a125eb3d20d95e = []byte{ - // 356 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0xcf, 0xcc, 0x4b, 0xcb, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x2f, 0x33, 0x4c, 0x4a, - 0x2d, 0x49, 0x34, 0xd6, 0x2f, 
0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, - 0x17, 0x12, 0x07, 0xab, 0xd2, 0x83, 0xab, 0xd2, 0x83, 0xaa, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, - 0x07, 0xab, 0xd1, 0x07, 0xb1, 0x20, 0xca, 0x95, 0xee, 0x32, 0x73, 0xb1, 0x05, 0x80, 0xf5, 0x0b, - 0xed, 0x60, 0xe4, 0x12, 0x83, 0x6b, 0x8b, 0x4f, 0x49, 0x4d, 0x4e, 0xac, 0x8c, 0x4f, 0x4b, 0x4c, - 0x2e, 0xc9, 0x2f, 0x92, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x74, 0x6a, 0x64, 0x3c, 0x71, 0x4f, 0x9e, - 0xe1, 0xd6, 0x3d, 0x79, 0xb5, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0xfd, - 0xe4, 0xfc, 0xe2, 0xdc, 0xfc, 0x62, 0x28, 0xa5, 0x5b, 0x9c, 0x92, 0xad, 0x5f, 0x52, 0x59, 0x90, - 0x5a, 0xac, 0xe7, 0x92, 0x9a, 0xfc, 0xe8, 0x9e, 0xbc, 0x88, 0x27, 0xcc, 0x40, 0x17, 0x90, 0x79, - 0x6e, 0x60, 0xe3, 0x5e, 0xdd, 0x93, 0xc7, 0x61, 0xd1, 0xa7, 0x7b, 0xf2, 0xb2, 0x95, 0x89, 0xb9, - 0x39, 0x56, 0x4a, 0xd8, 0xe5, 0x95, 0x82, 0x44, 0x32, 0xb1, 0x18, 0x28, 0xb4, 0x80, 0x91, 0x4b, - 0x30, 0x33, 0x2f, 0xb3, 0x24, 0x33, 0x31, 0x27, 0x1e, 0xae, 0x40, 0x82, 0x09, 0xec, 0xea, 0x62, - 0x92, 0x1d, 0x2d, 0xe0, 0x09, 0x31, 0x0a, 0xee, 0xf6, 0x57, 0xf7, 0xe4, 0x31, 0x8d, 0xff, 0x74, - 0x4f, 0x5e, 0x02, 0xe6, 0x56, 0x34, 0x29, 0xa5, 0x20, 0x81, 0x4c, 0x34, 0x23, 0x84, 0x4a, 0xb9, - 0x38, 0xca, 0x12, 0x8b, 0x32, 0x13, 0xf3, 0x92, 0x53, 0x25, 0x98, 0xc1, 0x0e, 0x8b, 0x24, 0xd9, - 0x61, 0x1c, 0x61, 0x50, 0x13, 0x5e, 0xdd, 0x93, 0x87, 0x9b, 0xf6, 0xe9, 0x9e, 0x3c, 0x3f, 0xc4, - 0x1d, 0x30, 0x11, 0xa5, 0x20, 0xb8, 0xa4, 0x53, 0xd8, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, - 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, - 0xcb, 0x31, 0x44, 0xd9, 0x20, 0x59, 0x0b, 0x4e, 0x33, 0xba, 0x79, 0xa9, 0x25, 0xe5, 0xf9, 0x45, - 0xd9, 0x50, 0x5e, 0x62, 0x41, 0xa6, 0x7e, 0x7a, 0xbe, 0x7e, 0x5e, 0x7e, 0x4a, 0x2a, 0x66, 0x9a, - 0x4b, 0x62, 0x03, 0x27, 0x1f, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x20, 0x3a, 0x16, 0xfa, - 0x95, 0x02, 0x00, 0x00, + // 348 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xb1, 0x4a, 0xc3, 0x40, + 0x18, 0xc7, 0x73, 0x16, 0x4a, 0xcd, 0x62, 0x0d, 0x45, 0x83, 0x60, 0x4e, 0x82, 0x8a, 0x8b, 0x77, + 0x48, 0x37, 0x17, 0xa1, 0x14, 0xa1, 0x9b, 0x74, 0x10, 0x74, 0x29, 0xd7, 0xe4, 0x9a, 0x1e, 0x69, + 0x72, 0x21, 0x77, 0x2d, 0x74, 0xf4, 0x0d, 0x7c, 0x04, 0x1f, 0xc3, 0x47, 0xe8, 0xd8, 0x51, 0x04, + 0x0f, 0x49, 0xb7, 0x8e, 0x7d, 0x02, 0xe9, 0xb5, 0xc9, 0xd0, 0xd6, 0xa1, 0xd3, 0x1d, 0xf7, 0xff, + 0x7f, 0xbf, 0xfb, 0x0d, 0x9f, 0x79, 0x49, 0x42, 0x22, 0xfa, 0x98, 0xc5, 0xbd, 0x01, 0x91, 0x8c, + 0xc7, 0x78, 0x74, 0xd7, 0xa5, 0x92, 0xd4, 0x71, 0x42, 0x52, 0x12, 0x09, 0x94, 0xa4, 0x5c, 0x72, + 0xeb, 0x54, 0xb7, 0x50, 0xd1, 0x42, 0xeb, 0xd6, 0x59, 0x2d, 0xe0, 0x01, 0xd7, 0x1d, 0xbc, 0xbc, + 0xad, 0xea, 0xee, 0x4f, 0xc9, 0x2c, 0x3f, 0xe9, 0x79, 0xeb, 0x13, 0x98, 0x27, 0xc5, 0x58, 0xc7, + 0xa7, 0x1e, 0x19, 0x77, 0x7a, 0xc4, 0x93, 0x3c, 0xb5, 0xc1, 0x05, 0xb8, 0x39, 0x6c, 0xbc, 0x81, + 0x89, 0x82, 0xc6, 0xb7, 0x82, 0xd7, 0x01, 0x93, 0xfd, 0x61, 0x17, 0x79, 0x3c, 0xc2, 0x1e, 0x17, + 0x11, 0x17, 0xeb, 0xe3, 0x56, 0xf8, 0x21, 0x96, 0xe3, 0x84, 0x0a, 0xd4, 0xa4, 0x5e, 0xa6, 0x60, + 0xad, 0x95, 0x03, 0x9b, 0x4b, 0xde, 0xa3, 0xc6, 0xcd, 0x15, 0xfc, 0xe7, 0xa3, 0x85, 0x82, 0xe7, + 0x63, 0x12, 0x0d, 0xee, 0xdd, 0xdd, 0xb9, 0xdb, 0xae, 0xb1, 0x1d, 0x40, 0xeb, 0x03, 0x98, 0xc7, + 0x2c, 0x66, 0x92, 0x91, 0x41, 0xa7, 0x28, 0xd8, 0x07, 0xda, 0x5a, 0xec, 0x2d, 0x5d, 0x6d, 0xad, + 0x50, 0x85, 0xfb, 0x5c, 0xc1, 0x6d, 0xfc, 0x42, 0x41, 0x3b, 0x77, 0xdd, 0x88, 
0xdc, 0x76, 0x95, + 0x6d, 0x20, 0xac, 0xa1, 0x59, 0x19, 0x91, 0x94, 0x91, 0xd8, 0xa3, 0x76, 0x49, 0x8b, 0xbd, 0xec, + 0x2d, 0x56, 0x79, 0x5e, 0x13, 0xe6, 0x0a, 0x16, 0xb4, 0x85, 0x82, 0x47, 0x2b, 0x8f, 0xfc, 0xc5, + 0x6d, 0x17, 0x61, 0xe3, 0x61, 0x92, 0x39, 0x60, 0x9a, 0x39, 0xe0, 0x37, 0x73, 0xc0, 0xfb, 0xcc, + 0x31, 0xa6, 0x33, 0xc7, 0xf8, 0x9a, 0x39, 0xc6, 0xeb, 0x55, 0x12, 0x06, 0x88, 0x84, 0x12, 0xf9, + 0x74, 0x84, 0x03, 0x8e, 0x63, 0xee, 0xd3, 0xed, 0xe5, 0xea, 0x96, 0xf5, 0x9e, 0xd4, 0xff, 0x02, + 0x00, 0x00, 0xff, 0xff, 0x6a, 0x66, 0x29, 0x6c, 0x7e, 0x02, 0x00, 0x00, } func (m *Params) Marshal() (dAtA []byte, err error) { diff --git a/go/node/market/v1/bid.go b/go/node/market/v1/bid.go new file mode 100644 index 00000000..cfd3f35f --- /dev/null +++ b/go/node/market/v1/bid.go @@ -0,0 +1,72 @@ +package v1 + +import ( + "fmt" + + cerrors "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + dtypes "pkg.akt.dev/go/node/deployment/v1" +) + +// MakeBidID returns BidID instance with provided order details and provider +func MakeBidID(id OrderID, provider sdk.AccAddress) BidID { + return BidID{ + Owner: id.Owner, + DSeq: id.DSeq, + GSeq: id.GSeq, + OSeq: id.OSeq, + Provider: provider.String(), + } +} + +// Equals method compares specific bid with provided bid +func (id BidID) Equals(other BidID) bool { + return id.OrderID().Equals(other.OrderID()) && + id.Provider == other.Provider +} + +// LeaseID method returns lease details of bid +func (id BidID) LeaseID() LeaseID { + return LeaseID(id) +} + +// OrderID method returns OrderID details with specific bid details +func (id BidID) OrderID() OrderID { + return OrderID{ + Owner: id.Owner, + DSeq: id.DSeq, + GSeq: id.GSeq, + OSeq: id.OSeq, + } +} + +// String method for consistent output. +func (id BidID) String() string { + return fmt.Sprintf("%s/%v", id.OrderID(), id.Provider) +} + +// GroupID method returns GroupID details with specific bid details +func (id BidID) GroupID() dtypes.GroupID { + return id.OrderID().GroupID() +} + +// DeploymentID method returns deployment details with specific bid details +func (id BidID) DeploymentID() dtypes.DeploymentID { + return id.GroupID().DeploymentID() +} + +// Validate validates bid instance and returns nil +func (id BidID) Validate() error { + if err := id.OrderID().Validate(); err != nil { + return cerrors.Wrap(err, "BidID: Invalid OrderID") + } + if _, err := sdk.AccAddressFromBech32(id.Provider); err != nil { + return sdkerrors.ErrInvalidAddress.Wrap("BidID: Invalid Provider Address") + } + if id.Owner == id.Provider { + return sdkerrors.ErrConflict.Wrap("BidID: self-bid") + } + return nil +} diff --git a/go/node/market/v1/bid.pb.go b/go/node/market/v1/bid.pb.go new file mode 100644 index 00000000..f1567ab5 --- /dev/null +++ b/go/node/market/v1/bid.pb.go @@ -0,0 +1,485 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1/bid.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// BidID stores owner and all other seq numbers +// A successful bid becomes a Lease(ID). +type BidID struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` + GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` + OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` + Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` +} + +func (m *BidID) Reset() { *m = BidID{} } +func (*BidID) ProtoMessage() {} +func (*BidID) Descriptor() ([]byte, []int) { + return fileDescriptor_3938cb3dd8faff6a, []int{0} +} +func (m *BidID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BidID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BidID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BidID) XXX_Merge(src proto.Message) { + xxx_messageInfo_BidID.Merge(m, src) +} +func (m *BidID) XXX_Size() int { + return m.Size() +} +func (m *BidID) XXX_DiscardUnknown() { + xxx_messageInfo_BidID.DiscardUnknown(m) +} + +var xxx_messageInfo_BidID proto.InternalMessageInfo + +func (m *BidID) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *BidID) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *BidID) GetGSeq() uint32 { + if m != nil { + return m.GSeq + } + return 0 +} + +func (m *BidID) GetOSeq() uint32 { + if m != nil { + return m.OSeq + } + return 0 +} + +func (m *BidID) GetProvider() string { + if m != nil { + return m.Provider + } + return "" +} + +func init() { + proto.RegisterType((*BidID)(nil), "akash.market.v1.BidID") +} + +func init() { proto.RegisterFile("akash/market/v1/bid.proto", fileDescriptor_3938cb3dd8faff6a) } + +var fileDescriptor_3938cb3dd8faff6a = []byte{ + // 338 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0xd1, 0xb1, 0x6a, 0xc2, 0x40, + 0x18, 0x07, 0xf0, 0x9c, 0x8d, 0xc5, 0xa6, 0x2d, 0x42, 0x70, 0x50, 0xa1, 0x39, 0xc9, 0xe4, 0xd2, + 0x04, 0x71, 0x10, 0xdc, 0x1a, 0x04, 0xe9, 0x54, 0xd0, 0xad, 0x4b, 0x89, 0xbd, 0xe3, 0x1a, 0x52, + 0xfd, 0xf4, 0x12, 0x52, 0xfa, 0x16, 0x1d, 0x3b, 0xfa, 0x10, 0x7d, 0x88, 0x4e, 0x45, 0x3a, 0x75, + 0x3a, 0x4a, 0x5c, 0x8a, 0xa3, 0x4f, 0x50, 0xee, 0x2e, 0x55, 0x1c, 0x3a, 0x25, 0xdf, 0xff, 0x7f, + 0xbf, 0x83, 0xe3, 0xb3, 0x1a, 0x61, 0x1c, 0x26, 0x0f, 0xfe, 0x34, 0xe4, 0x31, 0x4d, 0xfd, 0xac, + 0xe3, 0x4f, 0x22, 0xe2, 0xcd, 0x39, 0xa4, 0x60, 0x57, 0x55, 0xe5, 0xe9, 0xca, 0xcb, 0x3a, 0xcd, + 0x1a, 0x03, 0x06, 0xaa, 0xf3, 0xe5, 0x9f, 0x3e, 0xd6, 0x6c, 0xdc, 0x43, 0x32, 0x85, 0xe4, 0x4e, + 0x17, 0x7a, 0xd0, 0x95, 0xfb, 0x51, 0xb2, 0xca, 0x41, 0x44, 0xae, 0x07, 0xf6, 0xd0, 0x2a, 0xc3, + 0xd3, 0x8c, 0xf2, 0x3a, 0x6a, 0xa1, 0xf6, 0x49, 0xd0, 0xd9, 0x08, 0xac, 0x83, 0xad, 0xc0, 0x67, + 0xcf, 0xe1, 0xf4, 0xb1, 0xef, 0xaa, 0xd1, 0xfd, 0x7c, 0xbb, 0xac, 0x15, 0x77, 0x5c, 0x11, 0xc2, + 0x69, 0x92, 0x8c, 0x53, 0x1e, 0xcd, 0xd8, 0x48, 0x1f, 0xb7, 0xbb, 0x96, 0x49, 0x12, 0xba, 0xa8, + 0x97, 0x5a, 0xa8, 0x6d, 0x06, 0x38, 0x17, 0xd8, 0x1c, 0x8c, 0xe9, 0x62, 0x23, 0xb0, 0xca, 
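The new go/node/market/v1/bid.go added above gives BidID projections onto the order, group, deployment, and lease it relates to, plus a Validate method that rejects malformed addresses and self-bids. A minimal usage sketch of those helpers; the import path pkg.akt.dev/go/node/market/v1 is inferred from the deployment import in the same file, the sample addresses are arbitrary 20-byte values, and error handling is elided:

package main

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"

	mv1 "pkg.akt.dev/go/node/market/v1"
)

func main() {
	owner := sdk.AccAddress([]byte("owner-address-000000"))
	provider := sdk.AccAddress([]byte("provider-address-000"))

	oid := mv1.OrderID{Owner: owner.String(), DSeq: 1, GSeq: 1, OSeq: 1}

	// MakeBidID copies the order identifiers and attaches the provider.
	bid := mv1.MakeBidID(oid, provider)

	// A BidID can be projected back onto the order and deployment it belongs
	// to, or forward onto the lease it would create if the bid wins.
	fmt.Println(bid.OrderID().Equals(oid)) // true
	fmt.Println(bid.DeploymentID())        // the deployment this bid ultimately belongs to
	fmt.Println(bid.LeaseID())             // same identifiers as the bid

	// Validate rejects malformed bech32 addresses and self-bids (owner == provider).
	if err := bid.Validate(); err != nil {
		fmt.Println("invalid bid id:", err)
	}
}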
0xb7, + 0x02, 0x9f, 0xea, 0xeb, 0xe4, 0xe4, 0x8e, 0x54, 0x28, 0x11, 0x93, 0xe8, 0xa8, 0x85, 0xda, 0xe7, + 0x1a, 0x0d, 0x0b, 0xc4, 0x0e, 0x10, 0xd3, 0x88, 0x15, 0x08, 0x24, 0x32, 0xf7, 0xe8, 0xa6, 0x40, + 0x70, 0x80, 0x40, 0x23, 0xf9, 0xb1, 0xc7, 0x56, 0x65, 0xce, 0x21, 0x8b, 0x08, 0xe5, 0xf5, 0xb2, + 0x7a, 0x6a, 0x6f, 0x23, 0xf0, 0x2e, 0xdb, 0x0a, 0x5c, 0xd5, 0xe8, 0x2f, 0xf9, 0xff, 0xc1, 0x3b, + 0xd4, 0xaf, 0xbc, 0x2e, 0xb1, 0xf1, 0xb3, 0xc4, 0x46, 0xd0, 0x7b, 0xcf, 0x1d, 0xb4, 0xca, 0x1d, + 0xf4, 0x9d, 0x3b, 0xe8, 0x65, 0xed, 0x18, 0xab, 0xb5, 0x63, 0x7c, 0xad, 0x1d, 0xe3, 0xf6, 0x62, + 0x1e, 0x33, 0x2f, 0x8c, 0x53, 0x8f, 0xd0, 0xcc, 0x67, 0xe0, 0xcf, 0x80, 0xd0, 0xfd, 0x56, 0x27, + 0xc7, 0x6a, 0x21, 0xdd, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe6, 0x43, 0xac, 0x89, 0xef, 0x01, + 0x00, 0x00, +} + +func (m *BidID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BidID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BidID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Provider) > 0 { + i -= len(m.Provider) + copy(dAtA[i:], m.Provider) + i = encodeVarintBid(dAtA, i, uint64(len(m.Provider))) + i-- + dAtA[i] = 0x2a + } + if m.OSeq != 0 { + i = encodeVarintBid(dAtA, i, uint64(m.OSeq)) + i-- + dAtA[i] = 0x20 + } + if m.GSeq != 0 { + i = encodeVarintBid(dAtA, i, uint64(m.GSeq)) + i-- + dAtA[i] = 0x18 + } + if m.DSeq != 0 { + i = encodeVarintBid(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintBid(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintBid(dAtA []byte, offset int, v uint64) int { + offset -= sovBid(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *BidID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovBid(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovBid(uint64(m.DSeq)) + } + if m.GSeq != 0 { + n += 1 + sovBid(uint64(m.GSeq)) + } + if m.OSeq != 0 { + n += 1 + sovBid(uint64(m.OSeq)) + } + l = len(m.Provider) + if l > 0 { + n += 1 + l + sovBid(uint64(l)) + } + return n +} + +func sovBid(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozBid(x uint64) (n int) { + return sovBid(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *BidID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BidID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BidID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) + } + m.GSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) + } + m.OSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provider = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBid(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBid + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipBid(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthBid + } + iNdEx += 
length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupBid + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthBid + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthBid = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowBid = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupBid = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1/event.pb.go b/go/node/market/v1/event.pb.go new file mode 100644 index 00000000..8af0e99e --- /dev/null +++ b/go/node/market/v1/event.pb.go @@ -0,0 +1,1305 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1/event.proto + +package v1 + +import ( + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// EventOrderCreated +type EventOrderCreated struct { + ID OrderID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *EventOrderCreated) Reset() { *m = EventOrderCreated{} } +func (m *EventOrderCreated) String() string { return proto.CompactTextString(m) } +func (*EventOrderCreated) ProtoMessage() {} +func (*EventOrderCreated) Descriptor() ([]byte, []int) { + return fileDescriptor_95cc31d2d9808e8a, []int{0} +} +func (m *EventOrderCreated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventOrderCreated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventOrderCreated.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventOrderCreated) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventOrderCreated.Merge(m, src) +} +func (m *EventOrderCreated) XXX_Size() int { + return m.Size() +} +func (m *EventOrderCreated) XXX_DiscardUnknown() { + xxx_messageInfo_EventOrderCreated.DiscardUnknown(m) +} + +var xxx_messageInfo_EventOrderCreated proto.InternalMessageInfo + +func (m *EventOrderCreated) GetID() OrderID { + if m != nil { + return m.ID + } + return OrderID{} +} + +// EventOrderClosed +type EventOrderClosed struct { + ID OrderID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *EventOrderClosed) Reset() { *m = EventOrderClosed{} } +func (m *EventOrderClosed) String() string { return proto.CompactTextString(m) } +func (*EventOrderClosed) ProtoMessage() {} +func (*EventOrderClosed) Descriptor() ([]byte, []int) { + return fileDescriptor_95cc31d2d9808e8a, []int{1} +} +func (m *EventOrderClosed) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventOrderClosed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventOrderClosed.Marshal(b, m, deterministic) 
+ } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventOrderClosed) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventOrderClosed.Merge(m, src) +} +func (m *EventOrderClosed) XXX_Size() int { + return m.Size() +} +func (m *EventOrderClosed) XXX_DiscardUnknown() { + xxx_messageInfo_EventOrderClosed.DiscardUnknown(m) +} + +var xxx_messageInfo_EventOrderClosed proto.InternalMessageInfo + +func (m *EventOrderClosed) GetID() OrderID { + if m != nil { + return m.ID + } + return OrderID{} +} + +// EventBidCreated +type EventBidCreated struct { + ID BidID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` + Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` +} + +func (m *EventBidCreated) Reset() { *m = EventBidCreated{} } +func (m *EventBidCreated) String() string { return proto.CompactTextString(m) } +func (*EventBidCreated) ProtoMessage() {} +func (*EventBidCreated) Descriptor() ([]byte, []int) { + return fileDescriptor_95cc31d2d9808e8a, []int{2} +} +func (m *EventBidCreated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventBidCreated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventBidCreated.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventBidCreated) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventBidCreated.Merge(m, src) +} +func (m *EventBidCreated) XXX_Size() int { + return m.Size() +} +func (m *EventBidCreated) XXX_DiscardUnknown() { + xxx_messageInfo_EventBidCreated.DiscardUnknown(m) +} + +var xxx_messageInfo_EventBidCreated proto.InternalMessageInfo + +func (m *EventBidCreated) GetID() BidID { + if m != nil { + return m.ID + } + return BidID{} +} + +func (m *EventBidCreated) GetPrice() types.DecCoin { + if m != nil { + return m.Price + } + return types.DecCoin{} +} + +// EventBidClosed +type EventBidClosed struct { + ID BidID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *EventBidClosed) Reset() { *m = EventBidClosed{} } +func (m *EventBidClosed) String() string { return proto.CompactTextString(m) } +func (*EventBidClosed) ProtoMessage() {} +func (*EventBidClosed) Descriptor() ([]byte, []int) { + return fileDescriptor_95cc31d2d9808e8a, []int{3} +} +func (m *EventBidClosed) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventBidClosed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventBidClosed.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventBidClosed) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventBidClosed.Merge(m, src) +} +func (m *EventBidClosed) XXX_Size() int { + return m.Size() +} +func (m *EventBidClosed) XXX_DiscardUnknown() { + xxx_messageInfo_EventBidClosed.DiscardUnknown(m) +} + +var xxx_messageInfo_EventBidClosed proto.InternalMessageInfo + +func (m *EventBidClosed) GetID() BidID { + if m != nil { + return m.ID + } + return BidID{} +} + +// EventLeaseCreated +type EventLeaseCreated struct { + ID LeaseID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` + Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` +} + +func (m 
*EventLeaseCreated) Reset() { *m = EventLeaseCreated{} } +func (m *EventLeaseCreated) String() string { return proto.CompactTextString(m) } +func (*EventLeaseCreated) ProtoMessage() {} +func (*EventLeaseCreated) Descriptor() ([]byte, []int) { + return fileDescriptor_95cc31d2d9808e8a, []int{4} +} +func (m *EventLeaseCreated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventLeaseCreated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventLeaseCreated.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventLeaseCreated) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventLeaseCreated.Merge(m, src) +} +func (m *EventLeaseCreated) XXX_Size() int { + return m.Size() +} +func (m *EventLeaseCreated) XXX_DiscardUnknown() { + xxx_messageInfo_EventLeaseCreated.DiscardUnknown(m) +} + +var xxx_messageInfo_EventLeaseCreated proto.InternalMessageInfo + +func (m *EventLeaseCreated) GetID() LeaseID { + if m != nil { + return m.ID + } + return LeaseID{} +} + +func (m *EventLeaseCreated) GetPrice() types.DecCoin { + if m != nil { + return m.Price + } + return types.DecCoin{} +} + +// EventLeaseClosed +type EventLeaseClosed struct { + ID LeaseID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *EventLeaseClosed) Reset() { *m = EventLeaseClosed{} } +func (m *EventLeaseClosed) String() string { return proto.CompactTextString(m) } +func (*EventLeaseClosed) ProtoMessage() {} +func (*EventLeaseClosed) Descriptor() ([]byte, []int) { + return fileDescriptor_95cc31d2d9808e8a, []int{5} +} +func (m *EventLeaseClosed) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventLeaseClosed) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventLeaseClosed.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventLeaseClosed) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventLeaseClosed.Merge(m, src) +} +func (m *EventLeaseClosed) XXX_Size() int { + return m.Size() +} +func (m *EventLeaseClosed) XXX_DiscardUnknown() { + xxx_messageInfo_EventLeaseClosed.DiscardUnknown(m) +} + +var xxx_messageInfo_EventLeaseClosed proto.InternalMessageInfo + +func (m *EventLeaseClosed) GetID() LeaseID { + if m != nil { + return m.ID + } + return LeaseID{} +} + +func init() { + proto.RegisterType((*EventOrderCreated)(nil), "akash.market.v1.EventOrderCreated") + proto.RegisterType((*EventOrderClosed)(nil), "akash.market.v1.EventOrderClosed") + proto.RegisterType((*EventBidCreated)(nil), "akash.market.v1.EventBidCreated") + proto.RegisterType((*EventBidClosed)(nil), "akash.market.v1.EventBidClosed") + proto.RegisterType((*EventLeaseCreated)(nil), "akash.market.v1.EventLeaseCreated") + proto.RegisterType((*EventLeaseClosed)(nil), "akash.market.v1.EventLeaseClosed") +} + +func init() { proto.RegisterFile("akash/market/v1/event.proto", fileDescriptor_95cc31d2d9808e8a) } + +var fileDescriptor_95cc31d2d9808e8a = []byte{ + // 391 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x93, 0xbf, 0x4a, 0xc3, 0x40, + 0x1c, 0xc7, 0x93, 0xf8, 0x07, 0x8c, 0x62, 0xb5, 0x88, 0xd4, 0x6a, 0x73, 0x92, 0xc9, 0xe9, 0x8e, + 0xe8, 0x20, 0x74, 0x4c, 0x5b, 0xa4, 0x20, 0x88, 0x1d, 0x05, 0x87, 
0x4b, 0xee, 0x88, 0x47, 0xda, + 0x5c, 0x49, 0x42, 0xc0, 0xb7, 0xf0, 0x11, 0x7c, 0x05, 0x07, 0xdf, 0xa1, 0x63, 0x47, 0xa7, 0x20, + 0xe9, 0x22, 0x1d, 0xfb, 0x04, 0x92, 0xbb, 0x54, 0x4b, 0x8b, 0x90, 0xa1, 0xe0, 0x96, 0xe4, 0xfb, + 0xbb, 0xef, 0xe7, 0x9b, 0xfb, 0xde, 0xe9, 0xa7, 0xd8, 0xc7, 0xd1, 0x13, 0x1a, 0xe0, 0xd0, 0xa7, + 0x31, 0x4a, 0x2c, 0x44, 0x13, 0x1a, 0xc4, 0x70, 0x18, 0xf2, 0x98, 0x57, 0x2b, 0x42, 0x84, 0x52, + 0x84, 0x89, 0x55, 0x3f, 0xf2, 0xb8, 0xc7, 0x85, 0x86, 0xf2, 0x27, 0x39, 0x56, 0x37, 0x5c, 0x1e, + 0x0d, 0x78, 0x84, 0x1c, 0x1c, 0x51, 0x94, 0x58, 0x0e, 0x8d, 0xb1, 0x85, 0x5c, 0xce, 0x82, 0x42, + 0x3f, 0x59, 0x66, 0x38, 0x8c, 0x14, 0xd2, 0x0a, 0x9e, 0x87, 0x84, 0x86, 0x7f, 0x89, 0x7d, 0x8a, + 0x23, 0x2a, 0x45, 0xd3, 0xd1, 0x0f, 0x3b, 0x79, 0xd4, 0xbb, 0x7c, 0x41, 0x2b, 0xa4, 0x38, 0xa6, + 0xa4, 0x7a, 0xa3, 0x6b, 0x8c, 0xd4, 0xd4, 0x73, 0xf5, 0x62, 0xf7, 0xb2, 0x06, 0x97, 0xd2, 0x43, + 0x31, 0xda, 0x6d, 0xdb, 0x8d, 0x51, 0x0a, 0x94, 0x2c, 0x05, 0x5a, 0xb7, 0x3d, 0x4d, 0x81, 0xc6, + 0xc8, 0x2c, 0x05, 0x3b, 0xcf, 0x78, 0xd0, 0x6f, 0x9a, 0x8c, 0x98, 0x3d, 0x8d, 0x91, 0xe6, 0xe6, + 0xd7, 0x2b, 0x50, 0x4c, 0xac, 0x1f, 0x2c, 0x30, 0xfa, 0x3c, 0x5a, 0x3f, 0xe2, 0x4d, 0xd5, 0x2b, + 0x82, 0x61, 0x33, 0x32, 0xff, 0x8b, 0xce, 0x02, 0xe2, 0x78, 0x05, 0x61, 0x33, 0x52, 0x0a, 0x50, + 0xbd, 0xd7, 0xb7, 0x86, 0x21, 0x73, 0x69, 0x6d, 0x43, 0x38, 0x9d, 0x41, 0x59, 0x13, 0xcc, 0x6b, + 0x82, 0x45, 0x4d, 0xb0, 0x4d, 0xdd, 0x16, 0x67, 0x81, 0xf4, 0x9b, 0xa6, 0x40, 0x2e, 0x99, 0xa5, + 0x60, 0x4f, 0x9a, 0x89, 0x57, 0xb3, 0x27, 0x3f, 0x17, 0x99, 0x1f, 0xf5, 0xfd, 0x9f, 0xc8, 0x72, + 0x53, 0xd6, 0x93, 0xb8, 0xb0, 0x7f, 0x57, 0x8b, 0x6a, 0x6f, 0xf3, 0xba, 0xcb, 0x55, 0x2b, 0x46, + 0xff, 0x79, 0x5b, 0xe6, 0xa7, 0x45, 0xc6, 0x2e, 0x73, 0x5a, 0xca, 0xa7, 0x96, 0x08, 0xfb, 0x7a, + 0x94, 0x19, 0xea, 0x38, 0x33, 0xd4, 0xcf, 0xcc, 0x50, 0x5f, 0x26, 0x86, 0x32, 0x9e, 0x18, 0xca, + 0xc7, 0xc4, 0x50, 0x1e, 0x1a, 0x43, 0xdf, 0x83, 0xd8, 0x8f, 0x21, 0xa1, 0x09, 0xf2, 0x38, 0x0a, + 0x38, 0xa1, 0xbf, 0x37, 0xc7, 0xd9, 0x16, 0x97, 0xe6, 0xea, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x8a, + 0x50, 0x3d, 0xa2, 0xef, 0x03, 0x00, 0x00, +} + +func (m *EventOrderCreated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventOrderCreated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventOrderCreated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventOrderClosed) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventOrderClosed) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventOrderClosed) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m 
*EventBidCreated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventBidCreated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventBidCreated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventBidClosed) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventBidClosed) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventBidClosed) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventLeaseCreated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventLeaseCreated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventLeaseCreated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventLeaseClosed) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventLeaseClosed) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventLeaseClosed) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintEvent(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintEvent(dAtA []byte, offset int, v uint64) int { + offset -= sovEvent(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EventOrderCreated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovEvent(uint64(l)) + return n +} + +func (m 
*EventOrderClosed) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovEvent(uint64(l)) + return n +} + +func (m *EventBidCreated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovEvent(uint64(l)) + l = m.Price.Size() + n += 1 + l + sovEvent(uint64(l)) + return n +} + +func (m *EventBidClosed) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovEvent(uint64(l)) + return n +} + +func (m *EventLeaseCreated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovEvent(uint64(l)) + l = m.Price.Size() + n += 1 + l + sovEvent(uint64(l)) + return n +} + +func (m *EventLeaseClosed) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovEvent(uint64(l)) + return n +} + +func sovEvent(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvent(x uint64) (n int) { + return sovEvent(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *EventOrderCreated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventOrderCreated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventOrderCreated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventOrderClosed) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventOrderClosed: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventOrderClosed: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = 
%d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventBidCreated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventBidCreated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventBidCreated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventBidClosed) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventBidClosed: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventBidClosed: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventLeaseCreated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventLeaseCreated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventLeaseCreated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventLeaseClosed) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventLeaseClosed: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventLeaseClosed: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvent(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvent + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvent + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvent + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvent = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvent = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvent = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1/filters.go b/go/node/market/v1/filters.go new file mode 100644 index 
00000000..116f0c4f --- /dev/null +++ b/go/node/market/v1/filters.go @@ -0,0 +1,36 @@ +package v1 + +// Accept returns whether lease filters valid or not +func (filters *LeaseFilters) Accept(obj Lease, stateVal Lease_State) bool { + // Checking owner filter + if filters.Owner != "" && filters.Owner != obj.ID.Owner { + return false + } + + // Checking dseq filter + if filters.DSeq != 0 && filters.DSeq != obj.ID.DSeq { + return false + } + + // Checking gseq filter + if filters.GSeq != 0 && filters.GSeq != obj.ID.GSeq { + return false + } + + // Checking oseq filter + if filters.OSeq != 0 && filters.OSeq != obj.ID.OSeq { + return false + } + + // Checking provider filter + if filters.Provider != "" && filters.Provider != obj.ID.Provider { + return false + } + + // Checking state filter + if stateVal != 0 && stateVal != obj.State { + return false + } + + return true +} diff --git a/go/node/market/v1/filters.pb.go b/go/node/market/v1/filters.pb.go new file mode 100644 index 00000000..e5998e5f --- /dev/null +++ b/go/node/market/v1/filters.pb.go @@ -0,0 +1,537 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1/filters.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// LeaseFilters defines flags for lease list filter +type LeaseFilters struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` + GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` + OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` + Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` + State string `protobuf:"bytes,6,opt,name=state,proto3" json:"state" yaml:"state"` +} + +func (m *LeaseFilters) Reset() { *m = LeaseFilters{} } +func (m *LeaseFilters) String() string { return proto.CompactTextString(m) } +func (*LeaseFilters) ProtoMessage() {} +func (*LeaseFilters) Descriptor() ([]byte, []int) { + return fileDescriptor_601e383ab343d6e3, []int{0} +} +func (m *LeaseFilters) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseFilters.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseFilters) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseFilters.Merge(m, src) +} +func (m *LeaseFilters) XXX_Size() int { + return m.Size() +} +func (m *LeaseFilters) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseFilters.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseFilters proto.InternalMessageInfo + +func (m *LeaseFilters) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m 
*LeaseFilters) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *LeaseFilters) GetGSeq() uint32 { + if m != nil { + return m.GSeq + } + return 0 +} + +func (m *LeaseFilters) GetOSeq() uint32 { + if m != nil { + return m.OSeq + } + return 0 +} + +func (m *LeaseFilters) GetProvider() string { + if m != nil { + return m.Provider + } + return "" +} + +func (m *LeaseFilters) GetState() string { + if m != nil { + return m.State + } + return "" +} + +func init() { + proto.RegisterType((*LeaseFilters)(nil), "akash.market.v1.LeaseFilters") +} + +func init() { proto.RegisterFile("akash/market/v1/filters.proto", fileDescriptor_601e383ab343d6e3) } + +var fileDescriptor_601e383ab343d6e3 = []byte{ + // 365 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xc1, 0x6a, 0xea, 0x40, + 0x14, 0x86, 0x93, 0x6b, 0x94, 0x7b, 0x73, 0xbd, 0x08, 0xc1, 0x45, 0x14, 0xcc, 0x48, 0x56, 0x6e, + 0x6e, 0x82, 0xb8, 0x10, 0xdc, 0x55, 0x4a, 0xdd, 0x14, 0x0a, 0xba, 0xeb, 0xa6, 0x4c, 0x9b, 0xe9, + 0x54, 0xa2, 0x8e, 0xce, 0x0c, 0x29, 0xdd, 0xf6, 0x09, 0xfa, 0x08, 0x7d, 0x88, 0x3e, 0x44, 0x97, + 0xd2, 0x55, 0x57, 0x43, 0x89, 0x9b, 0xe2, 0xd2, 0x27, 0x28, 0x33, 0x27, 0x55, 0x5c, 0x74, 0x95, + 0xfc, 0xff, 0x7f, 0xbe, 0x1f, 0xce, 0x49, 0xdc, 0x16, 0x4e, 0xb1, 0xb8, 0x8b, 0xe7, 0x98, 0xa7, + 0x44, 0xc6, 0x59, 0x37, 0xbe, 0x9d, 0xce, 0x24, 0xe1, 0x22, 0x5a, 0x72, 0x26, 0x99, 0x57, 0x33, + 0x71, 0x04, 0x71, 0x94, 0x75, 0x9b, 0x75, 0xca, 0x28, 0x33, 0x59, 0xac, 0xdf, 0x60, 0xac, 0xd9, + 0xb8, 0x61, 0x62, 0xce, 0xc4, 0x15, 0x04, 0x20, 0x20, 0x0a, 0x1f, 0x4b, 0x6e, 0xf5, 0x9c, 0x60, + 0x41, 0xce, 0xa0, 0xd8, 0x1b, 0xb9, 0x65, 0x76, 0xbf, 0x20, 0xdc, 0xb7, 0xdb, 0x76, 0xe7, 0xcf, + 0xb0, 0xbb, 0x55, 0x08, 0x8c, 0x9d, 0x42, 0xd5, 0x07, 0x3c, 0x9f, 0x0d, 0x42, 0x23, 0xc3, 0xb7, + 0x97, 0xff, 0xf5, 0xa2, 0xea, 0x24, 0x49, 0x38, 0x11, 0x62, 0x22, 0xf9, 0x74, 0x41, 0xc7, 0x30, + 0xee, 0xf5, 0x5c, 0x27, 0x11, 0x64, 0xe5, 0xff, 0x6a, 0xdb, 0x1d, 0x67, 0x88, 0x72, 0x85, 0x9c, + 0xd3, 0x09, 0x59, 0x6d, 0x15, 0x32, 0xfe, 0x4e, 0xa1, 0xbf, 0x50, 0xa7, 0x55, 0x38, 0x36, 0xa6, + 0x86, 0xa8, 0x86, 0x4a, 0x6d, 0xbb, 0xf3, 0x0f, 0xa0, 0x51, 0x01, 0xd1, 0x23, 0x88, 0x02, 0x44, + 0x0b, 0x88, 0x69, 0xc8, 0x39, 0x40, 0x17, 0x05, 0xc4, 0x8e, 0x20, 0x06, 0x90, 0x7e, 0x78, 0x13, + 0xf7, 0xf7, 0x92, 0xb3, 0x6c, 0x9a, 0x10, 0xee, 0x97, 0xcd, 0xaa, 0xfd, 0xad, 0x42, 0x7b, 0x6f, + 0xa7, 0x50, 0x0d, 0xa0, 0x6f, 0xe7, 0xe7, 0x85, 0xf7, 0x90, 0x17, 0xbb, 0x65, 0x21, 0xb1, 0x24, + 0x7e, 0xc5, 0x34, 0x36, 0xf4, 0xf1, 0x8c, 0x71, 0x38, 0x9e, 0x91, 0xe1, 0x18, 0xec, 0x81, 0xf3, + 0xf9, 0x8c, 0xac, 0x61, 0xff, 0x35, 0x0f, 0xec, 0x75, 0x1e, 0xd8, 0x1f, 0x79, 0x60, 0x3f, 0x6d, + 0x02, 0x6b, 0xbd, 0x09, 0xac, 0xf7, 0x4d, 0x60, 0x5d, 0xb6, 0x96, 0x29, 0x8d, 0x70, 0x2a, 0xa3, + 0x84, 0x64, 0x31, 0x65, 0xf1, 0x82, 0x25, 0xe4, 0xf0, 0x37, 0x5c, 0x57, 0xcc, 0x47, 0xec, 0x7d, + 0x05, 0x00, 0x00, 0xff, 0xff, 0x36, 0xfb, 0x56, 0x68, 0x27, 0x02, 0x00, 0x00, +} + +func (m *LeaseFilters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseFilters) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], 
m.State) + i = encodeVarintFilters(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x32 + } + if len(m.Provider) > 0 { + i -= len(m.Provider) + copy(dAtA[i:], m.Provider) + i = encodeVarintFilters(dAtA, i, uint64(len(m.Provider))) + i-- + dAtA[i] = 0x2a + } + if m.OSeq != 0 { + i = encodeVarintFilters(dAtA, i, uint64(m.OSeq)) + i-- + dAtA[i] = 0x20 + } + if m.GSeq != 0 { + i = encodeVarintFilters(dAtA, i, uint64(m.GSeq)) + i-- + dAtA[i] = 0x18 + } + if m.DSeq != 0 { + i = encodeVarintFilters(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintFilters(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintFilters(dAtA []byte, offset int, v uint64) int { + offset -= sovFilters(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LeaseFilters) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovFilters(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovFilters(uint64(m.DSeq)) + } + if m.GSeq != 0 { + n += 1 + sovFilters(uint64(m.GSeq)) + } + if m.OSeq != 0 { + n += 1 + sovFilters(uint64(m.OSeq)) + } + l = len(m.Provider) + if l > 0 { + n += 1 + l + sovFilters(uint64(l)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovFilters(uint64(l)) + } + return n +} + +func sovFilters(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozFilters(x uint64) (n int) { + return sovFilters(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *LeaseFilters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseFilters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseFilters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFilters + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFilters + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) + } + m.GSeq = 0 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) + } + m.OSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFilters + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFilters + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provider = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFilters + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFilters + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFilters(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthFilters + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFilters(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilters + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilters + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilters + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthFilters + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupFilters + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthFilters 
+ } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthFilters = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFilters = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupFilters = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1/lease.go b/go/node/market/v1/lease.go new file mode 100644 index 00000000..60f57351 --- /dev/null +++ b/go/node/market/v1/lease.go @@ -0,0 +1,107 @@ +package v1 + +import ( + "strings" + + sdkerrors "cosmossdk.io/errors" + "gopkg.in/yaml.v3" + + dtypes "pkg.akt.dev/go/node/deployment/v1" +) + +// MakeLeaseID returns LeaseID instance with provided bid details +func MakeLeaseID(id BidID) LeaseID { + return LeaseID(id) +} + +// Equals method compares specific lease with provided lease +func (id LeaseID) Equals(other LeaseID) bool { + return id.BidID().Equals(other.BidID()) +} + +// Validate calls the BidID's validator and returns any error. +func (id LeaseID) Validate() error { + if err := id.BidID().Validate(); err != nil { + return sdkerrors.Wrap(err, "LeaseID: Invalid BidID") + } + return nil +} + +// BidID method returns BidID details with specific LeaseID +func (id LeaseID) BidID() BidID { + return BidID(id) +} + +// OrderID method returns OrderID details with specific lease details +func (id LeaseID) OrderID() OrderID { + return id.BidID().OrderID() +} + +// GroupID method returns GroupID details with specific lease details +func (id LeaseID) GroupID() dtypes.GroupID { + return id.OrderID().GroupID() +} + +// DeploymentID method returns deployment details with specific lease details +func (id LeaseID) DeploymentID() dtypes.DeploymentID { + return id.GroupID().DeploymentID() +} + +// String method provides human-readable representation of LeaseID. +func (id LeaseID) String() string { + return id.BidID().String() +} + +// String implements the Stringer interface for a Lease object. +func (obj Lease) String() string { + out, _ := yaml.Marshal(obj) + return string(out) +} + +// Leases is a collection of Lease +type Leases []Lease + +// String implements the Stringer interface for a Leases object. +func (l Leases) String() string { + var out string + for _, order := range l { + out += order.String() + "\n" + } + + return strings.TrimSpace(out) +} + +// Filters returns whether lease filters valid or not +func (obj Lease) Filters(filters LeaseFilters, stateVal Lease_State) bool { + // Checking owner filter + if filters.Owner != "" && filters.Owner != obj.ID.Owner { + return false + } + + // Checking dseq filter + if filters.DSeq != 0 && filters.DSeq != obj.ID.DSeq { + return false + } + + // Checking gseq filter + if filters.GSeq != 0 && filters.GSeq != obj.ID.GSeq { + return false + } + + // Checking oseq filter + if filters.OSeq != 0 && filters.OSeq != obj.ID.OSeq { + return false + } + + // Checking provider filter + if filters.Provider != "" && filters.Provider != obj.ID.Provider { + return false + } + + // Checking state filter + if stateVal != 0 && stateVal != obj.State { + return false + } + + return true +} diff --git a/go/node/market/v1/lease.pb.go b/go/node/market/v1/lease.pb.go new file mode 100644 index 00000000..4e188807 --- /dev/null +++ b/go/node/market/v1/lease.pb.go @@ -0,0 +1,871 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: akash/market/v1/lease.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// State is an enum which refers to state of lease +type Lease_State int32 + +const ( + // Prefix should start with 0 in enum. So declaring dummy state + LeaseStateInvalid Lease_State = 0 + // LeaseActive denotes state for lease active + LeaseActive Lease_State = 1 + // LeaseInsufficientFunds denotes state for lease insufficient_funds + LeaseInsufficientFunds Lease_State = 2 + // LeaseClosed denotes state for lease closed + LeaseClosed Lease_State = 3 +) + +var Lease_State_name = map[int32]string{ + 0: "invalid", + 1: "active", + 2: "insufficient_funds", + 3: "closed", +} + +var Lease_State_value = map[string]int32{ + "invalid": 0, + "active": 1, + "insufficient_funds": 2, + "closed": 3, +} + +func (x Lease_State) String() string { + return proto.EnumName(Lease_State_name, int32(x)) +} + +func (Lease_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_122c076f440f07dc, []int{1, 0} +} + +// LeaseID stores bid details of lease +type LeaseID struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` + GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` + OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` + Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` +} + +func (m *LeaseID) Reset() { *m = LeaseID{} } +func (*LeaseID) ProtoMessage() {} +func (*LeaseID) Descriptor() ([]byte, []int) { + return fileDescriptor_122c076f440f07dc, []int{0} +} +func (m *LeaseID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseID) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseID.Merge(m, src) +} +func (m *LeaseID) XXX_Size() int { + return m.Size() +} +func (m *LeaseID) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseID.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseID proto.InternalMessageInfo + +func (m *LeaseID) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *LeaseID) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *LeaseID) GetGSeq() uint32 { + if m != nil { + return m.GSeq + } + return 0 +} + +func (m *LeaseID) GetOSeq() uint32 { + if m != nil { + return m.OSeq + } + return 0 +} + +func (m *LeaseID) GetProvider() string { + if m != nil { + return m.Provider + } + return "" +} + +// Lease stores LeaseID, state of lease and price +type Lease 
struct { + ID LeaseID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` + State Lease_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1.Lease_State" json:"state" yaml:"state"` + Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at" yaml:"created_at"` + ClosedOn int64 `protobuf:"varint,5,opt,name=closed_on,json=closedOn,proto3" json:"closed_on" yaml:"closed_on"` +} + +func (m *Lease) Reset() { *m = Lease{} } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { + return fileDescriptor_122c076f440f07dc, []int{1} +} +func (m *Lease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Lease.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Lease) XXX_Merge(src proto.Message) { + xxx_messageInfo_Lease.Merge(m, src) +} +func (m *Lease) XXX_Size() int { + return m.Size() +} +func (m *Lease) XXX_DiscardUnknown() { + xxx_messageInfo_Lease.DiscardUnknown(m) +} + +var xxx_messageInfo_Lease proto.InternalMessageInfo + +func (m *Lease) GetID() LeaseID { + if m != nil { + return m.ID + } + return LeaseID{} +} + +func (m *Lease) GetState() Lease_State { + if m != nil { + return m.State + } + return LeaseStateInvalid +} + +func (m *Lease) GetPrice() types.DecCoin { + if m != nil { + return m.Price + } + return types.DecCoin{} +} + +func (m *Lease) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *Lease) GetClosedOn() int64 { + if m != nil { + return m.ClosedOn + } + return 0 +} + +func init() { + proto.RegisterEnum("akash.market.v1.Lease_State", Lease_State_name, Lease_State_value) + proto.RegisterType((*LeaseID)(nil), "akash.market.v1.LeaseID") + proto.RegisterType((*Lease)(nil), "akash.market.v1.Lease") +} + +func init() { proto.RegisterFile("akash/market/v1/lease.proto", fileDescriptor_122c076f440f07dc) } + +var fileDescriptor_122c076f440f07dc = []byte{ + // 647 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x94, 0x41, 0x4f, 0xdb, 0x30, + 0x14, 0xc7, 0x93, 0xb4, 0x05, 0xea, 0x6e, 0xa3, 0x44, 0x6c, 0x2a, 0x01, 0xe2, 0x2e, 0xbb, 0x70, + 0x59, 0xa2, 0x96, 0x03, 0x12, 0x87, 0x49, 0x84, 0x6a, 0x08, 0x69, 0x13, 0x5a, 0x7a, 0xdb, 0xa5, + 0x32, 0xb1, 0xc9, 0xac, 0x96, 0xb8, 0x24, 0x21, 0xd3, 0xbe, 0xc1, 0xc4, 0x69, 0xc7, 0x5d, 0xd0, + 0xd0, 0xf6, 0x15, 0xf6, 0x21, 0x38, 0xa2, 0x9d, 0x76, 0xb2, 0xa6, 0x72, 0x99, 0x7a, 0xec, 0x27, + 0x98, 0x6c, 0x07, 0x02, 0xd3, 0x76, 0xaa, 0xdf, 0xff, 0xff, 0x7e, 0xcf, 0xf6, 0x7b, 0x6e, 0xc0, + 0x2a, 0x1a, 0xa2, 0xf4, 0x9d, 0x77, 0x8c, 0x92, 0x21, 0xc9, 0xbc, 0xbc, 0xe3, 0x8d, 0x08, 0x4a, + 0x89, 0x3b, 0x4e, 0x58, 0xc6, 0xcc, 0x45, 0x69, 0xba, 0xca, 0x74, 0xf3, 0x8e, 0xb5, 0x1c, 0xb1, + 0x88, 0x49, 0xcf, 0x13, 0x2b, 0x95, 0x66, 0xad, 0x84, 0x2c, 0x3d, 0x66, 0xe9, 0x40, 0x19, 0x2a, + 0x28, 0x2c, 0x5b, 0x45, 0xde, 0x21, 0x4a, 0x89, 0x97, 0x77, 0x0e, 0x49, 0x86, 0x3a, 0x5e, 0xc8, + 0x68, 0xac, 0x7c, 0xe7, 0xca, 0x00, 0xf3, 0xaf, 0xc4, 0x8e, 0xfb, 0x3d, 0x73, 0x0f, 0xd4, 0xd8, + 0xfb, 0x98, 0x24, 0x2d, 0xbd, 0xad, 0x6f, 0xd4, 0xfd, 0xce, 0x94, 0x43, 0x25, 0xcc, 0x38, 0x7c, + 0xf0, 0x01, 0x1d, 0x8f, 0xb6, 0x1d, 0x19, 0x3a, 0x3f, 0xbe, 0x3f, 0x5f, 0x2e, 
0x76, 0xd9, 0xc1, + 0x38, 0x21, 0x69, 0xda, 0xcf, 0x12, 0x1a, 0x47, 0x81, 0x4a, 0x37, 0x37, 0x41, 0x15, 0xa7, 0xe4, + 0xa4, 0x65, 0xb4, 0xf5, 0x8d, 0xaa, 0x0f, 0x27, 0x1c, 0x56, 0x7b, 0x7d, 0x72, 0x32, 0xe5, 0x50, + 0xea, 0x33, 0x0e, 0x1b, 0xaa, 0x9c, 0x88, 0x9c, 0x40, 0x8a, 0x02, 0x8a, 0x04, 0x54, 0x69, 0xeb, + 0x1b, 0x0f, 0x15, 0xb4, 0x57, 0x40, 0xd1, 0x3d, 0x28, 0x52, 0x50, 0x54, 0x40, 0x4c, 0x40, 0xd5, + 0x12, 0x3a, 0x28, 0x20, 0x76, 0x0f, 0x62, 0x0a, 0x12, 0x3f, 0x66, 0x1f, 0x2c, 0x8c, 0x13, 0x96, + 0x53, 0x4c, 0x92, 0x56, 0x4d, 0x5e, 0x75, 0x6b, 0xca, 0xe1, 0xad, 0x36, 0xe3, 0x70, 0x51, 0x41, + 0x37, 0xca, 0xff, 0x2f, 0x7c, 0x0b, 0x6d, 0x2f, 0x7c, 0xbe, 0x80, 0xda, 0xef, 0x0b, 0xa8, 0x39, + 0x5f, 0xab, 0xa0, 0x26, 0x5b, 0x6a, 0xee, 0x01, 0x83, 0x62, 0xd9, 0xcd, 0x46, 0xb7, 0xe5, 0xfe, + 0x35, 0x4b, 0xb7, 0x68, 0xbb, 0xbf, 0x7e, 0xc9, 0xa1, 0x36, 0xe1, 0xd0, 0xd8, 0xef, 0x4d, 0x39, + 0x34, 0x28, 0x9e, 0x71, 0x58, 0x57, 0x07, 0xa0, 0xd8, 0x09, 0x0c, 0x8a, 0xcd, 0xd7, 0xa0, 0x96, + 0x66, 0x28, 0x23, 0xb2, 0xa3, 0x8f, 0xba, 0x6b, 0xff, 0xae, 0xe5, 0xf6, 0x45, 0x8e, 0xbf, 0x22, + 0xe6, 0x26, 0xd3, 0xcb, 0xb9, 0xc9, 0xd0, 0x09, 0x94, 0x6c, 0xbe, 0x01, 0xb5, 0x71, 0x42, 0x43, + 0x22, 0x7b, 0xdd, 0xe8, 0xae, 0xb9, 0xc5, 0xdd, 0xc4, 0x23, 0x71, 0x8b, 0x47, 0xe2, 0xf6, 0x48, + 0xb8, 0xcb, 0x68, 0xac, 0x8e, 0x27, 0x4a, 0x4a, 0xa4, 0x2c, 0x29, 0x43, 0x27, 0x50, 0xb2, 0xe9, + 0x03, 0x10, 0x26, 0x04, 0x65, 0x04, 0x0f, 0x50, 0x26, 0xc7, 0x51, 0xf1, 0x9f, 0x4d, 0x39, 0xbc, + 0xa3, 0xce, 0x38, 0x5c, 0x52, 0x68, 0xa9, 0x39, 0x41, 0xbd, 0x08, 0x76, 0x32, 0xf3, 0x05, 0xa8, + 0x87, 0x23, 0x96, 0x12, 0x3c, 0x60, 0xb1, 0x1c, 0x4c, 0xc5, 0x7f, 0x3a, 0xe5, 0xb0, 0x14, 0x67, + 0x1c, 0x36, 0x8b, 0x0a, 0x37, 0x92, 0x13, 0x2c, 0xa8, 0xf5, 0x41, 0xec, 0x7c, 0xd1, 0x41, 0x4d, + 0xb6, 0xc0, 0x74, 0xc0, 0x3c, 0x8d, 0x73, 0x34, 0xa2, 0xb8, 0xa9, 0x59, 0x8f, 0xcf, 0xce, 0xdb, + 0x4b, 0xb2, 0x41, 0xd2, 0xdc, 0x57, 0x86, 0xb9, 0x0a, 0xe6, 0x50, 0x98, 0xd1, 0x9c, 0x34, 0x75, + 0x6b, 0xf1, 0xec, 0xbc, 0xdd, 0x90, 0x29, 0x3b, 0x52, 0x32, 0xbb, 0xc0, 0xa4, 0x71, 0x7a, 0x7a, + 0x74, 0x44, 0x43, 0x4a, 0xe2, 0x6c, 0x70, 0x74, 0x1a, 0xe3, 0xb4, 0x69, 0x58, 0xd6, 0xd9, 0x79, + 0xfb, 0x89, 0x1a, 0xdc, 0x1d, 0xfb, 0xa5, 0x70, 0x45, 0x41, 0x75, 0x94, 0x66, 0xe5, 0x4e, 0xc1, + 0x5d, 0x29, 0x59, 0xd5, 0x8f, 0xdf, 0x6c, 0xad, 0x7c, 0x24, 0xfe, 0xd6, 0xe5, 0xc4, 0xd6, 0xaf, + 0x26, 0xb6, 0xfe, 0x6b, 0x62, 0xeb, 0x9f, 0xae, 0x6d, 0xed, 0xea, 0xda, 0xd6, 0x7e, 0x5e, 0xdb, + 0xda, 0xdb, 0xf5, 0xf1, 0x30, 0x72, 0xd1, 0x30, 0x73, 0x31, 0xc9, 0xbd, 0x88, 0x79, 0x31, 0xc3, + 0xa4, 0xfc, 0x3c, 0x1c, 0xce, 0xc9, 0xff, 0xed, 0xe6, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcb, + 0x76, 0xed, 0x4c, 0x38, 0x04, 0x00, 0x00, +} + +func (m *LeaseID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Provider) > 0 { + i -= len(m.Provider) + copy(dAtA[i:], m.Provider) + i = encodeVarintLease(dAtA, i, uint64(len(m.Provider))) + i-- + dAtA[i] = 0x2a + } + if m.OSeq != 0 { + i = encodeVarintLease(dAtA, i, uint64(m.OSeq)) + i-- + dAtA[i] = 0x20 + } + if m.GSeq != 0 { + i = encodeVarintLease(dAtA, i, uint64(m.GSeq)) + i-- + dAtA[i] = 0x18 + } + if m.DSeq != 0 { + i = 
encodeVarintLease(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintLease(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Lease) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Lease) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ClosedOn != 0 { + i = encodeVarintLease(dAtA, i, uint64(m.ClosedOn)) + i-- + dAtA[i] = 0x28 + } + if m.CreatedAt != 0 { + i = encodeVarintLease(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x20 + } + { + size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLease(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.State != 0 { + i = encodeVarintLease(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLease(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintLease(dAtA []byte, offset int, v uint64) int { + offset -= sovLease(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LeaseID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovLease(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovLease(uint64(m.DSeq)) + } + if m.GSeq != 0 { + n += 1 + sovLease(uint64(m.GSeq)) + } + if m.OSeq != 0 { + n += 1 + sovLease(uint64(m.OSeq)) + } + l = len(m.Provider) + if l > 0 { + n += 1 + l + sovLease(uint64(l)) + } + return n +} + +func (m *Lease) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovLease(uint64(l)) + if m.State != 0 { + n += 1 + sovLease(uint64(m.State)) + } + l = m.Price.Size() + n += 1 + l + sovLease(uint64(l)) + if m.CreatedAt != 0 { + n += 1 + sovLease(uint64(m.CreatedAt)) + } + if m.ClosedOn != 0 { + n += 1 + sovLease(uint64(m.ClosedOn)) + } + return n +} + +func sovLease(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozLease(x uint64) (n int) { + return sovLease(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *LeaseID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLease + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLease + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) + } + m.GSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) + } + m.OSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLease + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLease + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provider = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLease(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLease + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Lease) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Lease: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
if msglen < 0 { + return ErrInvalidLengthLease + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLease + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= Lease_State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLease + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLease + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClosedOn", wireType) + } + m.ClosedOn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClosedOn |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipLease(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLease + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLease(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLease + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLease + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLease + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthLease + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupLease + } + depth-- + case 5: + iNdEx += 4 + default: 
+ return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthLease + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthLease = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLease = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupLease = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1/order.go b/go/node/market/v1/order.go new file mode 100644 index 00000000..de10200f --- /dev/null +++ b/go/node/market/v1/order.go @@ -0,0 +1,50 @@ +package v1 + +import ( + "fmt" + + cerrors "cosmossdk.io/errors" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + dtypes "pkg.akt.dev/go/node/deployment/v1" +) + +// MakeOrderID returns OrderID instance with provided groupID details and oseq +func MakeOrderID(id dtypes.GroupID, oseq uint32) OrderID { + return OrderID{ + Owner: id.Owner, + DSeq: id.DSeq, + GSeq: id.GSeq, + OSeq: oseq, + } +} + +// GroupID method returns groupID details for specific order +func (id OrderID) GroupID() dtypes.GroupID { + return dtypes.GroupID{ + Owner: id.Owner, + DSeq: id.DSeq, + GSeq: id.GSeq, + } +} + +// Equals method compares specific order with provided order +func (id OrderID) Equals(other OrderID) bool { + return id.GroupID().Equals(other.GroupID()) && id.OSeq == other.OSeq +} + +// Validate method for OrderID and returns nil +func (id OrderID) Validate() error { + if err := id.GroupID().Validate(); err != nil { + return cerrors.Wrap(err, "OrderID: Invalid GroupID") + } + if id.OSeq == 0 { + return sdkerrors.ErrInvalidSequence.Wrap("OrderID: Invalid Order Sequence") + } + return nil +} + +// String provides stringer interface to save reflected formatting. +func (id OrderID) String() string { + return fmt.Sprintf("%s/%v", id.GroupID(), id.OSeq) +} diff --git a/go/node/market/v1/order.pb.go b/go/node/market/v1/order.pb.go new file mode 100644 index 00000000..34d3ea39 --- /dev/null +++ b/go/node/market/v1/order.pb.go @@ -0,0 +1,431 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1/order.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// OrderID stores owner and all other seq numbers +type OrderID struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` + GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` + OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` +} + +func (m *OrderID) Reset() { *m = OrderID{} } +func (*OrderID) ProtoMessage() {} +func (*OrderID) Descriptor() ([]byte, []int) { + return fileDescriptor_84d7e2ed6a8c3a6e, []int{0} +} +func (m *OrderID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OrderID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_OrderID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *OrderID) XXX_Merge(src proto.Message) { + xxx_messageInfo_OrderID.Merge(m, src) +} +func (m *OrderID) XXX_Size() int { + return m.Size() +} +func (m *OrderID) XXX_DiscardUnknown() { + xxx_messageInfo_OrderID.DiscardUnknown(m) +} + +var xxx_messageInfo_OrderID proto.InternalMessageInfo + +func (m *OrderID) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *OrderID) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *OrderID) GetGSeq() uint32 { + if m != nil { + return m.GSeq + } + return 0 +} + +func (m *OrderID) GetOSeq() uint32 { + if m != nil { + return m.OSeq + } + return 0 +} + +func init() { + proto.RegisterType((*OrderID)(nil), "akash.market.v1.OrderID") +} + +func init() { proto.RegisterFile("akash/market/v1/order.proto", fileDescriptor_84d7e2ed6a8c3a6e) } + +var fileDescriptor_84d7e2ed6a8c3a6e = []byte{ + // 307 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0xcf, 0x4d, 0x2c, 0xca, 0x4e, 0x2d, 0xd1, 0x2f, 0x33, 0xd4, 0xcf, 0x2f, 0x4a, 0x49, + 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x07, 0x4b, 0xea, 0x41, 0x24, 0xf5, 0xca, + 0x0c, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x72, 0xfa, 0x20, 0x16, 0x44, 0x99, 0x94, 0x64, + 0x72, 0x7e, 0x71, 0x6e, 0x7e, 0x71, 0x3c, 0x44, 0x02, 0xc2, 0x81, 0x48, 0x29, 0xfd, 0x66, 0xe4, + 0x62, 0xf7, 0x07, 0x99, 0xe8, 0xe9, 0x22, 0xe4, 0xce, 0xc5, 0x9a, 0x5f, 0x9e, 0x97, 0x5a, 0x24, + 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0xe9, 0x64, 0xf8, 0xea, 0x9e, 0x3c, 0x44, 0xe0, 0xd3, 0x3d, 0x79, + 0x9e, 0xca, 0xc4, 0xdc, 0x1c, 0x2b, 0x25, 0x30, 0x57, 0xe9, 0xd2, 0x16, 0x5d, 0x11, 0xa8, 0x29, + 0x8e, 0x29, 0x29, 0x45, 0xa9, 0xc5, 0xc5, 0xc1, 0x25, 0x45, 0x99, 0x79, 0xe9, 0x41, 0x10, 0xe5, + 0x42, 0xc6, 0x5c, 0x2c, 0x29, 0xc5, 0xa9, 0x85, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0x2c, 0x4e, 0xf2, + 0x8f, 0xee, 0xc9, 0xb3, 0xb8, 0x04, 0xa7, 0x16, 0xbe, 0xba, 0x27, 0x0f, 0x16, 0xff, 0x74, 0x4f, + 0x9e, 0x1b, 0x62, 0x1c, 0x88, 0xa7, 0x14, 0x04, 0x16, 0x04, 0x69, 0x4a, 0x07, 0x69, 0x62, 0x56, + 0x60, 0xd4, 0xe0, 0x85, 0x68, 0x72, 0x87, 0x6a, 0x4a, 0x47, 0xd1, 0x94, 0x0e, 0xd1, 0x94, 0x0e, + 0xd5, 0x94, 0x0f, 0xd2, 0xc4, 0x82, 0xd0, 0xe4, 0x0f, 0xd5, 0x94, 0x8f, 0xa2, 0x29, 0x1f, 0xa2, + 0x09, 0x44, 0x59, 0x71, 0xcc, 0x58, 0x20, 0xcf, 0xf0, 0x62, 0x81, 0x3c, 0x83, 0x93, 0xf9, 0x89, + 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 
+ 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0xc9, 0x16, 0x64, 0xa7, 0xeb, 0x25, 0x66, + 0x97, 0xe8, 0xa5, 0xa4, 0x96, 0xe9, 0xa7, 0xe7, 0xeb, 0xe7, 0xe5, 0xa7, 0xa4, 0x22, 0x22, 0x21, + 0x89, 0x0d, 0x1c, 0x7a, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb1, 0x56, 0x9f, 0x15, 0x9e, + 0x01, 0x00, 0x00, +} + +func (m *OrderID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OrderID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OrderID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OSeq != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.OSeq)) + i-- + dAtA[i] = 0x20 + } + if m.GSeq != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.GSeq)) + i-- + dAtA[i] = 0x18 + } + if m.DSeq != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintOrder(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintOrder(dAtA []byte, offset int, v uint64) int { + offset -= sovOrder(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *OrderID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovOrder(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovOrder(uint64(m.DSeq)) + } + if m.GSeq != 0 { + n += 1 + sovOrder(uint64(m.GSeq)) + } + if m.OSeq != 0 { + n += 1 + sovOrder(uint64(m.OSeq)) + } + return n +} + +func sovOrder(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozOrder(x uint64) (n int) { + return sovOrder(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *OrderID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OrderID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OrderID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) + } + m.GSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) + } + m.OSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOrder(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOrder + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipOrder(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOrder + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOrder + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOrder + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthOrder + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupOrder + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthOrder + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthOrder = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowOrder = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupOrder = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1beta1/bid.pb.go b/go/node/market/v1beta1/bid.pb.go deleted file mode 100644 index 7a550c6a..00000000 --- a/go/node/market/v1beta1/bid.pb.go +++ /dev/null @@ -1,1966 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta1/bid.proto - -package v1beta1 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of bid -type Bid_State int32 - -const ( - // Prefix should start with 0 in enum. So declaring dummy state - BidStateInvalid Bid_State = 0 - // BidOpen denotes state for bid open - BidOpen Bid_State = 1 - // BidMatched denotes state for bid open - BidActive Bid_State = 2 - // BidLost denotes state for bid lost - BidLost Bid_State = 3 - // BidClosed denotes state for bid closed - BidClosed Bid_State = 4 -) - -var Bid_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "active", - 3: "lost", - 4: "closed", -} - -var Bid_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "active": 2, - "lost": 3, - "closed": 4, -} - -func (x Bid_State) String() string { - return proto.EnumName(Bid_State_name, int32(x)) -} - -func (Bid_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_057fd80e533b030c, []int{5, 0} -} - -// MsgCreateBid defines an SDK message for creating Bid -type MsgCreateBid struct { - Order OrderID `protobuf:"bytes,1,opt,name=order,proto3" json:"order" yaml:"order"` - Provider string `protobuf:"bytes,2,opt,name=provider,proto3" json:"provider" yaml:"provider"` - Price types.Coin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` - Deposit types.Coin `protobuf:"bytes,4,opt,name=deposit,proto3" json:"deposit" yaml:"deposit"` -} - -func (m *MsgCreateBid) Reset() { *m = MsgCreateBid{} } -func (m *MsgCreateBid) String() string { return proto.CompactTextString(m) } -func (*MsgCreateBid) ProtoMessage() {} -func (*MsgCreateBid) Descriptor() ([]byte, []int) { - return fileDescriptor_057fd80e533b030c, []int{0} -} -func (m *MsgCreateBid) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateBid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateBid.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateBid) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateBid.Merge(m, src) -} -func (m *MsgCreateBid) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateBid) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateBid.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateBid proto.InternalMessageInfo - -func (m *MsgCreateBid) GetOrder() OrderID { - if m != nil { - return m.Order - } - return OrderID{} -} - -func (m *MsgCreateBid) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *MsgCreateBid) GetPrice() types.Coin { - if m != nil { - return m.Price - } - return types.Coin{} -} - -func (m *MsgCreateBid) GetDeposit() types.Coin { - if m != nil { - return m.Deposit - } - return types.Coin{} -} - -// MsgCreateBidResponse defines the Msg/CreateBid response type. 
-type MsgCreateBidResponse struct { -} - -func (m *MsgCreateBidResponse) Reset() { *m = MsgCreateBidResponse{} } -func (m *MsgCreateBidResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateBidResponse) ProtoMessage() {} -func (*MsgCreateBidResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_057fd80e533b030c, []int{1} -} -func (m *MsgCreateBidResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateBidResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateBidResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateBidResponse.Merge(m, src) -} -func (m *MsgCreateBidResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateBidResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateBidResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateBidResponse proto.InternalMessageInfo - -// MsgCloseBid defines an SDK message for closing bid -type MsgCloseBid struct { - BidID BidID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCloseBid) Reset() { *m = MsgCloseBid{} } -func (m *MsgCloseBid) String() string { return proto.CompactTextString(m) } -func (*MsgCloseBid) ProtoMessage() {} -func (*MsgCloseBid) Descriptor() ([]byte, []int) { - return fileDescriptor_057fd80e533b030c, []int{2} -} -func (m *MsgCloseBid) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseBid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseBid.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseBid) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseBid.Merge(m, src) -} -func (m *MsgCloseBid) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseBid) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseBid.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseBid proto.InternalMessageInfo - -func (m *MsgCloseBid) GetBidID() BidID { - if m != nil { - return m.BidID - } - return BidID{} -} - -// MsgCloseBidResponse defines the Msg/CloseBid response type. 
-type MsgCloseBidResponse struct { -} - -func (m *MsgCloseBidResponse) Reset() { *m = MsgCloseBidResponse{} } -func (m *MsgCloseBidResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCloseBidResponse) ProtoMessage() {} -func (*MsgCloseBidResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_057fd80e533b030c, []int{3} -} -func (m *MsgCloseBidResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseBidResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseBidResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseBidResponse.Merge(m, src) -} -func (m *MsgCloseBidResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseBidResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseBidResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseBidResponse proto.InternalMessageInfo - -// BidID stores owner and all other seq numbers -// A successful bid becomes a Lease(ID). -type BidID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` -} - -func (m *BidID) Reset() { *m = BidID{} } -func (*BidID) ProtoMessage() {} -func (*BidID) Descriptor() ([]byte, []int) { - return fileDescriptor_057fd80e533b030c, []int{4} -} -func (m *BidID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BidID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BidID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BidID) XXX_Merge(src proto.Message) { - xxx_messageInfo_BidID.Merge(m, src) -} -func (m *BidID) XXX_Size() int { - return m.Size() -} -func (m *BidID) XXX_DiscardUnknown() { - xxx_messageInfo_BidID.DiscardUnknown(m) -} - -var xxx_messageInfo_BidID proto.InternalMessageInfo - -func (m *BidID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *BidID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *BidID) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *BidID) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *BidID) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -// Bid stores BidID, state of bid and price -type Bid struct { - BidID BidID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` - State Bid_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta1.Bid_State" json:"state" yaml:"state"` - Price types.Coin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` -} - -func (m *Bid) Reset() { *m = Bid{} } -func (*Bid) ProtoMessage() {} -func (*Bid) Descriptor() ([]byte, []int) { - 
return fileDescriptor_057fd80e533b030c, []int{5} -} -func (m *Bid) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Bid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Bid.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Bid) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bid.Merge(m, src) -} -func (m *Bid) XXX_Size() int { - return m.Size() -} -func (m *Bid) XXX_DiscardUnknown() { - xxx_messageInfo_Bid.DiscardUnknown(m) -} - -var xxx_messageInfo_Bid proto.InternalMessageInfo - -func (m *Bid) GetBidID() BidID { - if m != nil { - return m.BidID - } - return BidID{} -} - -func (m *Bid) GetState() Bid_State { - if m != nil { - return m.State - } - return BidStateInvalid -} - -func (m *Bid) GetPrice() types.Coin { - if m != nil { - return m.Price - } - return types.Coin{} -} - -func (m *Bid) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -// BidFilters defines flags for bid list filter -type BidFilters struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` - State string `protobuf:"bytes,6,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *BidFilters) Reset() { *m = BidFilters{} } -func (m *BidFilters) String() string { return proto.CompactTextString(m) } -func (*BidFilters) ProtoMessage() {} -func (*BidFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_057fd80e533b030c, []int{6} -} -func (m *BidFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BidFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BidFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BidFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_BidFilters.Merge(m, src) -} -func (m *BidFilters) XXX_Size() int { - return m.Size() -} -func (m *BidFilters) XXX_DiscardUnknown() { - xxx_messageInfo_BidFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_BidFilters proto.InternalMessageInfo - -func (m *BidFilters) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *BidFilters) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *BidFilters) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *BidFilters) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *BidFilters) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *BidFilters) GetState() string { - if m != nil { - return m.State - } - return "" -} - -func init() { - proto.RegisterEnum("akash.market.v1beta1.Bid_State", Bid_State_name, Bid_State_value) - proto.RegisterType((*MsgCreateBid)(nil), "akash.market.v1beta1.MsgCreateBid") - proto.RegisterType((*MsgCreateBidResponse)(nil), "akash.market.v1beta1.MsgCreateBidResponse") - proto.RegisterType((*MsgCloseBid)(nil), 
"akash.market.v1beta1.MsgCloseBid") - proto.RegisterType((*MsgCloseBidResponse)(nil), "akash.market.v1beta1.MsgCloseBidResponse") - proto.RegisterType((*BidID)(nil), "akash.market.v1beta1.BidID") - proto.RegisterType((*Bid)(nil), "akash.market.v1beta1.Bid") - proto.RegisterType((*BidFilters)(nil), "akash.market.v1beta1.BidFilters") -} - -func init() { proto.RegisterFile("akash/market/v1beta1/bid.proto", fileDescriptor_057fd80e533b030c) } - -var fileDescriptor_057fd80e533b030c = []byte{ - // 734 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x95, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc7, 0xed, 0xc4, 0x4e, 0x9b, 0x4d, 0x3f, 0x22, 0xb7, 0x45, 0x6d, 0x50, 0xbd, 0xc6, 0x07, - 0xd4, 0x93, 0xad, 0xb6, 0xb7, 0x72, 0xaa, 0x5b, 0x01, 0x91, 0x80, 0x22, 0x17, 0x71, 0x80, 0x43, - 0xe5, 0x78, 0x57, 0xee, 0xaa, 0x49, 0xd6, 0xf5, 0x9a, 0x40, 0xdf, 0x00, 0xf5, 0x84, 0xc4, 0x85, - 0x4b, 0x51, 0x25, 0x5e, 0x83, 0x07, 0xe8, 0xb1, 0x27, 0xc4, 0xc9, 0x42, 0xe9, 0x05, 0xe5, 0x98, - 0x27, 0x40, 0xbb, 0xeb, 0x38, 0xa9, 0x54, 0x40, 0x48, 0x70, 0xe3, 0xe4, 0xcc, 0x7f, 0xe6, 0x37, - 0x19, 0xcf, 0x8c, 0x77, 0x81, 0x19, 0x1c, 0x05, 0xec, 0xd0, 0xed, 0x04, 0xc9, 0x11, 0x4e, 0xdd, - 0xde, 0x7a, 0x0b, 0xa7, 0xc1, 0xba, 0xdb, 0x22, 0xc8, 0x89, 0x13, 0x9a, 0x52, 0x63, 0x51, 0xf8, - 0x1d, 0xe9, 0x77, 0x72, 0x7f, 0x63, 0x31, 0xa2, 0x11, 0x15, 0x01, 0x2e, 0xff, 0x25, 0x63, 0x1b, - 0xd6, 0x8d, 0xb9, 0x68, 0x82, 0x70, 0x92, 0x47, 0x98, 0x21, 0x65, 0x1d, 0xca, 0xdc, 0x56, 0xc0, - 0x70, 0x11, 0x10, 0x52, 0xd2, 0x95, 0x7e, 0xfb, 0x4b, 0x09, 0xcc, 0x3c, 0x66, 0xd1, 0x4e, 0x82, - 0x83, 0x14, 0x7b, 0x04, 0x19, 0x2f, 0x81, 0x2e, 0xf8, 0x65, 0xd5, 0x52, 0xd7, 0x6a, 0x1b, 0xab, - 0xce, 0x4d, 0xe5, 0x38, 0x7b, 0x3c, 0xa4, 0xb9, 0xeb, 0xdd, 0xbd, 0xc8, 0xa0, 0xd2, 0xcf, 0xa0, - 0x2e, 0x84, 0x41, 0x06, 0x25, 0x3c, 0xcc, 0xe0, 0xcc, 0x49, 0xd0, 0x69, 0x6f, 0xd9, 0xc2, 0xb4, - 0x7d, 0x29, 0x1b, 0xf7, 0xc0, 0x74, 0x9c, 0xd0, 0x1e, 0xe1, 0xf9, 0x4b, 0x96, 0xba, 0x56, 0xf5, - 0xe0, 0x20, 0x83, 0x85, 0x36, 0xcc, 0xe0, 0xbc, 0xc4, 0x46, 0x8a, 0xed, 0x17, 0x4e, 0xe3, 0x09, - 0xd0, 0xe3, 0x84, 0x84, 0x78, 0xb9, 0x2c, 0x2a, 0x5b, 0x71, 0xe4, 0xab, 0x39, 0xfc, 0xd5, 0x8a, - 0xc2, 0x76, 0x28, 0xe9, 0x7a, 0xab, 0xbc, 0x2a, 0x5e, 0x8c, 0x88, 0x1f, 0x17, 0x23, 0x4c, 0xdb, - 0x97, 0xb2, 0xf1, 0x1c, 0x4c, 0x21, 0x1c, 0x53, 0x46, 0xd2, 0x65, 0xed, 0x77, 0x19, 0xef, 0xe4, - 0x19, 0x47, 0xc4, 0x30, 0x83, 0x73, 0x32, 0x67, 0x2e, 0xd8, 0xfe, 0xc8, 0xb5, 0xa5, 0x7d, 0x3f, - 0x87, 0x8a, 0x7d, 0x0b, 0x2c, 0x4e, 0xf6, 0xd5, 0xc7, 0x2c, 0xa6, 0x5d, 0x86, 0x6d, 0x02, 0x6a, - 0x5c, 0x6f, 0x53, 0x26, 0xda, 0xfd, 0x0c, 0x54, 0x5a, 0x04, 0x1d, 0x10, 0x94, 0xf7, 0xfb, 0xf6, - 0xcd, 0xfd, 0xf6, 0x08, 0x6a, 0xee, 0x7a, 0xd6, 0xa8, 0xdb, 0xc2, 0x1c, 0x64, 0xb0, 0x44, 0xd0, - 0x30, 0x83, 0x55, 0x59, 0x09, 0x41, 0xb6, 0xaf, 0xb7, 0x08, 0x6a, 0xa2, 0xbc, 0x84, 0x25, 0xb0, - 0x30, 0xf1, 0x57, 0x45, 0x05, 0x1f, 0x4b, 0x40, 0x26, 0x30, 0x5c, 0xa0, 0xd3, 0xd7, 0xdd, 0x7c, - 0xd6, 0x55, 0x6f, 0x45, 0xcc, 0x8f, 0x0b, 0x13, 0xf3, 0xe3, 0x26, 0x9f, 0x1f, 0x7f, 0x1a, 0x9b, - 0x40, 0x43, 0x0c, 0x1f, 0x8b, 0xd9, 0x69, 0x1e, 0xec, 0x67, 0x50, 0xdb, 0xdd, 0xc7, 0xc7, 0x83, - 0x0c, 0x0a, 0x7d, 0x98, 0xc1, 0x5a, 0xde, 0x15, 0x86, 0x8f, 0x6d, 0x5f, 0x88, 0x1c, 0x8a, 0x38, - 0xc4, 0xc7, 0x36, 0x2b, 0xa1, 0x07, 0x39, 0x14, 0x5d, 0x83, 0x22, 0x09, 0x45, 0x39, 0x44, 0x39, - 0xa4, 0x8d, 0xa1, 0xbd, 0x1c, 0xa2, 0xd7, 0x20, 0x2a, 0x21, 0xfe, 0xb8, 0xb6, 0x5e, 0xfa, 0x1f, - 0xae, 0xd7, 0xd6, 0xf4, 0x87, 0x73, 0xa8, 0x88, 0xbe, 0x7d, 0x2e, 0x83, 
0xf2, 0x3f, 0x9b, 0x8d, - 0xf1, 0x14, 0xe8, 0x2c, 0x0d, 0x52, 0x2c, 0x9a, 0x38, 0xb7, 0x01, 0x7f, 0x9a, 0xd4, 0xd9, 0xe7, - 0x61, 0x72, 0x2a, 0x82, 0x18, 0x4f, 0x45, 0x98, 0xb6, 0x2f, 0xe5, 0xbf, 0xfe, 0x61, 0xac, 0x02, - 0x10, 0x8a, 0xbd, 0x45, 0x07, 0x81, 0xfc, 0x36, 0xca, 0x7e, 0x35, 0x57, 0xb6, 0x53, 0xfb, 0xbd, - 0x0a, 0x74, 0x51, 0x9a, 0x61, 0x81, 0x29, 0xd2, 0xed, 0x05, 0x6d, 0x82, 0xea, 0x4a, 0x63, 0xe1, - 0xf4, 0xcc, 0x9a, 0xf7, 0x08, 0x12, 0xae, 0xa6, 0x94, 0x8d, 0x25, 0xa0, 0xd1, 0x18, 0x77, 0xeb, - 0x6a, 0xa3, 0x76, 0x7a, 0x66, 0x4d, 0x79, 0x04, 0xed, 0xc5, 0xb8, 0x6b, 0xac, 0x80, 0x4a, 0x10, - 0xa6, 0xa4, 0x87, 0xeb, 0xa5, 0xc6, 0xec, 0xe9, 0x99, 0x55, 0xf5, 0x08, 0xda, 0x16, 0x02, 0x27, - 0xda, 0x94, 0xa5, 0xf5, 0x72, 0x41, 0x3c, 0xa2, 0x2c, 0xe5, 0x44, 0xc8, 0x17, 0x19, 0xd5, 0xb5, - 0x82, 0x10, 0x9b, 0x8d, 0x1a, 0xda, 0xdb, 0x4f, 0xa6, 0x32, 0x31, 0xbe, 0xcb, 0x12, 0x00, 0x1e, - 0x41, 0xf7, 0x49, 0x3b, 0xc5, 0x09, 0xfb, 0xbf, 0xe4, 0x93, 0x67, 0xa8, 0x3b, 0x5a, 0xbe, 0xca, - 0xb8, 0x19, 0xbf, 0xda, 0x2d, 0x79, 0x92, 0x78, 0x0f, 0x2f, 0xfa, 0xa6, 0x7a, 0xd9, 0x37, 0xd5, - 0x6f, 0x7d, 0x53, 0x7d, 0x77, 0x65, 0x2a, 0x97, 0x57, 0xa6, 0xf2, 0xf5, 0xca, 0x54, 0x5e, 0x38, - 0x11, 0x49, 0x0f, 0x5f, 0xb5, 0x9c, 0x90, 0x76, 0x5c, 0xda, 0x4b, 0xc2, 0xf6, 0x91, 0x2b, 0xef, - 0xa4, 0x37, 0xa3, 0x5b, 0x29, 0x3d, 0x89, 0x31, 0x1b, 0x5d, 0x3d, 0xad, 0x8a, 0xb8, 0x76, 0x36, - 0x7f, 0x04, 0x00, 0x00, 0xff, 0xff, 0x9a, 0xef, 0x46, 0xe7, 0x06, 0x07, 0x00, 0x00, -} - -func (m *MsgCreateBid) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateBid) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateBid) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Deposit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintBid(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Order.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCreateBidResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateBidResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgCloseBid) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseBid) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseBid) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.BidID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCloseBidResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseBidResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *BidID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BidID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BidID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintBid(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintBid(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Bid) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Bid) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Bid) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CreatedAt != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.State != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.BidID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *BidFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BidFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BidFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = 
encodeVarintBid(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x32 - } - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintBid(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintBid(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintBid(dAtA []byte, offset int, v uint64) int { - offset -= sovBid(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *MsgCreateBid) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Order.Size() - n += 1 + l + sovBid(uint64(l)) - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - l = m.Price.Size() - n += 1 + l + sovBid(uint64(l)) - l = m.Deposit.Size() - n += 1 + l + sovBid(uint64(l)) - return n -} - -func (m *MsgCreateBidResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgCloseBid) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BidID.Size() - n += 1 + l + sovBid(uint64(l)) - return n -} - -func (m *MsgCloseBidResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *BidID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovBid(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovBid(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovBid(uint64(m.OSeq)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - return n -} - -func (m *Bid) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BidID.Size() - n += 1 + l + sovBid(uint64(l)) - if m.State != 0 { - n += 1 + sovBid(uint64(m.State)) - } - l = m.Price.Size() - n += 1 + l + sovBid(uint64(l)) - if m.CreatedAt != 0 { - n += 1 + sovBid(uint64(m.CreatedAt)) - } - return n -} - -func (m *BidFilters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovBid(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovBid(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovBid(uint64(m.OSeq)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - return n -} - -func sovBid(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozBid(x uint64) (n int) { - return sovBid(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *MsgCreateBid) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
MsgCreateBid: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateBid: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Order.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deposit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Deposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateBidResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - 
fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateBidResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseBid) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseBid: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseBid: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BidID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BidID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseBidResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseBidResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BidID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid 
- } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BidID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BidID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Bid) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - 
} - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Bid: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Bid: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BidID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BidID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Bid_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BidFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BidFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BidFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipBid(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBid - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 
0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBid - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBid - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthBid - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupBid - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthBid - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthBid = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowBid = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupBid = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta1/codec.go b/go/node/market/v1beta1/codec.go deleted file mode 100644 index ba062059..00000000 --- a/go/node/market/v1beta1/codec.go +++ /dev/null @@ -1,50 +0,0 @@ -package v1beta1 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/msgservice" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/market module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/market and - // defined at the application level. - ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterCodec registers the necessary x/market interfaces and concrete types -// on the provided Amino codec. These types are used for Amino JSON serialization. 
-func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { - cdc.RegisterConcrete(&MsgCreateBid{}, ModuleName+"/"+MsgTypeCreateBid, nil) - cdc.RegisterConcrete(&MsgCloseBid{}, ModuleName+"/"+MsgTypeCloseBid, nil) - cdc.RegisterConcrete(&MsgCreateLease{}, ModuleName+"/"+MsgTypeCreateLease, nil) - cdc.RegisterConcrete(&MsgWithdrawLease{}, ModuleName+"/"+MsgTypeWithdrawLease, nil) - cdc.RegisterConcrete(&MsgCloseLease{}, ModuleName+"/"+MsgTypeCloseLease, nil) -} - -// RegisterInterfaces registers the x/market interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil), - &MsgCreateBid{}, - &MsgCloseBid{}, - &MsgCreateLease{}, - &MsgWithdrawLease{}, - &MsgCloseLease{}, - ) - - msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) -} diff --git a/go/node/market/v1beta1/errors.go b/go/node/market/v1beta1/errors.go deleted file mode 100644 index 66dba9f2..00000000 --- a/go/node/market/v1beta1/errors.go +++ /dev/null @@ -1,22 +0,0 @@ -package v1beta1 - -import ( - "errors" -) - -var ( - // ErrEmptyProvider is the error when provider is empty - ErrEmptyProvider = errors.New("empty provider") - // ErrSameAccount is the error when owner and provider are the same account - ErrSameAccount = errors.New("owner and provider are the same account") - // ErrBidZeroPrice zero price - ErrBidZeroPrice = errors.New("invalid bid: zero price") - // ErrOrderActive order active - ErrOrderActive = errors.New("order active") - // ErrOrderClosed order closed - ErrOrderClosed = errors.New("order closed") - // ErrInvalidParam indicates an invalid chain parameter - ErrInvalidParam = errors.New("parameter invalid") - // ErrInvalidBid indicates an invalid chain parameter - ErrInvalidBid = errors.New("unknown provider") -) diff --git a/go/node/market/v1beta1/escrow.go b/go/node/market/v1beta1/escrow.go deleted file mode 100644 index 40fbe101..00000000 --- a/go/node/market/v1beta1/escrow.go +++ /dev/null @@ -1,60 +0,0 @@ -package v1beta1 - -import ( - "fmt" - "strconv" - "strings" - - sdk "github.com/cosmos/cosmos-sdk/types" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta1" - etypes "github.com/akash-network/akash-api/go/node/escrow/v1beta1" -) - -const ( - bidEscrowScope = "bid" -) - -func EscrowAccountForBid(id BidID) etypes.AccountID { - return etypes.AccountID{ - Scope: bidEscrowScope, - XID: id.String(), - } -} - -func EscrowPaymentForLease(id LeaseID) string { - return fmt.Sprintf("%v/%v/%s", id.GSeq, id.OSeq, id.Provider) -} - -func LeaseIDFromEscrowAccount(id etypes.AccountID, pid string) (LeaseID, bool) { - did, ok := dtypes.DeploymentIDFromEscrowAccount(id) - if !ok { - return LeaseID{}, false - } - - parts := strings.Split(pid, "/") - if len(parts) != 3 { - return LeaseID{}, false - } - - gseq, err := strconv.ParseUint(parts[0], 10, 32) - if err != nil { - return LeaseID{}, false - } - - oseq, err := strconv.ParseUint(parts[1], 10, 32) - if err != nil { - return LeaseID{}, false - } - - owner, err := sdk.AccAddressFromBech32(parts[2]) - if err != nil { - return LeaseID{}, false - } - - return MakeLeaseID( - MakeBidID( - MakeOrderID( - dtypes.MakeGroupID( - did, uint32(gseq)), uint32(oseq)), owner)), true -} diff --git a/go/node/market/v1beta1/event.go b/go/node/market/v1beta1/event.go deleted file mode 100644 index be7d2d7f..00000000 --- a/go/node/market/v1beta1/event.go +++ /dev/null @@ -1,359 +0,0 @@ -package v1beta1 - -import ( - "strconv" - - sdk 
"github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta1" - "github.com/akash-network/akash-api/go/sdkutil" -) - -const ( - evActionOrderCreated = "order-created" - evActionOrderClosed = "order-closed" - evActionBidCreated = "bid-created" - evActionBidClosed = "bid-closed" - evActionLeaseCreated = "lease-created" - evActionLeaseClosed = "lease-closed" - - evOSeqKey = "oseq" - evProviderKey = "provider" - evPriceDenomKey = "price-denom" - evPriceAmountKey = "price-amount" -) - -var ( - ErrParsingPrice = errors.New("error parsing price") -) - -// EventOrderCreated struct -type EventOrderCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID OrderID `json:"id"` -} - -func NewEventOrderCreated(id OrderID) EventOrderCreated { - return EventOrderCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionOrderCreated, - }, - ID: id, - } -} - -// ToSDKEvent method creates new sdk event for EventOrderCreated struct -func (e EventOrderCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionOrderCreated), - }, orderIDEVAttributes(e.ID)...)..., - ) -} - -// EventOrderClosed struct -type EventOrderClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID OrderID `json:"id"` -} - -func NewEventOrderClosed(id OrderID) EventOrderClosed { - return EventOrderClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionOrderClosed, - }, - ID: id, - } -} - -// ToSDKEvent method creates new sdk event for EventOrderClosed struct -func (e EventOrderClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionOrderClosed), - }, orderIDEVAttributes(e.ID)...)..., - ) -} - -// EventBidCreated struct -type EventBidCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID BidID `json:"id"` - Price sdk.Coin `json:"price"` -} - -func NewEventBidCreated(id BidID, price sdk.Coin) EventBidCreated { - return EventBidCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionBidCreated, - }, - ID: id, - Price: price, - } -} - -// ToSDKEvent method creates new sdk event for EventBidCreated struct -func (e EventBidCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append( - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionBidCreated), - }, bidIDEVAttributes(e.ID)...), - priceEVAttributes(e.Price)...)..., - ) -} - -// EventBidClosed struct -type EventBidClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID BidID `json:"id"` - Price sdk.Coin `json:"price"` -} - -func NewEventBidClosed(id BidID, price sdk.Coin) EventBidClosed { - return EventBidClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionBidClosed, - }, - ID: id, - Price: price, - } -} - -// ToSDKEvent method creates new sdk event for EventBidClosed struct -func (e EventBidClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append( - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionBidClosed), - 
}, bidIDEVAttributes(e.ID)...), - priceEVAttributes(e.Price)...)..., - ) -} - -// EventLeaseCreated struct -type EventLeaseCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID LeaseID `json:"id"` - Price sdk.Coin `json:"price"` -} - -func NewEventLeaseCreated(id LeaseID, price sdk.Coin) EventLeaseCreated { - return EventLeaseCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionLeaseCreated, - }, - ID: id, - Price: price, - } -} - -// ToSDKEvent method creates new sdk event for EventLeaseCreated struct -func (e EventLeaseCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append( - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionLeaseCreated), - }, leaseIDEVAttributes(e.ID)...), - priceEVAttributes(e.Price)...)...) -} - -// EventLeaseClosed struct -type EventLeaseClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID LeaseID `json:"id"` - Price sdk.Coin `json:"price"` -} - -func NewEventLeaseClosed(id LeaseID, price sdk.Coin) EventLeaseClosed { - return EventLeaseClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionLeaseClosed, - }, - ID: id, - Price: price, - } -} - -// ToSDKEvent method creates new sdk event for EventLeaseClosed struct -func (e EventLeaseClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append( - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionLeaseClosed), - }, leaseIDEVAttributes(e.ID)...), - priceEVAttributes(e.Price)...)...) -} - -// orderIDEVAttributes returns event attribues for given orderID -func orderIDEVAttributes(id OrderID) []sdk.Attribute { - return append(dtypes.GroupIDEVAttributes(id.GroupID()), - sdk.NewAttribute(evOSeqKey, strconv.FormatUint(uint64(id.OSeq), 10))) -} - -// parseEVOrderID returns orderID for given event attributes -func parseEVOrderID(attrs []sdk.Attribute) (OrderID, error) { - gid, err := dtypes.ParseEVGroupID(attrs) - if err != nil { - return OrderID{}, err - } - oseq, err := sdkutil.GetUint64(attrs, evOSeqKey) - if err != nil { - return OrderID{}, err - } - - return OrderID{ - Owner: gid.Owner, - DSeq: gid.DSeq, - GSeq: gid.GSeq, - OSeq: uint32(oseq), - }, nil - -} - -// bidIDEVAttributes returns event attribues for given bidID -func bidIDEVAttributes(id BidID) []sdk.Attribute { - return append(orderIDEVAttributes(id.OrderID()), - sdk.NewAttribute(evProviderKey, id.Provider)) -} - -// parseEVBidID returns bidID for given event attributes -func parseEVBidID(attrs []sdk.Attribute) (BidID, error) { - oid, err := parseEVOrderID(attrs) - if err != nil { - return BidID{}, err - } - - provider, err := sdkutil.GetAccAddress(attrs, evProviderKey) - if err != nil { - return BidID{}, err - } - - return BidID{ - Owner: oid.Owner, - DSeq: oid.DSeq, - GSeq: oid.GSeq, - OSeq: oid.OSeq, - Provider: provider.String(), - }, nil -} - -// leaseIDEVAttributes returns event attribues for given LeaseID -func leaseIDEVAttributes(id LeaseID) []sdk.Attribute { - return append(orderIDEVAttributes(id.OrderID()), - sdk.NewAttribute(evProviderKey, id.Provider)) -} - -// parseEVLeaseID returns leaseID for given event attributes -func parseEVLeaseID(attrs []sdk.Attribute) (LeaseID, error) { - bid, err := parseEVBidID(attrs) - if err != nil { - return LeaseID{}, err - } - return LeaseID(bid), nil -} - -func priceEVAttributes(price sdk.Coin) 
[]sdk.Attribute { - return []sdk.Attribute{ - sdk.NewAttribute(evPriceDenomKey, price.Denom), - sdk.NewAttribute(evPriceAmountKey, price.Amount.String()), - } -} - -func parseEVPriceAttributes(attrs []sdk.Attribute) (sdk.Coin, error) { - denom, err := sdkutil.GetString(attrs, evPriceDenomKey) - if err != nil { - return sdk.Coin{}, err - } - - amounts, err := sdkutil.GetString(attrs, evPriceAmountKey) - if err != nil { - return sdk.Coin{}, err - } - - amount, ok := sdk.NewIntFromString(amounts) - if !ok { - return sdk.Coin{}, ErrParsingPrice - } - - return sdk.NewCoin(denom, amount), nil -} - -// ParseEvent parses event and returns details of event and error if occurred -func ParseEvent(ev sdkutil.Event) (sdkutil.ModuleEvent, error) { - if ev.Type != sdkutil.EventTypeMessage { - return nil, sdkutil.ErrUnknownType - } - if ev.Module != ModuleName { - return nil, sdkutil.ErrUnknownModule - } - switch ev.Action { - - case evActionOrderCreated: - id, err := parseEVOrderID(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventOrderCreated(id), nil - case evActionOrderClosed: - id, err := parseEVOrderID(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventOrderClosed(id), nil - - case evActionBidCreated: - id, err := parseEVBidID(ev.Attributes) - if err != nil { - return nil, err - } - price, err := parseEVPriceAttributes(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventBidCreated(id, price), nil - case evActionBidClosed: - id, err := parseEVBidID(ev.Attributes) - if err != nil { - return nil, err - } - // optional price - price, _ := parseEVPriceAttributes(ev.Attributes) - return NewEventBidClosed(id, price), nil - - case evActionLeaseCreated: - id, err := parseEVLeaseID(ev.Attributes) - if err != nil { - return nil, err - } - price, err := parseEVPriceAttributes(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventLeaseCreated(id, price), nil - case evActionLeaseClosed: - id, err := parseEVLeaseID(ev.Attributes) - if err != nil { - return nil, err - } - // optional price - price, _ := parseEVPriceAttributes(ev.Attributes) - return NewEventLeaseClosed(id, price), nil - - default: - return nil, sdkutil.ErrUnknownAction - } -} diff --git a/go/node/market/v1beta1/events_test.go b/go/node/market/v1beta1/events_test.go deleted file mode 100644 index a4457775..00000000 --- a/go/node/market/v1beta1/events_test.go +++ /dev/null @@ -1,460 +0,0 @@ -package v1beta1 - -import ( - "fmt" - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/pkg/errors" - - "github.com/stretchr/testify/require" - - "github.com/akash-network/akash-api/go/sdkutil" -) - -var ( - errWildcard = errors.New("wildcard string error can't be matched") - evOwnerKey = "owner" - evDSeqKey = "dseq" - evGSeqKey = "gseq" -) - -type testEventParsing struct { - msg sdkutil.Event - expErr error -} - -func (tep testEventParsing) testMessageType() func(t *testing.T) { - _, err := ParseEvent(tep.msg) - return func(t *testing.T) { - // if the error expected is errWildcard to catch untyped errors, don't fail the test, the error was expected. 
- if errors.Is(tep.expErr, errWildcard) { - require.Error(t, err) - } else { - require.Equal(t, tep.expErr, err) - } - } -} - -var TEPS = []testEventParsing{ - { - msg: sdkutil.Event{ - Type: "nil", - }, - expErr: sdkutil.ErrUnknownType, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - }, - expErr: sdkutil.ErrUnknownModule, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - }, - expErr: sdkutil.ErrUnknownAction, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: "nil", - }, - expErr: sdkutil.ErrUnknownModule, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: "nil", - }, - expErr: sdkutil.ErrUnknownAction, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionOrderCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionOrderCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "nooo", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionOrderCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "abc", - }, - }, - }, - expErr: errWildcard, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionOrderClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - }, - }, - expErr: nil, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionBidCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionBidCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "yesss", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionBidCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: 
"akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "hello", - }, - }, - }, - expErr: errWildcard, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionBidClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: nil, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionLeaseCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionLeaseCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "hello", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: errWildcard, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionLeaseClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: nil, - }, -} - -func TestEventParsing(t *testing.T) { - for i, test := range TEPS { - t.Run(fmt.Sprintf("%d", i), - test.testMessageType()) - } -} diff --git a/go/node/market/v1beta1/genesis.pb.go b/go/node/market/v1beta1/genesis.pb.go deleted file mode 100644 index a03e558c..00000000 --- a/go/node/market/v1beta1/genesis.pb.go +++ /dev/null @@ -1,453 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta1/genesis.proto - -package v1beta1 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisState defines the basic genesis state used by market module -type GenesisState struct { - Orders []Order `protobuf:"bytes,1,rep,name=orders,proto3" json:"orders" yaml:"orders"` - Leases []Lease `protobuf:"bytes,2,rep,name=leases,proto3" json:"leases" yaml:"leases"` - Params Params `protobuf:"bytes,3,opt,name=params,proto3" json:"params" yaml:"params"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_3add0908026fd9bf, []int{0} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetOrders() []Order { - if m != nil { - return m.Orders - } - return nil -} - -func (m *GenesisState) GetLeases() []Lease { - if m != nil { - return m.Leases - } - return nil -} - -func (m *GenesisState) GetParams() Params { - if m != nil { - return m.Params - } - return Params{} -} - -func init() { - proto.RegisterType((*GenesisState)(nil), "akash.market.v1beta1.GenesisState") -} - -func init() { - proto.RegisterFile("akash/market/v1beta1/genesis.proto", fileDescriptor_3add0908026fd9bf) -} - -var fileDescriptor_3add0908026fd9bf = []byte{ - // 296 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0xb1, 0x4e, 0xc3, 0x30, - 0x14, 0x45, 0xe3, 0x56, 0xca, 0x90, 0xc2, 0x12, 0x75, 0x88, 0x0a, 0x72, 0x82, 0xa7, 0x4e, 0xb6, - 0x5a, 0x36, 0xc6, 0x2e, 0x30, 0x20, 0x81, 0x02, 0x2c, 0x6c, 0x4e, 0xb1, 0xd2, 0x2a, 0x09, 0x8e, - 0x6c, 0x53, 0xd1, 0xbf, 0x80, 0xbf, 0xea, 0xd8, 0x91, 0x29, 0x42, 0xc9, 0xc6, 0xc8, 0x17, 0x54, - 0xb1, 0x5d, 0x65, 0x89, 0xba, 0xe5, 0xe9, 0x9d, 0x77, 0x72, 0x7d, 0x3d, 0x44, 0x33, 0x2a, 0x57, - 0xa4, 0xa0, 0x22, 0x63, 0x8a, 0x6c, 0x66, 0x09, 0x53, 0x74, 0x46, 0x52, 0xf6, 0xce, 0xe4, 0x5a, - 0xe2, 0x52, 0x70, 0xc5, 0xfd, 0xb1, 0x66, 0xb0, 0x61, 0xb0, 0x65, 0x26, 0xe3, 0x94, 0xa7, 0x5c, - 0x03, 0xa4, 0xfd, 0x32, 0xec, 0x24, 0xea, 0xf5, 0x71, 0xf1, 0xc6, 0xc4, 0x49, 0x22, 0x67, 0x54, - 0x32, 0x4b, 0x5c, 0xf5, 0x12, 0x25, 0x15, 0xb4, 0xb0, 0x91, 0xd0, 0xf7, 0xc0, 0x3b, 0xbb, 0x35, - 0x21, 0x9f, 0x14, 0x55, 0xcc, 0x7f, 0xf6, 0x5c, 0xfd, 0x13, 0x19, 0x80, 0x68, 0x38, 0x1d, 0xcd, - 0x2f, 0x70, 0x5f, 0x68, 0xfc, 0xd0, 0x32, 0x8b, 0x70, 0x57, 0x85, 0xce, 0x5f, 0x15, 0xda, 0x93, - 0xff, 0x2a, 0x3c, 0xdf, 0xd2, 0x22, 0xbf, 0x41, 0x66, 0x46, 0xb1, 0x5d, 0xb4, 0x56, 0x1d, 0x4c, 
- 0x06, 0x83, 0x53, 0xd6, 0xfb, 0x96, 0xe9, 0xac, 0xe6, 0xa4, 0xb3, 0x9a, 0x19, 0xc5, 0x76, 0xe1, - 0xbf, 0x78, 0xae, 0x79, 0x4c, 0x30, 0x8c, 0xc0, 0x74, 0x34, 0xbf, 0xec, 0xb7, 0x3e, 0x6a, 0xa6, - 0xd3, 0x9a, 0x9b, 0x4e, 0x6b, 0x66, 0x14, 0xdb, 0xc5, 0xe2, 0x6e, 0x57, 0x43, 0xb0, 0xaf, 0x21, - 0xf8, 0xad, 0x21, 0xf8, 0x6a, 0xa0, 0xb3, 0x6f, 0xa0, 0xf3, 0xd3, 0x40, 0xe7, 0x15, 0xa7, 0x6b, - 0xb5, 0xfa, 0x48, 0xf0, 0x92, 0x17, 0x84, 0x6f, 0xc4, 0x32, 0xcf, 0x88, 0xa9, 0xf8, 0xf3, 0x58, - 0xb2, 0xda, 0x96, 0x4c, 0x1e, 0xab, 0x4e, 0x5c, 0x5d, 0xf2, 0xf5, 0x21, 0x00, 0x00, 0xff, 0xff, - 0x8b, 0xfa, 0xcc, 0xec, 0x1d, 0x02, 0x00, 0x00, -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if len(m.Leases) > 0 { - for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Orders) > 0 { - for iNdEx := len(m.Orders) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Orders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Orders) > 0 { - for _, e := range m.Orders { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - if len(m.Leases) > 0 { - for _, e := range m.Leases { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - l = m.Params.Size() - n += 1 + l + sovGenesis(uint64(l)) - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Orders", wireType) - } - var msglen int - for shift := uint(0); ; shift += 
7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Orders = append(m.Orders, Order{}) - if err := m.Orders[len(m.Orders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Leases = append(m.Leases, Lease{}) - if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, 
ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta1/id.go b/go/node/market/v1beta1/id.go deleted file mode 100644 index 7bce98d1..00000000 --- a/go/node/market/v1beta1/id.go +++ /dev/null @@ -1,154 +0,0 @@ -package v1beta1 - -import ( - "fmt" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta1" -) - -// MakeOrderID returns OrderID instance with provided groupID details and oseq -func MakeOrderID(id dtypes.GroupID, oseq uint32) OrderID { - return OrderID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: id.GSeq, - OSeq: oseq, - } -} - -// GroupID method returns groupID details for specific order -func (id OrderID) GroupID() dtypes.GroupID { - return dtypes.GroupID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: id.GSeq, - } -} - -// Equals method compares specific order with provided order -func (id OrderID) Equals(other OrderID) bool { - return id.GroupID().Equals(other.GroupID()) && id.OSeq == other.OSeq -} - -// Validate method for OrderID and returns nil -func (id OrderID) Validate() error { - if err := id.GroupID().Validate(); err != nil { - return sdkerrors.Wrap(err, "OrderID: Invalid GroupID") - } - if id.OSeq == 0 { - return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "OrderID: Invalid Order Sequence") - } - return nil -} - -// String provides stringer interface to save reflected formatting. -func (id OrderID) String() string { - return fmt.Sprintf("%s/%v", id.GroupID(), id.OSeq) -} - -// MakeBidID returns BidID instance with provided order details and provider -func MakeBidID(id OrderID, provider sdk.AccAddress) BidID { - return BidID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: id.GSeq, - OSeq: id.OSeq, - Provider: provider.String(), - } -} - -// Equals method compares specific bid with provided bid -func (id BidID) Equals(other BidID) bool { - return id.OrderID().Equals(other.OrderID()) && - id.Provider == other.Provider -} - -// LeaseID method returns lease details of bid -func (id BidID) LeaseID() LeaseID { - return LeaseID(id) -} - -// OrderID method returns OrderID details with specific bid details -func (id BidID) OrderID() OrderID { - return OrderID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: id.GSeq, - OSeq: id.OSeq, - } -} - -// String method for consistent output. 
-func (id BidID) String() string { - return fmt.Sprintf("%s/%v", id.OrderID(), id.Provider) -} - -// GroupID method returns GroupID details with specific bid details -func (id BidID) GroupID() dtypes.GroupID { - return id.OrderID().GroupID() -} - -// DeploymentID method returns deployment details with specific bid details -func (id BidID) DeploymentID() dtypes.DeploymentID { - return id.GroupID().DeploymentID() -} - -// Validate validates bid instance and returns nil -func (id BidID) Validate() error { - if err := id.OrderID().Validate(); err != nil { - return sdkerrors.Wrap(err, "BidID: Invalid OrderID") - } - if _, err := sdk.AccAddressFromBech32(id.Provider); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "BidID: Invalid Provider Address") - } - if id.Owner == id.Provider { - return sdkerrors.Wrap(sdkerrors.ErrConflict, "BidID: self-bid") - } - return nil -} - -// MakeLeaseID returns LeaseID instance with provided bid details -func MakeLeaseID(id BidID) LeaseID { - return LeaseID(id) -} - -// Equals method compares specific lease with provided lease -func (id LeaseID) Equals(other LeaseID) bool { - return id.BidID().Equals(other.BidID()) -} - -// Validate calls the BidID's validator and returns any error. -func (id LeaseID) Validate() error { - if err := id.BidID().Validate(); err != nil { - return sdkerrors.Wrap(err, "LeaseID: Invalid BidID") - } - return nil -} - -// BidID method returns BidID details with specific LeaseID -func (id LeaseID) BidID() BidID { - return BidID(id) -} - -// OrderID method returns OrderID details with specific lease details -func (id LeaseID) OrderID() OrderID { - return id.BidID().OrderID() -} - -// GroupID method returns GroupID details with specific lease details -func (id LeaseID) GroupID() dtypes.GroupID { - return id.OrderID().GroupID() -} - -// DeploymentID method returns deployment details with specific lease details -func (id LeaseID) DeploymentID() dtypes.DeploymentID { - return id.GroupID().DeploymentID() -} - -// String method provides human readable representation of LeaseID. -func (id LeaseID) String() string { - return id.BidID().String() -} diff --git a/go/node/market/v1beta1/key.go b/go/node/market/v1beta1/key.go deleted file mode 100644 index 396358af..00000000 --- a/go/node/market/v1beta1/key.go +++ /dev/null @@ -1,24 +0,0 @@ -package v1beta1 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "market" - - // StoreKey is the store key string for market - StoreKey = ModuleName - - // RouterKey is the message route for market - RouterKey = ModuleName -) - -func OrderPrefix() []byte { - return []byte{0x01, 0x00} -} - -func BidPrefix() []byte { - return []byte{0x02, 0x00} -} - -func LeasePrefix() []byte { - return []byte{0x03, 0x00} -} diff --git a/go/node/market/v1beta1/lease.pb.go b/go/node/market/v1beta1/lease.pb.go deleted file mode 100644 index 0becfdf5..00000000 --- a/go/node/market/v1beta1/lease.pb.go +++ /dev/null @@ -1,2097 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta1/lease.proto - -package v1beta1 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of lease -type Lease_State int32 - -const ( - // Prefix should start with 0 in enum. So declaring dummy state - LeaseStateInvalid Lease_State = 0 - // LeaseActive denotes state for lease active - LeaseActive Lease_State = 1 - // LeaseInsufficientFunds denotes state for lease insufficient_funds - LeaseInsufficientFunds Lease_State = 2 - // LeaseClosed denotes state for lease closed - LeaseClosed Lease_State = 3 -) - -var Lease_State_name = map[int32]string{ - 0: "invalid", - 1: "active", - 2: "insufficient_funds", - 3: "closed", -} - -var Lease_State_value = map[string]int32{ - "invalid": 0, - "active": 1, - "insufficient_funds": 2, - "closed": 3, -} - -func (x Lease_State) String() string { - return proto.EnumName(Lease_State_name, int32(x)) -} - -func (Lease_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_b81e11575e79ba08, []int{1, 0} -} - -// LeaseID stores bid details of lease -type LeaseID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` -} - -func (m *LeaseID) Reset() { *m = LeaseID{} } -func (*LeaseID) ProtoMessage() {} -func (*LeaseID) Descriptor() ([]byte, []int) { - return fileDescriptor_b81e11575e79ba08, []int{0} -} -func (m *LeaseID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseID) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseID.Merge(m, src) -} -func (m *LeaseID) XXX_Size() int { - return m.Size() -} -func (m *LeaseID) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseID.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseID proto.InternalMessageInfo - -func (m *LeaseID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *LeaseID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *LeaseID) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *LeaseID) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *LeaseID) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -// Lease stores LeaseID, state of lease and price -type Lease struct { - LeaseID LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"id" yaml:"id"` - State Lease_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta1.Lease_State" json:"state" yaml:"state"` - Price types.Coin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` - CreatedAt int64 
`protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` -} - -func (m *Lease) Reset() { *m = Lease{} } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { - return fileDescriptor_b81e11575e79ba08, []int{1} -} -func (m *Lease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Lease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Lease) XXX_Merge(src proto.Message) { - xxx_messageInfo_Lease.Merge(m, src) -} -func (m *Lease) XXX_Size() int { - return m.Size() -} -func (m *Lease) XXX_DiscardUnknown() { - xxx_messageInfo_Lease.DiscardUnknown(m) -} - -var xxx_messageInfo_Lease proto.InternalMessageInfo - -func (m *Lease) GetLeaseID() LeaseID { - if m != nil { - return m.LeaseID - } - return LeaseID{} -} - -func (m *Lease) GetState() Lease_State { - if m != nil { - return m.State - } - return LeaseStateInvalid -} - -func (m *Lease) GetPrice() types.Coin { - if m != nil { - return m.Price - } - return types.Coin{} -} - -func (m *Lease) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -// LeaseFilters defines flags for lease list filter -type LeaseFilters struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` - State string `protobuf:"bytes,6,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *LeaseFilters) Reset() { *m = LeaseFilters{} } -func (m *LeaseFilters) String() string { return proto.CompactTextString(m) } -func (*LeaseFilters) ProtoMessage() {} -func (*LeaseFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_b81e11575e79ba08, []int{2} -} -func (m *LeaseFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseFilters.Merge(m, src) -} -func (m *LeaseFilters) XXX_Size() int { - return m.Size() -} -func (m *LeaseFilters) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseFilters proto.InternalMessageInfo - -func (m *LeaseFilters) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *LeaseFilters) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *LeaseFilters) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *LeaseFilters) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *LeaseFilters) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *LeaseFilters) GetState() string { - if m != nil { - return m.State - } - return "" -} - -// MsgCreateLease is sent to create a 
lease -type MsgCreateLease struct { - BidID BidID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCreateLease) Reset() { *m = MsgCreateLease{} } -func (m *MsgCreateLease) String() string { return proto.CompactTextString(m) } -func (*MsgCreateLease) ProtoMessage() {} -func (*MsgCreateLease) Descriptor() ([]byte, []int) { - return fileDescriptor_b81e11575e79ba08, []int{3} -} -func (m *MsgCreateLease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateLease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateLease) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateLease.Merge(m, src) -} -func (m *MsgCreateLease) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateLease) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateLease.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateLease proto.InternalMessageInfo - -func (m *MsgCreateLease) GetBidID() BidID { - if m != nil { - return m.BidID - } - return BidID{} -} - -// MsgCreateLeaseResponse is the response from creating a lease -type MsgCreateLeaseResponse struct { -} - -func (m *MsgCreateLeaseResponse) Reset() { *m = MsgCreateLeaseResponse{} } -func (m *MsgCreateLeaseResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateLeaseResponse) ProtoMessage() {} -func (*MsgCreateLeaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b81e11575e79ba08, []int{4} -} -func (m *MsgCreateLeaseResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateLeaseResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateLeaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateLeaseResponse.Merge(m, src) -} -func (m *MsgCreateLeaseResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateLeaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateLeaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateLeaseResponse proto.InternalMessageInfo - -// MsgWithdrawLease defines an SDK message for closing bid -type MsgWithdrawLease struct { - LeaseID LeaseID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` -} - -func (m *MsgWithdrawLease) Reset() { *m = MsgWithdrawLease{} } -func (m *MsgWithdrawLease) String() string { return proto.CompactTextString(m) } -func (*MsgWithdrawLease) ProtoMessage() {} -func (*MsgWithdrawLease) Descriptor() ([]byte, []int) { - return fileDescriptor_b81e11575e79ba08, []int{5} -} -func (m *MsgWithdrawLease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgWithdrawLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgWithdrawLease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgWithdrawLease) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgWithdrawLease.Merge(m, src) -} -func (m *MsgWithdrawLease) XXX_Size() int { - 
return m.Size() -} -func (m *MsgWithdrawLease) XXX_DiscardUnknown() { - xxx_messageInfo_MsgWithdrawLease.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgWithdrawLease proto.InternalMessageInfo - -func (m *MsgWithdrawLease) GetLeaseID() LeaseID { - if m != nil { - return m.LeaseID - } - return LeaseID{} -} - -// MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. -type MsgWithdrawLeaseResponse struct { -} - -func (m *MsgWithdrawLeaseResponse) Reset() { *m = MsgWithdrawLeaseResponse{} } -func (m *MsgWithdrawLeaseResponse) String() string { return proto.CompactTextString(m) } -func (*MsgWithdrawLeaseResponse) ProtoMessage() {} -func (*MsgWithdrawLeaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b81e11575e79ba08, []int{6} -} -func (m *MsgWithdrawLeaseResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgWithdrawLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgWithdrawLeaseResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgWithdrawLeaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgWithdrawLeaseResponse.Merge(m, src) -} -func (m *MsgWithdrawLeaseResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgWithdrawLeaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgWithdrawLeaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgWithdrawLeaseResponse proto.InternalMessageInfo - -// MsgCloseLease defines an SDK message for closing order -type MsgCloseLease struct { - LeaseID LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCloseLease) Reset() { *m = MsgCloseLease{} } -func (m *MsgCloseLease) String() string { return proto.CompactTextString(m) } -func (*MsgCloseLease) ProtoMessage() {} -func (*MsgCloseLease) Descriptor() ([]byte, []int) { - return fileDescriptor_b81e11575e79ba08, []int{7} -} -func (m *MsgCloseLease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseLease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseLease) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseLease.Merge(m, src) -} -func (m *MsgCloseLease) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseLease) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseLease.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseLease proto.InternalMessageInfo - -func (m *MsgCloseLease) GetLeaseID() LeaseID { - if m != nil { - return m.LeaseID - } - return LeaseID{} -} - -// MsgCloseLeaseResponse defines the Msg/CloseLease response type. 
-type MsgCloseLeaseResponse struct { -} - -func (m *MsgCloseLeaseResponse) Reset() { *m = MsgCloseLeaseResponse{} } -func (m *MsgCloseLeaseResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCloseLeaseResponse) ProtoMessage() {} -func (*MsgCloseLeaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b81e11575e79ba08, []int{8} -} -func (m *MsgCloseLeaseResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseLeaseResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseLeaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseLeaseResponse.Merge(m, src) -} -func (m *MsgCloseLeaseResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseLeaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseLeaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseLeaseResponse proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("akash.market.v1beta1.Lease_State", Lease_State_name, Lease_State_value) - proto.RegisterType((*LeaseID)(nil), "akash.market.v1beta1.LeaseID") - proto.RegisterType((*Lease)(nil), "akash.market.v1beta1.Lease") - proto.RegisterType((*LeaseFilters)(nil), "akash.market.v1beta1.LeaseFilters") - proto.RegisterType((*MsgCreateLease)(nil), "akash.market.v1beta1.MsgCreateLease") - proto.RegisterType((*MsgCreateLeaseResponse)(nil), "akash.market.v1beta1.MsgCreateLeaseResponse") - proto.RegisterType((*MsgWithdrawLease)(nil), "akash.market.v1beta1.MsgWithdrawLease") - proto.RegisterType((*MsgWithdrawLeaseResponse)(nil), "akash.market.v1beta1.MsgWithdrawLeaseResponse") - proto.RegisterType((*MsgCloseLease)(nil), "akash.market.v1beta1.MsgCloseLease") - proto.RegisterType((*MsgCloseLeaseResponse)(nil), "akash.market.v1beta1.MsgCloseLeaseResponse") -} - -func init() { proto.RegisterFile("akash/market/v1beta1/lease.proto", fileDescriptor_b81e11575e79ba08) } - -var fileDescriptor_b81e11575e79ba08 = []byte{ - // 731 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4d, 0x4f, 0xdb, 0x4a, - 0x14, 0xb5, 0x93, 0x38, 0xc0, 0x84, 0x8f, 0x3c, 0x0b, 0x78, 0xc1, 0x08, 0x8f, 0x9f, 0x57, 0xac, - 0x6c, 0x11, 0x76, 0xbc, 0x15, 0x01, 0xf1, 0x5e, 0xa4, 0xd2, 0x4a, 0xa6, 0x52, 0xab, 0xaa, 0x12, - 0x72, 0x3c, 0x83, 0x19, 0x91, 0x78, 0x82, 0xc7, 0x84, 0xf2, 0x0f, 0x2a, 0x56, 0x5d, 0x76, 0x83, - 0x8a, 0xd4, 0x3f, 0xc3, 0x12, 0x55, 0x5d, 0x74, 0x65, 0x55, 0x61, 0x53, 0x65, 0x99, 0x5f, 0x50, - 0xcd, 0x8c, 0xf3, 0x85, 0x22, 0x56, 0x55, 0x57, 0x5d, 0x25, 0x73, 0xee, 0x3d, 0xf7, 0x9e, 0x9c, - 0x7b, 0x27, 0x03, 0x2c, 0xff, 0xcc, 0x67, 0xa7, 0x6e, 0xcb, 0x8f, 0xcf, 0x70, 0xe2, 0x76, 0xb6, - 0x1a, 0x38, 0xf1, 0xb7, 0xdc, 0x26, 0xf6, 0x19, 0x76, 0xda, 0x31, 0x4d, 0xa8, 0xbe, 0x2c, 0x32, - 0x1c, 0x99, 0xe1, 0x64, 0x19, 0xc6, 0x72, 0x48, 0x43, 0x2a, 0x12, 0x5c, 0xfe, 0x4d, 0xe6, 0x1a, - 0x66, 0x40, 0x59, 0x8b, 0x32, 0xb7, 0xe1, 0x33, 0x3c, 0x2c, 0x16, 0x50, 0x12, 0x0d, 0xe2, 0x53, - 0xbb, 0x35, 0x08, 0x92, 0x71, 0xfb, 0x36, 0x07, 0x66, 0x9e, 0xf1, 0xde, 0xf5, 0x7d, 0xdd, 0x05, - 0x1a, 0xbd, 0x8c, 0x70, 0x5c, 0x51, 0x2d, 0x75, 0x73, 0xae, 0xb6, 0xd6, 0x4b, 0xa1, 0x04, 0xfa, - 0x29, 0x9c, 0xbf, 0xf2, 0x5b, 0xcd, 0x1d, 0x5b, 0x1c, 0x6d, 0x4f, 0xc2, 0xfa, 0x36, 0x28, 0x20, - 0x86, 0xcf, 0x2b, 0x39, 
0x4b, 0xdd, 0x2c, 0xd4, 0x60, 0x37, 0x85, 0x85, 0xfd, 0x23, 0x7c, 0xde, - 0x4b, 0xa1, 0xc0, 0xfb, 0x29, 0x2c, 0x49, 0x1a, 0x3f, 0xd9, 0x9e, 0x00, 0x39, 0x29, 0xe4, 0xa4, - 0xbc, 0xa5, 0x6e, 0x2e, 0x48, 0xd2, 0x7f, 0x19, 0x29, 0x9c, 0x20, 0x85, 0x92, 0x14, 0x66, 0x24, - 0xca, 0x49, 0x85, 0x11, 0xe9, 0x45, 0x46, 0xa2, 0x13, 0x24, 0x2a, 0x49, 0xfc, 0x43, 0xff, 0x17, - 0xcc, 0xb6, 0x63, 0xda, 0x21, 0x08, 0xc7, 0x15, 0x4d, 0xfc, 0x24, 0xd8, 0x4b, 0xe1, 0x10, 0xeb, - 0xa7, 0x70, 0x49, 0x92, 0x06, 0x88, 0xed, 0x0d, 0x83, 0x3b, 0xb3, 0x1f, 0x6f, 0xa1, 0xf2, 0xe3, - 0x16, 0x2a, 0xf6, 0xd7, 0x3c, 0xd0, 0x84, 0x45, 0xfa, 0x5b, 0x30, 0x2b, 0xe6, 0x74, 0x4c, 0x90, - 0xf0, 0xa8, 0x54, 0xdd, 0x70, 0xa6, 0xcd, 0xca, 0xc9, 0x1c, 0xad, 0xd9, 0x77, 0x29, 0x54, 0xba, - 0x29, 0x1c, 0x58, 0xdc, 0x4b, 0x61, 0x8e, 0xa0, 0x7e, 0x0a, 0xe7, 0x64, 0x63, 0x82, 0x6c, 0x6f, - 0x46, 0x94, 0xac, 0x23, 0xdd, 0x03, 0x1a, 0x4b, 0xfc, 0x04, 0x0b, 0x3b, 0x17, 0xab, 0xff, 0x3c, - 0x51, 0xda, 0x39, 0xe2, 0x89, 0x72, 0x42, 0x82, 0x33, 0x9a, 0x90, 0x38, 0xda, 0x9e, 0x84, 0xf5, - 0xe7, 0x40, 0x6b, 0xc7, 0x24, 0xc0, 0xc2, 0xed, 0x52, 0x75, 0xcd, 0x91, 0xeb, 0xe2, 0xf0, 0x75, - 0x19, 0x96, 0xdc, 0xa3, 0x24, 0xaa, 0x6d, 0x70, 0xa9, 0xbc, 0x9e, 0xc8, 0x1f, 0xd5, 0x13, 0x47, - 0xdb, 0x93, 0xb0, 0xbe, 0x01, 0x40, 0x10, 0x63, 0x3f, 0xc1, 0xe8, 0xd8, 0x4f, 0xc4, 0x34, 0xf2, - 0xde, 0x5c, 0x86, 0xec, 0x26, 0xf6, 0x27, 0x15, 0x68, 0x42, 0x9a, 0x6e, 0x83, 0x19, 0x12, 0x75, - 0xfc, 0x26, 0x41, 0x65, 0xc5, 0x58, 0xb9, 0xbe, 0xb1, 0xfe, 0x12, 0xc2, 0x45, 0xb0, 0x2e, 0x03, - 0xfa, 0x3a, 0x28, 0xfa, 0x41, 0x42, 0x3a, 0xb8, 0xac, 0x1a, 0x4b, 0xd7, 0x37, 0x56, 0x49, 0xa4, - 0xec, 0x0a, 0x48, 0xaf, 0x02, 0x9d, 0x44, 0xec, 0xe2, 0xe4, 0x84, 0x04, 0x04, 0x47, 0xc9, 0xf1, - 0xc9, 0x45, 0x84, 0x58, 0x39, 0x67, 0x18, 0xd7, 0x37, 0xd6, 0xaa, 0xb4, 0x73, 0x2c, 0x7c, 0xc0, - 0xa3, 0xbc, 0x60, 0xd0, 0xa4, 0x0c, 0xa3, 0x72, 0x7e, 0xac, 0xe0, 0x9e, 0x80, 0x8c, 0xc2, 0xfb, - 0xcf, 0xa6, 0x32, 0x36, 0xd6, 0x2f, 0x39, 0x30, 0x2f, 0xe2, 0x07, 0xa4, 0x99, 0xe0, 0x98, 0xfd, - 0x59, 0xff, 0xb1, 0xf5, 0xe7, 0x66, 0xc8, 0x65, 0x2c, 0x8e, 0xcc, 0x78, 0x6a, 0xd3, 0x76, 0x0a, - 0xc2, 0xd4, 0x26, 0x58, 0x3c, 0x64, 0xe1, 0x9e, 0x58, 0x08, 0x79, 0x67, 0x5e, 0x82, 0x62, 0x83, - 0xa0, 0xd1, 0x8d, 0x59, 0x9f, 0xbe, 0xd6, 0x35, 0x82, 0xea, 0xfb, 0x35, 0x2b, 0xbb, 0x2f, 0x9a, - 0x38, 0x4e, 0xbb, 0x2d, 0x5a, 0x83, 0xa0, 0x3a, 0xca, 0xba, 0x55, 0xc0, 0xea, 0x64, 0x37, 0x0f, - 0xb3, 0x36, 0x8d, 0x18, 0xb6, 0x63, 0x50, 0x3e, 0x64, 0xe1, 0x2b, 0x92, 0x9c, 0xa2, 0xd8, 0xbf, - 0x94, 0x4a, 0x5e, 0x3f, 0x52, 0xf2, 0x0b, 0xee, 0xee, 0x84, 0x1a, 0x03, 0x54, 0x1e, 0xf7, 0x1c, - 0xea, 0x61, 0x60, 0x81, 0x2b, 0xe5, 0x9b, 0xf8, 0x1b, 0xfe, 0x4a, 0x32, 0x41, 0x7f, 0x83, 0x95, - 0x89, 0xa6, 0x03, 0x35, 0xb5, 0xff, 0xef, 0xba, 0xa6, 0x7a, 0xdf, 0x35, 0xd5, 0xef, 0x5d, 0x53, - 0xfd, 0xf0, 0x60, 0x2a, 0xf7, 0x0f, 0xa6, 0xf2, 0xed, 0xc1, 0x54, 0xde, 0x38, 0x21, 0x49, 0x4e, - 0x2f, 0x1a, 0x4e, 0x40, 0x5b, 0x2e, 0xed, 0xc4, 0x41, 0xf3, 0xcc, 0x95, 0x0f, 0xc8, 0xbb, 0xc1, - 0x13, 0x92, 0x5c, 0xb5, 0x31, 0x1b, 0x3c, 0x24, 0x8d, 0xa2, 0x78, 0x45, 0xb6, 0x7f, 0x06, 0x00, - 0x00, 0xff, 0xff, 0x0a, 0x38, 0xdf, 0xb1, 0xd5, 0x06, 0x00, 0x00, -} - -func (m *LeaseID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseID) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintLease(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintLease(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Lease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CreatedAt != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.State != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.LeaseID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *LeaseFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintLease(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x32 - } - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintLease(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintLease(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateLease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateLease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := 
m.BidID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCreateLeaseResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateLeaseResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgWithdrawLease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgWithdrawLease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgWithdrawLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.LeaseID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgWithdrawLeaseResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgWithdrawLeaseResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgWithdrawLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgCloseLease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseLease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.LeaseID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCloseLeaseResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseLeaseResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintLease(dAtA []byte, offset int, v uint64) int { - offset -= sovLease(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *LeaseID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 
1 + l + sovLease(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovLease(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovLease(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovLease(uint64(m.OSeq)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - return n -} - -func (m *Lease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LeaseID.Size() - n += 1 + l + sovLease(uint64(l)) - if m.State != 0 { - n += 1 + sovLease(uint64(m.State)) - } - l = m.Price.Size() - n += 1 + l + sovLease(uint64(l)) - if m.CreatedAt != 0 { - n += 1 + sovLease(uint64(m.CreatedAt)) - } - return n -} - -func (m *LeaseFilters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovLease(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovLease(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovLease(uint64(m.OSeq)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - return n -} - -func (m *MsgCreateLease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BidID.Size() - n += 1 + l + sovLease(uint64(l)) - return n -} - -func (m *MsgCreateLeaseResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgWithdrawLease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LeaseID.Size() - n += 1 + l + sovLease(uint64(l)) - return n -} - -func (m *MsgWithdrawLeaseResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgCloseLease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LeaseID.Size() - n += 1 + l + sovLease(uint64(l)) - return n -} - -func (m *MsgCloseLeaseResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovLease(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozLease(x uint64) (n int) { - return sovLease(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *LeaseID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - 
return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Lease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Lease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LeaseID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 
{ - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Lease_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateLease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateLease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateLease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BidID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BidID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - 
skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateLeaseResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateLeaseResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgWithdrawLease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgWithdrawLease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgWithdrawLease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LeaseID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgWithdrawLeaseResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: 
MsgWithdrawLeaseResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgWithdrawLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseLease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseLease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseLease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LeaseID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseLeaseResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseLeaseResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipLease(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthLease - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupLease - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthLease - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthLease = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowLease = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupLease = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta1/msgs.go b/go/node/market/v1beta1/msgs.go deleted file mode 100644 index 525c9947..00000000 --- a/go/node/market/v1beta1/msgs.go +++ /dev/null @@ -1,215 +0,0 @@ -package v1beta1 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" -) - -const ( - MsgTypeCreateBid = "create-bid" - MsgTypeCloseBid = "close-bid" - MsgTypeCreateLease = "create-lease" - MsgTypeWithdrawLease = "withdraw-lease" - MsgTypeCloseLease = "close-lease" -) - -var ( - _ sdk.Msg = &MsgCreateBid{} - _ sdk.Msg = &MsgCloseBid{} - _ sdk.Msg = &MsgCreateLease{} - _ sdk.Msg = &MsgWithdrawLease{} - _ sdk.Msg = &MsgCloseLease{} -) - -// NewMsgCreateBid creates a new MsgCreateBid instance -func NewMsgCreateBid(id OrderID, provider sdk.AccAddress, price sdk.Coin, deposit sdk.Coin) *MsgCreateBid { - return &MsgCreateBid{ - Order: id, - Provider: provider.String(), - Price: price, - Deposit: deposit, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCreateBid) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCreateBid) Type() string { return MsgTypeCreateBid } - -// GetSignBytes encodes the message for signing -func (msg MsgCreateBid) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCreateBid) GetSigners() []sdk.AccAddress { - provider, err := sdk.AccAddressFromBech32(msg.Provider) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{provider} -} - -// ValidateBasic does basic validation of provider and order -func (msg MsgCreateBid) ValidateBasic() error { - if err := msg.Order.Validate(); err != nil { - return err - } - - provider, err := sdk.AccAddressFromBech32(msg.Provider) - if err != nil { - return ErrEmptyProvider - } - - owner, err := sdk.AccAddressFromBech32(msg.Order.Owner) - if err != nil { - return errors.Wrap(ErrInvalidBid, "empty owner") - } - - if provider.Equals(owner) { - return ErrSameAccount - } - - if msg.Price.IsZero() { - return ErrBidZeroPrice - } - - return nil -} - -// NewMsgWithdrawLease creates a new MsgWithdrawLease instance -func NewMsgWithdrawLease(id LeaseID) *MsgWithdrawLease { - 
return &MsgWithdrawLease{ - LeaseID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgWithdrawLease) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgWithdrawLease) Type() string { return MsgTypeWithdrawLease } - -// GetSignBytes encodes the message for signing -func (msg MsgWithdrawLease) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgWithdrawLease) GetSigners() []sdk.AccAddress { - provider, err := sdk.AccAddressFromBech32(msg.GetLeaseID().Provider) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{provider} -} - -// ValidateBasic does basic validation of provider and order -func (msg MsgWithdrawLease) ValidateBasic() error { - if err := msg.LeaseID.Validate(); err != nil { - return err - } - return nil -} - -// NewMsgCreateLease creates a new MsgCreateLease instance -func NewMsgCreateLease(id BidID) *MsgCreateLease { - return &MsgCreateLease{ - BidID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCreateLease) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCreateLease) Type() string { return MsgTypeCreateLease } - -// GetSignBytes encodes the message for signing -func (msg MsgCreateLease) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCreateLease) GetSigners() []sdk.AccAddress { - provider, err := sdk.AccAddressFromBech32(msg.BidID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{provider} -} - -// ValidateBasic method for MsgCreateLease -func (msg MsgCreateLease) ValidateBasic() error { - return msg.BidID.Validate() -} - -// NewMsgCloseBid creates a new MsgCloseBid instance -func NewMsgCloseBid(id BidID) *MsgCloseBid { - return &MsgCloseBid{ - BidID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCloseBid) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCloseBid) Type() string { return MsgTypeCloseBid } - -// GetSignBytes encodes the message for signing -func (msg MsgCloseBid) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCloseBid) GetSigners() []sdk.AccAddress { - provider, err := sdk.AccAddressFromBech32(msg.BidID.Provider) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{provider} -} - -// ValidateBasic method for MsgCloseBid -func (msg MsgCloseBid) ValidateBasic() error { - return msg.BidID.Validate() -} - -// NewMsgCloseLease creates a new MsgCloseLease instance -func NewMsgCloseLease(id LeaseID) *MsgCloseLease { - return &MsgCloseLease{ - LeaseID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCloseLease) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCloseLease) Type() string { return MsgTypeCloseLease } - -// GetSignBytes encodes the message for signing -func (msg MsgCloseLease) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCloseLease) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.LeaseID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// ValidateBasic method for MsgCloseLease 
-func (msg MsgCloseLease) ValidateBasic() error { - return msg.LeaseID.Validate() -} diff --git a/go/node/market/v1beta1/order.pb.go b/go/node/market/v1beta1/order.pb.go deleted file mode 100644 index ab64bf4e..00000000 --- a/go/node/market/v1beta1/order.pb.go +++ /dev/null @@ -1,1107 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta1/order.proto - -package v1beta1 - -import ( - fmt "fmt" - v1beta1 "github.com/akash-network/akash-api/go/node/deployment/v1beta1" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of order -type Order_State int32 - -const ( - // Prefix should start with 0 in enum. So declaring dummy state - OrderStateInvalid Order_State = 0 - // OrderOpen denotes state for order open - OrderOpen Order_State = 1 - // OrderMatched denotes state for order matched - OrderActive Order_State = 2 - // OrderClosed denotes state for order lost - OrderClosed Order_State = 3 -) - -var Order_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "active", - 3: "closed", -} - -var Order_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "active": 2, - "closed": 3, -} - -func (x Order_State) String() string { - return proto.EnumName(Order_State_name, int32(x)) -} - -func (Order_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_0d97b6ff257f8a05, []int{1, 0} -} - -// OrderID stores owner and all other seq numbers -type OrderID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` -} - -func (m *OrderID) Reset() { *m = OrderID{} } -func (*OrderID) ProtoMessage() {} -func (*OrderID) Descriptor() ([]byte, []int) { - return fileDescriptor_0d97b6ff257f8a05, []int{0} -} -func (m *OrderID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OrderID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_OrderID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *OrderID) XXX_Merge(src proto.Message) { - xxx_messageInfo_OrderID.Merge(m, src) -} -func (m *OrderID) XXX_Size() int { - return m.Size() -} -func (m *OrderID) XXX_DiscardUnknown() { - xxx_messageInfo_OrderID.DiscardUnknown(m) -} - -var xxx_messageInfo_OrderID proto.InternalMessageInfo - -func (m *OrderID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *OrderID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *OrderID) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *OrderID) GetOSeq() uint32 { - if m != nil { - return 
m.OSeq - } - return 0 -} - -// Order stores orderID, state of order and other details -type Order struct { - OrderID OrderID `protobuf:"bytes,1,opt,name=order_id,json=orderId,proto3" json:"id" yaml:"id"` - State Order_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta1.Order_State" json:"state" yaml:"state"` - Spec v1beta1.GroupSpec `protobuf:"bytes,3,opt,name=spec,proto3" json:"spec" yaml:"spec"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` -} - -func (m *Order) Reset() { *m = Order{} } -func (*Order) ProtoMessage() {} -func (*Order) Descriptor() ([]byte, []int) { - return fileDescriptor_0d97b6ff257f8a05, []int{1} -} -func (m *Order) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Order.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Order) XXX_Merge(src proto.Message) { - xxx_messageInfo_Order.Merge(m, src) -} -func (m *Order) XXX_Size() int { - return m.Size() -} -func (m *Order) XXX_DiscardUnknown() { - xxx_messageInfo_Order.DiscardUnknown(m) -} - -var xxx_messageInfo_Order proto.InternalMessageInfo - -func (m *Order) GetOrderID() OrderID { - if m != nil { - return m.OrderID - } - return OrderID{} -} - -func (m *Order) GetState() Order_State { - if m != nil { - return m.State - } - return OrderStateInvalid -} - -func (m *Order) GetSpec() v1beta1.GroupSpec { - if m != nil { - return m.Spec - } - return v1beta1.GroupSpec{} -} - -func (m *Order) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -// OrderFilters defines flags for order list filter -type OrderFilters struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *OrderFilters) Reset() { *m = OrderFilters{} } -func (m *OrderFilters) String() string { return proto.CompactTextString(m) } -func (*OrderFilters) ProtoMessage() {} -func (*OrderFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_0d97b6ff257f8a05, []int{2} -} -func (m *OrderFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OrderFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_OrderFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *OrderFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_OrderFilters.Merge(m, src) -} -func (m *OrderFilters) XXX_Size() int { - return m.Size() -} -func (m *OrderFilters) XXX_DiscardUnknown() { - xxx_messageInfo_OrderFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_OrderFilters proto.InternalMessageInfo - -func (m *OrderFilters) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *OrderFilters) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *OrderFilters) GetGSeq() uint32 { - if m != nil { - return 
m.GSeq - } - return 0 -} - -func (m *OrderFilters) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *OrderFilters) GetState() string { - if m != nil { - return m.State - } - return "" -} - -func init() { - proto.RegisterEnum("akash.market.v1beta1.Order_State", Order_State_name, Order_State_value) - proto.RegisterType((*OrderID)(nil), "akash.market.v1beta1.OrderID") - proto.RegisterType((*Order)(nil), "akash.market.v1beta1.Order") - proto.RegisterType((*OrderFilters)(nil), "akash.market.v1beta1.OrderFilters") -} - -func init() { proto.RegisterFile("akash/market/v1beta1/order.proto", fileDescriptor_0d97b6ff257f8a05) } - -var fileDescriptor_0d97b6ff257f8a05 = []byte{ - // 579 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x94, 0x31, 0x6f, 0xd3, 0x4e, - 0x18, 0xc6, 0xed, 0xc4, 0x69, 0x9a, 0x4b, 0xfb, 0xff, 0x07, 0xab, 0x88, 0xe2, 0xaa, 0x3e, 0x63, - 0x18, 0x32, 0xd9, 0x6a, 0xba, 0x65, 0x6b, 0xa8, 0x28, 0x99, 0x22, 0x39, 0x4c, 0x08, 0xa9, 0x72, - 0xec, 0x93, 0x6b, 0xc5, 0xc9, 0xb9, 0xf6, 0x35, 0x90, 0x9d, 0x01, 0x65, 0x62, 0x41, 0x62, 0x89, - 0x54, 0x89, 0x0f, 0xc2, 0xda, 0xb1, 0x23, 0x93, 0x85, 0x92, 0x05, 0x65, 0xcc, 0x27, 0x40, 0xf7, - 0x9e, 0x83, 0x5b, 0x09, 0xf5, 0x03, 0x30, 0x25, 0xf7, 0xbc, 0xcf, 0xef, 0xf5, 0xdd, 0x73, 0xa7, - 0x17, 0x19, 0xee, 0xd0, 0x4d, 0x2f, 0xec, 0x91, 0x9b, 0x0c, 0x09, 0xb3, 0x27, 0x47, 0x03, 0xc2, - 0xdc, 0x23, 0x9b, 0x26, 0x3e, 0x49, 0xac, 0x38, 0xa1, 0x8c, 0xaa, 0x7b, 0xe0, 0xb0, 0x84, 0xc3, - 0xca, 0x1d, 0xda, 0x5e, 0x40, 0x03, 0x0a, 0x06, 0x9b, 0xff, 0x13, 0x5e, 0xed, 0x85, 0xe8, 0xe6, - 0x93, 0x38, 0xa2, 0xd3, 0x11, 0x19, 0x17, 0x1d, 0x83, 0x84, 0x5e, 0xc5, 0xc2, 0x65, 0x2e, 0x65, - 0x54, 0xed, 0xf1, 0x2f, 0x74, 0x4f, 0x55, 0x1b, 0x55, 0xe8, 0xfb, 0x31, 0x49, 0xf6, 0x65, 0x43, - 0x6e, 0xd6, 0x3a, 0x4f, 0x57, 0x19, 0x16, 0xc2, 0x3a, 0xc3, 0x3b, 0x53, 0x77, 0x14, 0xb5, 0x4d, - 0x58, 0x9a, 0x8e, 0x90, 0xd5, 0x63, 0xa4, 0xf8, 0x29, 0xb9, 0xdc, 0x2f, 0x19, 0x72, 0x53, 0xe9, - 0xe0, 0x45, 0x86, 0x95, 0xd3, 0x3e, 0xb9, 0x5c, 0x65, 0x18, 0xf4, 0x75, 0x86, 0xeb, 0x02, 0xe3, - 0x2b, 0xd3, 0x01, 0x91, 0x43, 0x01, 0x87, 0xca, 0x86, 0xdc, 0xdc, 0x15, 0xd0, 0x59, 0x0e, 0x05, - 0xf7, 0xa0, 0x40, 0x40, 0x41, 0x0e, 0x51, 0x0e, 0x29, 0x05, 0xd4, 0xcb, 0x21, 0x7a, 0x0f, 0xa2, - 0x02, 0xe2, 0x3f, 0xed, 0xed, 0xaf, 0xd7, 0x58, 0xfa, 0x75, 0x8d, 0x25, 0xf3, 0x7b, 0x19, 0x55, - 0xe0, 0x94, 0xea, 0x3b, 0xb4, 0x0d, 0x81, 0x9e, 0x87, 0x3e, 0x1c, 0xb3, 0xde, 0x3a, 0xb4, 0xfe, - 0x16, 0xaa, 0x95, 0x87, 0xd2, 0x31, 0x6f, 0x32, 0x2c, 0x2d, 0x32, 0xbc, 0x49, 0x69, 0x95, 0xe1, - 0x52, 0xe8, 0xaf, 0x33, 0x5c, 0x13, 0x1f, 0x0c, 0x7d, 0xd3, 0xa9, 0x42, 0xcb, 0xae, 0xaf, 0x3a, - 0xa8, 0x92, 0x32, 0x97, 0x11, 0x48, 0xe4, 0xbf, 0xd6, 0xb3, 0x07, 0x5a, 0x5b, 0x7d, 0x6e, 0x14, - 0x21, 0x03, 0x53, 0x84, 0x0c, 0x4b, 0xd3, 0x11, 0xb2, 0xfa, 0x06, 0x29, 0x69, 0x4c, 0x3c, 0xc8, - 0xab, 0xde, 0x7a, 0x9e, 0xb7, 0x2c, 0xae, 0xf5, 0x4f, 0xdb, 0x33, 0x7e, 0xad, 0xfd, 0x98, 0x78, - 0x9d, 0x03, 0xbe, 0x67, 0x9e, 0x0d, 0x07, 0x8b, 0x6c, 0xf8, 0xca, 0x74, 0x40, 0x54, 0x0f, 0x11, - 0xf2, 0x12, 0xe2, 0x32, 0xe2, 0x9f, 0xbb, 0x0c, 0x62, 0x2d, 0x3b, 0xb5, 0x5c, 0x39, 0x61, 0xe6, - 0x47, 0x19, 0x55, 0x60, 0x83, 0xaa, 0x89, 0xaa, 0xe1, 0x78, 0xe2, 0x46, 0xa1, 0xdf, 0x90, 0xb4, - 0xc7, 0xb3, 0xb9, 0xf1, 0x08, 0xb6, 0x0f, 0xc5, 0xae, 0x28, 0xa8, 0x4f, 0x90, 0x42, 0x63, 0x32, - 0x6e, 0xc8, 0xda, 0xee, 0x6c, 0x6e, 0xd4, 0xc0, 0xd0, 0x8b, 0xc9, 0x58, 0x3d, 0x40, 0x5b, 0xae, - 0xc7, 0xc2, 0x09, 0x69, 0x94, 0xb4, 0xff, 0x67, 0x73, 
0xa3, 0x0e, 0xa5, 0x13, 0x90, 0x78, 0xd1, - 0x8b, 0x68, 0x4a, 0xfc, 0x46, 0xf9, 0x4e, 0xf1, 0x25, 0x48, 0x9a, 0xf2, 0xe9, 0x9b, 0x2e, 0xdd, - 0xb9, 0xc1, 0x2f, 0x25, 0xb4, 0x03, 0xf5, 0x57, 0x61, 0xc4, 0x48, 0x92, 0xfe, 0x6b, 0x8f, 0x95, - 0x9f, 0x47, 0x3c, 0x9d, 0x4a, 0x71, 0x9e, 0x87, 0xde, 0x45, 0x5b, 0xe1, 0xb9, 0x74, 0x5e, 0xdf, - 0x2c, 0x74, 0xf9, 0x76, 0xa1, 0xcb, 0x3f, 0x17, 0xba, 0xfc, 0x79, 0xa9, 0x4b, 0xb7, 0x4b, 0x5d, - 0xfa, 0xb1, 0xd4, 0xa5, 0xb7, 0x56, 0x10, 0xb2, 0x8b, 0xab, 0x81, 0xe5, 0xd1, 0x91, 0x4d, 0x27, - 0x89, 0x17, 0x0d, 0x6d, 0x31, 0x11, 0x3e, 0x6c, 0x26, 0x0c, 0x9b, 0xc6, 0x24, 0xdd, 0x4c, 0x85, - 0xc1, 0x16, 0x0c, 0x84, 0xe3, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7a, 0x86, 0x03, 0x4f, 0x86, - 0x04, 0x00, 0x00, -} - -func (m *OrderID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OrderID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OrderID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.OSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintOrder(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Order) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Order) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Order) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CreatedAt != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOrder(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.State != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.OrderID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOrder(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *OrderFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OrderFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OrderFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintOrder(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if 
m.DSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintOrder(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintOrder(dAtA []byte, offset int, v uint64) int { - offset -= sovOrder(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *OrderID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovOrder(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovOrder(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovOrder(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovOrder(uint64(m.OSeq)) - } - return n -} - -func (m *Order) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.OrderID.Size() - n += 1 + l + sovOrder(uint64(l)) - if m.State != 0 { - n += 1 + sovOrder(uint64(m.State)) - } - l = m.Spec.Size() - n += 1 + l + sovOrder(uint64(l)) - if m.CreatedAt != 0 { - n += 1 + sovOrder(uint64(m.CreatedAt)) - } - return n -} - -func (m *OrderFilters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovOrder(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovOrder(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovOrder(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovOrder(uint64(m.OSeq)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovOrder(uint64(l)) - } - return n -} - -func sovOrder(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozOrder(x uint64) (n int) { - return sovOrder(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *OrderID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OrderID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OrderID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOrder(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOrder - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Order) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Order: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Order: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OrderID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.OrderID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Order_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift 
>= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOrder(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOrder - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OrderFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OrderFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OrderFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOrder(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOrder - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipOrder(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOrder - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOrder - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOrder - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthOrder - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupOrder - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthOrder - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthOrder = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowOrder = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupOrder = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta1/params.go b/go/node/market/v1beta1/params.go deleted file mode 100644 index 996b951c..00000000 --- a/go/node/market/v1beta1/params.go +++ /dev/null @@ -1,76 +0,0 @@ -package v1beta1 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" - "github.com/pkg/errors" -) - -var _ paramtypes.ParamSet = (*Params)(nil) - -var ( - DefaultBidMinDeposit = sdk.NewCoin("uakt", sdk.NewInt(50000000)) - defaultOrderMaxBids uint32 = 20 - maxOrderMaxBids uint32 = 500 -) - -const ( - keyBidMinDeposit = "BidMinDeposit" - keyOrderMaxBids = "OrderMaxBids" -) - -func ParamKeyTable() paramtypes.KeyTable { - return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) -} - -func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { - return paramtypes.ParamSetPairs{ - paramtypes.NewParamSetPair([]byte(keyBidMinDeposit), &p.BidMinDeposit, validateCoin), - paramtypes.NewParamSetPair([]byte(keyOrderMaxBids), &p.OrderMaxBids, validateOrderMaxBids), - } -} - -func DefaultParams() Params { - return Params{ - BidMinDeposit: DefaultBidMinDeposit, - OrderMaxBids: defaultOrderMaxBids, - } -} - -func (p Params) Validate() error { - if err := validateCoin(p.BidMinDeposit); err != nil { - return err - } - - if err := validateOrderMaxBids(p.OrderMaxBids); err != nil { - return err - } - return nil -} - -func validateCoin(i interface{}) error { - _, ok := i.(sdk.Coin) - if !ok { - return errors.Wrapf(ErrInvalidParam, "invalid type %T", i) - } - - return 
nil -} - -func validateOrderMaxBids(i interface{}) error { - val, ok := i.(uint32) - - if !ok { - return errors.Wrapf(ErrInvalidParam, "invalid type %T", i) - } - - if val == 0 { - return errors.Wrap(ErrInvalidParam, "order max bids too low") - } - - if val > maxOrderMaxBids { - return errors.Wrap(ErrInvalidParam, "order max bids too high") - } - - return nil -} diff --git a/go/node/market/v1beta1/params.pb.go b/go/node/market/v1beta1/params.pb.go deleted file mode 100644 index b332f0bf..00000000 --- a/go/node/market/v1beta1/params.pb.go +++ /dev/null @@ -1,364 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta1/params.proto - -package v1beta1 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Params is the params for the x/market module -type Params struct { - BidMinDeposit types.Coin `protobuf:"bytes,1,opt,name=bid_min_deposit,json=bidMinDeposit,proto3" json:"bid_min_deposit" yaml:"bid_min_deposit"` - OrderMaxBids uint32 `protobuf:"varint,2,opt,name=order_max_bids,json=orderMaxBids,proto3" json:"order_max_bids" yaml:"order_max_bids"` -} - -func (m *Params) Reset() { *m = Params{} } -func (m *Params) String() string { return proto.CompactTextString(m) } -func (*Params) ProtoMessage() {} -func (*Params) Descriptor() ([]byte, []int) { - return fileDescriptor_7d76da213caa5dbb, []int{0} -} -func (m *Params) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Params.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Params) XXX_Merge(src proto.Message) { - xxx_messageInfo_Params.Merge(m, src) -} -func (m *Params) XXX_Size() int { - return m.Size() -} -func (m *Params) XXX_DiscardUnknown() { - xxx_messageInfo_Params.DiscardUnknown(m) -} - -var xxx_messageInfo_Params proto.InternalMessageInfo - -func (m *Params) GetBidMinDeposit() types.Coin { - if m != nil { - return m.BidMinDeposit - } - return types.Coin{} -} - -func (m *Params) GetOrderMaxBids() uint32 { - if m != nil { - return m.OrderMaxBids - } - return 0 -} - -func init() { - proto.RegisterType((*Params)(nil), "akash.market.v1beta1.Params") -} - -func init() { proto.RegisterFile("akash/market/v1beta1/params.proto", fileDescriptor_7d76da213caa5dbb) } - -var fileDescriptor_7d76da213caa5dbb = []byte{ - // 313 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0x31, 0x4f, 0x02, 0x31, - 0x14, 0xc7, 0xaf, 0x0e, 0x0c, 0x27, 0x68, 0x42, 0xd0, 0x20, 0x43, 0x8b, 0x37, 0x31, 0xb5, 0x41, - 0x37, 0xdd, 0x4e, 0x07, 0x17, 0xa2, 0x61, 0x74, 0xb9, 0xb4, 0x77, 0x17, 0x68, 0xa0, 0xf7, 0x2e, - 0xd7, 0x4a, 0xe0, 0x03, 0xb8, 0xfb, 0xb1, 0x18, 0x19, 0x9d, 0x1a, 0x73, 0x6c, 0x8c, 0xf8, 0x05, - 0x0c, 
0x57, 0x31, 0xc8, 0xd6, 0xf4, 0xff, 0xfb, 0xff, 0x5e, 0xf2, 0x9e, 0x7f, 0xcd, 0x27, 0x5c, - 0x8f, 0x99, 0xe2, 0xc5, 0x24, 0x35, 0x6c, 0xd6, 0x17, 0xa9, 0xe1, 0x7d, 0x96, 0xf3, 0x82, 0x2b, - 0x4d, 0xf3, 0x02, 0x0c, 0x34, 0x5b, 0x15, 0x42, 0x1d, 0x42, 0x7f, 0x91, 0x4e, 0x6b, 0x04, 0x23, - 0xa8, 0x00, 0xb6, 0x7b, 0x39, 0xb6, 0x83, 0x63, 0xd0, 0x0a, 0x34, 0x13, 0x5c, 0xa7, 0x7f, 0xb6, - 0x18, 0x64, 0xe6, 0xf2, 0xe0, 0x1b, 0xf9, 0xb5, 0x97, 0x4a, 0xde, 0x7c, 0x47, 0xfe, 0xb9, 0x90, - 0x49, 0xa4, 0x64, 0x16, 0x25, 0x69, 0x0e, 0x5a, 0x9a, 0x36, 0xea, 0xa2, 0xde, 0xe9, 0xcd, 0x15, - 0x75, 0x16, 0xba, 0xb3, 0xec, 0x07, 0xd2, 0x07, 0x90, 0x59, 0x18, 0x2e, 0x2d, 0xf1, 0x4a, 0x4b, - 0x1a, 0xa1, 0x4c, 0x06, 0x32, 0x7b, 0x74, 0xbd, 0x8d, 0x25, 0xc7, 0xaa, 0xad, 0x25, 0x97, 0x0b, - 0xae, 0xa6, 0x77, 0xc1, 0x51, 0x10, 0x0c, 0x1b, 0xe2, 0xb0, 0xdb, 0xe4, 0xfe, 0x19, 0x14, 0x49, - 0x5a, 0x44, 0x8a, 0xcf, 0x23, 0x21, 0x13, 0xdd, 0x3e, 0xe9, 0xa2, 0x5e, 0x23, 0xbc, 0x2f, 0x2d, - 0xa9, 0x3f, 0xef, 0x92, 0x01, 0x9f, 0x87, 0x32, 0xd1, 0x1b, 0x4b, 0x8e, 0xc8, 0xad, 0x25, 0x17, - 0x6e, 0xc8, 0xff, 0xff, 0x60, 0x58, 0x87, 0x83, 0x62, 0xf8, 0xb4, 0x2c, 0x31, 0x5a, 0x95, 0x18, - 0x7d, 0x95, 0x18, 0x7d, 0xac, 0xb1, 0xb7, 0x5a, 0x63, 0xef, 0x73, 0x8d, 0xbd, 0x57, 0x3a, 0x92, - 0x66, 0xfc, 0x26, 0x68, 0x0c, 0x8a, 0xc1, 0xac, 0x88, 0xa7, 0x13, 0xe6, 0x0e, 0x32, 0xdf, 0x9f, - 0xc4, 0x2c, 0xf2, 0x54, 0xef, 0x57, 0x29, 0x6a, 0xd5, 0x1a, 0x6f, 0x7f, 0x02, 0x00, 0x00, 0xff, - 0xff, 0x4a, 0x69, 0x9c, 0x2e, 0xb7, 0x01, 0x00, 0x00, -} - -func (m *Params) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Params) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.OrderMaxBids != 0 { - i = encodeVarintParams(dAtA, i, uint64(m.OrderMaxBids)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.BidMinDeposit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintParams(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintParams(dAtA []byte, offset int, v uint64) int { - offset -= sovParams(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Params) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BidMinDeposit.Size() - n += 1 + l + sovParams(uint64(l)) - if m.OrderMaxBids != 0 { - n += 1 + sovParams(uint64(m.OrderMaxBids)) - } - return n -} - -func sovParams(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozParams(x uint64) (n int) { - return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Params) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Params: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
Params: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BidMinDeposit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthParams - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthParams - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BidMinDeposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OrderMaxBids", wireType) - } - m.OrderMaxBids = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OrderMaxBids |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipParams(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipParams(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthParams - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupParams - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthParams - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta1/query.pb.go b/go/node/market/v1beta1/query.pb.go deleted file mode 100644 index 356a275c..00000000 --- a/go/node/market/v1beta1/query.pb.go +++ /dev/null @@ -1,3033 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
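The params.go deleted above carried the hand-written defaults and validation for the x/market v1beta1 params: BidMinDeposit defaulting to 50000000uakt, OrderMaxBids defaulting to 20 with a cap of 500. A minimal sketch of how that API was exercised, assuming the same go/node/market/v1beta1 import path as the other deleted types and building only against releases that still include it:

package main

import (
	"fmt"

	v1beta1 "github.com/akash-network/akash-api/go/node/market/v1beta1"
)

func main() {
	// DefaultParams returned BidMinDeposit=50000000uakt and OrderMaxBids=20.
	p := v1beta1.DefaultParams()
	if err := p.Validate(); err != nil {
		panic(err)
	}

	// validateOrderMaxBids rejected zero and anything above 500.
	p.OrderMaxBids = 501
	fmt.Println(p.Validate()) // non-nil: "order max bids too high" wrapped around ErrInvalidParam
}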
-// source: akash/market/v1beta1/query.proto - -package v1beta1 - -import ( - context "context" - fmt "fmt" - v1beta1 "github.com/akash-network/akash-api/go/node/escrow/v1beta1" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryOrdersRequest is request type for the Query/Orders RPC method -type QueryOrdersRequest struct { - Filters OrderFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryOrdersRequest) Reset() { *m = QueryOrdersRequest{} } -func (m *QueryOrdersRequest) String() string { return proto.CompactTextString(m) } -func (*QueryOrdersRequest) ProtoMessage() {} -func (*QueryOrdersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_50f1db5c661b7517, []int{0} -} -func (m *QueryOrdersRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOrdersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOrdersRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOrdersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOrdersRequest.Merge(m, src) -} -func (m *QueryOrdersRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryOrdersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOrdersRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOrdersRequest proto.InternalMessageInfo - -func (m *QueryOrdersRequest) GetFilters() OrderFilters { - if m != nil { - return m.Filters - } - return OrderFilters{} -} - -func (m *QueryOrdersRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryOrdersResponse is response type for the Query/Orders RPC method -type QueryOrdersResponse struct { - Orders Orders `protobuf:"bytes,1,rep,name=orders,proto3,castrepeated=Orders" json:"orders"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryOrdersResponse) Reset() { *m = QueryOrdersResponse{} } -func (m *QueryOrdersResponse) String() string { return proto.CompactTextString(m) } -func (*QueryOrdersResponse) ProtoMessage() {} -func (*QueryOrdersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_50f1db5c661b7517, []int{1} -} -func (m *QueryOrdersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOrdersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return 
xxx_messageInfo_QueryOrdersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOrdersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOrdersResponse.Merge(m, src) -} -func (m *QueryOrdersResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryOrdersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOrdersResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOrdersResponse proto.InternalMessageInfo - -func (m *QueryOrdersResponse) GetOrders() Orders { - if m != nil { - return m.Orders - } - return nil -} - -func (m *QueryOrdersResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryOrderRequest is request type for the Query/Order RPC method -type QueryOrderRequest struct { - ID OrderID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m *QueryOrderRequest) Reset() { *m = QueryOrderRequest{} } -func (m *QueryOrderRequest) String() string { return proto.CompactTextString(m) } -func (*QueryOrderRequest) ProtoMessage() {} -func (*QueryOrderRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_50f1db5c661b7517, []int{2} -} -func (m *QueryOrderRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOrderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOrderRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOrderRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOrderRequest.Merge(m, src) -} -func (m *QueryOrderRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryOrderRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOrderRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOrderRequest proto.InternalMessageInfo - -func (m *QueryOrderRequest) GetID() OrderID { - if m != nil { - return m.ID - } - return OrderID{} -} - -// QueryOrderResponse is response type for the Query/Order RPC method -type QueryOrderResponse struct { - Order Order `protobuf:"bytes,1,opt,name=order,proto3" json:"order"` -} - -func (m *QueryOrderResponse) Reset() { *m = QueryOrderResponse{} } -func (m *QueryOrderResponse) String() string { return proto.CompactTextString(m) } -func (*QueryOrderResponse) ProtoMessage() {} -func (*QueryOrderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_50f1db5c661b7517, []int{3} -} -func (m *QueryOrderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOrderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOrderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOrderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOrderResponse.Merge(m, src) -} -func (m *QueryOrderResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryOrderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOrderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOrderResponse proto.InternalMessageInfo - -func (m *QueryOrderResponse) GetOrder() Order { - if m != nil { - return m.Order - } - return Order{} -} - -// QueryBidsRequest is request type 
for the Query/Bids RPC method -type QueryBidsRequest struct { - Filters BidFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryBidsRequest) Reset() { *m = QueryBidsRequest{} } -func (m *QueryBidsRequest) String() string { return proto.CompactTextString(m) } -func (*QueryBidsRequest) ProtoMessage() {} -func (*QueryBidsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_50f1db5c661b7517, []int{4} -} -func (m *QueryBidsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryBidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryBidsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryBidsRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBidsRequest.Merge(m, src) -} -func (m *QueryBidsRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryBidsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBidsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryBidsRequest proto.InternalMessageInfo - -func (m *QueryBidsRequest) GetFilters() BidFilters { - if m != nil { - return m.Filters - } - return BidFilters{} -} - -func (m *QueryBidsRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryBidsResponse is response type for the Query/Bids RPC method -type QueryBidsResponse struct { - Bids []QueryBidResponse `protobuf:"bytes,1,rep,name=bids,proto3" json:"bids"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryBidsResponse) Reset() { *m = QueryBidsResponse{} } -func (m *QueryBidsResponse) String() string { return proto.CompactTextString(m) } -func (*QueryBidsResponse) ProtoMessage() {} -func (*QueryBidsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_50f1db5c661b7517, []int{5} -} -func (m *QueryBidsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryBidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryBidsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryBidsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBidsResponse.Merge(m, src) -} -func (m *QueryBidsResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryBidsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBidsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryBidsResponse proto.InternalMessageInfo - -func (m *QueryBidsResponse) GetBids() []QueryBidResponse { - if m != nil { - return m.Bids - } - return nil -} - -func (m *QueryBidsResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryBidRequest is request type for the Query/Bid RPC method -type QueryBidRequest struct { - ID BidID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m *QueryBidRequest) Reset() { *m = QueryBidRequest{} } -func (m *QueryBidRequest) String() string { return proto.CompactTextString(m) } -func (*QueryBidRequest) ProtoMessage() {} -func (*QueryBidRequest) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_50f1db5c661b7517, []int{6} -} -func (m *QueryBidRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryBidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryBidRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryBidRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBidRequest.Merge(m, src) -} -func (m *QueryBidRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryBidRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBidRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryBidRequest proto.InternalMessageInfo - -func (m *QueryBidRequest) GetID() BidID { - if m != nil { - return m.ID - } - return BidID{} -} - -// QueryBidResponse is response type for the Query/Bid RPC method -type QueryBidResponse struct { - Bid Bid `protobuf:"bytes,1,opt,name=bid,proto3" json:"bid"` - EscrowAccount v1beta1.Account `protobuf:"bytes,2,opt,name=escrow_account,json=escrowAccount,proto3" json:"escrow_account"` -} - -func (m *QueryBidResponse) Reset() { *m = QueryBidResponse{} } -func (m *QueryBidResponse) String() string { return proto.CompactTextString(m) } -func (*QueryBidResponse) ProtoMessage() {} -func (*QueryBidResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_50f1db5c661b7517, []int{7} -} -func (m *QueryBidResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryBidResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryBidResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBidResponse.Merge(m, src) -} -func (m *QueryBidResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryBidResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBidResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryBidResponse proto.InternalMessageInfo - -func (m *QueryBidResponse) GetBid() Bid { - if m != nil { - return m.Bid - } - return Bid{} -} - -func (m *QueryBidResponse) GetEscrowAccount() v1beta1.Account { - if m != nil { - return m.EscrowAccount - } - return v1beta1.Account{} -} - -// QueryLeasesRequest is request type for the Query/Leases RPC method -type QueryLeasesRequest struct { - Filters LeaseFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryLeasesRequest) Reset() { *m = QueryLeasesRequest{} } -func (m *QueryLeasesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryLeasesRequest) ProtoMessage() {} -func (*QueryLeasesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_50f1db5c661b7517, []int{8} -} -func (m *QueryLeasesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLeasesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLeasesRequest) XXX_Merge(src 
proto.Message) { - xxx_messageInfo_QueryLeasesRequest.Merge(m, src) -} -func (m *QueryLeasesRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryLeasesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLeasesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLeasesRequest proto.InternalMessageInfo - -func (m *QueryLeasesRequest) GetFilters() LeaseFilters { - if m != nil { - return m.Filters - } - return LeaseFilters{} -} - -func (m *QueryLeasesRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryLeasesResponse is response type for the Query/Leases RPC method -type QueryLeasesResponse struct { - Leases []QueryLeaseResponse `protobuf:"bytes,1,rep,name=leases,proto3" json:"leases"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryLeasesResponse) Reset() { *m = QueryLeasesResponse{} } -func (m *QueryLeasesResponse) String() string { return proto.CompactTextString(m) } -func (*QueryLeasesResponse) ProtoMessage() {} -func (*QueryLeasesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_50f1db5c661b7517, []int{9} -} -func (m *QueryLeasesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLeasesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLeasesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLeasesResponse.Merge(m, src) -} -func (m *QueryLeasesResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryLeasesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLeasesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLeasesResponse proto.InternalMessageInfo - -func (m *QueryLeasesResponse) GetLeases() []QueryLeaseResponse { - if m != nil { - return m.Leases - } - return nil -} - -func (m *QueryLeasesResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryLeaseRequest is request type for the Query/Lease RPC method -type QueryLeaseRequest struct { - ID LeaseID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m *QueryLeaseRequest) Reset() { *m = QueryLeaseRequest{} } -func (m *QueryLeaseRequest) String() string { return proto.CompactTextString(m) } -func (*QueryLeaseRequest) ProtoMessage() {} -func (*QueryLeaseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_50f1db5c661b7517, []int{10} -} -func (m *QueryLeaseRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLeaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLeaseRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLeaseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLeaseRequest.Merge(m, src) -} -func (m *QueryLeaseRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryLeaseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLeaseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLeaseRequest proto.InternalMessageInfo - -func (m *QueryLeaseRequest) GetID() LeaseID { - if m != nil { - return m.ID - } - 
return LeaseID{} -} - -// QueryLeaseResponse is response type for the Query/Lease RPC method -type QueryLeaseResponse struct { - Lease Lease `protobuf:"bytes,1,opt,name=lease,proto3" json:"lease"` - EscrowPayment v1beta1.Payment `protobuf:"bytes,2,opt,name=escrow_payment,json=escrowPayment,proto3" json:"escrow_payment"` -} - -func (m *QueryLeaseResponse) Reset() { *m = QueryLeaseResponse{} } -func (m *QueryLeaseResponse) String() string { return proto.CompactTextString(m) } -func (*QueryLeaseResponse) ProtoMessage() {} -func (*QueryLeaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_50f1db5c661b7517, []int{11} -} -func (m *QueryLeaseResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLeaseResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLeaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLeaseResponse.Merge(m, src) -} -func (m *QueryLeaseResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryLeaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLeaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLeaseResponse proto.InternalMessageInfo - -func (m *QueryLeaseResponse) GetLease() Lease { - if m != nil { - return m.Lease - } - return Lease{} -} - -func (m *QueryLeaseResponse) GetEscrowPayment() v1beta1.Payment { - if m != nil { - return m.EscrowPayment - } - return v1beta1.Payment{} -} - -func init() { - proto.RegisterType((*QueryOrdersRequest)(nil), "akash.market.v1beta1.QueryOrdersRequest") - proto.RegisterType((*QueryOrdersResponse)(nil), "akash.market.v1beta1.QueryOrdersResponse") - proto.RegisterType((*QueryOrderRequest)(nil), "akash.market.v1beta1.QueryOrderRequest") - proto.RegisterType((*QueryOrderResponse)(nil), "akash.market.v1beta1.QueryOrderResponse") - proto.RegisterType((*QueryBidsRequest)(nil), "akash.market.v1beta1.QueryBidsRequest") - proto.RegisterType((*QueryBidsResponse)(nil), "akash.market.v1beta1.QueryBidsResponse") - proto.RegisterType((*QueryBidRequest)(nil), "akash.market.v1beta1.QueryBidRequest") - proto.RegisterType((*QueryBidResponse)(nil), "akash.market.v1beta1.QueryBidResponse") - proto.RegisterType((*QueryLeasesRequest)(nil), "akash.market.v1beta1.QueryLeasesRequest") - proto.RegisterType((*QueryLeasesResponse)(nil), "akash.market.v1beta1.QueryLeasesResponse") - proto.RegisterType((*QueryLeaseRequest)(nil), "akash.market.v1beta1.QueryLeaseRequest") - proto.RegisterType((*QueryLeaseResponse)(nil), "akash.market.v1beta1.QueryLeaseResponse") -} - -func init() { proto.RegisterFile("akash/market/v1beta1/query.proto", fileDescriptor_50f1db5c661b7517) } - -var fileDescriptor_50f1db5c661b7517 = []byte{ - // 779 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4d, 0x6f, 0xd3, 0x4a, - 0x14, 0x8d, 0xd3, 0x36, 0x4f, 0x9a, 0xea, 0xf5, 0xbd, 0x37, 0xaf, 0x8b, 0x92, 0x82, 0xd3, 0x06, - 0xd1, 0xa4, 0x2c, 0x6c, 0xb5, 0x2c, 0x2a, 0x76, 0xc5, 0x54, 0x85, 0x56, 0x7c, 0x94, 0x2c, 0xd9, - 0xa0, 0x71, 0x3c, 0x75, 0x47, 0x4d, 0x3c, 0xa9, 0xc7, 0x29, 0x74, 0x81, 0x84, 0x90, 0x90, 0x58, - 0x82, 0x58, 0x21, 0x21, 0x84, 0x84, 0xc4, 0x82, 0x0d, 0x7f, 0xa3, 0xcb, 0x4a, 0x6c, 0x58, 0x15, - 0x94, 0xf2, 0x43, 0x90, 0xe7, 0xc3, 0xb1, 0x2b, 0xc7, 0x76, 0xa5, 0x76, 0x17, 0xc5, 0xe7, 
0xde, - 0x39, 0xf7, 0x9e, 0x33, 0xc7, 0x06, 0x73, 0x68, 0x17, 0xb1, 0x1d, 0xb3, 0x8b, 0xfc, 0x5d, 0x1c, - 0x98, 0xfb, 0x4b, 0x36, 0x0e, 0xd0, 0x92, 0xb9, 0xd7, 0xc7, 0xfe, 0x81, 0xd1, 0xf3, 0x69, 0x40, - 0xe1, 0x34, 0x47, 0x18, 0x02, 0x61, 0x48, 0x44, 0x75, 0xda, 0xa5, 0x2e, 0xe5, 0x00, 0x33, 0xfc, - 0x25, 0xb0, 0xd5, 0xcb, 0x2e, 0xa5, 0x6e, 0x07, 0x9b, 0xa8, 0x47, 0x4c, 0xe4, 0x79, 0x34, 0x40, - 0x01, 0xa1, 0x1e, 0x93, 0x4f, 0xaf, 0xb7, 0x29, 0xeb, 0x52, 0x66, 0xda, 0x88, 0x61, 0x71, 0x44, - 0x74, 0x60, 0x0f, 0xb9, 0xc4, 0xe3, 0x60, 0x89, 0x4d, 0xe7, 0x45, 0x7d, 0x07, 0xfb, 0x12, 0xa1, - 0xa7, 0x22, 0x6c, 0xe2, 0x64, 0x76, 0xe8, 0x60, 0xc4, 0x70, 0x12, 0x81, 0x59, 0xdb, 0xa7, 0x4f, - 0x23, 0x44, 0x70, 0xd0, 0xc3, 0x92, 0x71, 0xfd, 0x93, 0x06, 0xe0, 0xa3, 0x90, 0xe8, 0xc3, 0xf0, - 0x60, 0xd6, 0xc2, 0x7b, 0x7d, 0xcc, 0x02, 0x68, 0x81, 0xbf, 0xb6, 0x49, 0x27, 0xc0, 0x3e, 0x9b, - 0xd1, 0xe6, 0xb4, 0xe6, 0xe4, 0x72, 0xdd, 0x48, 0x5b, 0x92, 0xc1, 0xab, 0xd6, 0x05, 0xd2, 0x1a, - 0x3f, 0x3c, 0xae, 0x95, 0x5a, 0xaa, 0x10, 0xae, 0x03, 0x30, 0x1c, 0x7a, 0xa6, 0xcc, 0xdb, 0x2c, - 0x18, 0x62, 0x43, 0x46, 0xb8, 0x21, 0x43, 0x88, 0xa0, 0x7a, 0x6d, 0x21, 0x17, 0xcb, 0xf3, 0x5b, - 0xb1, 0xca, 0xfa, 0x67, 0x0d, 0xfc, 0x9f, 0xa0, 0xc8, 0x7a, 0xd4, 0x63, 0x18, 0xde, 0x06, 0x15, - 0xbe, 0xad, 0x90, 0xe2, 0x58, 0x73, 0x72, 0x79, 0x36, 0x83, 0xa2, 0x35, 0x15, 0x72, 0xfb, 0xfa, - 0xb3, 0x56, 0x91, 0x4d, 0x64, 0x29, 0xbc, 0x93, 0x42, 0xb2, 0x91, 0x4b, 0x52, 0x30, 0x48, 0xb0, - 0x7c, 0x00, 0xfe, 0x1b, 0x92, 0x54, 0x6b, 0xbc, 0x09, 0xca, 0xc4, 0x91, 0x1b, 0xbc, 0x92, 0x41, - 0x6f, 0x63, 0xcd, 0x02, 0x21, 0xc1, 0xc1, 0x71, 0xad, 0xbc, 0xb1, 0xd6, 0x2a, 0x13, 0xa7, 0x7e, - 0x3f, 0xae, 0x4b, 0x34, 0xf3, 0x0a, 0x98, 0xe0, 0xc4, 0x65, 0xcf, 0xcc, 0x91, 0x85, 0x1c, 0x02, - 0x5f, 0xff, 0xa0, 0x81, 0x7f, 0x79, 0x3f, 0x8b, 0x38, 0x91, 0xca, 0xab, 0xa7, 0x55, 0x9e, 0x4b, - 0xef, 0x67, 0x11, 0xe7, 0x82, 0x35, 0xfe, 0xa8, 0xc9, 0xf5, 0x09, 0x7a, 0x72, 0xda, 0x55, 0x30, - 0x6e, 0x13, 0x47, 0xe9, 0xbb, 0x90, 0x4e, 0x4e, 0x95, 0xa9, 0x2a, 0x49, 0x91, 0x57, 0x9e, 0x9f, - 0xbc, 0x9b, 0xe0, 0x9f, 0xe1, 0x41, 0x62, 0x7b, 0x2b, 0x31, 0x71, 0x67, 0x47, 0x2e, 0x2e, 0x45, - 0xda, 0xb7, 0x31, 0x2d, 0xa2, 0x59, 0x97, 0xc0, 0x98, 0x1d, 0xb5, 0xbb, 0x34, 0xb2, 0x9d, 0x9c, - 0x2e, 0xc4, 0xc2, 0x4d, 0x30, 0x25, 0x6e, 0xf6, 0x13, 0xd4, 0x6e, 0xd3, 0xbe, 0x17, 0xc8, 0x01, - 0x95, 0xd3, 0xc4, 0xc3, 0xa8, 0xfa, 0x96, 0x00, 0xc9, 0x0e, 0x7f, 0x8b, 0xa7, 0xf2, 0xcf, 0x61, - 0x0e, 0xdc, 0x0b, 0xe3, 0xe3, 0xcc, 0x39, 0xc0, 0xab, 0x2e, 0xd8, 0x23, 0x5f, 0x54, 0x0e, 0x28, - 0x8a, 0x72, 0x73, 0xeb, 0xa0, 0xc2, 0x33, 0x4f, 0xf9, 0xa4, 0x99, 0xe1, 0x13, 0x5e, 0x7a, 0xca, - 0x29, 0xb2, 0xfa, 0xfc, 0xa3, 0x40, 0x1e, 0x56, 0x38, 0x0a, 0x38, 0x3e, 0xc5, 0x2f, 0xef, 0x13, - 0xda, 0xc4, 0xb3, 0x80, 0x33, 0xcf, 0xb6, 0x20, 0xaf, 0x51, 0x59, 0xc0, 0xf1, 0x31, 0xdf, 0xf4, - 0xd0, 0x41, 0x17, 0xe7, 0xf9, 0x66, 0x4b, 0x80, 0x92, 0xbe, 0x91, 0x7f, 0x2e, 0x7f, 0xab, 0x80, - 0x09, 0xce, 0x0d, 0xbe, 0xd6, 0x80, 0x0c, 0x57, 0x98, 0xa5, 0x40, 0xe2, 0x3d, 0x53, 0x5d, 0x2c, - 0x80, 0x14, 0xe3, 0xd6, 0x17, 0x5f, 0x7e, 0xff, 0xfd, 0xae, 0x7c, 0x15, 0xce, 0x9b, 0xa3, 0x5f, - 0x9c, 0xcc, 0xec, 0x10, 0x16, 0xc0, 0x57, 0x1a, 0x98, 0xe0, 0xd5, 0xb0, 0x91, 0xd7, 0x5f, 0x11, - 0x69, 0xe6, 0x03, 0xcf, 0xc4, 0x83, 0x78, 0xdb, 0x14, 0xbe, 0xd0, 0xc0, 0x78, 0x18, 0x68, 0x30, - 0x27, 0xba, 0xa2, 0x75, 0x34, 0x72, 0x71, 0x92, 0x44, 0x83, 0x93, 0x98, 0x87, 0x35, 0x73, 0xd4, - 0x37, 0x82, 0x5c, 0xc5, 0x73, 0x30, 0x66, 0x11, 0x07, 0x5e, 0xcb, 0xcb, 0x4e, 0x71, 0x7e, 0xc1, - 0x88, 0x2d, 0x74, 
0x3c, 0xdf, 0x40, 0x68, 0x0a, 0x71, 0x5d, 0x61, 0xee, 0xb5, 0x2c, 0x64, 0x8a, - 0xe4, 0xdd, 0xcf, 0x13, 0x43, 0xdc, 0xec, 0xa1, 0x29, 0x78, 0x75, 0xa6, 0x29, 0xe2, 0x77, 0xb6, - 0x5a, 0x38, 0x49, 0x0a, 0xf2, 0x08, 0x57, 0x62, 0xdd, 0x3d, 0x1c, 0xe8, 0xda, 0xd1, 0x40, 0xd7, - 0x7e, 0x0d, 0x74, 0xed, 0xcd, 0x89, 0x5e, 0x3a, 0x3a, 0xd1, 0x4b, 0x3f, 0x4e, 0xf4, 0xd2, 0x63, - 0xc3, 0x25, 0xc1, 0x4e, 0xdf, 0x36, 0xda, 0xb4, 0x6b, 0xd2, 0x7d, 0xbf, 0xdd, 0xd9, 0x95, 0xdd, - 0x9e, 0xa9, 0x7e, 0xfc, 0xcb, 0x4d, 0x75, 0xb5, 0x2b, 0xfc, 0x13, 0xee, 0xc6, 0x9f, 0x00, 0x00, - 0x00, 0xff, 0xff, 0xf5, 0xbf, 0x6e, 0xf5, 0xe2, 0x0a, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // Orders queries orders with filters - Orders(ctx context.Context, in *QueryOrdersRequest, opts ...grpc.CallOption) (*QueryOrdersResponse, error) - // Order queries order details - Order(ctx context.Context, in *QueryOrderRequest, opts ...grpc.CallOption) (*QueryOrderResponse, error) - // Bids queries bids with filters - Bids(ctx context.Context, in *QueryBidsRequest, opts ...grpc.CallOption) (*QueryBidsResponse, error) - // Bid queries bid details - Bid(ctx context.Context, in *QueryBidRequest, opts ...grpc.CallOption) (*QueryBidResponse, error) - // Leases queries leases with filters - Leases(ctx context.Context, in *QueryLeasesRequest, opts ...grpc.CallOption) (*QueryLeasesResponse, error) - // Lease queries lease details - Lease(ctx context.Context, in *QueryLeaseRequest, opts ...grpc.CallOption) (*QueryLeaseResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Orders(ctx context.Context, in *QueryOrdersRequest, opts ...grpc.CallOption) (*QueryOrdersResponse, error) { - out := new(QueryOrdersResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta1.Query/Orders", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Order(ctx context.Context, in *QueryOrderRequest, opts ...grpc.CallOption) (*QueryOrderResponse, error) { - out := new(QueryOrderResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta1.Query/Order", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Bids(ctx context.Context, in *QueryBidsRequest, opts ...grpc.CallOption) (*QueryBidsResponse, error) { - out := new(QueryBidsResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta1.Query/Bids", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Bid(ctx context.Context, in *QueryBidRequest, opts ...grpc.CallOption) (*QueryBidResponse, error) { - out := new(QueryBidResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta1.Query/Bid", in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Leases(ctx context.Context, in *QueryLeasesRequest, opts ...grpc.CallOption) (*QueryLeasesResponse, error) { - out := new(QueryLeasesResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta1.Query/Leases", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Lease(ctx context.Context, in *QueryLeaseRequest, opts ...grpc.CallOption) (*QueryLeaseResponse, error) { - out := new(QueryLeaseResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta1.Query/Lease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // Orders queries orders with filters - Orders(context.Context, *QueryOrdersRequest) (*QueryOrdersResponse, error) - // Order queries order details - Order(context.Context, *QueryOrderRequest) (*QueryOrderResponse, error) - // Bids queries bids with filters - Bids(context.Context, *QueryBidsRequest) (*QueryBidsResponse, error) - // Bid queries bid details - Bid(context.Context, *QueryBidRequest) (*QueryBidResponse, error) - // Leases queries leases with filters - Leases(context.Context, *QueryLeasesRequest) (*QueryLeasesResponse, error) - // Lease queries lease details - Lease(context.Context, *QueryLeaseRequest) (*QueryLeaseResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. -type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Orders(ctx context.Context, req *QueryOrdersRequest) (*QueryOrdersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Orders not implemented") -} -func (*UnimplementedQueryServer) Order(ctx context.Context, req *QueryOrderRequest) (*QueryOrderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Order not implemented") -} -func (*UnimplementedQueryServer) Bids(ctx context.Context, req *QueryBidsRequest) (*QueryBidsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Bids not implemented") -} -func (*UnimplementedQueryServer) Bid(ctx context.Context, req *QueryBidRequest) (*QueryBidResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Bid not implemented") -} -func (*UnimplementedQueryServer) Leases(ctx context.Context, req *QueryLeasesRequest) (*QueryLeasesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Leases not implemented") -} -func (*UnimplementedQueryServer) Lease(ctx context.Context, req *QueryLeaseRequest) (*QueryLeaseResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Lease not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Orders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryOrdersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Orders(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta1.Query/Orders", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Orders(ctx, req.(*QueryOrdersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Order_Handler(srv interface{}, ctx context.Context, 
dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryOrderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Order(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta1.Query/Order", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Order(ctx, req.(*QueryOrderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Bids_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryBidsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Bids(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta1.Query/Bids", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Bids(ctx, req.(*QueryBidsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Bid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryBidRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Bid(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta1.Query/Bid", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Bid(ctx, req.(*QueryBidRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Leases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryLeasesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Leases(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta1.Query/Leases", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Leases(ctx, req.(*QueryLeasesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Lease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryLeaseRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Lease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta1.Query/Lease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Lease(ctx, req.(*QueryLeaseRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.market.v1beta1.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Orders", - Handler: _Query_Orders_Handler, - }, - { - MethodName: "Order", - Handler: _Query_Order_Handler, - }, - { - MethodName: "Bids", - Handler: _Query_Bids_Handler, - }, - { - MethodName: "Bid", - Handler: _Query_Bid_Handler, - }, - { - MethodName: "Leases", - Handler: _Query_Leases_Handler, - }, - { - MethodName: "Lease", - Handler: _Query_Lease_Handler, - }, - 
}, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/market/v1beta1/query.proto", -} - -func (m *QueryOrdersRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOrdersRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOrdersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryOrdersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOrdersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOrdersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Orders) > 0 { - for iNdEx := len(m.Orders) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Orders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryOrderRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOrderRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOrderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryOrderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOrderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOrderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Order.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryBidsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - 
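The QueryClient/QueryServer definitions and handlers removed above are the v1beta1 gRPC query surface for the market module. A usage sketch against a node that still serves it; the endpoint and the "open" state filter are placeholder values, and the import path is assumed from the module layout:

package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	v1beta1 "github.com/akash-network/akash-api/go/node/market/v1beta1"
)

func main() {
	// Placeholder endpoint; point this at a node's gRPC port in practice.
	conn, err := grpc.Dial("grpc.example.com:9090",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	client := v1beta1.NewQueryClient(conn)

	// Orders takes OrderFilters plus an optional page request
	// (see QueryOrdersRequest above).
	resp, err := client.Orders(context.Background(), &v1beta1.QueryOrdersRequest{
		Filters: v1beta1.OrderFilters{State: "open"}, // example filter value
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("orders:", len(resp.Orders))
}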
n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryBidsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryBidsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryBidsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryBidsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryBidsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Bids) > 0 { - for iNdEx := len(m.Bids) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Bids[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryBidRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryBidRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryBidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryBidResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryBidResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.EscrowAccount.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.Bid.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryLeasesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLeasesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryLeasesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLeasesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Leases) > 0 { - for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryLeaseRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLeaseRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLeaseRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryLeaseResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLeaseResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.EscrowPayment.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.Lease.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] 
= uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryOrdersRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filters.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryOrdersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Orders) > 0 { - for _, e := range m.Orders { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryOrderRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryOrderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Order.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryBidsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filters.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryBidsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Bids) > 0 { - for _, e := range m.Bids { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryBidRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryBidResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Bid.Size() - n += 1 + l + sovQuery(uint64(l)) - l = m.EscrowAccount.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryLeasesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filters.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryLeasesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Leases) > 0 { - for _, e := range m.Leases { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryLeaseRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryLeaseResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Lease.Size() - n += 1 + l + sovQuery(uint64(l)) - l = m.EscrowPayment.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryOrdersRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := 
int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOrdersRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOrdersRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryOrdersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOrdersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOrdersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Orders", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Orders = append(m.Orders, Order{}) - if err := m.Orders[len(m.Orders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryOrderRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOrderRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOrderRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryOrderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOrderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOrderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { 
- return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Order.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryBidsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryBidsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBidsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryBidsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryBidsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBidsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bids", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bids = append(m.Bids, QueryBidResponse{}) - if err := m.Bids[len(m.Bids)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryBidRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryBidRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBidRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - 
skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryBidResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryBidResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bid", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Bid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EscrowAccount", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EscrowAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryLeasesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryLeasesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryLeasesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryLeasesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Leases = append(m.Leases, QueryLeaseResponse{}) - if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if 
m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryLeaseRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryLeaseRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLeaseRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryLeaseResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryLeaseResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Lease.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - 
case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EscrowPayment", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EscrowPayment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta1/query.pb.gw.go b/go/node/market/v1beta1/query.pb.gw.go deleted file mode 100644 index 66d32ed7..00000000 --- a/go/node/market/v1beta1/query.pb.gw.go +++ /dev/null @@ -1,586 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/market/v1beta2/query.proto - -/* -Package v1beta1 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package v1beta1 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_Orders_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Orders_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryOrdersRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Orders_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Orders(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Orders_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryOrdersRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Orders_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Orders(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Order_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Order_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryOrderRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Order_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Order(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Order_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryOrderRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Order_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := 
server.Order(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Bids_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Bids_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBidsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bids_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Bids(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Bids_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBidsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bids_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Bids(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Bid_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Bid_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBidRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bid_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Bid(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Bid_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBidRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bid_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Bid(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Leases_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Leases_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLeasesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := 
runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Leases_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Leases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Leases_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLeasesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Leases_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Leases(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Lease_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Lease_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLeaseRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Lease_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Lease(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Lease_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLeaseRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Lease_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Lease(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
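The doc comment above points consumers at RegisterQueryHandlerFromEndpoint. For reference, here is a minimal sketch of how the REST routes generated in this (now deleted) file were typically mounted; the module import path, listen address, and dial option are assumptions for illustration, not taken from this diff:

package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc"

	// Assumed import path for the removed v1beta1 market package.
	marketv1beta1 "github.com/akash-network/akash-api/go/node/market/v1beta1"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	mux := runtime.NewServeMux()
	opts := []grpc.DialOption{grpc.WithInsecure()}

	// Dial the node's gRPC query endpoint and expose the generated REST
	// routes (e.g. GET /akash/market/v1beta2/orders/list) on :8080.
	if err := marketv1beta1.RegisterQueryHandlerFromEndpoint(ctx, mux, "localhost:9090", opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", mux))
}

RegisterQueryHandlerFromEndpoint (defined further down in this file) dials the endpoint and delegates to RegisterQueryHandler and RegisterQueryHandlerClient, which wire each HTTP pattern to the corresponding QueryClient method.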
-func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_Orders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Orders_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Orders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Order_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Order_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Order_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Bids_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Bids_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Bids_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_Bid_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Bid_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Bid_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Leases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Leases_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Leases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Lease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Lease_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Lease_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_Orders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Orders_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Orders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Order_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Order_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Order_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_Bids_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Bids_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Bids_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Bid_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Bid_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Bid_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Leases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Leases_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Leases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Lease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Lease_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Lease_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Query_Orders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta2", "orders", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Order_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta2", "orders", "info"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Bids_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta2", "bids", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Bid_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta2", "bids", "info"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Leases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta2", "leases", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Lease_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta2", "leases", "info"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_Orders_0 = runtime.ForwardResponseMessage - - forward_Query_Order_0 = runtime.ForwardResponseMessage - - forward_Query_Bids_0 = runtime.ForwardResponseMessage - - forward_Query_Bid_0 = runtime.ForwardResponseMessage - - forward_Query_Leases_0 = runtime.ForwardResponseMessage - - forward_Query_Lease_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/market/v1beta1/service.pb.go b/go/node/market/v1beta1/service.pb.go deleted file mode 100644 index 8dd2bcea..00000000 --- a/go/node/market/v1beta1/service.pb.go +++ /dev/null @@ -1,286 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta1/service.proto - -package v1beta1 - -import ( - context "context" - fmt "fmt" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func init() { - proto.RegisterFile("akash/market/v1beta1/service.proto", fileDescriptor_2f0b59ab7d50774b) -} - -var fileDescriptor_2f0b59ab7d50774b = []byte{ - // 287 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4a, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0xcf, 0x4d, 0x2c, 0xca, 0x4e, 0x2d, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, - 0xd4, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, - 0x01, 0xab, 0xd1, 0x83, 0xa8, 0xd1, 0x83, 0xaa, 0x91, 0x92, 0xc3, 0xaa, 0x33, 0x29, 0x33, 0x05, - 0xa2, 0x4b, 0x4a, 0x01, 0xab, 0x7c, 0x4e, 0x6a, 0x62, 0x31, 0xd4, 0x5c, 0xa3, 0x17, 0xcc, 0x5c, - 0xcc, 0xbe, 0xc5, 0xe9, 0x42, 0xd1, 0x5c, 0x9c, 0xce, 0x45, 0xa9, 0x89, 0x25, 0xa9, 0x4e, 0x99, - 0x29, 0x42, 0x4a, 0x7a, 0xd8, 0x6c, 0xd3, 0xf3, 0x2d, 0x4e, 0x87, 0xab, 0x91, 0xd2, 0x22, 0xac, - 0x26, 0x28, 0xb5, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0x28, 0x82, 0x8b, 0xc3, 0x39, 0x27, 0xbf, - 0x18, 0x6c, 0xb6, 0x22, 0x6e, 0x7d, 0x50, 0x25, 0x52, 0x9a, 0x04, 0x95, 0xc0, 0x4d, 0x4e, 0xe7, - 0xe2, 0x0d, 0xcf, 0x2c, 0xc9, 0x48, 0x29, 0x4a, 0x2c, 0xf7, 0x01, 0xf9, 0x4a, 0x48, 0x0d, 0xa7, - 0x5e, 0x14, 0x75, 0x52, 0x7a, 0xc4, 0xa9, 0x83, 0x5b, 0x94, 0xc8, 0xc5, 0x0d, 0xf1, 0x17, 0xc4, - 0x1a, 0x15, 0x02, 0xbe, 0x87, 0x58, 0xa2, 0x43, 0x8c, 0x2a, 0xb8, 0x15, 0x71, 0x5c, 0x5c, 0x60, - 0xff, 0x41, 0x6c, 0x50, 0xc6, 0x1f, 0x08, 0x10, 0x0b, 0xb4, 0x89, 0x50, 0x04, 0x33, 0xdf, 0xc9, - 0xe3, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, - 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0xf4, 0xd2, 0x33, 0x4b, 0x32, - 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xf3, 0xcb, 0x8a, 0x92, 0x73, 0xb2, 0xf5, 0x21, 0x09, - 0xa7, 0x02, 0x96, 0x74, 0x4a, 0x2a, 0x0b, 0x52, 0x8b, 0x61, 0x09, 0x28, 0x89, 0x0d, 0x9c, 0x76, - 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x44, 0x61, 0x73, 0xee, 0xb9, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MsgClient interface { - // CreateBid defines a method to create a bid given proper inputs. - CreateBid(ctx context.Context, in *MsgCreateBid, opts ...grpc.CallOption) (*MsgCreateBidResponse, error) - // CloseBid defines a method to close a bid given proper inputs. - CloseBid(ctx context.Context, in *MsgCloseBid, opts ...grpc.CallOption) (*MsgCloseBidResponse, error) - // WithdrawLease withdraws accrued funds from the lease payment - WithdrawLease(ctx context.Context, in *MsgWithdrawLease, opts ...grpc.CallOption) (*MsgWithdrawLeaseResponse, error) - // CreateLease creates a new lease - CreateLease(ctx context.Context, in *MsgCreateLease, opts ...grpc.CallOption) (*MsgCreateLeaseResponse, error) - // CloseLease defines a method to close an order given proper inputs. 
- CloseLease(ctx context.Context, in *MsgCloseLease, opts ...grpc.CallOption) (*MsgCloseLeaseResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) CreateBid(ctx context.Context, in *MsgCreateBid, opts ...grpc.CallOption) (*MsgCreateBidResponse, error) { - out := new(MsgCreateBidResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta1.Msg/CreateBid", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CloseBid(ctx context.Context, in *MsgCloseBid, opts ...grpc.CallOption) (*MsgCloseBidResponse, error) { - out := new(MsgCloseBidResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta1.Msg/CloseBid", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) WithdrawLease(ctx context.Context, in *MsgWithdrawLease, opts ...grpc.CallOption) (*MsgWithdrawLeaseResponse, error) { - out := new(MsgWithdrawLeaseResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta1.Msg/WithdrawLease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CreateLease(ctx context.Context, in *MsgCreateLease, opts ...grpc.CallOption) (*MsgCreateLeaseResponse, error) { - out := new(MsgCreateLeaseResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta1.Msg/CreateLease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CloseLease(ctx context.Context, in *MsgCloseLease, opts ...grpc.CallOption) (*MsgCloseLeaseResponse, error) { - out := new(MsgCloseLeaseResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta1.Msg/CloseLease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. -type MsgServer interface { - // CreateBid defines a method to create a bid given proper inputs. - CreateBid(context.Context, *MsgCreateBid) (*MsgCreateBidResponse, error) - // CloseBid defines a method to close a bid given proper inputs. - CloseBid(context.Context, *MsgCloseBid) (*MsgCloseBidResponse, error) - // WithdrawLease withdraws accrued funds from the lease payment - WithdrawLease(context.Context, *MsgWithdrawLease) (*MsgWithdrawLeaseResponse, error) - // CreateLease creates a new lease - CreateLease(context.Context, *MsgCreateLease) (*MsgCreateLeaseResponse, error) - // CloseLease defines a method to close an order given proper inputs. - CloseLease(context.Context, *MsgCloseLease) (*MsgCloseLeaseResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
-type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) CreateBid(ctx context.Context, req *MsgCreateBid) (*MsgCreateBidResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateBid not implemented") -} -func (*UnimplementedMsgServer) CloseBid(ctx context.Context, req *MsgCloseBid) (*MsgCloseBidResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseBid not implemented") -} -func (*UnimplementedMsgServer) WithdrawLease(ctx context.Context, req *MsgWithdrawLease) (*MsgWithdrawLeaseResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method WithdrawLease not implemented") -} -func (*UnimplementedMsgServer) CreateLease(ctx context.Context, req *MsgCreateLease) (*MsgCreateLeaseResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateLease not implemented") -} -func (*UnimplementedMsgServer) CloseLease(ctx context.Context, req *MsgCloseLease) (*MsgCloseLeaseResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseLease not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_CreateBid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateBid) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateBid(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta1.Msg/CreateBid", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CreateBid(ctx, req.(*MsgCreateBid)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CloseBid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCloseBid) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CloseBid(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta1.Msg/CloseBid", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CloseBid(ctx, req.(*MsgCloseBid)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_WithdrawLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgWithdrawLease) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).WithdrawLease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta1.Msg/WithdrawLease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).WithdrawLease(ctx, req.(*MsgWithdrawLease)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CreateLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateLease) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateLease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta1.Msg/CreateLease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { - return srv.(MsgServer).CreateLease(ctx, req.(*MsgCreateLease)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CloseLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCloseLease) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CloseLease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta1.Msg/CloseLease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CloseLease(ctx, req.(*MsgCloseLease)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.market.v1beta1.Msg", - HandlerType: (*MsgServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateBid", - Handler: _Msg_CreateBid_Handler, - }, - { - MethodName: "CloseBid", - Handler: _Msg_CloseBid_Handler, - }, - { - MethodName: "WithdrawLease", - Handler: _Msg_WithdrawLease_Handler, - }, - { - MethodName: "CreateLease", - Handler: _Msg_CreateLease_Handler, - }, - { - MethodName: "CloseLease", - Handler: _Msg_CloseLease_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/market/v1beta1/service.proto", -} diff --git a/go/node/market/v1beta1/types.go b/go/node/market/v1beta1/types.go deleted file mode 100644 index e078c156..00000000 --- a/go/node/market/v1beta1/types.go +++ /dev/null @@ -1,225 +0,0 @@ -package v1beta1 - -import ( - "strings" - - sdk "github.com/cosmos/cosmos-sdk/types" - - atypes "github.com/akash-network/akash-api/go/node/audit/v1beta1" - types "github.com/akash-network/akash-api/go/node/types/v1beta1" - - "gopkg.in/yaml.v3" -) - -// ID method returns OrderID details of specific order -func (o Order) ID() OrderID { - return o.OrderID -} - -// String implements the Stringer interface for a Order object. -func (o Order) String() string { - out, _ := yaml.Marshal(o) - return string(out) -} - -// Orders is a collection of Order -type Orders []Order - -// String implements the Stringer interface for a Orders object. 
-func (o Orders) String() string { - var out string - for _, order := range o { - out += order.String() + "\n" - } - - return strings.TrimSpace(out) -} - -// ValidateCanBid method validates whether order is open or not and -// returns error if not -func (o Order) ValidateCanBid() error { - switch o.State { - case OrderOpen: - return nil - case OrderActive: - return ErrOrderActive - default: - return ErrOrderClosed - } -} - -// ValidateInactive method validates whether order is open or not and -// returns error if not -func (o Order) ValidateInactive() error { - switch o.State { - case OrderClosed: - return nil - case OrderActive: - return ErrOrderActive - default: - return ErrOrderClosed - } -} - -// Price method returns price of specific order -func (o Order) Price() sdk.Coin { - return o.Spec.Price() -} - -// MatchAttributes method compares provided attributes with specific order attributes -func (o Order) MatchAttributes(attrs []types.Attribute) bool { - return o.Spec.MatchAttributes(attrs) -} - -// MatchRequirements method compares provided attributes with specific order attributes -func (o Order) MatchRequirements(prov []atypes.Provider) bool { - return o.Spec.MatchRequirements(prov) -} - -// Accept returns whether order filters valid or not -func (filters OrderFilters) Accept(obj Order, stateVal Order_State) bool { - // Checking owner filter - if filters.Owner != "" && filters.Owner != obj.OrderID.Owner { - return false - } - - // Checking dseq filter - if filters.DSeq != 0 && filters.DSeq != obj.OrderID.DSeq { - return false - } - - // Checking gseq filter - if filters.GSeq != 0 && filters.GSeq != obj.OrderID.GSeq { - return false - } - - // Checking oseq filter - if filters.OSeq != 0 && filters.OSeq != obj.OrderID.OSeq { - return false - } - - // Checking state filter - if stateVal != 0 && stateVal != obj.State { - return false - } - - return true -} - -// ID method returns BidID details of specific bid -func (obj Bid) ID() BidID { - return obj.BidID -} - -// String implements the Stringer interface for a Bid object. -func (obj Bid) String() string { - out, _ := yaml.Marshal(obj) - return string(out) -} - -// Bids is a collection of Bid -type Bids []Bid - -// String implements the Stringer interface for a Bids object. -func (b Bids) String() string { - var out string - for _, bid := range b { - out += bid.String() + "\n" - } - - return strings.TrimSpace(out) -} - -// Accept returns whether bid filters valid or not -func (filters BidFilters) Accept(obj Bid, stateVal Bid_State) bool { - // Checking owner filter - if filters.Owner != "" && filters.Owner != obj.BidID.Owner { - return false - } - - // Checking dseq filter - if filters.DSeq != 0 && filters.DSeq != obj.BidID.DSeq { - return false - } - - // Checking gseq filter - if filters.GSeq != 0 && filters.GSeq != obj.BidID.GSeq { - return false - } - - // Checking oseq filter - if filters.OSeq != 0 && filters.OSeq != obj.BidID.OSeq { - return false - } - - // Checking provider filter - if filters.Provider != "" && filters.Provider != obj.BidID.Provider { - return false - } - - // Checking state filter - if stateVal != 0 && stateVal != obj.State { - return false - } - - return true -} - -// ID method returns LeaseID details of specific lease -func (obj Lease) ID() LeaseID { - return obj.LeaseID -} - -// String implements the Stringer interface for a Lease object. 
-func (obj Lease) String() string { - out, _ := yaml.Marshal(obj) - return string(out) -} - -// Leases is a collection of Lease -type Leases []Lease - -// String implements the Stringer interface for a Leases object. -func (l Leases) String() string { - var out string - for _, order := range l { - out += order.String() + "\n" - } - - return strings.TrimSpace(out) -} - -// Accept returns whether lease filters valid or not -func (filters LeaseFilters) Accept(obj Lease, stateVal Lease_State) bool { - // Checking owner filter - if filters.Owner != "" && filters.Owner != obj.LeaseID.Owner { - return false - } - - // Checking dseq filter - if filters.DSeq != 0 && filters.DSeq != obj.LeaseID.DSeq { - return false - } - - // Checking gseq filter - if filters.GSeq != 0 && filters.GSeq != obj.LeaseID.GSeq { - return false - } - - // Checking oseq filter - if filters.OSeq != 0 && filters.OSeq != obj.LeaseID.OSeq { - return false - } - - // Checking provider filter - if filters.Provider != "" && filters.Provider != obj.LeaseID.Provider { - return false - } - - // Checking state filter - if stateVal != 0 && stateVal != obj.State { - return false - } - - return true -} diff --git a/go/node/market/v1beta2/bid.pb.go b/go/node/market/v1beta2/bid.pb.go deleted file mode 100644 index 027435ac..00000000 --- a/go/node/market/v1beta2/bid.pb.go +++ /dev/null @@ -1,1967 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta2/bid.proto - -package v1beta2 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of bid -type Bid_State int32 - -const ( - // Prefix should start with 0 in enum. 
So declaring dummy state - BidStateInvalid Bid_State = 0 - // BidOpen denotes state for bid open - BidOpen Bid_State = 1 - // BidMatched denotes state for bid open - BidActive Bid_State = 2 - // BidLost denotes state for bid lost - BidLost Bid_State = 3 - // BidClosed denotes state for bid closed - BidClosed Bid_State = 4 -) - -var Bid_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "active", - 3: "lost", - 4: "closed", -} - -var Bid_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "active": 2, - "lost": 3, - "closed": 4, -} - -func (x Bid_State) String() string { - return proto.EnumName(Bid_State_name, int32(x)) -} - -func (Bid_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_4790fafc515256e6, []int{5, 0} -} - -// MsgCreateBid defines an SDK message for creating Bid -type MsgCreateBid struct { - Order OrderID `protobuf:"bytes,1,opt,name=order,proto3" json:"order" yaml:"order"` - Provider string `protobuf:"bytes,2,opt,name=provider,proto3" json:"provider" yaml:"provider"` - Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` - Deposit types.Coin `protobuf:"bytes,4,opt,name=deposit,proto3" json:"deposit" yaml:"deposit"` -} - -func (m *MsgCreateBid) Reset() { *m = MsgCreateBid{} } -func (m *MsgCreateBid) String() string { return proto.CompactTextString(m) } -func (*MsgCreateBid) ProtoMessage() {} -func (*MsgCreateBid) Descriptor() ([]byte, []int) { - return fileDescriptor_4790fafc515256e6, []int{0} -} -func (m *MsgCreateBid) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateBid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateBid.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateBid) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateBid.Merge(m, src) -} -func (m *MsgCreateBid) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateBid) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateBid.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateBid proto.InternalMessageInfo - -func (m *MsgCreateBid) GetOrder() OrderID { - if m != nil { - return m.Order - } - return OrderID{} -} - -func (m *MsgCreateBid) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *MsgCreateBid) GetPrice() types.DecCoin { - if m != nil { - return m.Price - } - return types.DecCoin{} -} - -func (m *MsgCreateBid) GetDeposit() types.Coin { - if m != nil { - return m.Deposit - } - return types.Coin{} -} - -// MsgCreateBidResponse defines the Msg/CreateBid response type. 
-type MsgCreateBidResponse struct { -} - -func (m *MsgCreateBidResponse) Reset() { *m = MsgCreateBidResponse{} } -func (m *MsgCreateBidResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateBidResponse) ProtoMessage() {} -func (*MsgCreateBidResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_4790fafc515256e6, []int{1} -} -func (m *MsgCreateBidResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateBidResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateBidResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateBidResponse.Merge(m, src) -} -func (m *MsgCreateBidResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateBidResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateBidResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateBidResponse proto.InternalMessageInfo - -// MsgCloseBid defines an SDK message for closing bid -type MsgCloseBid struct { - BidID BidID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCloseBid) Reset() { *m = MsgCloseBid{} } -func (m *MsgCloseBid) String() string { return proto.CompactTextString(m) } -func (*MsgCloseBid) ProtoMessage() {} -func (*MsgCloseBid) Descriptor() ([]byte, []int) { - return fileDescriptor_4790fafc515256e6, []int{2} -} -func (m *MsgCloseBid) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseBid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseBid.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseBid) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseBid.Merge(m, src) -} -func (m *MsgCloseBid) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseBid) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseBid.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseBid proto.InternalMessageInfo - -func (m *MsgCloseBid) GetBidID() BidID { - if m != nil { - return m.BidID - } - return BidID{} -} - -// MsgCloseBidResponse defines the Msg/CloseBid response type. 
-type MsgCloseBidResponse struct { -} - -func (m *MsgCloseBidResponse) Reset() { *m = MsgCloseBidResponse{} } -func (m *MsgCloseBidResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCloseBidResponse) ProtoMessage() {} -func (*MsgCloseBidResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_4790fafc515256e6, []int{3} -} -func (m *MsgCloseBidResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseBidResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseBidResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseBidResponse.Merge(m, src) -} -func (m *MsgCloseBidResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseBidResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseBidResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseBidResponse proto.InternalMessageInfo - -// BidID stores owner and all other seq numbers -// A successful bid becomes a Lease(ID). -type BidID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` -} - -func (m *BidID) Reset() { *m = BidID{} } -func (*BidID) ProtoMessage() {} -func (*BidID) Descriptor() ([]byte, []int) { - return fileDescriptor_4790fafc515256e6, []int{4} -} -func (m *BidID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BidID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BidID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BidID) XXX_Merge(src proto.Message) { - xxx_messageInfo_BidID.Merge(m, src) -} -func (m *BidID) XXX_Size() int { - return m.Size() -} -func (m *BidID) XXX_DiscardUnknown() { - xxx_messageInfo_BidID.DiscardUnknown(m) -} - -var xxx_messageInfo_BidID proto.InternalMessageInfo - -func (m *BidID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *BidID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *BidID) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *BidID) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *BidID) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -// Bid stores BidID, state of bid and price -type Bid struct { - BidID BidID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` - State Bid_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta2.Bid_State" json:"state" yaml:"state"` - Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` -} - -func (m *Bid) Reset() { *m = Bid{} } -func (*Bid) ProtoMessage() {} -func (*Bid) Descriptor() ([]byte, []int) { - 
return fileDescriptor_4790fafc515256e6, []int{5} -} -func (m *Bid) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Bid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Bid.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Bid) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bid.Merge(m, src) -} -func (m *Bid) XXX_Size() int { - return m.Size() -} -func (m *Bid) XXX_DiscardUnknown() { - xxx_messageInfo_Bid.DiscardUnknown(m) -} - -var xxx_messageInfo_Bid proto.InternalMessageInfo - -func (m *Bid) GetBidID() BidID { - if m != nil { - return m.BidID - } - return BidID{} -} - -func (m *Bid) GetState() Bid_State { - if m != nil { - return m.State - } - return BidStateInvalid -} - -func (m *Bid) GetPrice() types.DecCoin { - if m != nil { - return m.Price - } - return types.DecCoin{} -} - -func (m *Bid) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -// BidFilters defines flags for bid list filter -type BidFilters struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` - State string `protobuf:"bytes,6,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *BidFilters) Reset() { *m = BidFilters{} } -func (m *BidFilters) String() string { return proto.CompactTextString(m) } -func (*BidFilters) ProtoMessage() {} -func (*BidFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_4790fafc515256e6, []int{6} -} -func (m *BidFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BidFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BidFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BidFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_BidFilters.Merge(m, src) -} -func (m *BidFilters) XXX_Size() int { - return m.Size() -} -func (m *BidFilters) XXX_DiscardUnknown() { - xxx_messageInfo_BidFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_BidFilters proto.InternalMessageInfo - -func (m *BidFilters) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *BidFilters) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *BidFilters) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *BidFilters) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *BidFilters) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *BidFilters) GetState() string { - if m != nil { - return m.State - } - return "" -} - -func init() { - proto.RegisterEnum("akash.market.v1beta2.Bid_State", Bid_State_name, Bid_State_value) - proto.RegisterType((*MsgCreateBid)(nil), "akash.market.v1beta2.MsgCreateBid") - proto.RegisterType((*MsgCreateBidResponse)(nil), "akash.market.v1beta2.MsgCreateBidResponse") - proto.RegisterType((*MsgCloseBid)(nil), 
"akash.market.v1beta2.MsgCloseBid") - proto.RegisterType((*MsgCloseBidResponse)(nil), "akash.market.v1beta2.MsgCloseBidResponse") - proto.RegisterType((*BidID)(nil), "akash.market.v1beta2.BidID") - proto.RegisterType((*Bid)(nil), "akash.market.v1beta2.Bid") - proto.RegisterType((*BidFilters)(nil), "akash.market.v1beta2.BidFilters") -} - -func init() { proto.RegisterFile("akash/market/v1beta2/bid.proto", fileDescriptor_4790fafc515256e6) } - -var fileDescriptor_4790fafc515256e6 = []byte{ - // 750 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x95, 0xbd, 0x6e, 0xdb, 0x48, - 0x10, 0xc7, 0x49, 0x89, 0x94, 0xad, 0x95, 0x3f, 0x04, 0xda, 0x3e, 0xd8, 0xba, 0x33, 0x97, 0xc7, - 0xe2, 0xe0, 0xe6, 0x48, 0xd8, 0xae, 0xce, 0x57, 0x99, 0x16, 0xee, 0x20, 0x20, 0x81, 0x13, 0x3a, - 0x48, 0x91, 0x14, 0x06, 0xc5, 0x5d, 0xd0, 0x0b, 0x4b, 0x5c, 0x9a, 0x64, 0x64, 0xe4, 0x0d, 0x02, - 0x57, 0x01, 0xd2, 0xa4, 0x71, 0x60, 0x20, 0x0f, 0x92, 0xd6, 0xa5, 0xcb, 0xa4, 0x21, 0x02, 0xb9, - 0x09, 0x54, 0xea, 0x09, 0x82, 0xfd, 0xd0, 0x87, 0x03, 0x25, 0x40, 0x0a, 0x77, 0xa9, 0xa8, 0xfd, - 0xcf, 0xfc, 0x46, 0xc3, 0xf9, 0x0f, 0x49, 0x60, 0x06, 0xa7, 0x41, 0x76, 0xe2, 0x76, 0x83, 0xf4, - 0x14, 0xe7, 0x6e, 0x6f, 0xbb, 0x8d, 0xf3, 0x60, 0xc7, 0x6d, 0x13, 0xe4, 0x24, 0x29, 0xcd, 0xa9, - 0xb1, 0xca, 0xe3, 0x8e, 0x88, 0x3b, 0x32, 0xde, 0x58, 0x8d, 0x68, 0x44, 0x79, 0x82, 0xcb, 0x7e, - 0x89, 0xdc, 0x86, 0x35, 0xb3, 0x16, 0x4d, 0x11, 0x4e, 0x65, 0x86, 0x19, 0xd2, 0xac, 0x4b, 0x33, - 0xb7, 0x1d, 0x64, 0x58, 0x26, 0x6c, 0xbb, 0x21, 0x25, 0xb1, 0x88, 0xdb, 0x9f, 0x4a, 0x60, 0xe1, - 0x61, 0x16, 0x1d, 0xa4, 0x38, 0xc8, 0xb1, 0x47, 0x90, 0xf1, 0x1c, 0xe8, 0x9c, 0x5f, 0x57, 0x2d, - 0x75, 0xab, 0xb6, 0xb3, 0xe9, 0xcc, 0x6a, 0xc7, 0x39, 0x64, 0x29, 0xad, 0xa6, 0xf7, 0xd7, 0x75, - 0x01, 0x95, 0x7e, 0x01, 0x75, 0x2e, 0x0c, 0x0a, 0x28, 0xe0, 0x61, 0x01, 0x17, 0x5e, 0x06, 0xdd, - 0xce, 0x9e, 0xcd, 0x8f, 0xb6, 0x2f, 0x64, 0xe3, 0x5f, 0x30, 0x9f, 0xa4, 0xb4, 0x47, 0x58, 0xfd, - 0x92, 0xa5, 0x6e, 0x55, 0x3d, 0x38, 0x28, 0xe0, 0x58, 0x1b, 0x16, 0x70, 0x59, 0x60, 0x23, 0xc5, - 0xf6, 0xc7, 0x41, 0xe3, 0x31, 0xd0, 0x93, 0x94, 0x84, 0x78, 0xbd, 0xcc, 0x3b, 0xfb, 0xc3, 0x11, - 0xb7, 0xe6, 0xb0, 0x5b, 0x93, 0x8d, 0x6d, 0x3b, 0x4d, 0x1c, 0x1e, 0x50, 0x12, 0x7b, 0x9b, 0xac, - 0x31, 0xd6, 0x0f, 0x47, 0x26, 0xfd, 0xf0, 0xa3, 0xed, 0x0b, 0xd9, 0x78, 0x0a, 0xe6, 0x10, 0x4e, - 0x68, 0x46, 0xf2, 0x75, 0x8d, 0x17, 0xdd, 0x98, 0x59, 0x94, 0x57, 0xfc, 0x53, 0x56, 0x1c, 0x11, - 0xc3, 0x02, 0x2e, 0x89, 0x9a, 0x52, 0xb0, 0xfd, 0x51, 0x68, 0x4f, 0xfb, 0x72, 0x05, 0x15, 0xfb, - 0x37, 0xb0, 0x3a, 0x3d, 0x5a, 0x1f, 0x67, 0x09, 0x8d, 0x33, 0x6c, 0x13, 0x50, 0x63, 0x7a, 0x87, - 0x66, 0x7c, 0xe2, 0x4f, 0x40, 0xa5, 0x4d, 0xd0, 0x31, 0x41, 0x72, 0xe4, 0xbf, 0xcf, 0x1e, 0xb9, - 0x47, 0x50, 0xab, 0xe9, 0x59, 0xa3, 0x81, 0xf3, 0xe3, 0xa0, 0x80, 0x25, 0x82, 0x86, 0x05, 0xac, - 0x8a, 0x4e, 0x08, 0xb2, 0x7d, 0xbd, 0x4d, 0x50, 0x0b, 0xc9, 0x16, 0xd6, 0xc0, 0xca, 0xd4, 0x5f, - 0x8d, 0x3b, 0x78, 0x57, 0x02, 0xa2, 0x80, 0xe1, 0x02, 0x9d, 0x9e, 0xc7, 0xd2, 0xee, 0xaa, 0xb7, - 0xc1, 0x2d, 0x64, 0xc2, 0x94, 0x85, 0xec, 0xc8, 0x2c, 0x64, 0x57, 0x63, 0x17, 0x68, 0x28, 0xc3, - 0x67, 0xdc, 0x3e, 0xcd, 0x83, 0xfd, 0x02, 0x6a, 0xcd, 0x23, 0x7c, 0x36, 0x28, 0x20, 0xd7, 0x87, - 0x05, 0xac, 0xc9, 0xa9, 0x64, 0xf8, 0xcc, 0xf6, 0xb9, 0xc8, 0xa0, 0x88, 0x41, 0xcc, 0xb9, 0x45, - 0x01, 0xfd, 0x2f, 0xa1, 0xe8, 0x0e, 0x14, 0x09, 0x28, 0x92, 0x10, 0x65, 0x90, 0x36, 0x81, 0x0e, - 0x25, 0x44, 0xef, 0x40, 0x54, 0x40, 0xec, 0x72, 0x67, 0xc3, 0xf4, 0x9f, 
0xdc, 0xb0, 0xbd, 0xf9, - 0xb7, 0x57, 0x50, 0xe1, 0x73, 0xfb, 0x50, 0x06, 0xe5, 0x7b, 0xf3, 0xc6, 0x78, 0x04, 0xf4, 0x2c, - 0x0f, 0x72, 0xcc, 0x87, 0xb8, 0xb4, 0x03, 0xbf, 0x5b, 0xd4, 0x39, 0x62, 0x69, 0xc2, 0x15, 0x4e, - 0x4c, 0x5c, 0xe1, 0x47, 0xdb, 0x17, 0xf2, 0x7d, 0x3c, 0x1b, 0x9b, 0x00, 0x84, 0x7c, 0x75, 0xd1, - 0x71, 0x20, 0x1e, 0x8f, 0xb2, 0x5f, 0x95, 0xca, 0x7e, 0x6e, 0xbf, 0x51, 0x81, 0xce, 0xbb, 0x33, - 0x2c, 0x30, 0x47, 0xe2, 0x5e, 0xd0, 0x21, 0xa8, 0xae, 0x34, 0x56, 0x2e, 0x2e, 0xad, 0x65, 0x8f, - 0x20, 0x1e, 0x6a, 0x09, 0xd9, 0x58, 0x03, 0x1a, 0x4d, 0x70, 0x5c, 0x57, 0x1b, 0xb5, 0x8b, 0x4b, - 0x6b, 0xce, 0x23, 0xe8, 0x30, 0xc1, 0xb1, 0xb1, 0x01, 0x2a, 0x41, 0x98, 0x93, 0x1e, 0xae, 0x97, - 0x1a, 0x8b, 0x17, 0x97, 0x56, 0xd5, 0x23, 0x68, 0x9f, 0x0b, 0x8c, 0xe8, 0xd0, 0x2c, 0xaf, 0x97, - 0xc7, 0xc4, 0x03, 0x9a, 0xe5, 0x8c, 0x08, 0xd9, 0x2e, 0xa3, 0xba, 0x36, 0x26, 0xf8, 0x72, 0xa3, - 0x86, 0xf6, 0xea, 0xbd, 0xa9, 0x4c, 0x39, 0x78, 0x53, 0x02, 0xc0, 0x23, 0xe8, 0x3f, 0xd2, 0xc9, - 0x71, 0x9a, 0xfd, 0xda, 0xf3, 0xe9, 0x37, 0xa9, 0x3b, 0xda, 0xbf, 0xca, 0x64, 0x18, 0x3f, 0x5a, - 0x2f, 0xf1, 0x32, 0xf1, 0x8e, 0xae, 0xfb, 0xa6, 0x7a, 0xd3, 0x37, 0xd5, 0xcf, 0x7d, 0x53, 0x7d, - 0x7d, 0x6b, 0x2a, 0x37, 0xb7, 0xa6, 0xf2, 0xf1, 0xd6, 0x54, 0x9e, 0xfd, 0x13, 0x91, 0xfc, 0xe4, - 0x45, 0xdb, 0x09, 0x69, 0xd7, 0xe5, 0xbb, 0xfc, 0x77, 0x8c, 0xf3, 0x73, 0x9a, 0x9e, 0xca, 0x53, - 0x90, 0x10, 0x37, 0xa2, 0x6e, 0x4c, 0x11, 0xfe, 0xe6, 0x63, 0xd5, 0xae, 0xf0, 0xef, 0xd0, 0xee, - 0xd7, 0x00, 0x00, 0x00, 0xff, 0xff, 0x37, 0x85, 0xd3, 0xf8, 0x17, 0x07, 0x00, 0x00, -} - -func (m *MsgCreateBid) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateBid) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateBid) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Deposit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintBid(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Order.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCreateBidResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateBidResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgCloseBid) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func 
(m *MsgCloseBid) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseBid) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.BidID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCloseBidResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseBidResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *BidID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BidID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BidID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintBid(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintBid(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Bid) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Bid) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Bid) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CreatedAt != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.State != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.BidID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *BidFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BidFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BidFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ 
= l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintBid(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x32 - } - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintBid(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintBid(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintBid(dAtA []byte, offset int, v uint64) int { - offset -= sovBid(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *MsgCreateBid) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Order.Size() - n += 1 + l + sovBid(uint64(l)) - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - l = m.Price.Size() - n += 1 + l + sovBid(uint64(l)) - l = m.Deposit.Size() - n += 1 + l + sovBid(uint64(l)) - return n -} - -func (m *MsgCreateBidResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgCloseBid) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BidID.Size() - n += 1 + l + sovBid(uint64(l)) - return n -} - -func (m *MsgCloseBidResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *BidID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovBid(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovBid(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovBid(uint64(m.OSeq)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - return n -} - -func (m *Bid) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BidID.Size() - n += 1 + l + sovBid(uint64(l)) - if m.State != 0 { - n += 1 + sovBid(uint64(m.State)) - } - l = m.Price.Size() - n += 1 + l + sovBid(uint64(l)) - if m.CreatedAt != 0 { - n += 1 + sovBid(uint64(m.CreatedAt)) - } - return n -} - -func (m *BidFilters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovBid(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovBid(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovBid(uint64(m.OSeq)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - return n -} - -func sovBid(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozBid(x uint64) (n int) { - return sovBid(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *MsgCreateBid) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - 
wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateBid: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateBid: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Order.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deposit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Deposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateBidResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateBidResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseBid) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseBid: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseBid: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BidID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BidID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseBidResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseBidResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BidID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BidID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BidID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Bid) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Bid: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Bid: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BidID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BidID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Bid_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BidFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BidFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BidFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 
- for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipBid(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBid - } - if iNdEx >= l { - return 0, 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBid - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBid - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthBid - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupBid - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthBid - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthBid = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowBid = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupBid = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta2/codec.go b/go/node/market/v1beta2/codec.go deleted file mode 100644 index 75350343..00000000 --- a/go/node/market/v1beta2/codec.go +++ /dev/null @@ -1,50 +0,0 @@ -package v1beta2 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/msgservice" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/market module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/market and - // defined at the application level. - ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterCodec registers the necessary x/market interfaces and concrete types -// on the provided Amino codec. These types are used for Amino JSON serialization. 
-func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { - cdc.RegisterConcrete(&MsgCreateBid{}, ModuleName+"/"+MsgTypeCreateBid, nil) - cdc.RegisterConcrete(&MsgCloseBid{}, ModuleName+"/"+MsgTypeCloseBid, nil) - cdc.RegisterConcrete(&MsgCreateLease{}, ModuleName+"/"+MsgTypeCreateLease, nil) - cdc.RegisterConcrete(&MsgWithdrawLease{}, ModuleName+"/"+MsgTypeWithdrawLease, nil) - cdc.RegisterConcrete(&MsgCloseLease{}, ModuleName+"/"+MsgTypeCloseLease, nil) -} - -// RegisterInterfaces registers the x/market interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil), - &MsgCreateBid{}, - &MsgCloseBid{}, - &MsgCreateLease{}, - &MsgWithdrawLease{}, - &MsgCloseLease{}, - ) - - msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) -} diff --git a/go/node/market/v1beta2/errors.go b/go/node/market/v1beta2/errors.go deleted file mode 100644 index 6f74841d..00000000 --- a/go/node/market/v1beta2/errors.go +++ /dev/null @@ -1,72 +0,0 @@ -package v1beta2 - -import ( - "errors" -) - -var ( - // ErrEmptyProvider is the error when provider is empty - ErrEmptyProvider = errors.New("empty provider") - // ErrSameAccount is the error when owner and provider are the same account - ErrSameAccount = errors.New("owner and provider are the same account") - // ErrInternal is the error for internal error - ErrInternal = errors.New("internal error") - // ErrBidOverOrder is the error when bid price is above max order price - ErrBidOverOrder = errors.New("bid price above max order price") - // ErrAttributeMismatch is the error for attribute mismatch - ErrAttributeMismatch = errors.New("attribute mismatch") - // ErrCapabilitiesMismatch is the error for capabilities mismatch - ErrCapabilitiesMismatch = errors.New("capabilities mismatch") - // ErrUnknownBid is the error for unknown bid - ErrUnknownBid = errors.New("unknown bid") - // ErrUnknownLease is the error for unknown bid - ErrUnknownLease = errors.New("unknown lease") - // ErrUnknownLeaseForBid is the error when lease is unknown for bid - ErrUnknownLeaseForBid = errors.New("unknown lease for bid") - // ErrUnknownOrderForBid is the error when order is unknown for bid - ErrUnknownOrderForBid = errors.New("unknown order for bid") - // ErrLeaseNotActive is the error when lease is not active - ErrLeaseNotActive = errors.New("lease not active") - // ErrBidNotActive is the error when bid is not matched - ErrBidNotActive = errors.New("bid not active") - // ErrBidNotOpen is the error when bid is not matched - ErrBidNotOpen = errors.New("bid not open") - // ErrNoLeaseForOrder is the error when there is no lease for order - ErrNoLeaseForOrder = errors.New("no lease for order") - // ErrOrderNotFound order not found - ErrOrderNotFound = errors.New("invalid order: order not found") - // ErrGroupNotFound order not found - ErrGroupNotFound = errors.New("order not found") - // ErrGroupNotOpen order not found - ErrGroupNotOpen = errors.New("order not open") - // ErrOrderNotOpen order not found - ErrOrderNotOpen = errors.New("bid: order not open") - // ErrBidNotFound bid not found - ErrBidNotFound = errors.New("invalid bid: bid not found") - // ErrBidZeroPrice zero price - ErrBidZeroPrice = errors.New("invalid bid: zero price") - // ErrLeaseNotFound lease not found - ErrLeaseNotFound = errors.New("invalid lease: lease not found") - // ErrBidExists bid exists - ErrBidExists = errors.New("invalid bid: bid exists from provider") - // ErrBidInvalidPrice bid invalid price - 
ErrBidInvalidPrice = errors.New("bid price is invalid") - // ErrOrderActive order active - ErrOrderActive = errors.New("order active") - // ErrOrderClosed order closed - ErrOrderClosed = errors.New("order closed") - // ErrOrderExists indicates a new order was proposed overwrite the existing store key - ErrOrderExists = errors.New("order already exists in store") - // ErrOrderTooEarly to match bid - ErrOrderTooEarly = errors.New("order: chain height to low for bidding") - // ErrOrderDurationExceeded order should be closed - ErrOrderDurationExceeded = errors.New("order duration has exceeded the bidding duration") - // ErrInvalidDeposit indicates an invalid deposit - ErrInvalidDeposit = errors.New("Deposit invalid") - // ErrInvalidParam indicates an invalid chain parameter - ErrInvalidParam = errors.New("parameter invalid") - // ErrUnknownProvider indicates an invalid chain parameter - ErrUnknownProvider = errors.New("unknown provider") - // ErrInvalidBid indicates an invalid chain parameter - ErrInvalidBid = errors.New("unknown provider") -) diff --git a/go/node/market/v1beta2/escrow.go b/go/node/market/v1beta2/escrow.go deleted file mode 100644 index fbc9e455..00000000 --- a/go/node/market/v1beta2/escrow.go +++ /dev/null @@ -1,61 +0,0 @@ -package v1beta2 - -import ( - "fmt" - "strconv" - "strings" - - sdk "github.com/cosmos/cosmos-sdk/types" - - etypes "github.com/akash-network/akash-api/go/node/escrow/v1beta2" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta2" -) - -const ( - bidEscrowScope = "bid" -) - -func EscrowAccountForBid(id BidID) etypes.AccountID { - return etypes.AccountID{ - Scope: bidEscrowScope, - XID: id.String(), - } -} - -func EscrowPaymentForLease(id LeaseID) string { - return fmt.Sprintf("%v/%v/%s", id.GSeq, id.OSeq, id.Provider) -} - -func LeaseIDFromEscrowAccount(id etypes.AccountID, pid string) (LeaseID, bool) { - did, ok := dtypes.DeploymentIDFromEscrowAccount(id) - if !ok { - return LeaseID{}, false - } - - parts := strings.Split(pid, "/") - if len(parts) != 3 { - return LeaseID{}, false - } - - gseq, err := strconv.ParseUint(parts[0], 10, 32) - if err != nil { - return LeaseID{}, false - } - - oseq, err := strconv.ParseUint(parts[1], 10, 32) - if err != nil { - return LeaseID{}, false - } - - owner, err := sdk.AccAddressFromBech32(parts[2]) - if err != nil { - return LeaseID{}, false - } - - return MakeLeaseID( - MakeBidID( - MakeOrderID( - dtypes.MakeGroupID( - did, uint32(gseq)), uint32(oseq)), owner)), true -} diff --git a/go/node/market/v1beta2/event.go b/go/node/market/v1beta2/event.go deleted file mode 100644 index 0fe56d00..00000000 --- a/go/node/market/v1beta2/event.go +++ /dev/null @@ -1,360 +0,0 @@ -package v1beta2 - -import ( - "strconv" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" - - "github.com/akash-network/akash-api/go/sdkutil" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta2" -) - -const ( - evActionOrderCreated = "order-created" - evActionOrderClosed = "order-closed" - evActionBidCreated = "bid-created" - evActionBidClosed = "bid-closed" - evActionLeaseCreated = "lease-created" - evActionLeaseClosed = "lease-closed" - - evOSeqKey = "oseq" - evProviderKey = "provider" - evPriceDenomKey = "price-denom" - evPriceAmountKey = "price-amount" -) - -var ( - ErrParsingPrice = errors.New("error parsing price") -) - -// EventOrderCreated struct -type EventOrderCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID OrderID `json:"id"` -} - -func NewEventOrderCreated(id 
OrderID) EventOrderCreated { - return EventOrderCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionOrderCreated, - }, - ID: id, - } -} - -// ToSDKEvent method creates new sdk event for EventOrderCreated struct -func (e EventOrderCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionOrderCreated), - }, orderIDEVAttributes(e.ID)...)..., - ) -} - -// EventOrderClosed struct -type EventOrderClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID OrderID `json:"id"` -} - -func NewEventOrderClosed(id OrderID) EventOrderClosed { - return EventOrderClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionOrderClosed, - }, - ID: id, - } -} - -// ToSDKEvent method creates new sdk event for EventOrderClosed struct -func (e EventOrderClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionOrderClosed), - }, orderIDEVAttributes(e.ID)...)..., - ) -} - -// EventBidCreated struct -type EventBidCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID BidID `json:"id"` - Price sdk.DecCoin `json:"price"` -} - -func NewEventBidCreated(id BidID, price sdk.DecCoin) EventBidCreated { - return EventBidCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionBidCreated, - }, - ID: id, - Price: price, - } -} - -// ToSDKEvent method creates new sdk event for EventBidCreated struct -func (e EventBidCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append( - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionBidCreated), - }, bidIDEVAttributes(e.ID)...), - priceEVAttributes(e.Price)...)..., - ) -} - -// EventBidClosed struct -type EventBidClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID BidID `json:"id"` - Price sdk.DecCoin `json:"price"` -} - -func NewEventBidClosed(id BidID, price sdk.DecCoin) EventBidClosed { - return EventBidClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionBidClosed, - }, - ID: id, - Price: price, - } -} - -// ToSDKEvent method creates new sdk event for EventBidClosed struct -func (e EventBidClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append( - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionBidClosed), - }, bidIDEVAttributes(e.ID)...), - priceEVAttributes(e.Price)...)..., - ) -} - -// EventLeaseCreated struct -type EventLeaseCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID LeaseID `json:"id"` - Price sdk.DecCoin `json:"price"` -} - -func NewEventLeaseCreated(id LeaseID, price sdk.DecCoin) EventLeaseCreated { - return EventLeaseCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionLeaseCreated, - }, - ID: id, - Price: price, - } -} - -// ToSDKEvent method creates new sdk event for EventLeaseCreated struct -func (e EventLeaseCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append( - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - 
sdk.NewAttribute(sdk.AttributeKeyAction, evActionLeaseCreated), - }, leaseIDEVAttributes(e.ID)...), - priceEVAttributes(e.Price)...)...) -} - -// EventLeaseClosed struct -type EventLeaseClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID LeaseID `json:"id"` - Price sdk.DecCoin `json:"price"` -} - -func NewEventLeaseClosed(id LeaseID, price sdk.DecCoin) EventLeaseClosed { - return EventLeaseClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionLeaseClosed, - }, - ID: id, - Price: price, - } -} - -// ToSDKEvent method creates new sdk event for EventLeaseClosed struct -func (e EventLeaseClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append( - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionLeaseClosed), - }, leaseIDEVAttributes(e.ID)...), - priceEVAttributes(e.Price)...)...) -} - -// orderIDEVAttributes returns event attribues for given orderID -func orderIDEVAttributes(id OrderID) []sdk.Attribute { - return append(dtypes.GroupIDEVAttributes(id.GroupID()), - sdk.NewAttribute(evOSeqKey, strconv.FormatUint(uint64(id.OSeq), 10))) -} - -// parseEVOrderID returns orderID for given event attributes -func parseEVOrderID(attrs []sdk.Attribute) (OrderID, error) { - gid, err := dtypes.ParseEVGroupID(attrs) - if err != nil { - return OrderID{}, err - } - oseq, err := sdkutil.GetUint64(attrs, evOSeqKey) - if err != nil { - return OrderID{}, err - } - - return OrderID{ - Owner: gid.Owner, - DSeq: gid.DSeq, - GSeq: gid.GSeq, - OSeq: uint32(oseq), - }, nil - -} - -// bidIDEVAttributes returns event attribues for given bidID -func bidIDEVAttributes(id BidID) []sdk.Attribute { - return append(orderIDEVAttributes(id.OrderID()), - sdk.NewAttribute(evProviderKey, id.Provider)) -} - -// parseEVBidID returns bidID for given event attributes -func parseEVBidID(attrs []sdk.Attribute) (BidID, error) { - oid, err := parseEVOrderID(attrs) - if err != nil { - return BidID{}, err - } - - provider, err := sdkutil.GetAccAddress(attrs, evProviderKey) - if err != nil { - return BidID{}, err - } - - return BidID{ - Owner: oid.Owner, - DSeq: oid.DSeq, - GSeq: oid.GSeq, - OSeq: oid.OSeq, - Provider: provider.String(), - }, nil -} - -// leaseIDEVAttributes returns event attribues for given LeaseID -func leaseIDEVAttributes(id LeaseID) []sdk.Attribute { - return append(orderIDEVAttributes(id.OrderID()), - sdk.NewAttribute(evProviderKey, id.Provider)) -} - -// parseEVLeaseID returns leaseID for given event attributes -func parseEVLeaseID(attrs []sdk.Attribute) (LeaseID, error) { - bid, err := parseEVBidID(attrs) - if err != nil { - return LeaseID{}, err - } - return LeaseID(bid), nil -} - -func priceEVAttributes(price sdk.DecCoin) []sdk.Attribute { - return []sdk.Attribute{ - sdk.NewAttribute(evPriceDenomKey, price.Denom), - sdk.NewAttribute(evPriceAmountKey, price.Amount.String()), - } -} - -func parseEVPriceAttributes(attrs []sdk.Attribute) (sdk.DecCoin, error) { - denom, err := sdkutil.GetString(attrs, evPriceDenomKey) - if err != nil { - return sdk.DecCoin{}, err - } - - amounts, err := sdkutil.GetString(attrs, evPriceAmountKey) - if err != nil { - return sdk.DecCoin{}, err - } - - amount, err := sdk.NewDecFromStr(amounts) - if err != nil { - return sdk.DecCoin{}, ErrParsingPrice - } - - return sdk.NewDecCoinFromDec(denom, amount), nil -} - -// ParseEvent parses event and returns details of event and error if occurred -func ParseEvent(ev sdkutil.Event) 
(sdkutil.ModuleEvent, error) { - if ev.Type != sdkutil.EventTypeMessage { - return nil, sdkutil.ErrUnknownType - } - if ev.Module != ModuleName { - return nil, sdkutil.ErrUnknownModule - } - switch ev.Action { - - case evActionOrderCreated: - id, err := parseEVOrderID(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventOrderCreated(id), nil - case evActionOrderClosed: - id, err := parseEVOrderID(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventOrderClosed(id), nil - - case evActionBidCreated: - id, err := parseEVBidID(ev.Attributes) - if err != nil { - return nil, err - } - price, err := parseEVPriceAttributes(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventBidCreated(id, price), nil - case evActionBidClosed: - id, err := parseEVBidID(ev.Attributes) - if err != nil { - return nil, err - } - // optional price - price, _ := parseEVPriceAttributes(ev.Attributes) - return NewEventBidClosed(id, price), nil - - case evActionLeaseCreated: - id, err := parseEVLeaseID(ev.Attributes) - if err != nil { - return nil, err - } - price, err := parseEVPriceAttributes(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventLeaseCreated(id, price), nil - case evActionLeaseClosed: - id, err := parseEVLeaseID(ev.Attributes) - if err != nil { - return nil, err - } - // optional price - price, _ := parseEVPriceAttributes(ev.Attributes) - return NewEventLeaseClosed(id, price), nil - - default: - return nil, sdkutil.ErrUnknownAction - } -} diff --git a/go/node/market/v1beta2/genesis.pb.go b/go/node/market/v1beta2/genesis.pb.go deleted file mode 100644 index 6906526a..00000000 --- a/go/node/market/v1beta2/genesis.pb.go +++ /dev/null @@ -1,453 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta2/genesis.proto - -package v1beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisState defines the basic genesis state used by market module -type GenesisState struct { - Orders []Order `protobuf:"bytes,1,rep,name=orders,proto3" json:"orders" yaml:"orders"` - Leases []Lease `protobuf:"bytes,2,rep,name=leases,proto3" json:"leases" yaml:"leases"` - Params Params `protobuf:"bytes,3,opt,name=params,proto3" json:"params" yaml:"params"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_e3591e07a3cf8f44, []int{0} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetOrders() []Order { - if m != nil { - return m.Orders - } - return nil -} - -func (m *GenesisState) GetLeases() []Lease { - if m != nil { - return m.Leases - } - return nil -} - -func (m *GenesisState) GetParams() Params { - if m != nil { - return m.Params - } - return Params{} -} - -func init() { - proto.RegisterType((*GenesisState)(nil), "akash.market.v1beta2.GenesisState") -} - -func init() { - proto.RegisterFile("akash/market/v1beta2/genesis.proto", fileDescriptor_e3591e07a3cf8f44) -} - -var fileDescriptor_e3591e07a3cf8f44 = []byte{ - // 301 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x90, 0xb1, 0x4e, 0xc3, 0x30, - 0x14, 0x45, 0x93, 0x56, 0xea, 0x90, 0xc2, 0x12, 0x75, 0x88, 0x0a, 0x72, 0x82, 0xa7, 0x2e, 0xd8, - 0x22, 0x4c, 0x30, 0x76, 0x61, 0x41, 0x02, 0xa5, 0xb0, 0xb0, 0x39, 0xd4, 0x72, 0xa3, 0x34, 0x71, - 0x64, 0x1b, 0x10, 0x7f, 0x01, 0x7f, 0xd5, 0xb1, 0x23, 0x53, 0x84, 0x92, 0x8d, 0x91, 0x2f, 0x40, - 0xb1, 0x2d, 0x45, 0x42, 0x51, 0xb7, 0x5c, 0xbd, 0xf3, 0x8e, 0x6f, 0x9e, 0x07, 0x49, 0x4e, 0xe4, - 0x06, 0x17, 0x44, 0xe4, 0x54, 0xe1, 0xd7, 0x8b, 0x94, 0x2a, 0x12, 0x63, 0x46, 0x4b, 0x2a, 0x33, - 0x89, 0x2a, 0xc1, 0x15, 0xf7, 0x67, 0x9a, 0x41, 0x86, 0x41, 0x96, 0x99, 0xcf, 0x18, 0x67, 0x5c, - 0x03, 0xb8, 0xfb, 0x32, 0xec, 0x3c, 0x1a, 0xf4, 0x71, 0xb1, 0xa6, 0xe2, 0x20, 0xb1, 0xa5, 0x44, - 0x52, 0x4b, 0x9c, 0x0d, 0x12, 0x15, 0x11, 0xa4, 0xb0, 0x95, 0xe0, 0xe7, 0xc8, 0x3b, 0xba, 0x31, - 0x25, 0x57, 0x8a, 0x28, 0xea, 0x3f, 0x78, 0x13, 0xfd, 0x88, 0x0c, 0xdc, 0x68, 0xbc, 0x98, 0xc6, - 0x27, 0x68, 0xa8, 0x34, 0xba, 0xeb, 0x98, 0x65, 0xb8, 0xab, 0x43, 0xe7, 0xa7, 0x0e, 0xed, 0xca, - 0x6f, 0x1d, 0x1e, 0xbf, 0x93, 0x62, 0x7b, 0x0d, 0x4d, 0x86, 0x89, 0x1d, 0x74, 0x56, 0x5d, 0x4c, - 0x06, 0xa3, 0x43, 0xd6, 0xdb, 0x8e, 0xe9, 0xad, 0x66, 0xa5, 0xb7, 0x9a, 0x0c, 0x13, 0x3b, 0xf0, - 0x1f, 0xbd, 0x89, 0xf9, 0x99, 0x60, 0x1c, 0xb9, 0x8b, 0x69, 0x7c, 0x3a, 0x6c, 0xbd, 0xd7, 0x4c, - 0xaf, 0x35, 0x3b, 0xbd, 0xd6, 0x64, 0x98, 0xd8, 0xc1, 0x72, 0xb5, 0x6b, 0x80, 0xbb, 0x6f, 0x80, - 0xfb, 0xdd, 
0x00, 0xf7, 0xa3, 0x05, 0xce, 0xbe, 0x05, 0xce, 0x57, 0x0b, 0x9c, 0xa7, 0x2b, 0x96, - 0xa9, 0xcd, 0x4b, 0x8a, 0x9e, 0x79, 0x81, 0xf5, 0x53, 0xe7, 0x25, 0x55, 0x6f, 0x5c, 0xe4, 0x36, - 0x91, 0x2a, 0xc3, 0x8c, 0xe3, 0x92, 0xaf, 0xe9, 0xbf, 0xab, 0xa7, 0x13, 0x7d, 0xef, 0xcb, 0xbf, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x91, 0x42, 0x96, 0xb9, 0x28, 0x02, 0x00, 0x00, -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if len(m.Leases) > 0 { - for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Orders) > 0 { - for iNdEx := len(m.Orders) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Orders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Orders) > 0 { - for _, e := range m.Orders { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - if len(m.Leases) > 0 { - for _, e := range m.Leases { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - l = m.Params.Size() - n += 1 + l + sovGenesis(uint64(l)) - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Orders", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := 
iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Orders = append(m.Orders, Order{}) - if err := m.Orders[len(m.Orders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Leases = append(m.Leases, Lease{}) - if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - 
ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta2/id.go b/go/node/market/v1beta2/id.go deleted file mode 100644 index a78cc598..00000000 --- a/go/node/market/v1beta2/id.go +++ /dev/null @@ -1,154 +0,0 @@ -package v1beta2 - -import ( - "fmt" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta2" -) - -// MakeOrderID returns OrderID instance with provided groupID details and oseq -func MakeOrderID(id dtypes.GroupID, oseq uint32) OrderID { - return OrderID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: id.GSeq, - OSeq: oseq, - } -} - -// GroupID method returns groupID details for specific order -func (id OrderID) GroupID() dtypes.GroupID { - return dtypes.GroupID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: id.GSeq, - } -} - -// Equals method compares specific order with provided order -func (id OrderID) Equals(other OrderID) bool { - return id.GroupID().Equals(other.GroupID()) && id.OSeq == other.OSeq -} - -// Validate method for OrderID and returns nil -func (id OrderID) Validate() error { - if err := id.GroupID().Validate(); err != nil { - return sdkerrors.Wrap(err, "OrderID: Invalid GroupID") - } - if id.OSeq == 0 { - return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "OrderID: Invalid Order Sequence") - } - return nil -} - -// String provides stringer interface to save reflected formatting. -func (id OrderID) String() string { - return fmt.Sprintf("%s/%v", id.GroupID(), id.OSeq) -} - -// MakeBidID returns BidID instance with provided order details and provider -func MakeBidID(id OrderID, provider sdk.AccAddress) BidID { - return BidID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: id.GSeq, - OSeq: id.OSeq, - Provider: provider.String(), - } -} - -// Equals method compares specific bid with provided bid -func (id BidID) Equals(other BidID) bool { - return id.OrderID().Equals(other.OrderID()) && - id.Provider == other.Provider -} - -// LeaseID method returns lease details of bid -func (id BidID) LeaseID() LeaseID { - return LeaseID(id) -} - -// OrderID method returns OrderID details with specific bid details -func (id BidID) OrderID() OrderID { - return OrderID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: id.GSeq, - OSeq: id.OSeq, - } -} - -// String method for consistent output. 
-func (id BidID) String() string { - return fmt.Sprintf("%s/%v", id.OrderID(), id.Provider) -} - -// GroupID method returns GroupID details with specific bid details -func (id BidID) GroupID() dtypes.GroupID { - return id.OrderID().GroupID() -} - -// DeploymentID method returns deployment details with specific bid details -func (id BidID) DeploymentID() dtypes.DeploymentID { - return id.GroupID().DeploymentID() -} - -// Validate validates bid instance and returns nil -func (id BidID) Validate() error { - if err := id.OrderID().Validate(); err != nil { - return sdkerrors.Wrap(err, "BidID: Invalid OrderID") - } - if _, err := sdk.AccAddressFromBech32(id.Provider); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "BidID: Invalid Provider Address") - } - if id.Owner == id.Provider { - return sdkerrors.Wrap(sdkerrors.ErrConflict, "BidID: self-bid") - } - return nil -} - -// MakeLeaseID returns LeaseID instance with provided bid details -func MakeLeaseID(id BidID) LeaseID { - return LeaseID(id) -} - -// Equals method compares specific lease with provided lease -func (id LeaseID) Equals(other LeaseID) bool { - return id.BidID().Equals(other.BidID()) -} - -// Validate calls the BidID's validator and returns any error. -func (id LeaseID) Validate() error { - if err := id.BidID().Validate(); err != nil { - return sdkerrors.Wrap(err, "LeaseID: Invalid BidID") - } - return nil -} - -// BidID method returns BidID details with specific LeaseID -func (id LeaseID) BidID() BidID { - return BidID(id) -} - -// OrderID method returns OrderID details with specific lease details -func (id LeaseID) OrderID() OrderID { - return id.BidID().OrderID() -} - -// GroupID method returns GroupID details with specific lease details -func (id LeaseID) GroupID() dtypes.GroupID { - return id.OrderID().GroupID() -} - -// DeploymentID method returns deployment details with specific lease details -func (id LeaseID) DeploymentID() dtypes.DeploymentID { - return id.GroupID().DeploymentID() -} - -// String method provides human readable representation of LeaseID. -func (id LeaseID) String() string { - return id.BidID().String() -} diff --git a/go/node/market/v1beta2/key.go b/go/node/market/v1beta2/key.go deleted file mode 100644 index 78c5e7e8..00000000 --- a/go/node/market/v1beta2/key.go +++ /dev/null @@ -1,28 +0,0 @@ -package v1beta2 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "market" - - // StoreKey is the store key string for market - StoreKey = ModuleName - - // RouterKey is the message route for market - RouterKey = ModuleName -) - -func OrderPrefix() []byte { - return []byte{0x01, 0x00} -} - -func BidPrefix() []byte { - return []byte{0x02, 0x00} -} - -func LeasePrefix() []byte { - return []byte{0x03, 0x00} -} - -func SecondaryLeasePrefix() []byte { - return []byte{0x03, 0x01} -} diff --git a/go/node/market/v1beta2/lease.pb.go b/go/node/market/v1beta2/lease.pb.go deleted file mode 100644 index 6a365b6d..00000000 --- a/go/node/market/v1beta2/lease.pb.go +++ /dev/null @@ -1,2134 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta2/lease.proto - -package v1beta2 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of lease -type Lease_State int32 - -const ( - // Prefix should start with 0 in enum. So declaring dummy state - LeaseStateInvalid Lease_State = 0 - // LeaseActive denotes state for lease active - LeaseActive Lease_State = 1 - // LeaseInsufficientFunds denotes state for lease insufficient_funds - LeaseInsufficientFunds Lease_State = 2 - // LeaseClosed denotes state for lease closed - LeaseClosed Lease_State = 3 -) - -var Lease_State_name = map[int32]string{ - 0: "invalid", - 1: "active", - 2: "insufficient_funds", - 3: "closed", -} - -var Lease_State_value = map[string]int32{ - "invalid": 0, - "active": 1, - "insufficient_funds": 2, - "closed": 3, -} - -func (x Lease_State) String() string { - return proto.EnumName(Lease_State_name, int32(x)) -} - -func (Lease_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_13e4d452e6d81c98, []int{1, 0} -} - -// LeaseID stores bid details of lease -type LeaseID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` -} - -func (m *LeaseID) Reset() { *m = LeaseID{} } -func (*LeaseID) ProtoMessage() {} -func (*LeaseID) Descriptor() ([]byte, []int) { - return fileDescriptor_13e4d452e6d81c98, []int{0} -} -func (m *LeaseID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseID) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseID.Merge(m, src) -} -func (m *LeaseID) XXX_Size() int { - return m.Size() -} -func (m *LeaseID) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseID.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseID proto.InternalMessageInfo - -func (m *LeaseID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *LeaseID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *LeaseID) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *LeaseID) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *LeaseID) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -// Lease stores LeaseID, state of lease and price -type Lease struct { - LeaseID LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"id" yaml:"id"` - State Lease_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta2.Lease_State" json:"state" yaml:"state"` - Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` - CreatedAt int64 
`protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - ClosedOn int64 `protobuf:"varint,5,opt,name=closed_on,json=closedOn,proto3" json:"closed_on,omitempty"` -} - -func (m *Lease) Reset() { *m = Lease{} } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { - return fileDescriptor_13e4d452e6d81c98, []int{1} -} -func (m *Lease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Lease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Lease) XXX_Merge(src proto.Message) { - xxx_messageInfo_Lease.Merge(m, src) -} -func (m *Lease) XXX_Size() int { - return m.Size() -} -func (m *Lease) XXX_DiscardUnknown() { - xxx_messageInfo_Lease.DiscardUnknown(m) -} - -var xxx_messageInfo_Lease proto.InternalMessageInfo - -func (m *Lease) GetLeaseID() LeaseID { - if m != nil { - return m.LeaseID - } - return LeaseID{} -} - -func (m *Lease) GetState() Lease_State { - if m != nil { - return m.State - } - return LeaseStateInvalid -} - -func (m *Lease) GetPrice() types.DecCoin { - if m != nil { - return m.Price - } - return types.DecCoin{} -} - -func (m *Lease) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -func (m *Lease) GetClosedOn() int64 { - if m != nil { - return m.ClosedOn - } - return 0 -} - -// LeaseFilters defines flags for lease list filter -type LeaseFilters struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` - State string `protobuf:"bytes,6,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *LeaseFilters) Reset() { *m = LeaseFilters{} } -func (m *LeaseFilters) String() string { return proto.CompactTextString(m) } -func (*LeaseFilters) ProtoMessage() {} -func (*LeaseFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_13e4d452e6d81c98, []int{2} -} -func (m *LeaseFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseFilters.Merge(m, src) -} -func (m *LeaseFilters) XXX_Size() int { - return m.Size() -} -func (m *LeaseFilters) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseFilters proto.InternalMessageInfo - -func (m *LeaseFilters) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *LeaseFilters) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *LeaseFilters) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *LeaseFilters) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *LeaseFilters) 
GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *LeaseFilters) GetState() string { - if m != nil { - return m.State - } - return "" -} - -// MsgCreateLease is sent to create a lease -type MsgCreateLease struct { - BidID BidID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCreateLease) Reset() { *m = MsgCreateLease{} } -func (m *MsgCreateLease) String() string { return proto.CompactTextString(m) } -func (*MsgCreateLease) ProtoMessage() {} -func (*MsgCreateLease) Descriptor() ([]byte, []int) { - return fileDescriptor_13e4d452e6d81c98, []int{3} -} -func (m *MsgCreateLease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateLease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateLease) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateLease.Merge(m, src) -} -func (m *MsgCreateLease) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateLease) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateLease.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateLease proto.InternalMessageInfo - -func (m *MsgCreateLease) GetBidID() BidID { - if m != nil { - return m.BidID - } - return BidID{} -} - -// MsgCreateLeaseResponse is the response from creating a lease -type MsgCreateLeaseResponse struct { -} - -func (m *MsgCreateLeaseResponse) Reset() { *m = MsgCreateLeaseResponse{} } -func (m *MsgCreateLeaseResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateLeaseResponse) ProtoMessage() {} -func (*MsgCreateLeaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_13e4d452e6d81c98, []int{4} -} -func (m *MsgCreateLeaseResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateLeaseResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateLeaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateLeaseResponse.Merge(m, src) -} -func (m *MsgCreateLeaseResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateLeaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateLeaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateLeaseResponse proto.InternalMessageInfo - -// MsgWithdrawLease defines an SDK message for closing bid -type MsgWithdrawLease struct { - LeaseID LeaseID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` -} - -func (m *MsgWithdrawLease) Reset() { *m = MsgWithdrawLease{} } -func (m *MsgWithdrawLease) String() string { return proto.CompactTextString(m) } -func (*MsgWithdrawLease) ProtoMessage() {} -func (*MsgWithdrawLease) Descriptor() ([]byte, []int) { - return fileDescriptor_13e4d452e6d81c98, []int{5} -} -func (m *MsgWithdrawLease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgWithdrawLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgWithdrawLease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err 
!= nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgWithdrawLease) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgWithdrawLease.Merge(m, src) -} -func (m *MsgWithdrawLease) XXX_Size() int { - return m.Size() -} -func (m *MsgWithdrawLease) XXX_DiscardUnknown() { - xxx_messageInfo_MsgWithdrawLease.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgWithdrawLease proto.InternalMessageInfo - -func (m *MsgWithdrawLease) GetLeaseID() LeaseID { - if m != nil { - return m.LeaseID - } - return LeaseID{} -} - -// MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. -type MsgWithdrawLeaseResponse struct { -} - -func (m *MsgWithdrawLeaseResponse) Reset() { *m = MsgWithdrawLeaseResponse{} } -func (m *MsgWithdrawLeaseResponse) String() string { return proto.CompactTextString(m) } -func (*MsgWithdrawLeaseResponse) ProtoMessage() {} -func (*MsgWithdrawLeaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_13e4d452e6d81c98, []int{6} -} -func (m *MsgWithdrawLeaseResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgWithdrawLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgWithdrawLeaseResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgWithdrawLeaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgWithdrawLeaseResponse.Merge(m, src) -} -func (m *MsgWithdrawLeaseResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgWithdrawLeaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgWithdrawLeaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgWithdrawLeaseResponse proto.InternalMessageInfo - -// MsgCloseLease defines an SDK message for closing order -type MsgCloseLease struct { - LeaseID LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCloseLease) Reset() { *m = MsgCloseLease{} } -func (m *MsgCloseLease) String() string { return proto.CompactTextString(m) } -func (*MsgCloseLease) ProtoMessage() {} -func (*MsgCloseLease) Descriptor() ([]byte, []int) { - return fileDescriptor_13e4d452e6d81c98, []int{7} -} -func (m *MsgCloseLease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseLease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseLease) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseLease.Merge(m, src) -} -func (m *MsgCloseLease) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseLease) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseLease.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseLease proto.InternalMessageInfo - -func (m *MsgCloseLease) GetLeaseID() LeaseID { - if m != nil { - return m.LeaseID - } - return LeaseID{} -} - -// MsgCloseLeaseResponse defines the Msg/CloseLease response type. 
-type MsgCloseLeaseResponse struct { -} - -func (m *MsgCloseLeaseResponse) Reset() { *m = MsgCloseLeaseResponse{} } -func (m *MsgCloseLeaseResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCloseLeaseResponse) ProtoMessage() {} -func (*MsgCloseLeaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_13e4d452e6d81c98, []int{8} -} -func (m *MsgCloseLeaseResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseLeaseResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseLeaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseLeaseResponse.Merge(m, src) -} -func (m *MsgCloseLeaseResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseLeaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseLeaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseLeaseResponse proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("akash.market.v1beta2.Lease_State", Lease_State_name, Lease_State_value) - proto.RegisterType((*LeaseID)(nil), "akash.market.v1beta2.LeaseID") - proto.RegisterType((*Lease)(nil), "akash.market.v1beta2.Lease") - proto.RegisterType((*LeaseFilters)(nil), "akash.market.v1beta2.LeaseFilters") - proto.RegisterType((*MsgCreateLease)(nil), "akash.market.v1beta2.MsgCreateLease") - proto.RegisterType((*MsgCreateLeaseResponse)(nil), "akash.market.v1beta2.MsgCreateLeaseResponse") - proto.RegisterType((*MsgWithdrawLease)(nil), "akash.market.v1beta2.MsgWithdrawLease") - proto.RegisterType((*MsgWithdrawLeaseResponse)(nil), "akash.market.v1beta2.MsgWithdrawLeaseResponse") - proto.RegisterType((*MsgCloseLease)(nil), "akash.market.v1beta2.MsgCloseLease") - proto.RegisterType((*MsgCloseLeaseResponse)(nil), "akash.market.v1beta2.MsgCloseLeaseResponse") -} - -func init() { proto.RegisterFile("akash/market/v1beta2/lease.proto", fileDescriptor_13e4d452e6d81c98) } - -var fileDescriptor_13e4d452e6d81c98 = []byte{ - // 755 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xcf, 0x4e, 0xdb, 0x4e, - 0x10, 0xb6, 0x93, 0x38, 0x24, 0x1b, 0xfe, 0xe4, 0x67, 0x01, 0xbf, 0xd4, 0x14, 0xdb, 0xf5, 0x89, - 0x4b, 0x6d, 0x11, 0x4e, 0xa5, 0x27, 0x42, 0x44, 0x15, 0xa9, 0x08, 0xd5, 0x54, 0x6a, 0x55, 0x55, - 0x8a, 0x1c, 0xef, 0x62, 0x56, 0x24, 0xde, 0xe0, 0x35, 0x41, 0x7d, 0x83, 0x8a, 0x53, 0x8f, 0xbd, - 0xa0, 0x22, 0xf5, 0x65, 0x38, 0xa2, 0x9e, 0x7a, 0xb2, 0xaa, 0x70, 0xa9, 0x72, 0x8c, 0xfa, 0x00, - 0xd5, 0xee, 0x3a, 0x24, 0x41, 0x11, 0xa7, 0xaa, 0xa7, 0x9e, 0x92, 0xf9, 0x66, 0xbe, 0x99, 0xf1, - 0x37, 0x33, 0x36, 0x30, 0xbd, 0x13, 0x8f, 0x1e, 0x3b, 0x1d, 0x2f, 0x3a, 0x41, 0xb1, 0xd3, 0xdb, - 0x6c, 0xa1, 0xd8, 0xab, 0x3a, 0x6d, 0xe4, 0x51, 0x64, 0x77, 0x23, 0x12, 0x13, 0x75, 0x99, 0x47, - 0xd8, 0x22, 0xc2, 0x4e, 0x23, 0xb4, 0xe5, 0x80, 0x04, 0x84, 0x07, 0x38, 0xec, 0x9f, 0x88, 0xd5, - 0x74, 0x9f, 0xd0, 0x0e, 0xa1, 0x4e, 0xcb, 0xa3, 0x28, 0x4d, 0xb6, 0xe9, 0xf8, 0x04, 0x87, 0x23, - 0xff, 0xcc, 0x6a, 0x2d, 0x0c, 0x85, 0xdf, 0xba, 0xca, 0x80, 0xb9, 0x97, 0xac, 0x76, 0xa3, 0xae, - 0x3a, 0x40, 0x21, 0xe7, 0x21, 0x8a, 0x2a, 0xb2, 0x29, 0x6f, 0x14, 0x6b, 0x8f, 0x06, 0x89, 0x21, - 0x80, 0x61, 0x62, 0xcc, 0x7f, 0xf0, 0x3a, 0xed, 0x6d, 0x8b, 0x9b, 0x96, 0x2b, 0x60, 0x75, 0x0b, - 0xe4, 0x20, 0x45, 0xa7, 
0x95, 0x8c, 0x29, 0x6f, 0xe4, 0x6a, 0x46, 0x3f, 0x31, 0x72, 0xf5, 0x43, - 0x74, 0x3a, 0x48, 0x0c, 0x8e, 0x0f, 0x13, 0xa3, 0x24, 0x68, 0xcc, 0xb2, 0x5c, 0x0e, 0x32, 0x52, - 0xc0, 0x48, 0x59, 0x53, 0xde, 0x58, 0x10, 0xa4, 0x17, 0x29, 0x29, 0x98, 0x22, 0x05, 0x82, 0x14, - 0xa4, 0x24, 0xc2, 0x48, 0xb9, 0x31, 0xe9, 0x20, 0x25, 0x91, 0x29, 0x12, 0x11, 0x24, 0xf6, 0xa3, - 0x3e, 0x07, 0x85, 0x6e, 0x44, 0x7a, 0x18, 0xa2, 0xa8, 0xa2, 0xf0, 0x47, 0x32, 0x06, 0x89, 0x71, - 0x87, 0x0d, 0x13, 0x63, 0x49, 0x90, 0x46, 0x88, 0xe5, 0xde, 0x39, 0xb7, 0x0b, 0x9f, 0xaf, 0x0c, - 0xe9, 0xe7, 0x95, 0x21, 0x59, 0xbf, 0xb2, 0x40, 0xe1, 0x12, 0xa9, 0xef, 0x41, 0x81, 0xcf, 0xa9, - 0x89, 0x21, 0xd7, 0xa8, 0x54, 0x5d, 0xb7, 0x67, 0xcd, 0xca, 0x4e, 0x15, 0xad, 0x59, 0xd7, 0x89, - 0x21, 0xf5, 0x13, 0x63, 0x24, 0xf1, 0x20, 0x31, 0x32, 0x18, 0x0e, 0x13, 0xa3, 0x28, 0x0a, 0x63, - 0x68, 0xb9, 0x73, 0x3c, 0x65, 0x03, 0xaa, 0x2e, 0x50, 0x68, 0xec, 0xc5, 0x88, 0xcb, 0xb9, 0x58, - 0x7d, 0xf2, 0x40, 0x6a, 0xfb, 0x90, 0x05, 0x8a, 0x09, 0x71, 0xce, 0x78, 0x42, 0xdc, 0xb4, 0x5c, - 0x01, 0xab, 0xaf, 0x80, 0xd2, 0x8d, 0xb0, 0x8f, 0xb8, 0xda, 0xa5, 0xea, 0x63, 0x5b, 0xac, 0x8b, - 0xcd, 0xd6, 0x25, 0x4d, 0xb9, 0x69, 0xd7, 0x91, 0xbf, 0x4b, 0x70, 0x58, 0x5b, 0x67, 0xdd, 0xb2, - 0x94, 0x9c, 0x32, 0x4e, 0xc9, 0x4d, 0xcb, 0x15, 0xb0, 0xba, 0x0e, 0x80, 0x1f, 0x21, 0x2f, 0x46, - 0xb0, 0xe9, 0xc5, 0x7c, 0x20, 0x59, 0xb7, 0x98, 0x22, 0x3b, 0xb1, 0xba, 0x06, 0x8a, 0x7e, 0x9b, - 0x50, 0x04, 0x9b, 0x24, 0xe4, 0xaa, 0x67, 0xdd, 0x82, 0x00, 0x0e, 0x42, 0xeb, 0x8b, 0x0c, 0x14, - 0xde, 0xba, 0x6a, 0x81, 0x39, 0x1c, 0xf6, 0xbc, 0x36, 0x86, 0x65, 0x49, 0x5b, 0xb9, 0xb8, 0x34, - 0xff, 0xe3, 0x0f, 0xc6, 0x9d, 0x0d, 0xe1, 0x50, 0xd7, 0x40, 0xde, 0xf3, 0x63, 0xdc, 0x43, 0x65, - 0x59, 0x5b, 0xba, 0xb8, 0x34, 0x4b, 0x3c, 0x64, 0x87, 0x43, 0x6a, 0x15, 0xa8, 0x38, 0xa4, 0x67, - 0x47, 0x47, 0xd8, 0xc7, 0x28, 0x8c, 0x9b, 0x47, 0x67, 0x21, 0xa4, 0xe5, 0x8c, 0xa6, 0x5d, 0x5c, - 0x9a, 0xab, 0x42, 0xee, 0x09, 0xf7, 0x1e, 0xf3, 0xb2, 0x84, 0xa2, 0x95, 0x72, 0x76, 0x22, 0xe1, - 0x2e, 0x87, 0xb4, 0xdc, 0xc7, 0xaf, 0xba, 0x34, 0x31, 0xf6, 0x6f, 0x19, 0x30, 0xcf, 0xfd, 0x7b, - 0xb8, 0x1d, 0xa3, 0x88, 0xfe, 0x3b, 0x8f, 0x89, 0xf3, 0x60, 0x62, 0x88, 0x65, 0xcd, 0x8f, 0xc5, - 0x78, 0x68, 0x13, 0xb7, 0x73, 0x5c, 0xd4, 0x36, 0x58, 0xdc, 0xa7, 0xc1, 0x2e, 0xdf, 0x16, 0x71, - 0x53, 0xaf, 0x41, 0xbe, 0x85, 0xe1, 0xf8, 0xa2, 0xd6, 0x66, 0xaf, 0x7d, 0x0d, 0xc3, 0x46, 0xbd, - 0x66, 0xa6, 0xf7, 0xa4, 0x70, 0x73, 0xd6, 0x35, 0x29, 0x2d, 0x0c, 0x1b, 0x30, 0xad, 0x56, 0x01, - 0xab, 0xd3, 0xd5, 0x5c, 0x44, 0xbb, 0x24, 0xa4, 0xc8, 0x8a, 0x40, 0x79, 0x9f, 0x06, 0x6f, 0x70, - 0x7c, 0x0c, 0x23, 0xef, 0x5c, 0x74, 0xf2, 0xf6, 0x5e, 0x27, 0x7f, 0xe0, 0xb6, 0xa7, 0xba, 0xd1, - 0x40, 0xe5, 0x7e, 0xcd, 0xbb, 0x7e, 0x28, 0x58, 0x60, 0x9d, 0xb2, 0x4d, 0xfc, 0x0b, 0xaf, 0x9a, - 0xb4, 0xa1, 0xff, 0xc1, 0xca, 0x54, 0xd1, 0x51, 0x37, 0xb5, 0xc3, 0xeb, 0xbe, 0x2e, 0xdf, 0xf4, - 0x75, 0xf9, 0x47, 0x5f, 0x97, 0x3f, 0xdd, 0xea, 0xd2, 0xcd, 0xad, 0x2e, 0x7d, 0xbf, 0xd5, 0xa5, - 0x77, 0xcf, 0x02, 0x1c, 0x1f, 0x9f, 0xb5, 0x6c, 0x9f, 0x74, 0x1c, 0xde, 0xce, 0xd3, 0x10, 0xc5, - 0xe7, 0x24, 0x3a, 0x49, 0x2d, 0xaf, 0x8b, 0x9d, 0x80, 0x38, 0x21, 0x81, 0xe8, 0xde, 0x37, 0xa7, - 0x95, 0xe7, 0x1f, 0x9c, 0xad, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x44, 0xd2, 0x5d, 0xf4, 0x00, - 0x07, 0x00, 0x00, -} - -func (m *LeaseID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - 
-func (m *LeaseID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintLease(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintLease(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Lease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ClosedOn != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.ClosedOn)) - i-- - dAtA[i] = 0x28 - } - if m.CreatedAt != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.State != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.LeaseID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *LeaseFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintLease(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x32 - } - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintLease(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintLease(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateLease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateLease) MarshalTo(dAtA 
[]byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.BidID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCreateLeaseResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateLeaseResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgWithdrawLease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgWithdrawLease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgWithdrawLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.LeaseID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgWithdrawLeaseResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgWithdrawLeaseResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgWithdrawLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgCloseLease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseLease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.LeaseID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCloseLeaseResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseLeaseResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintLease(dAtA []byte, offset int, v uint64) int { - offset -= sovLease(v) - base := offset - for v >= 1<<7 { 
- dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *LeaseID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovLease(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovLease(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovLease(uint64(m.OSeq)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - return n -} - -func (m *Lease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LeaseID.Size() - n += 1 + l + sovLease(uint64(l)) - if m.State != 0 { - n += 1 + sovLease(uint64(m.State)) - } - l = m.Price.Size() - n += 1 + l + sovLease(uint64(l)) - if m.CreatedAt != 0 { - n += 1 + sovLease(uint64(m.CreatedAt)) - } - if m.ClosedOn != 0 { - n += 1 + sovLease(uint64(m.ClosedOn)) - } - return n -} - -func (m *LeaseFilters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovLease(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovLease(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovLease(uint64(m.OSeq)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - return n -} - -func (m *MsgCreateLease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BidID.Size() - n += 1 + l + sovLease(uint64(l)) - return n -} - -func (m *MsgCreateLeaseResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgWithdrawLease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LeaseID.Size() - n += 1 + l + sovLease(uint64(l)) - return n -} - -func (m *MsgWithdrawLeaseResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgCloseLease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LeaseID.Size() - n += 1 + l + sovLease(uint64(l)) - return n -} - -func (m *MsgCloseLeaseResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovLease(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozLease(x uint64) (n int) { - return sovLease(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *LeaseID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - 
if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Lease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Lease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - if err := m.LeaseID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Lease_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClosedOn", wireType) - } - m.ClosedOn = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ClosedOn |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner 
= string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateLease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateLease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateLease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field BidID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BidID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateLeaseResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateLeaseResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgWithdrawLease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgWithdrawLease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgWithdrawLease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LeaseID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if 
(iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgWithdrawLeaseResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgWithdrawLeaseResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgWithdrawLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseLease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseLease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseLease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LeaseID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseLeaseResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseLeaseResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseLeaseResponse: illegal tag %d (wire type %d)", fieldNum, 
wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipLease(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthLease - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupLease - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthLease - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthLease = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowLease = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupLease = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta2/migrate/v1beta1.go b/go/node/market/v1beta2/migrate/v1beta1.go deleted file mode 100644 index e7413f26..00000000 --- a/go/node/market/v1beta2/migrate/v1beta1.go +++ /dev/null @@ -1,16 +0,0 @@ -package migrate - -import ( - "github.com/akash-network/akash-api/go/node/market/v1beta1" - "github.com/akash-network/akash-api/go/node/market/v1beta2" -) - -func LeaseIDToV1beta1(from v1beta1.LeaseID) v1beta2.LeaseID { - return v1beta2.LeaseID{ - Owner: from.Owner, - DSeq: from.DSeq, - GSeq: from.GSeq, - OSeq: from.OSeq, - Provider: from.Provider, - } -} diff --git a/go/node/market/v1beta2/msgs.go b/go/node/market/v1beta2/msgs.go deleted file mode 100644 index 47a8df17..00000000 --- a/go/node/market/v1beta2/msgs.go +++ /dev/null @@ -1,215 +0,0 @@ -package v1beta2 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" -) - -const ( - MsgTypeCreateBid = "create-bid" - MsgTypeCloseBid = "close-bid" - MsgTypeCreateLease = "create-lease" - MsgTypeWithdrawLease = "withdraw-lease" - MsgTypeCloseLease = "close-lease" -) - -var ( - _ sdk.Msg = &MsgCreateBid{} - _ sdk.Msg = &MsgCloseBid{} - _ sdk.Msg = &MsgCreateLease{} - _ sdk.Msg = &MsgWithdrawLease{} - _ sdk.Msg = &MsgCloseLease{} -) - -// NewMsgCreateBid creates a new MsgCreateBid instance -func NewMsgCreateBid(id OrderID, provider sdk.AccAddress, price sdk.DecCoin, deposit sdk.Coin) *MsgCreateBid { - return &MsgCreateBid{ - Order: id, - Provider: provider.String(), - Price: price, - Deposit: deposit, - } -} - -// Route implements the sdk.Msg interface -func (msg 
MsgCreateBid) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCreateBid) Type() string { return MsgTypeCreateBid } - -// GetSignBytes encodes the message for signing -func (msg MsgCreateBid) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCreateBid) GetSigners() []sdk.AccAddress { - provider, err := sdk.AccAddressFromBech32(msg.Provider) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{provider} -} - -// ValidateBasic does basic validation of provider and order -func (msg MsgCreateBid) ValidateBasic() error { - if err := msg.Order.Validate(); err != nil { - return err - } - - provider, err := sdk.AccAddressFromBech32(msg.Provider) - if err != nil { - return ErrEmptyProvider - } - - owner, err := sdk.AccAddressFromBech32(msg.Order.Owner) - if err != nil { - return errors.Wrap(ErrInvalidBid, "empty owner") - } - - if provider.Equals(owner) { - return ErrSameAccount - } - - if msg.Price.IsZero() { - return ErrBidZeroPrice - } - - return nil -} - -// NewMsgWithdrawLease creates a new MsgWithdrawLease instance -func NewMsgWithdrawLease(id LeaseID) *MsgWithdrawLease { - return &MsgWithdrawLease{ - LeaseID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgWithdrawLease) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgWithdrawLease) Type() string { return MsgTypeWithdrawLease } - -// GetSignBytes encodes the message for signing -func (msg MsgWithdrawLease) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgWithdrawLease) GetSigners() []sdk.AccAddress { - provider, err := sdk.AccAddressFromBech32(msg.GetLeaseID().Provider) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{provider} -} - -// ValidateBasic does basic validation of provider and order -func (msg MsgWithdrawLease) ValidateBasic() error { - if err := msg.LeaseID.Validate(); err != nil { - return err - } - return nil -} - -// NewMsgCreateLease creates a new MsgCreateLease instance -func NewMsgCreateLease(id BidID) *MsgCreateLease { - return &MsgCreateLease{ - BidID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCreateLease) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCreateLease) Type() string { return MsgTypeCreateLease } - -// GetSignBytes encodes the message for signing -func (msg MsgCreateLease) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCreateLease) GetSigners() []sdk.AccAddress { - provider, err := sdk.AccAddressFromBech32(msg.BidID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{provider} -} - -// ValidateBasic method for MsgCreateLease -func (msg MsgCreateLease) ValidateBasic() error { - return msg.BidID.Validate() -} - -// NewMsgCloseBid creates a new MsgCloseBid instance -func NewMsgCloseBid(id BidID) *MsgCloseBid { - return &MsgCloseBid{ - BidID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCloseBid) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCloseBid) Type() string { return MsgTypeCloseBid } - -// GetSignBytes encodes the message for signing -func (msg MsgCloseBid) GetSignBytes() []byte { - return 
sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCloseBid) GetSigners() []sdk.AccAddress { - provider, err := sdk.AccAddressFromBech32(msg.BidID.Provider) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{provider} -} - -// ValidateBasic method for MsgCloseBid -func (msg MsgCloseBid) ValidateBasic() error { - return msg.BidID.Validate() -} - -// NewMsgCloseLease creates a new MsgCloseLease instance -func NewMsgCloseLease(id LeaseID) *MsgCloseLease { - return &MsgCloseLease{ - LeaseID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCloseLease) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCloseLease) Type() string { return MsgTypeCloseLease } - -// GetSignBytes encodes the message for signing -func (msg MsgCloseLease) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCloseLease) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.LeaseID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// ValidateBasic method for MsgCloseLease -func (msg MsgCloseLease) ValidateBasic() error { - return msg.LeaseID.Validate() -} diff --git a/go/node/market/v1beta2/order.pb.go b/go/node/market/v1beta2/order.pb.go deleted file mode 100644 index 41790dd6..00000000 --- a/go/node/market/v1beta2/order.pb.go +++ /dev/null @@ -1,1107 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta2/order.proto - -package v1beta2 - -import ( - fmt "fmt" - v1beta2 "github.com/akash-network/akash-api/go/node/deployment/v1beta2" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of order -type Order_State int32 - -const ( - // Prefix should start with 0 in enum. 
So declaring dummy state - OrderStateInvalid Order_State = 0 - // OrderOpen denotes state for order open - OrderOpen Order_State = 1 - // OrderMatched denotes state for order matched - OrderActive Order_State = 2 - // OrderClosed denotes state for order lost - OrderClosed Order_State = 3 -) - -var Order_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "active", - 3: "closed", -} - -var Order_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "active": 2, - "closed": 3, -} - -func (x Order_State) String() string { - return proto.EnumName(Order_State_name, int32(x)) -} - -func (Order_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_31c7b4cb1ace8a4b, []int{1, 0} -} - -// OrderID stores owner and all other seq numbers -type OrderID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` -} - -func (m *OrderID) Reset() { *m = OrderID{} } -func (*OrderID) ProtoMessage() {} -func (*OrderID) Descriptor() ([]byte, []int) { - return fileDescriptor_31c7b4cb1ace8a4b, []int{0} -} -func (m *OrderID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OrderID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_OrderID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *OrderID) XXX_Merge(src proto.Message) { - xxx_messageInfo_OrderID.Merge(m, src) -} -func (m *OrderID) XXX_Size() int { - return m.Size() -} -func (m *OrderID) XXX_DiscardUnknown() { - xxx_messageInfo_OrderID.DiscardUnknown(m) -} - -var xxx_messageInfo_OrderID proto.InternalMessageInfo - -func (m *OrderID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *OrderID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *OrderID) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *OrderID) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -// Order stores orderID, state of order and other details -type Order struct { - OrderID OrderID `protobuf:"bytes,1,opt,name=order_id,json=orderId,proto3" json:"id" yaml:"id"` - State Order_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta2.Order_State" json:"state" yaml:"state"` - Spec v1beta2.GroupSpec `protobuf:"bytes,3,opt,name=spec,proto3" json:"spec" yaml:"spec"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` -} - -func (m *Order) Reset() { *m = Order{} } -func (*Order) ProtoMessage() {} -func (*Order) Descriptor() ([]byte, []int) { - return fileDescriptor_31c7b4cb1ace8a4b, []int{1} -} -func (m *Order) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Order.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Order) XXX_Merge(src proto.Message) { - xxx_messageInfo_Order.Merge(m, src) -} -func (m *Order) XXX_Size() int { - return m.Size() -} -func (m *Order) 
XXX_DiscardUnknown() { - xxx_messageInfo_Order.DiscardUnknown(m) -} - -var xxx_messageInfo_Order proto.InternalMessageInfo - -func (m *Order) GetOrderID() OrderID { - if m != nil { - return m.OrderID - } - return OrderID{} -} - -func (m *Order) GetState() Order_State { - if m != nil { - return m.State - } - return OrderStateInvalid -} - -func (m *Order) GetSpec() v1beta2.GroupSpec { - if m != nil { - return m.Spec - } - return v1beta2.GroupSpec{} -} - -func (m *Order) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -// OrderFilters defines flags for order list filter -type OrderFilters struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *OrderFilters) Reset() { *m = OrderFilters{} } -func (m *OrderFilters) String() string { return proto.CompactTextString(m) } -func (*OrderFilters) ProtoMessage() {} -func (*OrderFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_31c7b4cb1ace8a4b, []int{2} -} -func (m *OrderFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OrderFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_OrderFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *OrderFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_OrderFilters.Merge(m, src) -} -func (m *OrderFilters) XXX_Size() int { - return m.Size() -} -func (m *OrderFilters) XXX_DiscardUnknown() { - xxx_messageInfo_OrderFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_OrderFilters proto.InternalMessageInfo - -func (m *OrderFilters) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *OrderFilters) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *OrderFilters) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *OrderFilters) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *OrderFilters) GetState() string { - if m != nil { - return m.State - } - return "" -} - -func init() { - proto.RegisterEnum("akash.market.v1beta2.Order_State", Order_State_name, Order_State_value) - proto.RegisterType((*OrderID)(nil), "akash.market.v1beta2.OrderID") - proto.RegisterType((*Order)(nil), "akash.market.v1beta2.Order") - proto.RegisterType((*OrderFilters)(nil), "akash.market.v1beta2.OrderFilters") -} - -func init() { proto.RegisterFile("akash/market/v1beta2/order.proto", fileDescriptor_31c7b4cb1ace8a4b) } - -var fileDescriptor_31c7b4cb1ace8a4b = []byte{ - // 582 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0x31, 0x6f, 0xd3, 0x40, - 0x14, 0xb6, 0x13, 0xa7, 0x6d, 0x2e, 0x2d, 0x04, 0xab, 0x88, 0xe2, 0xaa, 0x3e, 0x63, 0x96, 0x2c, - 0xd8, 0x22, 0x9d, 0xe8, 0xd6, 0x50, 0x51, 0x65, 0x8a, 0xe4, 0x30, 0x21, 0xa4, 0xca, 0xf1, 0x9d, - 0x5c, 0x2b, 0x89, 0xcf, 0xb5, 0xaf, 0xa9, 0xba, 0x33, 0xa0, 0x4c, 0x2c, 0x48, 0x2c, 0x91, 0x2a, - 0xf1, 0x43, 0x58, 0x3b, 0x76, 0x64, 0xb2, 0x50, 0xb2, 0xa0, 0x8c, 0xf9, 0x05, 0xe8, 0xde, 0x19, - 
0xdc, 0x22, 0xd4, 0x1f, 0xc0, 0x64, 0xbf, 0xef, 0x7d, 0xdf, 0xbb, 0x7b, 0xdf, 0x7b, 0x3a, 0x64, - 0xf9, 0x43, 0x3f, 0x3b, 0x75, 0xc7, 0x7e, 0x3a, 0xa4, 0xdc, 0x9d, 0xbc, 0x1c, 0x50, 0xee, 0xb7, - 0x5d, 0x96, 0x12, 0x9a, 0x3a, 0x49, 0xca, 0x38, 0xd3, 0xb7, 0x81, 0xe1, 0x48, 0x86, 0x53, 0x30, - 0x8c, 0xed, 0x90, 0x85, 0x0c, 0x08, 0xae, 0xf8, 0x93, 0x5c, 0xa3, 0x25, 0xab, 0x11, 0x9a, 0x8c, - 0xd8, 0xe5, 0x98, 0xc6, 0x65, 0xc5, 0x30, 0x65, 0xe7, 0x49, 0x96, 0xd0, 0x40, 0x32, 0xed, 0x85, - 0x8a, 0xd6, 0x7b, 0xe2, 0x94, 0xee, 0x91, 0xee, 0xa2, 0x1a, 0xbb, 0x88, 0x69, 0xba, 0xa3, 0x5a, - 0x6a, 0xab, 0xde, 0x79, 0xba, 0xcc, 0xb1, 0x04, 0x56, 0x39, 0xde, 0xbc, 0xf4, 0xc7, 0xa3, 0x03, - 0x1b, 0x42, 0xdb, 0x93, 0xb0, 0xbe, 0x8f, 0x34, 0x92, 0xd1, 0xb3, 0x9d, 0x8a, 0xa5, 0xb6, 0xb4, - 0x0e, 0x9e, 0xe7, 0x58, 0x3b, 0xea, 0xd3, 0xb3, 0x65, 0x8e, 0x01, 0x5f, 0xe5, 0xb8, 0x21, 0x65, - 0x22, 0xb2, 0x3d, 0x00, 0x85, 0x28, 0x14, 0xa2, 0xaa, 0xa5, 0xb6, 0xb6, 0xa4, 0xe8, 0xb8, 0x10, - 0x85, 0x77, 0x44, 0xa1, 0x14, 0x85, 0x85, 0x88, 0x09, 0x91, 0x56, 0x8a, 0x7a, 0x85, 0x88, 0xdd, - 0x11, 0x31, 0x29, 0x12, 0x9f, 0x83, 0x8d, 0x2f, 0x57, 0x58, 0xf9, 0x79, 0x85, 0x15, 0xfb, 0x5b, - 0x15, 0xd5, 0xa0, 0x4b, 0xfd, 0x3d, 0xda, 0x00, 0x53, 0x4f, 0x22, 0x02, 0x6d, 0x36, 0xda, 0x7b, - 0xce, 0xbf, 0x8c, 0x75, 0x0a, 0x53, 0x3a, 0xf6, 0x75, 0x8e, 0x95, 0x79, 0x8e, 0x7f, 0xbb, 0xb4, - 0xcc, 0x71, 0x25, 0x22, 0xab, 0x1c, 0xd7, 0xe5, 0x81, 0x11, 0xb1, 0xbd, 0x75, 0x28, 0xd9, 0x25, - 0xba, 0x87, 0x6a, 0x19, 0xf7, 0x39, 0x05, 0x47, 0x1e, 0xb4, 0x9f, 0xdd, 0x53, 0xda, 0xe9, 0x0b, - 0xa2, 0x34, 0x19, 0x34, 0xa5, 0xc9, 0x10, 0xda, 0x9e, 0x84, 0xf5, 0xb7, 0x48, 0x13, 0xf3, 0x02, - 0xbf, 0x1a, 0xed, 0xe7, 0x45, 0xc9, 0x72, 0xb4, 0x7f, 0xca, 0x1e, 0x8b, 0xd1, 0xf6, 0x13, 0x1a, - 0x74, 0x76, 0xc5, 0x9d, 0x85, 0x37, 0x42, 0x58, 0x7a, 0x23, 0x22, 0xdb, 0x03, 0x50, 0xdf, 0x43, - 0x28, 0x48, 0xa9, 0xcf, 0x29, 0x39, 0xf1, 0x39, 0xd8, 0x5a, 0xf5, 0xea, 0x05, 0x72, 0xc8, 0xed, - 0x0f, 0x2a, 0xaa, 0xc1, 0x05, 0x75, 0x1b, 0xad, 0x47, 0xf1, 0xc4, 0x1f, 0x45, 0xa4, 0xa9, 0x18, - 0x8f, 0xa7, 0x33, 0xeb, 0x11, 0x5c, 0x1f, 0x92, 0x5d, 0x99, 0xd0, 0x9f, 0x20, 0x8d, 0x25, 0x34, - 0x6e, 0xaa, 0xc6, 0xd6, 0x74, 0x66, 0xd5, 0x81, 0xd0, 0x4b, 0x68, 0xac, 0xef, 0xa2, 0x35, 0x3f, - 0xe0, 0xd1, 0x84, 0x36, 0x2b, 0xc6, 0xc3, 0xe9, 0xcc, 0x6a, 0x40, 0xea, 0x10, 0x20, 0x91, 0x0c, - 0x46, 0x2c, 0xa3, 0xa4, 0x59, 0xbd, 0x95, 0x7c, 0x0d, 0x90, 0xa1, 0x7d, 0xfc, 0x6a, 0x2a, 0xb7, - 0x26, 0xf8, 0xb9, 0x82, 0x36, 0x21, 0xff, 0x26, 0x1a, 0x71, 0x9a, 0x66, 0xff, 0xdb, 0xb2, 0x8a, - 0x7e, 0xe4, 0xea, 0xd4, 0xca, 0x7e, 0xee, 0xdb, 0x8b, 0x03, 0x4d, 0xf8, 0xd2, 0xe9, 0x5f, 0xcf, - 0x4d, 0xf5, 0x66, 0x6e, 0xaa, 0x3f, 0xe6, 0xa6, 0xfa, 0x69, 0x61, 0x2a, 0x37, 0x0b, 0x53, 0xf9, - 0xbe, 0x30, 0x95, 0x77, 0xaf, 0xc2, 0x88, 0x9f, 0x9e, 0x0f, 0x9c, 0x80, 0x8d, 0x5d, 0xd8, 0x99, - 0x17, 0x31, 0xe5, 0x17, 0x2c, 0x1d, 0x16, 0x91, 0x9f, 0x44, 0x6e, 0xc8, 0xdc, 0x98, 0x11, 0xfa, - 0xd7, 0xb3, 0x33, 0x58, 0x83, 0xb7, 0x61, 0xff, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb3, 0xb1, - 0x3c, 0x4c, 0x95, 0x04, 0x00, 0x00, -} - -func (m *OrderID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OrderID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OrderID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.OSeq != 0 
{ - i = encodeVarintOrder(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintOrder(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Order) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Order) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Order) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CreatedAt != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOrder(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.State != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.OrderID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOrder(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *OrderFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OrderFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OrderFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintOrder(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintOrder(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintOrder(dAtA []byte, offset int, v uint64) int { - offset -= sovOrder(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *OrderID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovOrder(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovOrder(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovOrder(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovOrder(uint64(m.OSeq)) - } - return n -} - -func (m *Order) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.OrderID.Size() - n += 1 + l + sovOrder(uint64(l)) - if m.State != 0 { - n += 1 + sovOrder(uint64(m.State)) - } - l = m.Spec.Size() - n += 1 + l + sovOrder(uint64(l)) - if m.CreatedAt != 0 { - n += 1 + sovOrder(uint64(m.CreatedAt)) - } - return n -} - -func (m *OrderFilters) Size() 
(n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovOrder(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovOrder(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovOrder(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovOrder(uint64(m.OSeq)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovOrder(uint64(l)) - } - return n -} - -func sovOrder(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozOrder(x uint64) (n int) { - return sovOrder(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *OrderID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OrderID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OrderID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOrder(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOrder - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Order) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Order: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Order: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OrderID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.OrderID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Order_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOrder(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOrder - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OrderFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OrderFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OrderFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOrder(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOrder - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipOrder(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOrder - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOrder - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOrder - } - if iNdEx 
>= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthOrder - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupOrder - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthOrder - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthOrder = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowOrder = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupOrder = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta2/params.go b/go/node/market/v1beta2/params.go deleted file mode 100644 index 2b773491..00000000 --- a/go/node/market/v1beta2/params.go +++ /dev/null @@ -1,76 +0,0 @@ -package v1beta2 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" - "github.com/pkg/errors" -) - -var _ paramtypes.ParamSet = (*Params)(nil) - -var ( - DefaultBidMinDeposit = sdk.NewCoin("uakt", sdk.NewInt(5000000)) - defaultOrderMaxBids uint32 = 20 - maxOrderMaxBids uint32 = 500 -) - -const ( - keyBidMinDeposit = "BidMinDeposit" - keyOrderMaxBids = "OrderMaxBids" -) - -func ParamKeyTable() paramtypes.KeyTable { - return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) -} - -func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { - return paramtypes.ParamSetPairs{ - paramtypes.NewParamSetPair([]byte(keyBidMinDeposit), &p.BidMinDeposit, validateCoin), - paramtypes.NewParamSetPair([]byte(keyOrderMaxBids), &p.OrderMaxBids, validateOrderMaxBids), - } -} - -func DefaultParams() Params { - return Params{ - BidMinDeposit: DefaultBidMinDeposit, - OrderMaxBids: defaultOrderMaxBids, - } -} - -func (p Params) Validate() error { - if err := validateCoin(p.BidMinDeposit); err != nil { - return err - } - - if err := validateOrderMaxBids(p.OrderMaxBids); err != nil { - return err - } - return nil -} - -func validateCoin(i interface{}) error { - _, ok := i.(sdk.Coin) - if !ok { - return errors.Wrapf(ErrInvalidParam, "invalid type %T", i) - } - - return nil -} - -func validateOrderMaxBids(i interface{}) error { - val, ok := i.(uint32) - - if !ok { - return errors.Wrapf(ErrInvalidParam, "invalid type %T", i) - } - - if val == 0 { - return errors.Wrap(ErrInvalidParam, "order max bids too low") - } - - if val > maxOrderMaxBids { - return errors.Wrap(ErrInvalidParam, "order max bids too high") - } - - return nil -} diff --git a/go/node/market/v1beta2/params.pb.go b/go/node/market/v1beta2/params.pb.go deleted file mode 100644 index c92a62c2..00000000 --- a/go/node/market/v1beta2/params.pb.go +++ /dev/null @@ -1,365 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta2/params.proto - -package v1beta2 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Params is the params for the x/market module -type Params struct { - BidMinDeposit types.Coin `protobuf:"bytes,1,opt,name=bid_min_deposit,json=bidMinDeposit,proto3" json:"bid_min_deposit" yaml:"bid_min_deposit"` - OrderMaxBids uint32 `protobuf:"varint,2,opt,name=order_max_bids,json=orderMaxBids,proto3" json:"order_max_bids" yaml:"order_max_bids"` -} - -func (m *Params) Reset() { *m = Params{} } -func (m *Params) String() string { return proto.CompactTextString(m) } -func (*Params) ProtoMessage() {} -func (*Params) Descriptor() ([]byte, []int) { - return fileDescriptor_ea1237af8227f99c, []int{0} -} -func (m *Params) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Params.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Params) XXX_Merge(src proto.Message) { - xxx_messageInfo_Params.Merge(m, src) -} -func (m *Params) XXX_Size() int { - return m.Size() -} -func (m *Params) XXX_DiscardUnknown() { - xxx_messageInfo_Params.DiscardUnknown(m) -} - -var xxx_messageInfo_Params proto.InternalMessageInfo - -func (m *Params) GetBidMinDeposit() types.Coin { - if m != nil { - return m.BidMinDeposit - } - return types.Coin{} -} - -func (m *Params) GetOrderMaxBids() uint32 { - if m != nil { - return m.OrderMaxBids - } - return 0 -} - -func init() { - proto.RegisterType((*Params)(nil), "akash.market.v1beta2.Params") -} - -func init() { proto.RegisterFile("akash/market/v1beta2/params.proto", fileDescriptor_ea1237af8227f99c) } - -var fileDescriptor_ea1237af8227f99c = []byte{ - // 321 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0x31, 0x4f, 0xe3, 0x30, - 0x18, 0x86, 0xe3, 0x1b, 0x3a, 0xe4, 0xda, 0x3b, 0xa9, 0xea, 0x9d, 0x4a, 0x07, 0xbb, 0x64, 0xea, - 0x82, 0xad, 0x96, 0x09, 0xd8, 0x02, 0x6b, 0x05, 0x2a, 0x1b, 0x4b, 0x64, 0xd7, 0x56, 0x6a, 0x15, - 0xe7, 0x8b, 0xe2, 0x00, 0xe5, 0x07, 0xb0, 0xf3, 0xb3, 0x3a, 0x76, 0x64, 0xb2, 0x50, 0xba, 0x75, - 0x2c, 0x7f, 0x00, 0x35, 0xc9, 0x50, 0xb2, 0xd9, 0x7e, 0x9f, 0xf7, 0xf9, 0xa4, 0xcf, 0xfe, 0x29, - 0x5f, 0x72, 0xbb, 0x60, 0x86, 0x67, 0x4b, 0x95, 0xb3, 0xe7, 0xb1, 0x50, 0x39, 0x9f, 0xb0, 0x94, - 0x67, 0xdc, 0x58, 0x9a, 0x66, 0x90, 0x43, 0xb7, 0x57, 0x22, 0xb4, 0x42, 0x68, 0x8d, 0x0c, 0x7a, - 0x31, 0xc4, 0x50, 0x02, 0xec, 0x70, 0xaa, 0xd8, 0x01, 0x9e, 0x83, 0x35, 0x60, 0x99, 0xe0, 0x56, - 0xd5, 0xb6, 0x31, 0x9b, 0x83, 0x4e, 0xaa, 0x3c, 0xf8, 0x42, 0x7e, 0xeb, 0xae, 0x94, 0x77, 0xdf, - 0x90, 0xff, 0x57, 0x68, 0x19, 0x19, 0x9d, 0x44, 0x52, 0xa5, 0x60, 0x75, 0xde, 0x47, 0x43, 0x34, - 0xfa, 0x3d, 0x39, 0xa1, 0x95, 0x85, 0x1e, 0x2c, 0xf5, 0xc0, 0x31, 0xbd, 0x06, 0x9d, 0x84, 0xe1, - 0xda, 0x11, 0xaf, 0x70, 0xa4, 0x13, 0x6a, 0x39, 0xd5, 0xc9, 0x4d, 0xd5, 0xdb, 0x39, 0xd2, 0x54, - 0xed, 0x1d, 0xf9, 0xff, 0xca, 0xcd, 0xe3, 0x65, 0xd0, 0x08, 0x82, 0x59, 0x47, 0x1c, 0x77, 0xbb, - 0xdc, 0xff, 0x03, 0x99, 0x54, 0x59, 0x64, 0xf8, 0x2a, 0x12, 0x5a, 0xda, 0xfe, 0xaf, 0x21, 0x1a, - 0x75, 0xc2, 0xab, 0xc2, 0x91, 0xf6, 0xed, 0x21, 0x99, 0xf2, 0x55, 0xa8, 0xa5, 0xdd, 0x39, 0xd2, - 0x20, 0xf7, 0x8e, 0xfc, 0xab, 0x86, 0xfc, 0x7c, 0x0f, 0x66, 0x6d, 0x38, 0x2a, 0x86, 0xf7, 0xeb, - 0x02, 
0xa3, 0x4d, 0x81, 0xd1, 0x67, 0x81, 0xd1, 0xfb, 0x16, 0x7b, 0x9b, 0x2d, 0xf6, 0x3e, 0xb6, - 0xd8, 0x7b, 0xb8, 0x88, 0x75, 0xbe, 0x78, 0x12, 0x74, 0x0e, 0x86, 0x95, 0x6b, 0x3e, 0x4b, 0x54, - 0xfe, 0x02, 0xd9, 0xb2, 0xbe, 0xf1, 0x54, 0xb3, 0x18, 0x58, 0x02, 0x52, 0x35, 0xfe, 0x48, 0xb4, - 0xca, 0x8d, 0x9e, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0xbf, 0x2f, 0x47, 0x44, 0xc2, 0x01, 0x00, - 0x00, -} - -func (m *Params) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Params) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.OrderMaxBids != 0 { - i = encodeVarintParams(dAtA, i, uint64(m.OrderMaxBids)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.BidMinDeposit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintParams(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintParams(dAtA []byte, offset int, v uint64) int { - offset -= sovParams(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Params) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BidMinDeposit.Size() - n += 1 + l + sovParams(uint64(l)) - if m.OrderMaxBids != 0 { - n += 1 + sovParams(uint64(m.OrderMaxBids)) - } - return n -} - -func sovParams(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozParams(x uint64) (n int) { - return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Params) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Params: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BidMinDeposit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthParams - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthParams - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BidMinDeposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OrderMaxBids", wireType) - } - m.OrderMaxBids = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OrderMaxBids |= uint32(b&0x7F) << shift - if b < 0x80 { 
- break - } - } - default: - iNdEx = preIndex - skippy, err := skipParams(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipParams(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthParams - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupParams - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthParams - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta2/query.pb.go b/go/node/market/v1beta2/query.pb.go deleted file mode 100644 index 8e232167..00000000 --- a/go/node/market/v1beta2/query.pb.go +++ /dev/null @@ -1,3034 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta2/query.proto - -package v1beta2 - -import ( - context "context" - fmt "fmt" - v1beta2 "github.com/akash-network/akash-api/go/node/escrow/v1beta2" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
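Note: each deleted *.pb.go file carries its own copy of the varint helpers (sovOrder/sozOrder above, sovParams/sozParams here, encodeVarintQuery/sovQuery further down). A standalone sketch of the arithmetic they implement — seven payload bits per byte with the high bit as a continuation flag, plus zigzag mapping for signed values; the helper names below are illustrative.

package main

import (
    "fmt"
    "math/bits"
)

// sov reports how many bytes the varint encoding of x occupies:
// one byte per started group of 7 significant bits (x|1 keeps zero at 1 byte).
func sov(x uint64) int {
    return (bits.Len64(x|1) + 6) / 7
}

// soz is the size of the zigzag-encoded value, which maps small-magnitude
// signed numbers to small unsigned ones: 0, -1, 1, -2, ... -> 0, 1, 2, 3, ...
func soz(x int64) int {
    return sov(uint64((x << 1) ^ (x >> 63)))
}

// appendVarint writes x least-significant group first, setting the high bit
// on every byte except the last; the generated code emits the same bytes,
// only starting at a precomputed offset inside an exactly sized buffer.
func appendVarint(dst []byte, x uint64) []byte {
    for x >= 1<<7 {
        dst = append(dst, byte(x&0x7f|0x80))
        x >>= 7
    }
    return append(dst, byte(x))
}

func main() {
    fmt.Println(sov(300), appendVarint(nil, 300)) // 2 [172 2]
    fmt.Println(soz(-1))                          // 1
}

The generated marshalers precompute the total message size with these helpers and then fill a buffer of exactly that length back to front.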
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryOrdersRequest is request type for the Query/Orders RPC method -type QueryOrdersRequest struct { - Filters OrderFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryOrdersRequest) Reset() { *m = QueryOrdersRequest{} } -func (m *QueryOrdersRequest) String() string { return proto.CompactTextString(m) } -func (*QueryOrdersRequest) ProtoMessage() {} -func (*QueryOrdersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_673a6df4a4bc85e3, []int{0} -} -func (m *QueryOrdersRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOrdersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOrdersRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOrdersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOrdersRequest.Merge(m, src) -} -func (m *QueryOrdersRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryOrdersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOrdersRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOrdersRequest proto.InternalMessageInfo - -func (m *QueryOrdersRequest) GetFilters() OrderFilters { - if m != nil { - return m.Filters - } - return OrderFilters{} -} - -func (m *QueryOrdersRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryOrdersResponse is response type for the Query/Orders RPC method -type QueryOrdersResponse struct { - Orders Orders `protobuf:"bytes,1,rep,name=orders,proto3,castrepeated=Orders" json:"orders"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryOrdersResponse) Reset() { *m = QueryOrdersResponse{} } -func (m *QueryOrdersResponse) String() string { return proto.CompactTextString(m) } -func (*QueryOrdersResponse) ProtoMessage() {} -func (*QueryOrdersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_673a6df4a4bc85e3, []int{1} -} -func (m *QueryOrdersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOrdersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOrdersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOrdersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOrdersResponse.Merge(m, src) -} -func (m *QueryOrdersResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryOrdersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOrdersResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOrdersResponse proto.InternalMessageInfo - -func (m *QueryOrdersResponse) GetOrders() Orders { - if m != nil { - return m.Orders - } - return nil -} - -func (m *QueryOrdersResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryOrderRequest is request type for the Query/Order RPC method -type QueryOrderRequest struct { - ID OrderID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m 
*QueryOrderRequest) Reset() { *m = QueryOrderRequest{} } -func (m *QueryOrderRequest) String() string { return proto.CompactTextString(m) } -func (*QueryOrderRequest) ProtoMessage() {} -func (*QueryOrderRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_673a6df4a4bc85e3, []int{2} -} -func (m *QueryOrderRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOrderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOrderRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOrderRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOrderRequest.Merge(m, src) -} -func (m *QueryOrderRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryOrderRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOrderRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOrderRequest proto.InternalMessageInfo - -func (m *QueryOrderRequest) GetID() OrderID { - if m != nil { - return m.ID - } - return OrderID{} -} - -// QueryOrderResponse is response type for the Query/Order RPC method -type QueryOrderResponse struct { - Order Order `protobuf:"bytes,1,opt,name=order,proto3" json:"order"` -} - -func (m *QueryOrderResponse) Reset() { *m = QueryOrderResponse{} } -func (m *QueryOrderResponse) String() string { return proto.CompactTextString(m) } -func (*QueryOrderResponse) ProtoMessage() {} -func (*QueryOrderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_673a6df4a4bc85e3, []int{3} -} -func (m *QueryOrderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOrderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOrderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOrderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOrderResponse.Merge(m, src) -} -func (m *QueryOrderResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryOrderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOrderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOrderResponse proto.InternalMessageInfo - -func (m *QueryOrderResponse) GetOrder() Order { - if m != nil { - return m.Order - } - return Order{} -} - -// QueryBidsRequest is request type for the Query/Bids RPC method -type QueryBidsRequest struct { - Filters BidFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryBidsRequest) Reset() { *m = QueryBidsRequest{} } -func (m *QueryBidsRequest) String() string { return proto.CompactTextString(m) } -func (*QueryBidsRequest) ProtoMessage() {} -func (*QueryBidsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_673a6df4a4bc85e3, []int{4} -} -func (m *QueryBidsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryBidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryBidsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryBidsRequest) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBidsRequest.Merge(m, src) -} -func (m *QueryBidsRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryBidsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBidsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryBidsRequest proto.InternalMessageInfo - -func (m *QueryBidsRequest) GetFilters() BidFilters { - if m != nil { - return m.Filters - } - return BidFilters{} -} - -func (m *QueryBidsRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryBidsResponse is response type for the Query/Bids RPC method -type QueryBidsResponse struct { - Bids []QueryBidResponse `protobuf:"bytes,1,rep,name=bids,proto3" json:"bids"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryBidsResponse) Reset() { *m = QueryBidsResponse{} } -func (m *QueryBidsResponse) String() string { return proto.CompactTextString(m) } -func (*QueryBidsResponse) ProtoMessage() {} -func (*QueryBidsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_673a6df4a4bc85e3, []int{5} -} -func (m *QueryBidsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryBidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryBidsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryBidsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBidsResponse.Merge(m, src) -} -func (m *QueryBidsResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryBidsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBidsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryBidsResponse proto.InternalMessageInfo - -func (m *QueryBidsResponse) GetBids() []QueryBidResponse { - if m != nil { - return m.Bids - } - return nil -} - -func (m *QueryBidsResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryBidRequest is request type for the Query/Bid RPC method -type QueryBidRequest struct { - ID BidID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m *QueryBidRequest) Reset() { *m = QueryBidRequest{} } -func (m *QueryBidRequest) String() string { return proto.CompactTextString(m) } -func (*QueryBidRequest) ProtoMessage() {} -func (*QueryBidRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_673a6df4a4bc85e3, []int{6} -} -func (m *QueryBidRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryBidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryBidRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryBidRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBidRequest.Merge(m, src) -} -func (m *QueryBidRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryBidRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBidRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryBidRequest proto.InternalMessageInfo - -func (m *QueryBidRequest) GetID() BidID { - if m != nil { - return m.ID - } - return BidID{} -} - -// QueryBidResponse is response type for the Query/Bid RPC method -type 
QueryBidResponse struct { - Bid Bid `protobuf:"bytes,1,opt,name=bid,proto3" json:"bid"` - EscrowAccount v1beta2.Account `protobuf:"bytes,2,opt,name=escrow_account,json=escrowAccount,proto3" json:"escrow_account"` -} - -func (m *QueryBidResponse) Reset() { *m = QueryBidResponse{} } -func (m *QueryBidResponse) String() string { return proto.CompactTextString(m) } -func (*QueryBidResponse) ProtoMessage() {} -func (*QueryBidResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_673a6df4a4bc85e3, []int{7} -} -func (m *QueryBidResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryBidResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryBidResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBidResponse.Merge(m, src) -} -func (m *QueryBidResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryBidResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBidResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryBidResponse proto.InternalMessageInfo - -func (m *QueryBidResponse) GetBid() Bid { - if m != nil { - return m.Bid - } - return Bid{} -} - -func (m *QueryBidResponse) GetEscrowAccount() v1beta2.Account { - if m != nil { - return m.EscrowAccount - } - return v1beta2.Account{} -} - -// QueryLeasesRequest is request type for the Query/Leases RPC method -type QueryLeasesRequest struct { - Filters LeaseFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryLeasesRequest) Reset() { *m = QueryLeasesRequest{} } -func (m *QueryLeasesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryLeasesRequest) ProtoMessage() {} -func (*QueryLeasesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_673a6df4a4bc85e3, []int{8} -} -func (m *QueryLeasesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLeasesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLeasesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLeasesRequest.Merge(m, src) -} -func (m *QueryLeasesRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryLeasesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLeasesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLeasesRequest proto.InternalMessageInfo - -func (m *QueryLeasesRequest) GetFilters() LeaseFilters { - if m != nil { - return m.Filters - } - return LeaseFilters{} -} - -func (m *QueryLeasesRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryLeasesResponse is response type for the Query/Leases RPC method -type QueryLeasesResponse struct { - Leases []QueryLeaseResponse `protobuf:"bytes,1,rep,name=leases,proto3" json:"leases"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryLeasesResponse) Reset() { *m = QueryLeasesResponse{} } -func (m 
*QueryLeasesResponse) String() string { return proto.CompactTextString(m) } -func (*QueryLeasesResponse) ProtoMessage() {} -func (*QueryLeasesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_673a6df4a4bc85e3, []int{9} -} -func (m *QueryLeasesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLeasesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLeasesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLeasesResponse.Merge(m, src) -} -func (m *QueryLeasesResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryLeasesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLeasesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLeasesResponse proto.InternalMessageInfo - -func (m *QueryLeasesResponse) GetLeases() []QueryLeaseResponse { - if m != nil { - return m.Leases - } - return nil -} - -func (m *QueryLeasesResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryLeaseRequest is request type for the Query/Lease RPC method -type QueryLeaseRequest struct { - ID LeaseID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m *QueryLeaseRequest) Reset() { *m = QueryLeaseRequest{} } -func (m *QueryLeaseRequest) String() string { return proto.CompactTextString(m) } -func (*QueryLeaseRequest) ProtoMessage() {} -func (*QueryLeaseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_673a6df4a4bc85e3, []int{10} -} -func (m *QueryLeaseRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLeaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLeaseRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLeaseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLeaseRequest.Merge(m, src) -} -func (m *QueryLeaseRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryLeaseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLeaseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLeaseRequest proto.InternalMessageInfo - -func (m *QueryLeaseRequest) GetID() LeaseID { - if m != nil { - return m.ID - } - return LeaseID{} -} - -// QueryLeaseResponse is response type for the Query/Lease RPC method -type QueryLeaseResponse struct { - Lease Lease `protobuf:"bytes,1,opt,name=lease,proto3" json:"lease"` - EscrowPayment v1beta2.FractionalPayment `protobuf:"bytes,2,opt,name=escrow_payment,json=escrowPayment,proto3" json:"escrow_payment"` -} - -func (m *QueryLeaseResponse) Reset() { *m = QueryLeaseResponse{} } -func (m *QueryLeaseResponse) String() string { return proto.CompactTextString(m) } -func (*QueryLeaseResponse) ProtoMessage() {} -func (*QueryLeaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_673a6df4a4bc85e3, []int{11} -} -func (m *QueryLeaseResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLeaseResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLeaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLeaseResponse.Merge(m, src) -} -func (m *QueryLeaseResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryLeaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLeaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLeaseResponse proto.InternalMessageInfo - -func (m *QueryLeaseResponse) GetLease() Lease { - if m != nil { - return m.Lease - } - return Lease{} -} - -func (m *QueryLeaseResponse) GetEscrowPayment() v1beta2.FractionalPayment { - if m != nil { - return m.EscrowPayment - } - return v1beta2.FractionalPayment{} -} - -func init() { - proto.RegisterType((*QueryOrdersRequest)(nil), "akash.market.v1beta2.QueryOrdersRequest") - proto.RegisterType((*QueryOrdersResponse)(nil), "akash.market.v1beta2.QueryOrdersResponse") - proto.RegisterType((*QueryOrderRequest)(nil), "akash.market.v1beta2.QueryOrderRequest") - proto.RegisterType((*QueryOrderResponse)(nil), "akash.market.v1beta2.QueryOrderResponse") - proto.RegisterType((*QueryBidsRequest)(nil), "akash.market.v1beta2.QueryBidsRequest") - proto.RegisterType((*QueryBidsResponse)(nil), "akash.market.v1beta2.QueryBidsResponse") - proto.RegisterType((*QueryBidRequest)(nil), "akash.market.v1beta2.QueryBidRequest") - proto.RegisterType((*QueryBidResponse)(nil), "akash.market.v1beta2.QueryBidResponse") - proto.RegisterType((*QueryLeasesRequest)(nil), "akash.market.v1beta2.QueryLeasesRequest") - proto.RegisterType((*QueryLeasesResponse)(nil), "akash.market.v1beta2.QueryLeasesResponse") - proto.RegisterType((*QueryLeaseRequest)(nil), "akash.market.v1beta2.QueryLeaseRequest") - proto.RegisterType((*QueryLeaseResponse)(nil), "akash.market.v1beta2.QueryLeaseResponse") -} - -func init() { proto.RegisterFile("akash/market/v1beta2/query.proto", fileDescriptor_673a6df4a4bc85e3) } - -var fileDescriptor_673a6df4a4bc85e3 = []byte{ - // 796 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0x4f, 0x6f, 0xd3, 0x4e, - 0x10, 0x8d, 0xd3, 0x36, 0x3f, 0x69, 0xab, 0x5f, 0x81, 0xa5, 0x87, 0x92, 0x42, 0xd2, 0x06, 0xd1, - 0xa4, 0x48, 0xd8, 0x6a, 0x38, 0x54, 0xbd, 0x15, 0x53, 0x05, 0xb5, 0xe2, 0x4f, 0x09, 0x9c, 0xb8, - 0xa0, 0x75, 0xbc, 0x75, 0x57, 0x4d, 0xbc, 0xa9, 0xd7, 0xa1, 0xea, 0x01, 0x09, 0x21, 0x21, 0x71, - 0x04, 0x71, 0x45, 0x08, 0xa9, 0x12, 0x07, 0x2e, 0x7c, 0x8d, 0x1e, 0x2b, 0x71, 0xe1, 0x54, 0x50, - 0xca, 0x07, 0x41, 0xfb, 0xc7, 0x4e, 0x5c, 0x39, 0xb6, 0x2b, 0xb5, 0xb7, 0xc4, 0x7e, 0x33, 0xf3, - 0x66, 0xe6, 0xed, 0xf3, 0x82, 0x39, 0xb4, 0x83, 0xd8, 0xb6, 0xd1, 0x41, 0xde, 0x0e, 0xf6, 0x8d, - 0x57, 0x4b, 0x16, 0xf6, 0x51, 0xdd, 0xd8, 0xed, 0x61, 0x6f, 0x5f, 0xef, 0x7a, 0xd4, 0xa7, 0x70, - 0x5a, 0x20, 0x74, 0x89, 0xd0, 0x15, 0xa2, 0x38, 0xed, 0x50, 0x87, 0x0a, 0x80, 0xc1, 0x7f, 0x49, - 0x6c, 0xf1, 0xba, 0x43, 0xa9, 0xd3, 0xc6, 0x06, 0xea, 0x12, 0x03, 0xb9, 0x2e, 0xf5, 0x91, 0x4f, - 0xa8, 0xcb, 0xd4, 0xdb, 0xdb, 0x2d, 0xca, 0x3a, 0x94, 0x19, 0x16, 0x62, 0x58, 0x96, 0x50, 0x05, - 0x97, 0x8c, 0x2e, 0x72, 0x88, 0x2b, 0xc0, 0x0a, 0x1b, 0xcf, 0x8b, 0x7a, 0x36, 0xf6, 0x14, 0xa2, - 0x14, 0x8b, 0xb0, 0x88, 0x9d, 0x98, 0xa1, 0x8d, 0x11, 0xc3, 0x51, 0x04, 0x66, 0x2d, 0x8f, 0xee, - 0x85, 0x08, 0x7f, 0xbf, 0x8b, 0x15, 0xe3, 0xca, 0x57, 0x0d, 0xc0, 0xa7, 0x9c, 0xe8, 0x13, 0x5e, - 0x98, 0x35, 0xf1, 0x6e, 0x0f, 0x33, 0x1f, 0x9a, 0xe0, 0xbf, 0x2d, 0xd2, 0xf6, 0xb1, 0xc7, 0x66, - 0xb4, 0x39, 0xad, 0x36, 0x59, 0xaf, 0xe8, 
0x71, 0x43, 0xd2, 0x45, 0x54, 0x43, 0x22, 0xcd, 0xf1, - 0xc3, 0xe3, 0x72, 0xae, 0x19, 0x04, 0xc2, 0x06, 0x00, 0x83, 0xa6, 0x67, 0xf2, 0x22, 0xcd, 0x82, - 0x2e, 0x27, 0xa4, 0xf3, 0x09, 0xe9, 0x72, 0x09, 0x6a, 0x42, 0xfa, 0x26, 0x72, 0xb0, 0xaa, 0xdf, - 0x1c, 0x8a, 0xac, 0x1c, 0x68, 0xe0, 0x6a, 0x84, 0x22, 0xeb, 0x52, 0x97, 0x61, 0x78, 0x1f, 0x14, - 0xc4, 0xb4, 0x38, 0xc5, 0xb1, 0xda, 0x64, 0x7d, 0x36, 0x81, 0xa2, 0x39, 0xc5, 0xb9, 0x7d, 0xff, - 0x5d, 0x2e, 0xa8, 0x24, 0x2a, 0x14, 0x3e, 0x88, 0x21, 0x59, 0x4d, 0x25, 0x29, 0x19, 0x44, 0x58, - 0x3e, 0x06, 0x57, 0x06, 0x24, 0x83, 0x31, 0xae, 0x80, 0x3c, 0xb1, 0xd5, 0x04, 0x6f, 0x24, 0xd0, - 0x5b, 0x5f, 0x33, 0x01, 0x27, 0xd8, 0x3f, 0x2e, 0xe7, 0xd7, 0xd7, 0x9a, 0x79, 0x62, 0x57, 0x1e, - 0x0d, 0xef, 0x25, 0xec, 0x79, 0x19, 0x4c, 0x08, 0xe2, 0x2a, 0x67, 0x62, 0xcb, 0x72, 0x1d, 0x12, - 0x5f, 0xf9, 0xac, 0x81, 0xcb, 0x22, 0x9f, 0x49, 0xec, 0x70, 0xcb, 0xab, 0xa7, 0xb7, 0x3c, 0x17, - 0x9f, 0xcf, 0x24, 0xf6, 0x05, 0xef, 0xf8, 0x8b, 0xa6, 0xc6, 0x27, 0xe9, 0xa9, 0x6e, 0x57, 0xc1, - 0xb8, 0x45, 0xec, 0x60, 0xbf, 0x0b, 0xf1, 0xe4, 0x82, 0xb0, 0x20, 0x4a, 0x51, 0x14, 0x91, 0xe7, - 0xb7, 0xde, 0x0d, 0x70, 0x69, 0x50, 0x48, 0x4e, 0x6f, 0x79, 0x68, 0xb9, 0xb3, 0x23, 0x07, 0x17, - 0xb3, 0xda, 0x8f, 0x43, 0xbb, 0x08, 0x7b, 0x5d, 0x02, 0x63, 0x56, 0x98, 0xee, 0xda, 0xc8, 0x74, - 0xaa, 0x3b, 0x8e, 0x85, 0x1b, 0x60, 0x4a, 0x9e, 0xec, 0x97, 0xa8, 0xd5, 0xa2, 0x3d, 0xd7, 0x57, - 0x0d, 0x06, 0x4a, 0x93, 0x2f, 0xc3, 0xe8, 0x7b, 0x12, 0xa4, 0x32, 0xfc, 0x2f, 0xdf, 0xaa, 0x87, - 0x03, 0x1f, 0x78, 0xc8, 0xed, 0xe3, 0xcc, 0x3e, 0x20, 0xa2, 0x2e, 0x58, 0x23, 0xdf, 0x02, 0x1f, - 0x08, 0x28, 0xaa, 0xc9, 0x35, 0x40, 0x41, 0x78, 0x5e, 0xa0, 0x93, 0x5a, 0x82, 0x4e, 0x44, 0xe8, - 0x29, 0xa5, 0xa8, 0xe8, 0xf3, 0xb7, 0x02, 0x55, 0x2c, 0xb3, 0x15, 0x08, 0x7c, 0x8c, 0x5e, 0x0e, - 0x22, 0xbb, 0x19, 0xf6, 0x02, 0xc1, 0x3c, 0x59, 0x82, 0x22, 0x26, 0xf0, 0x02, 0x81, 0x87, 0xcf, - 0x43, 0xdd, 0x74, 0xd1, 0x7e, 0x07, 0x87, 0xba, 0xa9, 0xc6, 0xeb, 0xa6, 0xe1, 0xa1, 0x16, 0xef, - 0x0b, 0xb5, 0x37, 0x25, 0x3c, 0xaa, 0x20, 0xf5, 0xb0, 0xfe, 0xa3, 0x00, 0x26, 0x04, 0x4b, 0xf8, - 0x5e, 0x03, 0xca, 0x66, 0x61, 0xd2, 0x2e, 0x22, 0x5f, 0x9c, 0xe2, 0x62, 0x06, 0xa4, 0x6c, 0xbc, - 0xb2, 0xf8, 0xf6, 0xe7, 0xdf, 0x4f, 0xf9, 0x9b, 0x70, 0xde, 0x18, 0xfd, 0x09, 0x65, 0x46, 0x9b, - 0x30, 0x1f, 0xbe, 0xd3, 0xc0, 0x84, 0x88, 0x86, 0xd5, 0xb4, 0xfc, 0x01, 0x91, 0x5a, 0x3a, 0xf0, - 0x4c, 0x3c, 0x88, 0xbb, 0x45, 0xe1, 0x1b, 0x0d, 0x8c, 0x73, 0x6b, 0x83, 0x29, 0x26, 0x16, 0x8e, - 0xa3, 0x9a, 0x8a, 0x53, 0x24, 0xaa, 0x82, 0xc4, 0x3c, 0x2c, 0x1b, 0xa3, 0x6e, 0x0b, 0x6a, 0x14, - 0xaf, 0xc1, 0x98, 0x49, 0x6c, 0x78, 0x2b, 0xcd, 0x45, 0x65, 0xfd, 0x8c, 0x66, 0x9b, 0xa9, 0xbc, - 0x98, 0x00, 0x17, 0x85, 0x3c, 0xb8, 0x30, 0xf5, 0x80, 0x66, 0x12, 0x45, 0xd4, 0x05, 0xd2, 0x96, - 0x21, 0xcf, 0xf8, 0x40, 0x14, 0x22, 0x3a, 0x51, 0x14, 0xc3, 0xa7, 0xb7, 0x98, 0xd9, 0x53, 0x32, - 0xf2, 0xe0, 0x23, 0x31, 0x9f, 0x1d, 0xf6, 0x4b, 0xda, 0x51, 0xbf, 0xa4, 0xfd, 0xe9, 0x97, 0xb4, - 0x0f, 0x27, 0xa5, 0xdc, 0xd1, 0x49, 0x29, 0xf7, 0xeb, 0xa4, 0x94, 0x7b, 0xb1, 0xe2, 0x10, 0x7f, - 0xbb, 0x67, 0xe9, 0x2d, 0xda, 0x91, 0x69, 0xee, 0xb8, 0xd8, 0xdf, 0xa3, 0xde, 0x8e, 0xfa, 0xc7, - 0x6f, 0x9f, 0x0e, 0x35, 0x5c, 0x6a, 0xe3, 0x53, 0x05, 0xac, 0x82, 0xb8, 0xd7, 0xdd, 0xfd, 0x17, - 0x00, 0x00, 0xff, 0xff, 0x8e, 0x19, 0x72, 0x7b, 0xf7, 0x0a, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // Orders queries orders with filters - Orders(ctx context.Context, in *QueryOrdersRequest, opts ...grpc.CallOption) (*QueryOrdersResponse, error) - // Order queries order details - Order(ctx context.Context, in *QueryOrderRequest, opts ...grpc.CallOption) (*QueryOrderResponse, error) - // Bids queries bids with filters - Bids(ctx context.Context, in *QueryBidsRequest, opts ...grpc.CallOption) (*QueryBidsResponse, error) - // Bid queries bid details - Bid(ctx context.Context, in *QueryBidRequest, opts ...grpc.CallOption) (*QueryBidResponse, error) - // Leases queries leases with filters - Leases(ctx context.Context, in *QueryLeasesRequest, opts ...grpc.CallOption) (*QueryLeasesResponse, error) - // Lease queries lease details - Lease(ctx context.Context, in *QueryLeaseRequest, opts ...grpc.CallOption) (*QueryLeaseResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Orders(ctx context.Context, in *QueryOrdersRequest, opts ...grpc.CallOption) (*QueryOrdersResponse, error) { - out := new(QueryOrdersResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta2.Query/Orders", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Order(ctx context.Context, in *QueryOrderRequest, opts ...grpc.CallOption) (*QueryOrderResponse, error) { - out := new(QueryOrderResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta2.Query/Order", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Bids(ctx context.Context, in *QueryBidsRequest, opts ...grpc.CallOption) (*QueryBidsResponse, error) { - out := new(QueryBidsResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta2.Query/Bids", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Bid(ctx context.Context, in *QueryBidRequest, opts ...grpc.CallOption) (*QueryBidResponse, error) { - out := new(QueryBidResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta2.Query/Bid", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Leases(ctx context.Context, in *QueryLeasesRequest, opts ...grpc.CallOption) (*QueryLeasesResponse, error) { - out := new(QueryLeasesResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta2.Query/Leases", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Lease(ctx context.Context, in *QueryLeaseRequest, opts ...grpc.CallOption) (*QueryLeaseResponse, error) { - out := new(QueryLeaseResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta2.Query/Lease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. 
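Note: callers reached the deleted v1beta2 query service through the generated client above. A minimal sketch of listing a deployer's orders page by page, assuming the pre-removal import path and a reachable gRPC endpoint — the package path, endpoint, and address below are placeholders, not part of this change.

package main

import (
    "context"
    "fmt"

    "github.com/cosmos/cosmos-sdk/types/query"
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"

    // assumed pre-removal import path for the deleted v1beta2 market package
    mtypes "github.com/akash-network/akash-api/go/node/market/v1beta2"
)

// listOrders pages through every order owned by the given address,
// following PageResponse.NextKey until the server reports no more pages.
func listOrders(ctx context.Context, conn *grpc.ClientConn, owner string) error {
    client := mtypes.NewQueryClient(conn)

    req := &mtypes.QueryOrdersRequest{
        Filters:    mtypes.OrderFilters{Owner: owner},
        Pagination: &query.PageRequest{Limit: 100},
    }

    for {
        res, err := client.Orders(ctx, req)
        if err != nil {
            return err
        }
        for _, order := range res.Orders {
            fmt.Println(order.OrderID, order.State)
        }
        if res.Pagination == nil || len(res.Pagination.NextKey) == 0 {
            return nil
        }
        req.Pagination.Key = res.Pagination.NextKey
    }
}

func main() {
    // placeholder endpoint and owner address
    conn, err := grpc.Dial("grpc.example.com:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
    if err != nil {
        panic(err)
    }
    defer conn.Close()

    if err := listOrders(context.Background(), conn, "akash1..."); err != nil {
        panic(err)
    }
}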
-type QueryServer interface { - // Orders queries orders with filters - Orders(context.Context, *QueryOrdersRequest) (*QueryOrdersResponse, error) - // Order queries order details - Order(context.Context, *QueryOrderRequest) (*QueryOrderResponse, error) - // Bids queries bids with filters - Bids(context.Context, *QueryBidsRequest) (*QueryBidsResponse, error) - // Bid queries bid details - Bid(context.Context, *QueryBidRequest) (*QueryBidResponse, error) - // Leases queries leases with filters - Leases(context.Context, *QueryLeasesRequest) (*QueryLeasesResponse, error) - // Lease queries lease details - Lease(context.Context, *QueryLeaseRequest) (*QueryLeaseResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. -type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Orders(ctx context.Context, req *QueryOrdersRequest) (*QueryOrdersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Orders not implemented") -} -func (*UnimplementedQueryServer) Order(ctx context.Context, req *QueryOrderRequest) (*QueryOrderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Order not implemented") -} -func (*UnimplementedQueryServer) Bids(ctx context.Context, req *QueryBidsRequest) (*QueryBidsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Bids not implemented") -} -func (*UnimplementedQueryServer) Bid(ctx context.Context, req *QueryBidRequest) (*QueryBidResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Bid not implemented") -} -func (*UnimplementedQueryServer) Leases(ctx context.Context, req *QueryLeasesRequest) (*QueryLeasesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Leases not implemented") -} -func (*UnimplementedQueryServer) Lease(ctx context.Context, req *QueryLeaseRequest) (*QueryLeaseResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Lease not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Orders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryOrdersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Orders(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta2.Query/Orders", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Orders(ctx, req.(*QueryOrdersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Order_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryOrderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Order(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta2.Query/Order", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Order(ctx, req.(*QueryOrderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Bids_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(QueryBidsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Bids(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta2.Query/Bids", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Bids(ctx, req.(*QueryBidsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Bid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryBidRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Bid(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta2.Query/Bid", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Bid(ctx, req.(*QueryBidRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Leases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryLeasesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Leases(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta2.Query/Leases", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Leases(ctx, req.(*QueryLeasesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Lease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryLeaseRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Lease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta2.Query/Lease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Lease(ctx, req.(*QueryLeaseRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.market.v1beta2.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Orders", - Handler: _Query_Orders_Handler, - }, - { - MethodName: "Order", - Handler: _Query_Order_Handler, - }, - { - MethodName: "Bids", - Handler: _Query_Bids_Handler, - }, - { - MethodName: "Bid", - Handler: _Query_Bid_Handler, - }, - { - MethodName: "Leases", - Handler: _Query_Leases_Handler, - }, - { - MethodName: "Lease", - Handler: _Query_Lease_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/market/v1beta2/query.proto", -} - -func (m *QueryOrdersRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOrdersRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOrdersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size 
- i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryOrdersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOrdersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOrdersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Orders) > 0 { - for iNdEx := len(m.Orders) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Orders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryOrderRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOrderRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOrderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryOrderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOrderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOrderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Order.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryBidsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryBidsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryBidsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryBidsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryBidsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryBidsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Bids) > 0 { - for iNdEx := len(m.Bids) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Bids[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryBidRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryBidRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryBidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryBidResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryBidResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.EscrowAccount.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.Bid.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryLeasesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLeasesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryLeasesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLeasesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Leases) > 0 { - for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryLeaseRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLeaseRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLeaseRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryLeaseResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLeaseResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.EscrowPayment.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.Lease.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryOrdersRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filters.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryOrdersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Orders) > 0 { - for _, e := range m.Orders { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryOrderRequest) Size() (n int) { - 
if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryOrderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Order.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryBidsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filters.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryBidsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Bids) > 0 { - for _, e := range m.Bids { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryBidRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryBidResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Bid.Size() - n += 1 + l + sovQuery(uint64(l)) - l = m.EscrowAccount.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryLeasesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filters.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryLeasesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Leases) > 0 { - for _, e := range m.Leases { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryLeaseRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryLeaseResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Lease.Size() - n += 1 + l + sovQuery(uint64(l)) - l = m.EscrowPayment.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryOrdersRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOrdersRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOrdersRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := 
iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryOrdersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOrdersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOrdersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Orders", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Orders = append(m.Orders, Order{}) - if err := m.Orders[len(m.Orders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { 
- return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryOrderRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOrderRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOrderRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryOrderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOrderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOrderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Order.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } 
- } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryBidsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryBidsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBidsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryBidsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryBidsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBidsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bids", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - 
} - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bids = append(m.Bids, QueryBidResponse{}) - if err := m.Bids[len(m.Bids)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryBidRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryBidRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBidRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryBidResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 
0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryBidResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bid", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Bid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EscrowAccount", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EscrowAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryLeasesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryLeasesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - 
} - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryLeasesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryLeasesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Leases = append(m.Leases, QueryLeaseResponse{}) - if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryLeaseRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryLeaseRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLeaseRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryLeaseResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryLeaseResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Lease.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EscrowPayment", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EscrowPayment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err 
!= nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta2/query.pb.gw.go b/go/node/market/v1beta2/query.pb.gw.go deleted file mode 100644 index 5ebd8b82..00000000 --- a/go/node/market/v1beta2/query.pb.gw.go +++ /dev/null @@ -1,586 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/market/v1beta2/query.proto - -/* -Package v1beta2 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package v1beta2 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_Orders_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Orders_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryOrdersRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Orders_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Orders(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Orders_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryOrdersRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Orders_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Orders(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Order_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Order_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryOrderRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Order_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Order(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Order_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryOrderRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Order_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := 
server.Order(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Bids_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Bids_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBidsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bids_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Bids(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Bids_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBidsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bids_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Bids(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Bid_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Bid_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBidRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bid_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Bid(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Bid_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBidRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bid_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Bid(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Leases_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Leases_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLeasesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := 
runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Leases_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Leases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Leases_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLeasesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Leases_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Leases(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Lease_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Lease_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLeaseRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Lease_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Lease(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Lease_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLeaseRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Lease_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Lease(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
-func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_Orders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Orders_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Orders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Order_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Order_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Order_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Bids_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Bids_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Bids_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_Bid_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Bid_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Bid_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Leases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Leases_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Leases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Lease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Lease_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Lease_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_Orders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Orders_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Orders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Order_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Order_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Order_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_Bids_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Bids_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Bids_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Bid_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Bid_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Bid_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Leases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Leases_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Leases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Lease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Lease_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Lease_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Query_Orders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta2", "orders", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Order_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta2", "orders", "info"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Bids_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta2", "bids", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Bid_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta2", "bids", "info"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Leases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta2", "leases", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Lease_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta2", "leases", "info"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_Orders_0 = runtime.ForwardResponseMessage - - forward_Query_Order_0 = runtime.ForwardResponseMessage - - forward_Query_Bids_0 = runtime.ForwardResponseMessage - - forward_Query_Bid_0 = runtime.ForwardResponseMessage - - forward_Query_Leases_0 = runtime.ForwardResponseMessage - - forward_Query_Lease_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/market/v1beta2/service.pb.go b/go/node/market/v1beta2/service.pb.go deleted file mode 100644 index fe1dae41..00000000 --- a/go/node/market/v1beta2/service.pb.go +++ /dev/null @@ -1,287 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta2/service.proto - -package v1beta2 - -import ( - context "context" - fmt "fmt" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func init() { - proto.RegisterFile("akash/market/v1beta2/service.proto", fileDescriptor_22998c3fbeffa97e) -} - -var fileDescriptor_22998c3fbeffa97e = []byte{ - // 292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4a, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0xcf, 0x4d, 0x2c, 0xca, 0x4e, 0x2d, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, - 0xd2, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, - 0x01, 0xab, 0xd1, 0x83, 0xa8, 0xd1, 0x83, 0xaa, 0x91, 0x92, 0xc3, 0xaa, 0x33, 0x29, 0x33, 0x05, - 0xa2, 0x4b, 0x4a, 0x01, 0xab, 0x7c, 0x4e, 0x6a, 0x62, 0x31, 0xd4, 0x5c, 0xa3, 0x17, 0xcc, 0x5c, - 0xcc, 0xbe, 0xc5, 0xe9, 0x42, 0xd1, 0x5c, 0x9c, 0xce, 0x45, 0xa9, 0x89, 0x25, 0xa9, 0x4e, 0x99, - 0x29, 0x42, 0x4a, 0x7a, 0xd8, 0x6c, 0xd3, 0xf3, 0x2d, 0x4e, 0x87, 0xab, 0x91, 0xd2, 0x22, 0xac, - 0x26, 0x28, 0xb5, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0x28, 0x82, 0x8b, 0xc3, 0x39, 0x27, 0xbf, - 0x18, 0x6c, 0xb6, 0x22, 0x6e, 0x7d, 0x50, 0x25, 0x52, 0x9a, 0x04, 0x95, 0xc0, 0x4d, 0x4e, 0xe7, - 0xe2, 0x0d, 0xcf, 0x2c, 0xc9, 0x48, 0x29, 0x4a, 0x2c, 0xf7, 0x01, 0xf9, 0x4a, 0x48, 0x0d, 0xa7, - 0x5e, 0x14, 0x75, 0x52, 0x7a, 0xc4, 0xa9, 0x83, 0x5b, 0x94, 0xc8, 0xc5, 0x0d, 0xf1, 0x17, 0xc4, - 0x1a, 0x15, 0x02, 0xbe, 0x87, 0x58, 0xa2, 0x43, 0x8c, 0x2a, 0xb8, 0x15, 0x71, 0x5c, 0x5c, 0x60, - 0xff, 0x41, 0x6c, 0x50, 0xc6, 0x1f, 0x08, 0x10, 0x0b, 0xb4, 0x89, 0x50, 0x04, 0x33, 0xdf, 0x29, - 0xf8, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, - 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x2c, 0xd3, 0x33, 0x4b, 0x32, - 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xc1, 0x06, 0xea, 0xe6, 0xa5, 0x96, 0x94, 0xe7, 0x17, - 0x65, 0x43, 0x79, 0x89, 0x05, 0x99, 0xfa, 0xe9, 0xf9, 0xfa, 0x79, 0xf9, 0x29, 0xa9, 0x68, 0x69, - 0x29, 0x89, 0x0d, 0x9c, 0x8c, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc0, 0x05, 0xbc, 0x13, - 0xc4, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MsgClient interface { - // CreateBid defines a method to create a bid given proper inputs. - CreateBid(ctx context.Context, in *MsgCreateBid, opts ...grpc.CallOption) (*MsgCreateBidResponse, error) - // CloseBid defines a method to close a bid given proper inputs. - CloseBid(ctx context.Context, in *MsgCloseBid, opts ...grpc.CallOption) (*MsgCloseBidResponse, error) - // WithdrawLease withdraws accrued funds from the lease payment - WithdrawLease(ctx context.Context, in *MsgWithdrawLease, opts ...grpc.CallOption) (*MsgWithdrawLeaseResponse, error) - // CreateLease creates a new lease - CreateLease(ctx context.Context, in *MsgCreateLease, opts ...grpc.CallOption) (*MsgCreateLeaseResponse, error) - // CloseLease defines a method to close an order given proper inputs. 
- CloseLease(ctx context.Context, in *MsgCloseLease, opts ...grpc.CallOption) (*MsgCloseLeaseResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) CreateBid(ctx context.Context, in *MsgCreateBid, opts ...grpc.CallOption) (*MsgCreateBidResponse, error) { - out := new(MsgCreateBidResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta2.Msg/CreateBid", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CloseBid(ctx context.Context, in *MsgCloseBid, opts ...grpc.CallOption) (*MsgCloseBidResponse, error) { - out := new(MsgCloseBidResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta2.Msg/CloseBid", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) WithdrawLease(ctx context.Context, in *MsgWithdrawLease, opts ...grpc.CallOption) (*MsgWithdrawLeaseResponse, error) { - out := new(MsgWithdrawLeaseResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta2.Msg/WithdrawLease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CreateLease(ctx context.Context, in *MsgCreateLease, opts ...grpc.CallOption) (*MsgCreateLeaseResponse, error) { - out := new(MsgCreateLeaseResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta2.Msg/CreateLease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CloseLease(ctx context.Context, in *MsgCloseLease, opts ...grpc.CallOption) (*MsgCloseLeaseResponse, error) { - out := new(MsgCloseLeaseResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta2.Msg/CloseLease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. -type MsgServer interface { - // CreateBid defines a method to create a bid given proper inputs. - CreateBid(context.Context, *MsgCreateBid) (*MsgCreateBidResponse, error) - // CloseBid defines a method to close a bid given proper inputs. - CloseBid(context.Context, *MsgCloseBid) (*MsgCloseBidResponse, error) - // WithdrawLease withdraws accrued funds from the lease payment - WithdrawLease(context.Context, *MsgWithdrawLease) (*MsgWithdrawLeaseResponse, error) - // CreateLease creates a new lease - CreateLease(context.Context, *MsgCreateLease) (*MsgCreateLeaseResponse, error) - // CloseLease defines a method to close an order given proper inputs. - CloseLease(context.Context, *MsgCloseLease) (*MsgCloseLeaseResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
-type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) CreateBid(ctx context.Context, req *MsgCreateBid) (*MsgCreateBidResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateBid not implemented") -} -func (*UnimplementedMsgServer) CloseBid(ctx context.Context, req *MsgCloseBid) (*MsgCloseBidResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseBid not implemented") -} -func (*UnimplementedMsgServer) WithdrawLease(ctx context.Context, req *MsgWithdrawLease) (*MsgWithdrawLeaseResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method WithdrawLease not implemented") -} -func (*UnimplementedMsgServer) CreateLease(ctx context.Context, req *MsgCreateLease) (*MsgCreateLeaseResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateLease not implemented") -} -func (*UnimplementedMsgServer) CloseLease(ctx context.Context, req *MsgCloseLease) (*MsgCloseLeaseResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseLease not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_CreateBid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateBid) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateBid(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta2.Msg/CreateBid", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CreateBid(ctx, req.(*MsgCreateBid)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CloseBid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCloseBid) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CloseBid(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta2.Msg/CloseBid", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CloseBid(ctx, req.(*MsgCloseBid)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_WithdrawLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgWithdrawLease) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).WithdrawLease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta2.Msg/WithdrawLease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).WithdrawLease(ctx, req.(*MsgWithdrawLease)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CreateLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateLease) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateLease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta2.Msg/CreateLease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { - return srv.(MsgServer).CreateLease(ctx, req.(*MsgCreateLease)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CloseLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCloseLease) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CloseLease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta2.Msg/CloseLease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CloseLease(ctx, req.(*MsgCloseLease)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.market.v1beta2.Msg", - HandlerType: (*MsgServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateBid", - Handler: _Msg_CreateBid_Handler, - }, - { - MethodName: "CloseBid", - Handler: _Msg_CloseBid_Handler, - }, - { - MethodName: "WithdrawLease", - Handler: _Msg_WithdrawLease_Handler, - }, - { - MethodName: "CreateLease", - Handler: _Msg_CreateLease_Handler, - }, - { - MethodName: "CloseLease", - Handler: _Msg_CloseLease_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/market/v1beta2/service.proto", -} diff --git a/go/node/market/v1beta2/types.go b/go/node/market/v1beta2/types.go deleted file mode 100644 index 575d2695..00000000 --- a/go/node/market/v1beta2/types.go +++ /dev/null @@ -1,240 +0,0 @@ -package v1beta2 - -import ( - "strings" - - "gopkg.in/yaml.v3" - - sdk "github.com/cosmos/cosmos-sdk/types" - - atypes "github.com/akash-network/akash-api/go/node/audit/v1beta2" - types "github.com/akash-network/akash-api/go/node/types/v1beta2" -) - -// ID method returns OrderID details of specific order -func (o Order) ID() OrderID { - return o.OrderID -} - -// String implements the Stringer interface for a Order object. -func (o Order) String() string { - out, _ := yaml.Marshal(o) - return string(out) -} - -// Orders is a collection of Order -type Orders []Order - -// String implements the Stringer interface for a Orders object. 
-func (o Orders) String() string { - var out string - for _, order := range o { - out += order.String() + "\n" - } - - return strings.TrimSpace(out) -} - -// ValidateCanBid method validates whether order is open or not and -// returns error if not -func (o Order) ValidateCanBid() error { - switch o.State { - case OrderOpen: - return nil - case OrderActive: - return ErrOrderActive - default: - return ErrOrderClosed - } -} - -// ValidateInactive method validates whether order is open or not and -// returns error if not -func (o Order) ValidateInactive() error { - switch o.State { - case OrderClosed: - return nil - case OrderActive: - return ErrOrderActive - default: - return ErrOrderClosed - } -} - -// Price method returns price of specific order -func (o Order) Price() sdk.DecCoin { - return o.Spec.Price() -} - -// MatchAttributes method compares provided attributes with specific order attributes -func (o Order) MatchAttributes(attrs []types.Attribute) bool { - return o.Spec.MatchAttributes(attrs) -} - -// MatchRequirements method compares provided attributes with specific order attributes -func (o Order) MatchRequirements(prov []atypes.Provider) bool { - return o.Spec.MatchRequirements(prov) -} - -// MatchResourcesRequirements method compares provider capabilities with specific order resources attributes -func (o Order) MatchResourcesRequirements(attr types.Attributes) bool { - return o.Spec.MatchResourcesRequirements(attr) -} - -// Accept returns whether order filters valid or not -func (filters OrderFilters) Accept(obj Order, stateVal Order_State) bool { - // Checking owner filter - if filters.Owner != "" && filters.Owner != obj.OrderID.Owner { - return false - } - - // Checking dseq filter - if filters.DSeq != 0 && filters.DSeq != obj.OrderID.DSeq { - return false - } - - // Checking gseq filter - if filters.GSeq != 0 && filters.GSeq != obj.OrderID.GSeq { - return false - } - - // Checking oseq filter - if filters.OSeq != 0 && filters.OSeq != obj.OrderID.OSeq { - return false - } - - // Checking state filter - if stateVal != 0 && stateVal != obj.State { - return false - } - - return true -} - -// ID method returns BidID details of specific bid -func (obj Bid) ID() BidID { - return obj.BidID -} - -// String implements the Stringer interface for a Bid object. -func (obj Bid) String() string { - out, _ := yaml.Marshal(obj) - return string(out) -} - -// Bids is a collection of Bid -type Bids []Bid - -// String implements the Stringer interface for a Bids object. 
-func (b Bids) String() string { - var out string - for _, bid := range b { - out += bid.String() + "\n" - } - - return strings.TrimSpace(out) -} - -// Accept returns whether bid filters valid or not -func (filters BidFilters) Accept(obj Bid, stateVal Bid_State) bool { - // Checking owner filter - if filters.Owner != "" && filters.Owner != obj.BidID.Owner { - return false - } - - // Checking dseq filter - if filters.DSeq != 0 && filters.DSeq != obj.BidID.DSeq { - return false - } - - // Checking gseq filter - if filters.GSeq != 0 && filters.GSeq != obj.BidID.GSeq { - return false - } - - // Checking oseq filter - if filters.OSeq != 0 && filters.OSeq != obj.BidID.OSeq { - return false - } - - // Checking provider filter - if filters.Provider != "" && filters.Provider != obj.BidID.Provider { - return false - } - - // Checking state filter - if stateVal != 0 && stateVal != obj.State { - return false - } - - return true -} - -// ID method returns LeaseID details of specific lease -func (obj Lease) ID() LeaseID { - return obj.LeaseID -} - -// String implements the Stringer interface for a Lease object. -func (obj Lease) String() string { - out, _ := yaml.Marshal(obj) - return string(out) -} - -// Leases is a collection of Lease -type Leases []Lease - -// String implements the Stringer interface for a Leases object. -func (l Leases) String() string { - var out string - for _, order := range l { - out += order.String() + "\n" - } - - return strings.TrimSpace(out) -} - -// Accept returns whether lease filters valid or not -func (filters LeaseFilters) Accept(obj Lease, stateVal Lease_State) bool { - // Checking owner filter - if filters.Owner != "" && filters.Owner != obj.LeaseID.Owner { - return false - } - - // Checking dseq filter - if filters.DSeq != 0 && filters.DSeq != obj.LeaseID.DSeq { - return false - } - - // Checking gseq filter - if filters.GSeq != 0 && filters.GSeq != obj.LeaseID.GSeq { - return false - } - - // Checking oseq filter - if filters.OSeq != 0 && filters.OSeq != obj.LeaseID.OSeq { - return false - } - - // Checking provider filter - if filters.Provider != "" && filters.Provider != obj.LeaseID.Provider { - return false - } - - // Checking state filter - if stateVal != 0 && stateVal != obj.State { - return false - } - - return true -} - -func (m QueryLeasesResponse) TotalPriceAmount() sdk.Dec { - total := sdk.NewDec(0) - - for _, lease := range m.Leases { - total = total.Add(lease.Lease.Price.Amount) - } - - return total -} diff --git a/go/node/market/v1beta3/bid.pb.go b/go/node/market/v1beta3/bid.pb.go deleted file mode 100644 index 7786a2c1..00000000 --- a/go/node/market/v1beta3/bid.pb.go +++ /dev/null @@ -1,1967 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta3/bid.proto - -package v1beta3 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of bid -type Bid_State int32 - -const ( - // Prefix should start with 0 in enum. So declaring dummy state - BidStateInvalid Bid_State = 0 - // BidOpen denotes state for bid open - BidOpen Bid_State = 1 - // BidMatched denotes state for bid open - BidActive Bid_State = 2 - // BidLost denotes state for bid lost - BidLost Bid_State = 3 - // BidClosed denotes state for bid closed - BidClosed Bid_State = 4 -) - -var Bid_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "active", - 3: "lost", - 4: "closed", -} - -var Bid_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "active": 2, - "lost": 3, - "closed": 4, -} - -func (x Bid_State) String() string { - return proto.EnumName(Bid_State_name, int32(x)) -} - -func (Bid_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d666bdb5fcae295d, []int{5, 0} -} - -// MsgCreateBid defines an SDK message for creating Bid -type MsgCreateBid struct { - Order OrderID `protobuf:"bytes,1,opt,name=order,proto3" json:"order" yaml:"order"` - Provider string `protobuf:"bytes,2,opt,name=provider,proto3" json:"provider" yaml:"provider"` - Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` - Deposit types.Coin `protobuf:"bytes,4,opt,name=deposit,proto3" json:"deposit" yaml:"deposit"` -} - -func (m *MsgCreateBid) Reset() { *m = MsgCreateBid{} } -func (m *MsgCreateBid) String() string { return proto.CompactTextString(m) } -func (*MsgCreateBid) ProtoMessage() {} -func (*MsgCreateBid) Descriptor() ([]byte, []int) { - return fileDescriptor_d666bdb5fcae295d, []int{0} -} -func (m *MsgCreateBid) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateBid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateBid.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateBid) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateBid.Merge(m, src) -} -func (m *MsgCreateBid) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateBid) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateBid.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateBid proto.InternalMessageInfo - -func (m *MsgCreateBid) GetOrder() OrderID { - if m != nil { - return m.Order - } - return OrderID{} -} - -func (m *MsgCreateBid) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *MsgCreateBid) GetPrice() types.DecCoin { - if m != nil { - return m.Price - } - return types.DecCoin{} -} - -func (m *MsgCreateBid) GetDeposit() types.Coin { - if m != nil { - return m.Deposit - } - return types.Coin{} -} - -// MsgCreateBidResponse defines the Msg/CreateBid response type. 
-type MsgCreateBidResponse struct { -} - -func (m *MsgCreateBidResponse) Reset() { *m = MsgCreateBidResponse{} } -func (m *MsgCreateBidResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateBidResponse) ProtoMessage() {} -func (*MsgCreateBidResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_d666bdb5fcae295d, []int{1} -} -func (m *MsgCreateBidResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateBidResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateBidResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateBidResponse.Merge(m, src) -} -func (m *MsgCreateBidResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateBidResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateBidResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateBidResponse proto.InternalMessageInfo - -// MsgCloseBid defines an SDK message for closing bid -type MsgCloseBid struct { - BidID BidID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCloseBid) Reset() { *m = MsgCloseBid{} } -func (m *MsgCloseBid) String() string { return proto.CompactTextString(m) } -func (*MsgCloseBid) ProtoMessage() {} -func (*MsgCloseBid) Descriptor() ([]byte, []int) { - return fileDescriptor_d666bdb5fcae295d, []int{2} -} -func (m *MsgCloseBid) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseBid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseBid.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseBid) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseBid.Merge(m, src) -} -func (m *MsgCloseBid) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseBid) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseBid.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseBid proto.InternalMessageInfo - -func (m *MsgCloseBid) GetBidID() BidID { - if m != nil { - return m.BidID - } - return BidID{} -} - -// MsgCloseBidResponse defines the Msg/CloseBid response type. 
-type MsgCloseBidResponse struct { -} - -func (m *MsgCloseBidResponse) Reset() { *m = MsgCloseBidResponse{} } -func (m *MsgCloseBidResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCloseBidResponse) ProtoMessage() {} -func (*MsgCloseBidResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_d666bdb5fcae295d, []int{3} -} -func (m *MsgCloseBidResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseBidResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseBidResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseBidResponse.Merge(m, src) -} -func (m *MsgCloseBidResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseBidResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseBidResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseBidResponse proto.InternalMessageInfo - -// BidID stores owner and all other seq numbers -// A successful bid becomes a Lease(ID). -type BidID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` -} - -func (m *BidID) Reset() { *m = BidID{} } -func (*BidID) ProtoMessage() {} -func (*BidID) Descriptor() ([]byte, []int) { - return fileDescriptor_d666bdb5fcae295d, []int{4} -} -func (m *BidID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BidID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BidID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BidID) XXX_Merge(src proto.Message) { - xxx_messageInfo_BidID.Merge(m, src) -} -func (m *BidID) XXX_Size() int { - return m.Size() -} -func (m *BidID) XXX_DiscardUnknown() { - xxx_messageInfo_BidID.DiscardUnknown(m) -} - -var xxx_messageInfo_BidID proto.InternalMessageInfo - -func (m *BidID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *BidID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *BidID) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *BidID) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *BidID) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -// Bid stores BidID, state of bid and price -type Bid struct { - BidID BidID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` - State Bid_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta3.Bid_State" json:"state" yaml:"state"` - Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` -} - -func (m *Bid) Reset() { *m = Bid{} } -func (*Bid) ProtoMessage() {} -func (*Bid) Descriptor() ([]byte, []int) { - 
return fileDescriptor_d666bdb5fcae295d, []int{5} -} -func (m *Bid) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Bid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Bid.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Bid) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bid.Merge(m, src) -} -func (m *Bid) XXX_Size() int { - return m.Size() -} -func (m *Bid) XXX_DiscardUnknown() { - xxx_messageInfo_Bid.DiscardUnknown(m) -} - -var xxx_messageInfo_Bid proto.InternalMessageInfo - -func (m *Bid) GetBidID() BidID { - if m != nil { - return m.BidID - } - return BidID{} -} - -func (m *Bid) GetState() Bid_State { - if m != nil { - return m.State - } - return BidStateInvalid -} - -func (m *Bid) GetPrice() types.DecCoin { - if m != nil { - return m.Price - } - return types.DecCoin{} -} - -func (m *Bid) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -// BidFilters defines flags for bid list filter -type BidFilters struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` - State string `protobuf:"bytes,6,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *BidFilters) Reset() { *m = BidFilters{} } -func (m *BidFilters) String() string { return proto.CompactTextString(m) } -func (*BidFilters) ProtoMessage() {} -func (*BidFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_d666bdb5fcae295d, []int{6} -} -func (m *BidFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BidFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BidFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BidFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_BidFilters.Merge(m, src) -} -func (m *BidFilters) XXX_Size() int { - return m.Size() -} -func (m *BidFilters) XXX_DiscardUnknown() { - xxx_messageInfo_BidFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_BidFilters proto.InternalMessageInfo - -func (m *BidFilters) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *BidFilters) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *BidFilters) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *BidFilters) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *BidFilters) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *BidFilters) GetState() string { - if m != nil { - return m.State - } - return "" -} - -func init() { - proto.RegisterEnum("akash.market.v1beta3.Bid_State", Bid_State_name, Bid_State_value) - proto.RegisterType((*MsgCreateBid)(nil), "akash.market.v1beta3.MsgCreateBid") - proto.RegisterType((*MsgCreateBidResponse)(nil), "akash.market.v1beta3.MsgCreateBidResponse") - proto.RegisterType((*MsgCloseBid)(nil), 
"akash.market.v1beta3.MsgCloseBid") - proto.RegisterType((*MsgCloseBidResponse)(nil), "akash.market.v1beta3.MsgCloseBidResponse") - proto.RegisterType((*BidID)(nil), "akash.market.v1beta3.BidID") - proto.RegisterType((*Bid)(nil), "akash.market.v1beta3.Bid") - proto.RegisterType((*BidFilters)(nil), "akash.market.v1beta3.BidFilters") -} - -func init() { proto.RegisterFile("akash/market/v1beta3/bid.proto", fileDescriptor_d666bdb5fcae295d) } - -var fileDescriptor_d666bdb5fcae295d = []byte{ - // 750 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x95, 0xbd, 0x6e, 0xdb, 0x48, - 0x10, 0xc7, 0x49, 0x89, 0x94, 0xad, 0x95, 0x3f, 0x04, 0xda, 0x3e, 0xd8, 0xba, 0x33, 0x97, 0xc7, - 0xe2, 0xe0, 0xe6, 0x48, 0xd8, 0xae, 0xce, 0x57, 0x99, 0x16, 0xee, 0x20, 0x20, 0x81, 0x13, 0x3a, - 0x48, 0x91, 0x14, 0x06, 0xc5, 0x5d, 0xd0, 0x0b, 0x4b, 0x5c, 0x9a, 0x64, 0x64, 0xe4, 0x0d, 0x02, - 0x57, 0x01, 0xd2, 0xa4, 0x71, 0x60, 0x20, 0x0f, 0x92, 0xd6, 0xa5, 0xcb, 0xa4, 0x21, 0x02, 0xb9, - 0x09, 0x54, 0xea, 0x09, 0x82, 0xfd, 0xd0, 0x87, 0x03, 0x25, 0x40, 0x0a, 0x77, 0xa9, 0xa4, 0xf9, - 0xcf, 0xfe, 0x86, 0xc3, 0xf9, 0x0f, 0x49, 0x60, 0x06, 0xa7, 0x41, 0x76, 0xe2, 0x76, 0x83, 0xf4, - 0x14, 0xe7, 0x6e, 0x6f, 0xbb, 0x8d, 0xf3, 0x60, 0xd7, 0x6d, 0x13, 0xe4, 0x24, 0x29, 0xcd, 0xa9, - 0xb1, 0xca, 0xf3, 0x8e, 0xc8, 0x3b, 0x32, 0xdf, 0x58, 0x8d, 0x68, 0x44, 0xf9, 0x01, 0x97, 0xfd, - 0x13, 0x67, 0x1b, 0x66, 0x48, 0xb3, 0x2e, 0xcd, 0xdc, 0x76, 0x90, 0x61, 0x59, 0x6a, 0xdb, 0x0d, - 0x29, 0x89, 0x65, 0xde, 0x9a, 0x79, 0x2d, 0x9a, 0x22, 0x9c, 0x8a, 0x13, 0xf6, 0xa7, 0x12, 0x58, - 0x78, 0x98, 0x45, 0x07, 0x29, 0x0e, 0x72, 0xec, 0x11, 0x64, 0x3c, 0x07, 0x3a, 0xcf, 0xaf, 0xab, - 0x96, 0xba, 0x55, 0xdb, 0xd9, 0x74, 0x66, 0xb5, 0xe3, 0x1c, 0xb2, 0x23, 0xad, 0xa6, 0xf7, 0xd7, - 0x75, 0x01, 0x95, 0x7e, 0x01, 0x75, 0x2e, 0x0c, 0x0a, 0x28, 0xe0, 0x61, 0x01, 0x17, 0x5e, 0x06, - 0xdd, 0xce, 0x9e, 0xcd, 0x43, 0xdb, 0x17, 0xb2, 0xf1, 0x2f, 0x98, 0x4f, 0x52, 0xda, 0x23, 0xac, - 0x7e, 0xc9, 0x52, 0xb7, 0xaa, 0x1e, 0x1c, 0x14, 0x70, 0xac, 0x0d, 0x0b, 0xb8, 0x2c, 0xb0, 0x91, - 0x62, 0xfb, 0xe3, 0xa4, 0xf1, 0x18, 0xe8, 0x49, 0x4a, 0x42, 0xbc, 0x5e, 0xe6, 0x9d, 0xfd, 0xe1, - 0x88, 0x9b, 0x77, 0xd8, 0xcd, 0xcb, 0xc6, 0xb6, 0x9d, 0x26, 0x0e, 0x0f, 0x28, 0x89, 0xbd, 0x4d, - 0xd6, 0x18, 0xeb, 0x87, 0x23, 0x93, 0x7e, 0x78, 0x68, 0xfb, 0x42, 0x36, 0x9e, 0x82, 0x39, 0x84, - 0x13, 0x9a, 0x91, 0x7c, 0x5d, 0xe3, 0x45, 0x37, 0x66, 0x16, 0xe5, 0x15, 0xff, 0x94, 0x15, 0x47, - 0xc4, 0xb0, 0x80, 0x4b, 0xa2, 0xa6, 0x14, 0x6c, 0x7f, 0x94, 0xda, 0xd3, 0xbe, 0x5c, 0x41, 0xc5, - 0xfe, 0x0d, 0xac, 0x4e, 0x8f, 0xd6, 0xc7, 0x59, 0x42, 0xe3, 0x0c, 0xdb, 0x04, 0xd4, 0x98, 0xde, - 0xa1, 0x19, 0x9f, 0xf8, 0x13, 0x50, 0x69, 0x13, 0x74, 0x4c, 0x90, 0x1c, 0xf9, 0xef, 0xb3, 0x47, - 0xee, 0x11, 0xd4, 0x6a, 0x7a, 0xd6, 0x68, 0xe0, 0x3c, 0x1c, 0x14, 0xb0, 0x44, 0xd0, 0xb0, 0x80, - 0x55, 0xd1, 0x09, 0x41, 0xb6, 0xaf, 0xb7, 0x09, 0x6a, 0x21, 0xd9, 0xc2, 0x1a, 0x58, 0x99, 0xba, - 0xd4, 0xb8, 0x83, 0x77, 0x25, 0x20, 0x0a, 0x18, 0x2e, 0xd0, 0xe9, 0x79, 0x2c, 0xed, 0xae, 0x7a, - 0x1b, 0xdc, 0x42, 0x26, 0x4c, 0x59, 0xc8, 0x42, 0x66, 0x21, 0xfb, 0x35, 0x76, 0x81, 0x86, 0x32, - 0x7c, 0xc6, 0xed, 0xd3, 0x3c, 0xd8, 0x2f, 0xa0, 0xd6, 0x3c, 0xc2, 0x67, 0x83, 0x02, 0x72, 0x7d, - 0x58, 0xc0, 0x9a, 0x9c, 0x4a, 0x86, 0xcf, 0x6c, 0x9f, 0x8b, 0x0c, 0x8a, 0x18, 0xc4, 0x9c, 0x5b, - 0x14, 0xd0, 0xff, 0x12, 0x8a, 0xee, 0x40, 0x91, 0x80, 0x22, 0x09, 0x51, 0x06, 0x69, 0x13, 0xe8, - 0x50, 0x42, 0xf4, 0x0e, 0x44, 0x05, 0xc4, 0x7e, 0xee, 0x6c, 0x98, 0xfe, 
0x93, 0x1b, 0xb6, 0x37, - 0xff, 0xf6, 0x0a, 0x2a, 0x7c, 0x6e, 0x1f, 0xca, 0xa0, 0x7c, 0x6f, 0xde, 0x18, 0x8f, 0x80, 0x9e, - 0xe5, 0x41, 0x8e, 0xf9, 0x10, 0x97, 0x76, 0xe0, 0x77, 0x8b, 0x3a, 0x47, 0xec, 0x98, 0x70, 0x85, - 0x13, 0x13, 0x57, 0x78, 0x68, 0xfb, 0x42, 0xbe, 0x8f, 0x67, 0x63, 0x13, 0x80, 0x90, 0xaf, 0x2e, - 0x3a, 0x0e, 0xc4, 0xe3, 0x51, 0xf6, 0xab, 0x52, 0xd9, 0xcf, 0xed, 0x37, 0x2a, 0xd0, 0x79, 0x77, - 0x86, 0x05, 0xe6, 0x48, 0xdc, 0x0b, 0x3a, 0x04, 0xd5, 0x95, 0xc6, 0xca, 0xc5, 0xa5, 0xb5, 0xec, - 0x11, 0xc4, 0x53, 0x2d, 0x21, 0x1b, 0x6b, 0x40, 0xa3, 0x09, 0x8e, 0xeb, 0x6a, 0xa3, 0x76, 0x71, - 0x69, 0xcd, 0x79, 0x04, 0x1d, 0x26, 0x38, 0x36, 0x36, 0x40, 0x25, 0x08, 0x73, 0xd2, 0xc3, 0xf5, - 0x52, 0x63, 0xf1, 0xe2, 0xd2, 0xaa, 0x7a, 0x04, 0xed, 0x73, 0x81, 0x11, 0x1d, 0x9a, 0xe5, 0xf5, - 0xf2, 0x98, 0x78, 0x40, 0xb3, 0x9c, 0x11, 0x21, 0xdb, 0x65, 0x54, 0xd7, 0xc6, 0x04, 0x5f, 0x6e, - 0xd4, 0xd0, 0x5e, 0xbd, 0x37, 0x95, 0x29, 0x07, 0x6f, 0x4a, 0x00, 0x78, 0x04, 0xfd, 0x47, 0x3a, - 0x39, 0x4e, 0xb3, 0x5f, 0x7b, 0x3e, 0xfd, 0x26, 0x75, 0x47, 0xfb, 0x57, 0x99, 0x0c, 0xe3, 0x47, - 0xeb, 0x25, 0x5e, 0x26, 0xde, 0xd1, 0x75, 0xdf, 0x54, 0x6f, 0xfa, 0xa6, 0xfa, 0xb9, 0x6f, 0xaa, - 0xaf, 0x6f, 0x4d, 0xe5, 0xe6, 0xd6, 0x54, 0x3e, 0xde, 0x9a, 0xca, 0xb3, 0x7f, 0x22, 0x92, 0x9f, - 0xbc, 0x68, 0x3b, 0x21, 0xed, 0xba, 0x7c, 0x97, 0xff, 0x8e, 0x71, 0x7e, 0x4e, 0xd3, 0x53, 0x19, - 0x05, 0x09, 0x71, 0x23, 0xea, 0xc6, 0x14, 0xe1, 0x6f, 0x3e, 0x46, 0xed, 0x0a, 0xff, 0x0e, 0xed, - 0x7e, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x2b, 0x74, 0xc0, 0xb6, 0x17, 0x07, 0x00, 0x00, -} - -func (m *MsgCreateBid) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateBid) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateBid) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Deposit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintBid(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Order.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCreateBidResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateBidResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgCloseBid) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func 
(m *MsgCloseBid) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseBid) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.BidID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCloseBidResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseBidResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *BidID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BidID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BidID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintBid(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintBid(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Bid) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Bid) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Bid) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CreatedAt != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.State != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.BidID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *BidFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BidFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BidFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ 
= l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintBid(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x32 - } - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintBid(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintBid(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintBid(dAtA []byte, offset int, v uint64) int { - offset -= sovBid(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *MsgCreateBid) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Order.Size() - n += 1 + l + sovBid(uint64(l)) - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - l = m.Price.Size() - n += 1 + l + sovBid(uint64(l)) - l = m.Deposit.Size() - n += 1 + l + sovBid(uint64(l)) - return n -} - -func (m *MsgCreateBidResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgCloseBid) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BidID.Size() - n += 1 + l + sovBid(uint64(l)) - return n -} - -func (m *MsgCloseBidResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *BidID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovBid(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovBid(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovBid(uint64(m.OSeq)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - return n -} - -func (m *Bid) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BidID.Size() - n += 1 + l + sovBid(uint64(l)) - if m.State != 0 { - n += 1 + sovBid(uint64(m.State)) - } - l = m.Price.Size() - n += 1 + l + sovBid(uint64(l)) - if m.CreatedAt != 0 { - n += 1 + sovBid(uint64(m.CreatedAt)) - } - return n -} - -func (m *BidFilters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovBid(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovBid(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovBid(uint64(m.OSeq)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - return n -} - -func sovBid(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozBid(x uint64) (n int) { - return sovBid(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *MsgCreateBid) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - 
wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateBid: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateBid: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Order.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deposit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Deposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateBidResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateBidResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseBid) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseBid: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseBid: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BidID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BidID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseBidResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseBidResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BidID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BidID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BidID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Bid) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Bid: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Bid: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BidID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BidID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Bid_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BidFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BidFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BidFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 
- for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipBid(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBid - } - if iNdEx >= l { - return 0, 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBid - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBid - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthBid - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupBid - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthBid - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthBid = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowBid = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupBid = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta3/codec.go b/go/node/market/v1beta3/codec.go deleted file mode 100644 index c8dbf100..00000000 --- a/go/node/market/v1beta3/codec.go +++ /dev/null @@ -1,50 +0,0 @@ -package v1beta3 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/msgservice" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/market module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/market and - // defined at the application level. - ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterCodec registers the necessary x/market interfaces and concrete types -// on the provided Amino codec. These types are used for Amino JSON serialization. 
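
Note: the generated Unmarshal and skip helpers deleted above all repeat the same protobuf varint loop — accumulate seven bits per byte, treat a set high bit as "more bytes follow", and reject shifts of 64 or more as overflow. A minimal, self-contained sketch of that pattern (purely illustrative; not part of the generated code):

    package main

    import (
        "errors"
        "fmt"
    )

    // decodeVarint mirrors the loop used by the generated Unmarshal/skip code:
    // each byte contributes its low 7 bits, a set high bit means another byte
    // follows, and a shift of 64 or more is rejected as integer overflow.
    func decodeVarint(data []byte) (value uint64, n int, err error) {
        for shift := uint(0); ; shift += 7 {
            if shift >= 64 {
                return 0, 0, errors.New("varint overflows 64 bits")
            }
            if n >= len(data) {
                return 0, 0, errors.New("unexpected end of buffer")
            }
            b := data[n]
            n++
            value |= uint64(b&0x7F) << shift
            if b < 0x80 {
                return value, n, nil
            }
        }
    }

    func main() {
        v, n, err := decodeVarint([]byte{0x96, 0x01}) // 0x96 0x01 encodes 150
        fmt.Println(v, n, err)                        // 150 2 <nil>
    }

The length-delimited and field-skipping branches in skipBid are this same loop applied to a length prefix or to a value that is simply discarded.
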
-func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { - cdc.RegisterConcrete(&MsgCreateBid{}, ModuleName+"/"+MsgTypeCreateBid, nil) - cdc.RegisterConcrete(&MsgCloseBid{}, ModuleName+"/"+MsgTypeCloseBid, nil) - cdc.RegisterConcrete(&MsgCreateLease{}, ModuleName+"/"+MsgTypeCreateLease, nil) - cdc.RegisterConcrete(&MsgWithdrawLease{}, ModuleName+"/"+MsgTypeWithdrawLease, nil) - cdc.RegisterConcrete(&MsgCloseLease{}, ModuleName+"/"+MsgTypeCloseLease, nil) -} - -// RegisterInterfaces registers the x/market interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil), - &MsgCreateBid{}, - &MsgCloseBid{}, - &MsgCreateLease{}, - &MsgWithdrawLease{}, - &MsgCloseLease{}, - ) - - msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) -} diff --git a/go/node/market/v1beta3/errors.go b/go/node/market/v1beta3/errors.go deleted file mode 100644 index 044de7d7..00000000 --- a/go/node/market/v1beta3/errors.go +++ /dev/null @@ -1,107 +0,0 @@ -package v1beta3 - -import ( - "errors" -) - -const ( - errCodeEmptyProvider uint32 = iota + 1 - errCodeSameAccount - errCodeInternal - errCodeOverOrder - errCodeAttributeMismatch - errCodeUnknownBid - errCodeUnknownLease - errCodeUnknownLeaseForOrder - errCodeUnknownOrderForBid - errCodeLeaseNotActive - errCodeBidNotActive - errCodeBidNotOpen - errCodeOrderNotOpen - errCodeNoLeaseForOrder - errCodeOrderNotFound - errCodeGroupNotFound - errCodeGroupNotOpen - errCodeBidNotFound - errCodeBidZeroPrice - errCodeLeaseNotFound - errCodeBidExists - errCodeInvalidPrice - errCodeOrderActive - errCodeOrderClosed - errCodeOrderExists - errCodeOrderDurationExceeded - errCodeOrderTooEarly - errInvalidDeposit - errInvalidParam - errUnknownProvider - errInvalidBid - errCodeCapabilitiesMismatch -) - -var ( - // ErrEmptyProvider is the error when provider is empty - ErrEmptyProvider = errors.New("empty provider") - // ErrSameAccount is the error when owner and provider are the same account - ErrSameAccount = errors.New("owner and provider are the same account") - // ErrInternal is the error for internal error - ErrInternal = errors.New("internal error") - // ErrBidOverOrder is the error when bid price is above max order price - ErrBidOverOrder = errors.New("bid price above max order price") - // ErrAttributeMismatch is the error for attribute mismatch - ErrAttributeMismatch = errors.New("attribute mismatch") - // ErrCapabilitiesMismatch is the error for capabilities mismatch - ErrCapabilitiesMismatch = errors.New("capabilities mismatch") - // ErrUnknownBid is the error for unknown bid - ErrUnknownBid = errors.New("unknown bid") - // ErrUnknownLease is the error for unknown bid - ErrUnknownLease = errors.New("unknown lease") - // ErrUnknownLeaseForBid is the error when lease is unknown for bid - ErrUnknownLeaseForBid = errors.New("unknown lease for bid") - // ErrUnknownOrderForBid is the error when order is unknown for bid - ErrUnknownOrderForBid = errors.New("unknown order for bid") - // ErrLeaseNotActive is the error when lease is not active - ErrLeaseNotActive = errors.New("lease not active") - // ErrBidNotActive is the error when bid is not matched - ErrBidNotActive = errors.New("bid not active") - // ErrBidNotOpen is the error when bid is not matched - ErrBidNotOpen = errors.New("bid not open") - // ErrNoLeaseForOrder is the error when there is no lease for order - ErrNoLeaseForOrder = errors.New("no lease for order") - // ErrOrderNotFound order not found - ErrOrderNotFound 
= errors.New("invalid order: order not found") - // ErrGroupNotFound order not found - ErrGroupNotFound = errors.New("order not found") - // ErrGroupNotOpen order not found - ErrGroupNotOpen = errors.New("order not open") - // ErrOrderNotOpen order not found - ErrOrderNotOpen = errors.New("bid: order not open") - // ErrBidNotFound bid not found - ErrBidNotFound = errors.New("invalid bid: bid not found") - // ErrBidZeroPrice zero price - ErrBidZeroPrice = errors.New("invalid bid: zero price") - // ErrLeaseNotFound lease not found - ErrLeaseNotFound = errors.New("invalid lease: lease not found") - // ErrBidExists bid exists - ErrBidExists = errors.New("invalid bid: bid exists from provider") - // ErrBidInvalidPrice bid invalid price - ErrBidInvalidPrice = errors.New("bid price is invalid") - // ErrOrderActive order active - ErrOrderActive = errors.New("order active") - // ErrOrderClosed order closed - ErrOrderClosed = errors.New("order closed") - // ErrOrderExists indicates a new order was proposed overwrite the existing store key - ErrOrderExists = errors.New("order already exists in store") - // ErrOrderTooEarly to match bid - ErrOrderTooEarly = errors.New("order: chain height to low for bidding") - // ErrOrderDurationExceeded order should be closed - ErrOrderDurationExceeded = errors.New("order duration has exceeded the bidding duration") - // ErrInvalidDeposit indicates an invalid deposit - ErrInvalidDeposit = errors.New("deposit invalid") - // ErrInvalidParam indicates an invalid chain parameter - ErrInvalidParam = errors.New("parameter invalid") - // ErrUnknownProvider indicates an invalid chain parameter - ErrUnknownProvider = errors.New("unknown provider") - // ErrInvalidBid indicates an invalid chain parameter - ErrInvalidBid = errors.New("unknown provider") -) diff --git a/go/node/market/v1beta3/escrow.go b/go/node/market/v1beta3/escrow.go deleted file mode 100644 index f49ee939..00000000 --- a/go/node/market/v1beta3/escrow.go +++ /dev/null @@ -1,61 +0,0 @@ -package v1beta3 - -import ( - "fmt" - "strconv" - "strings" - - sdk "github.com/cosmos/cosmos-sdk/types" - - etypes "github.com/akash-network/akash-api/go/node/escrow/v1beta3" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" -) - -const ( - bidEscrowScope = "bid" -) - -func EscrowAccountForBid(id BidID) etypes.AccountID { - return etypes.AccountID{ - Scope: bidEscrowScope, - XID: id.String(), - } -} - -func EscrowPaymentForLease(id LeaseID) string { - return fmt.Sprintf("%v/%v/%s", id.GSeq, id.OSeq, id.Provider) -} - -func LeaseIDFromEscrowAccount(id etypes.AccountID, pid string) (LeaseID, bool) { - did, ok := dtypes.DeploymentIDFromEscrowAccount(id) - if !ok { - return LeaseID{}, false - } - - parts := strings.Split(pid, "/") - if len(parts) != 3 { - return LeaseID{}, false - } - - gseq, err := strconv.ParseUint(parts[0], 10, 32) - if err != nil { - return LeaseID{}, false - } - - oseq, err := strconv.ParseUint(parts[1], 10, 32) - if err != nil { - return LeaseID{}, false - } - - owner, err := sdk.AccAddressFromBech32(parts[2]) - if err != nil { - return LeaseID{}, false - } - - return MakeLeaseID( - MakeBidID( - MakeOrderID( - dtypes.MakeGroupID( - did, uint32(gseq)), uint32(oseq)), owner)), true -} diff --git a/go/node/market/v1beta3/event.go b/go/node/market/v1beta3/event.go deleted file mode 100644 index 7277c237..00000000 --- a/go/node/market/v1beta3/event.go +++ /dev/null @@ -1,359 +0,0 @@ -package v1beta3 - -import ( - "errors" - "strconv" - - sdk "github.com/cosmos/cosmos-sdk/types" - - dtypes 
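
Note: the escrow.go removed here pinned down two string formats — bid escrow accounts use the scope "bid" with the BidID string as XID, and lease payment IDs take the form "<gseq>/<oseq>/<provider>". A standalone sketch of taking a payment ID back apart, assuming that format (the helper name below is made up for illustration, and the bech32 address check that the real LeaseIDFromEscrowAccount performs is omitted):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    // parseLeasePaymentID undoes the "<gseq>/<oseq>/<provider>" format produced
    // by EscrowPaymentForLease in the removed escrow.go. The real code also
    // validates the provider with sdk.AccAddressFromBech32; that is skipped here.
    func parseLeasePaymentID(pid string) (gseq, oseq uint32, provider string, err error) {
        parts := strings.Split(pid, "/")
        if len(parts) != 3 {
            return 0, 0, "", fmt.Errorf("malformed payment id %q", pid)
        }

        g, err := strconv.ParseUint(parts[0], 10, 32)
        if err != nil {
            return 0, 0, "", err
        }
        o, err := strconv.ParseUint(parts[1], 10, 32)
        if err != nil {
            return 0, 0, "", err
        }

        return uint32(g), uint32(o), parts[2], nil
    }

    func main() {
        gseq, oseq, provider, err := parseLeasePaymentID("1/1/akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr")
        fmt.Println(gseq, oseq, provider, err)
    }
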
"github.com/akash-network/akash-api/go/node/deployment/v1beta3" - "github.com/akash-network/akash-api/go/sdkutil" -) - -const ( - evActionOrderCreated = "order-created" - evActionOrderClosed = "order-closed" - evActionBidCreated = "bid-created" - evActionBidClosed = "bid-closed" - evActionLeaseCreated = "lease-created" - evActionLeaseClosed = "lease-closed" - - evOSeqKey = "oseq" - evProviderKey = "provider" - evPriceDenomKey = "price-denom" - evPriceAmountKey = "price-amount" -) - -var ( - ErrParsingPrice = errors.New("error parsing price") -) - -// EventOrderCreated struct -type EventOrderCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID OrderID `json:"id"` -} - -func NewEventOrderCreated(id OrderID) EventOrderCreated { - return EventOrderCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionOrderCreated, - }, - ID: id, - } -} - -// ToSDKEvent method creates new sdk event for EventOrderCreated struct -func (e EventOrderCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionOrderCreated), - }, orderIDEVAttributes(e.ID)...)..., - ) -} - -// EventOrderClosed struct -type EventOrderClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID OrderID `json:"id"` -} - -func NewEventOrderClosed(id OrderID) EventOrderClosed { - return EventOrderClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionOrderClosed, - }, - ID: id, - } -} - -// ToSDKEvent method creates new sdk event for EventOrderClosed struct -func (e EventOrderClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionOrderClosed), - }, orderIDEVAttributes(e.ID)...)..., - ) -} - -// EventBidCreated struct -type EventBidCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID BidID `json:"id"` - Price sdk.DecCoin `json:"price"` -} - -func NewEventBidCreated(id BidID, price sdk.DecCoin) EventBidCreated { - return EventBidCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionBidCreated, - }, - ID: id, - Price: price, - } -} - -// ToSDKEvent method creates new sdk event for EventBidCreated struct -func (e EventBidCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append( - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionBidCreated), - }, bidIDEVAttributes(e.ID)...), - priceEVAttributes(e.Price)...)..., - ) -} - -// EventBidClosed struct -type EventBidClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID BidID `json:"id"` - Price sdk.DecCoin `json:"price"` -} - -func NewEventBidClosed(id BidID, price sdk.DecCoin) EventBidClosed { - return EventBidClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionBidClosed, - }, - ID: id, - Price: price, - } -} - -// ToSDKEvent method creates new sdk event for EventBidClosed struct -func (e EventBidClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append( - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionBidClosed), - }, bidIDEVAttributes(e.ID)...), - 
priceEVAttributes(e.Price)...)..., - ) -} - -// EventLeaseCreated struct -type EventLeaseCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID LeaseID `json:"id"` - Price sdk.DecCoin `json:"price"` -} - -func NewEventLeaseCreated(id LeaseID, price sdk.DecCoin) EventLeaseCreated { - return EventLeaseCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionLeaseCreated, - }, - ID: id, - Price: price, - } -} - -// ToSDKEvent method creates new sdk event for EventLeaseCreated struct -func (e EventLeaseCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append( - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionLeaseCreated), - }, leaseIDEVAttributes(e.ID)...), - priceEVAttributes(e.Price)...)...) -} - -// EventLeaseClosed struct -type EventLeaseClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID LeaseID `json:"id"` - Price sdk.DecCoin `json:"price"` -} - -func NewEventLeaseClosed(id LeaseID, price sdk.DecCoin) EventLeaseClosed { - return EventLeaseClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionLeaseClosed, - }, - ID: id, - Price: price, - } -} - -// ToSDKEvent method creates new sdk event for EventLeaseClosed struct -func (e EventLeaseClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append( - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionLeaseClosed), - }, leaseIDEVAttributes(e.ID)...), - priceEVAttributes(e.Price)...)...) -} - -// orderIDEVAttributes returns event attribues for given orderID -func orderIDEVAttributes(id OrderID) []sdk.Attribute { - return append(dtypes.GroupIDEVAttributes(id.GroupID()), - sdk.NewAttribute(evOSeqKey, strconv.FormatUint(uint64(id.OSeq), 10))) -} - -// parseEVOrderID returns orderID for given event attributes -func parseEVOrderID(attrs []sdk.Attribute) (OrderID, error) { - gid, err := dtypes.ParseEVGroupID(attrs) - if err != nil { - return OrderID{}, err - } - oseq, err := sdkutil.GetUint64(attrs, evOSeqKey) - if err != nil { - return OrderID{}, err - } - - return OrderID{ - Owner: gid.Owner, - DSeq: gid.DSeq, - GSeq: gid.GSeq, - OSeq: uint32(oseq), - }, nil - -} - -// bidIDEVAttributes returns event attribues for given bidID -func bidIDEVAttributes(id BidID) []sdk.Attribute { - return append(orderIDEVAttributes(id.OrderID()), - sdk.NewAttribute(evProviderKey, id.Provider)) -} - -// parseEVBidID returns bidID for given event attributes -func parseEVBidID(attrs []sdk.Attribute) (BidID, error) { - oid, err := parseEVOrderID(attrs) - if err != nil { - return BidID{}, err - } - - provider, err := sdkutil.GetAccAddress(attrs, evProviderKey) - if err != nil { - return BidID{}, err - } - - return BidID{ - Owner: oid.Owner, - DSeq: oid.DSeq, - GSeq: oid.GSeq, - OSeq: oid.OSeq, - Provider: provider.String(), - }, nil -} - -// leaseIDEVAttributes returns event attribues for given LeaseID -func leaseIDEVAttributes(id LeaseID) []sdk.Attribute { - return append(orderIDEVAttributes(id.OrderID()), - sdk.NewAttribute(evProviderKey, id.Provider)) -} - -// parseEVLeaseID returns leaseID for given event attributes -func parseEVLeaseID(attrs []sdk.Attribute) (LeaseID, error) { - bid, err := parseEVBidID(attrs) - if err != nil { - return LeaseID{}, err - } - return LeaseID(bid), nil -} - -func priceEVAttributes(price sdk.DecCoin) []sdk.Attribute { - 
return []sdk.Attribute{ - sdk.NewAttribute(evPriceDenomKey, price.Denom), - sdk.NewAttribute(evPriceAmountKey, price.Amount.String()), - } -} - -func parseEVPriceAttributes(attrs []sdk.Attribute) (sdk.DecCoin, error) { - denom, err := sdkutil.GetString(attrs, evPriceDenomKey) - if err != nil { - return sdk.DecCoin{}, err - } - - amounts, err := sdkutil.GetString(attrs, evPriceAmountKey) - if err != nil { - return sdk.DecCoin{}, err - } - - amount, err := sdk.NewDecFromStr(amounts) - if err != nil { - return sdk.DecCoin{}, ErrParsingPrice - } - - return sdk.NewDecCoinFromDec(denom, amount), nil -} - -// ParseEvent parses event and returns details of event and error if occurred -func ParseEvent(ev sdkutil.Event) (sdkutil.ModuleEvent, error) { - if ev.Type != sdkutil.EventTypeMessage { - return nil, sdkutil.ErrUnknownType - } - if ev.Module != ModuleName { - return nil, sdkutil.ErrUnknownModule - } - switch ev.Action { - - case evActionOrderCreated: - id, err := parseEVOrderID(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventOrderCreated(id), nil - case evActionOrderClosed: - id, err := parseEVOrderID(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventOrderClosed(id), nil - - case evActionBidCreated: - id, err := parseEVBidID(ev.Attributes) - if err != nil { - return nil, err - } - price, err := parseEVPriceAttributes(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventBidCreated(id, price), nil - case evActionBidClosed: - id, err := parseEVBidID(ev.Attributes) - if err != nil { - return nil, err - } - // optional price - price, _ := parseEVPriceAttributes(ev.Attributes) - return NewEventBidClosed(id, price), nil - - case evActionLeaseCreated: - id, err := parseEVLeaseID(ev.Attributes) - if err != nil { - return nil, err - } - price, err := parseEVPriceAttributes(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventLeaseCreated(id, price), nil - case evActionLeaseClosed: - id, err := parseEVLeaseID(ev.Attributes) - if err != nil { - return nil, err - } - // optional price - price, _ := parseEVPriceAttributes(ev.Attributes) - return NewEventLeaseClosed(id, price), nil - - default: - return nil, sdkutil.ErrUnknownAction - } -} diff --git a/go/node/market/v1beta3/events_test.go b/go/node/market/v1beta3/events_test.go deleted file mode 100644 index 5134314f..00000000 --- a/go/node/market/v1beta3/events_test.go +++ /dev/null @@ -1,460 +0,0 @@ -package v1beta3 - -import ( - "fmt" - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/pkg/errors" - - "github.com/stretchr/testify/require" - - "github.com/akash-network/akash-api/go/sdkutil" -) - -var ( - errWildcard = errors.New("wildcard string error can't be matched") - evOwnerKey = "owner" - evDSeqKey = "dseq" - evGSeqKey = "gseq" -) - -type testEventParsing struct { - msg sdkutil.Event - expErr error -} - -func (tep testEventParsing) testMessageType() func(t *testing.T) { - _, err := ParseEvent(tep.msg) - return func(t *testing.T) { - // if the error expected is errWildcard to catch untyped errors, don't fail the test, the error was expected. 
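
Note: ParseEvent, deleted above, rebuilds typed market events from the flat attribute list — it requires the message event type and the market module name, then dispatches on the action string. A short usage sketch against the v1beta3 package as it existed before this change (the owner address is copied from the package's own tests and assumes the akash bech32 prefix is configured, as those tests assume):

    package main

    import (
        "fmt"

        sdk "github.com/cosmos/cosmos-sdk/types"

        mtypes "github.com/akash-network/akash-api/go/node/market/v1beta3"
        "github.com/akash-network/akash-api/go/sdkutil"
    )

    func main() {
        // Flat attribute set emitted for an order-created event.
        ev := sdkutil.Event{
            Type:   sdkutil.EventTypeMessage,
            Module: mtypes.ModuleName,
            Action: "order-created",
            Attributes: []sdk.Attribute{
                {Key: "owner", Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr"},
                {Key: "dseq", Value: "5"},
                {Key: "gseq", Value: "2"},
                {Key: "oseq", Value: "5"},
            },
        }

        parsed, err := mtypes.ParseEvent(ev)
        if err != nil {
            panic(err)
        }

        // ParseEvent returns a sdkutil.ModuleEvent; for this action it holds
        // an EventOrderCreated value carrying the reconstructed OrderID.
        created := parsed.(mtypes.EventOrderCreated)
        fmt.Println(created.ID.Owner, created.ID.DSeq, created.ID.GSeq, created.ID.OSeq)
    }

The test table that follows exercises exactly these paths, including the malformed-owner and malformed-oseq failure cases.
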
- if errors.Is(tep.expErr, errWildcard) { - require.Error(t, err) - } else { - require.Equal(t, tep.expErr, err) - } - } -} - -var TEPS = []testEventParsing{ - { - msg: sdkutil.Event{ - Type: "nil", - }, - expErr: sdkutil.ErrUnknownType, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - }, - expErr: sdkutil.ErrUnknownModule, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - }, - expErr: sdkutil.ErrUnknownAction, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: "nil", - }, - expErr: sdkutil.ErrUnknownModule, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: "nil", - }, - expErr: sdkutil.ErrUnknownAction, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionOrderCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionOrderCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "nooo", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionOrderCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "abc", - }, - }, - }, - expErr: errWildcard, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionOrderClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - }, - }, - expErr: nil, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionBidCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionBidCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "yesss", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionBidCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: 
"akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "hello", - }, - }, - }, - expErr: errWildcard, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionBidClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: nil, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionLeaseCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionLeaseCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "hello", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: errWildcard, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionLeaseClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: nil, - }, -} - -func TestEventParsing(t *testing.T) { - for i, test := range TEPS { - t.Run(fmt.Sprintf("%d", i), - test.testMessageType()) - } -} diff --git a/go/node/market/v1beta3/genesis.pb.go b/go/node/market/v1beta3/genesis.pb.go deleted file mode 100644 index 5d1a89d0..00000000 --- a/go/node/market/v1beta3/genesis.pb.go +++ /dev/null @@ -1,518 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta3/genesis.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisState defines the basic genesis state used by market module -type GenesisState struct { - Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params" yaml:"params"` - Orders []Order `protobuf:"bytes,2,rep,name=orders,proto3" json:"orders" yaml:"orders"` - Leases []Lease `protobuf:"bytes,3,rep,name=leases,proto3" json:"leases" yaml:"leases"` - Bids []Bid `protobuf:"bytes,4,rep,name=bids,proto3" json:"bids" yaml:"bids"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_fa7e580cdc6cba31, []int{0} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetParams() Params { - if m != nil { - return m.Params - } - return Params{} -} - -func (m *GenesisState) GetOrders() []Order { - if m != nil { - return m.Orders - } - return nil -} - -func (m *GenesisState) GetLeases() []Lease { - if m != nil { - return m.Leases - } - return nil -} - -func (m *GenesisState) GetBids() []Bid { - if m != nil { - return m.Bids - } - return nil -} - -func init() { - proto.RegisterType((*GenesisState)(nil), "akash.market.v1beta3.GenesisState") -} - -func init() { - proto.RegisterFile("akash/market/v1beta3/genesis.proto", fileDescriptor_fa7e580cdc6cba31) -} - -var fileDescriptor_fa7e580cdc6cba31 = []byte{ - // 339 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xb1, 0x4e, 0xeb, 0x30, - 0x14, 0x86, 0x93, 0xb6, 0xea, 0x90, 0xde, 0xbb, 0x44, 0x1d, 0x42, 0x8b, 0x9c, 0xe2, 0xa9, 0x0b, - 0xb6, 0x68, 0x27, 0x18, 0xb3, 0x20, 0x21, 0x24, 0x50, 0x0a, 0x0b, 0x9b, 0x43, 0xac, 0xd4, 0x6a, - 0x53, 0x57, 0xb6, 0x01, 0xf1, 0x16, 0x3c, 0x56, 0xc7, 0x8e, 0x2c, 0x44, 0xa8, 0xdd, 0x18, 0xfb, - 0x04, 0x28, 0xb6, 0xa5, 0x48, 0xc8, 0xea, 0x96, 0x3f, 0xff, 0x77, 0xbe, 0xe4, 0xd8, 0x01, 0x24, - 0x0b, 0x22, 0xe7, 0xb8, 0x24, 0x62, 0x41, 0x15, 0x7e, 0xbd, 0xc8, 0xa8, 0x22, 0x53, 0x5c, 0xd0, - 0x15, 0x95, 0x4c, 0xa2, 0xb5, 0xe0, 0x8a, 0x87, 0x7d, 0xcd, 0x20, 0xc3, 0x20, 0xcb, 0x0c, 0xfa, - 0x05, 0x2f, 0xb8, 0x06, 0x70, 0xfd, 0x64, 0xd8, 0xc1, 0xc8, 0xe9, 0xe3, 0x22, 0xa7, 0xe2, 0x28, - 0xb1, 0xa4, 0x44, 0x52, 0x4b, 0x00, 0x27, 0x91, 0xb1, 0xdc, 0xf6, 0x67, 0xce, 0x7e, 0x4d, 0x04, - 0x29, 0xed, 0x2f, 0xc3, 0xaf, 0x56, 0xf0, 0xef, 0xda, 0x2c, 0x31, 0x53, 0x44, 0xd1, 0xf0, 0x31, - 0xe8, 0x1a, 0x20, 
0xf2, 0x47, 0xfe, 0xb8, 0x37, 0x39, 0x45, 0xae, 0xa5, 0xd0, 0xbd, 0x66, 0x92, - 0x78, 0x53, 0xc5, 0xde, 0x4f, 0x15, 0xdb, 0x99, 0x43, 0x15, 0xff, 0x7f, 0x27, 0xe5, 0xf2, 0x0a, - 0x9a, 0x0c, 0x53, 0x5b, 0x84, 0x0f, 0x41, 0x57, 0xef, 0x26, 0xa3, 0xd6, 0xa8, 0x3d, 0xee, 0x4d, - 0x86, 0x6e, 0xed, 0x5d, 0xcd, 0x34, 0x56, 0x33, 0xd2, 0x58, 0x4d, 0x86, 0xa9, 0x2d, 0x6a, 0xab, - 0x3e, 0x0f, 0x19, 0xb5, 0x8f, 0x59, 0x6f, 0x6b, 0xa6, 0xb1, 0x9a, 0x91, 0xc6, 0x6a, 0x32, 0x4c, - 0x6d, 0x11, 0xde, 0x04, 0x9d, 0x8c, 0xe5, 0x32, 0xea, 0x68, 0xe7, 0x89, 0xdb, 0x99, 0xb0, 0x3c, - 0x19, 0x5a, 0xa3, 0xc6, 0x0f, 0x55, 0xdc, 0x33, 0xbe, 0x3a, 0xc1, 0x54, 0xbf, 0x4c, 0x66, 0x9b, - 0x1d, 0xf0, 0xb7, 0x3b, 0xe0, 0x7f, 0xef, 0x80, 0xff, 0xb1, 0x07, 0xde, 0x76, 0x0f, 0xbc, 0xcf, - 0x3d, 0xf0, 0x9e, 0x2e, 0x0b, 0xa6, 0xe6, 0x2f, 0x19, 0x7a, 0xe6, 0x25, 0xd6, 0x5f, 0x38, 0x5f, - 0x51, 0xf5, 0xc6, 0xc5, 0xc2, 0x26, 0xb2, 0x66, 0xb8, 0xe0, 0x78, 0xc5, 0x73, 0xfa, 0xe7, 0x06, - 0xb3, 0xae, 0xbe, 0xbb, 0xe9, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x0f, 0x57, 0x88, 0x94, - 0x02, 0x00, 0x00, -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Bids) > 0 { - for iNdEx := len(m.Bids) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Bids[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Leases) > 0 { - for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Orders) > 0 { - for iNdEx := len(m.Orders) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Orders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Params.Size() - n += 1 + l + sovGenesis(uint64(l)) - if len(m.Orders) > 0 { - for _, e := range m.Orders { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - if len(m.Leases) > 0 { - for _, e := range m.Leases { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - if len(m.Bids) > 0 { - for _, e := range m.Bids { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m 
*GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Orders", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Orders = append(m.Orders, Order{}) - if err := m.Orders[len(m.Orders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Leases = append(m.Leases, Lease{}) - if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bids", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bids = append(m.Bids, Bid{}) - if err := m.Bids[len(m.Bids)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta3/id.go b/go/node/market/v1beta3/id.go deleted file mode 100644 index e8981526..00000000 --- a/go/node/market/v1beta3/id.go +++ /dev/null @@ -1,154 +0,0 @@ -package v1beta3 - -import ( - "fmt" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" -) - -// MakeOrderID returns OrderID instance with provided groupID details and oseq -func MakeOrderID(id dtypes.GroupID, oseq uint32) OrderID { - return OrderID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: id.GSeq, - OSeq: oseq, - } -} - -// GroupID method returns groupID details for specific order -func (id OrderID) GroupID() dtypes.GroupID { - return dtypes.GroupID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: id.GSeq, - } -} - -// Equals method compares specific order with provided order -func (id OrderID) Equals(other OrderID) bool { - return id.GroupID().Equals(other.GroupID()) && id.OSeq == other.OSeq -} - -// Validate method for OrderID and returns nil -func (id OrderID) Validate() error { - if err := id.GroupID().Validate(); err != nil { - return sdkerrors.Wrap(err, "OrderID: Invalid GroupID") - } - if id.OSeq == 0 { - return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "OrderID: Invalid Order Sequence") - } - return nil -} - -// String provides stringer interface to save reflected formatting. 
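
Note: the generated genesis.pb.go deleted above gives GenesisState the usual gogoproto surface — Marshal writes bids, leases, orders and params back to front, and Unmarshal accepts the fields in any order. A minimal round trip against the pre-change package, using an empty state purely for illustration:

    package main

    import (
        "fmt"

        mtypes "github.com/akash-network/akash-api/go/node/market/v1beta3"
    )

    func main() {
        // Zero-value Params plus empty slices; empty repeated fields are not
        // written to the wire at all by the generated Marshal.
        gs := mtypes.GenesisState{
            Orders: []mtypes.Order{},
            Leases: []mtypes.Lease{},
            Bids:   []mtypes.Bid{},
        }

        bz, err := gs.Marshal()
        if err != nil {
            panic(err)
        }

        var out mtypes.GenesisState
        if err := out.Unmarshal(bz); err != nil {
            panic(err)
        }

        fmt.Println(len(bz), len(out.Orders), len(out.Leases), len(out.Bids))
    }
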
-func (id OrderID) String() string { - return fmt.Sprintf("%s/%v", id.GroupID(), id.OSeq) -} - -// MakeBidID returns BidID instance with provided order details and provider -func MakeBidID(id OrderID, provider sdk.AccAddress) BidID { - return BidID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: id.GSeq, - OSeq: id.OSeq, - Provider: provider.String(), - } -} - -// Equals method compares specific bid with provided bid -func (id BidID) Equals(other BidID) bool { - return id.OrderID().Equals(other.OrderID()) && - id.Provider == other.Provider -} - -// LeaseID method returns lease details of bid -func (id BidID) LeaseID() LeaseID { - return LeaseID(id) -} - -// OrderID method returns OrderID details with specific bid details -func (id BidID) OrderID() OrderID { - return OrderID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: id.GSeq, - OSeq: id.OSeq, - } -} - -// String method for consistent output. -func (id BidID) String() string { - return fmt.Sprintf("%s/%v", id.OrderID(), id.Provider) -} - -// GroupID method returns GroupID details with specific bid details -func (id BidID) GroupID() dtypes.GroupID { - return id.OrderID().GroupID() -} - -// DeploymentID method returns deployment details with specific bid details -func (id BidID) DeploymentID() dtypes.DeploymentID { - return id.GroupID().DeploymentID() -} - -// Validate validates bid instance and returns nil -func (id BidID) Validate() error { - if err := id.OrderID().Validate(); err != nil { - return sdkerrors.Wrap(err, "BidID: Invalid OrderID") - } - if _, err := sdk.AccAddressFromBech32(id.Provider); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "BidID: Invalid Provider Address") - } - if id.Owner == id.Provider { - return sdkerrors.Wrap(sdkerrors.ErrConflict, "BidID: self-bid") - } - return nil -} - -// MakeLeaseID returns LeaseID instance with provided bid details -func MakeLeaseID(id BidID) LeaseID { - return LeaseID(id) -} - -// Equals method compares specific lease with provided lease -func (id LeaseID) Equals(other LeaseID) bool { - return id.BidID().Equals(other.BidID()) -} - -// Validate calls the BidID's validator and returns any error. -func (id LeaseID) Validate() error { - if err := id.BidID().Validate(); err != nil { - return sdkerrors.Wrap(err, "LeaseID: Invalid BidID") - } - return nil -} - -// BidID method returns BidID details with specific LeaseID -func (id LeaseID) BidID() BidID { - return BidID(id) -} - -// OrderID method returns OrderID details with specific lease details -func (id LeaseID) OrderID() OrderID { - return id.BidID().OrderID() -} - -// GroupID method returns GroupID details with specific lease details -func (id LeaseID) GroupID() dtypes.GroupID { - return id.OrderID().GroupID() -} - -// DeploymentID method returns deployment details with specific lease details -func (id LeaseID) DeploymentID() dtypes.DeploymentID { - return id.GroupID().DeploymentID() -} - -// String method provides human readable representation of LeaseID. 
-func (id LeaseID) String() string { - return id.BidID().String() -} diff --git a/go/node/market/v1beta3/key.go b/go/node/market/v1beta3/key.go deleted file mode 100644 index d6afb173..00000000 --- a/go/node/market/v1beta3/key.go +++ /dev/null @@ -1,28 +0,0 @@ -package v1beta3 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "market" - - // StoreKey is the store key string for market - StoreKey = ModuleName - - // RouterKey is the message route for market - RouterKey = ModuleName -) - -func OrderPrefix() []byte { - return []byte{0x01, 0x00} -} - -func BidPrefix() []byte { - return []byte{0x02, 0x00} -} - -func LeasePrefix() []byte { - return []byte{0x03, 0x00} -} - -func SecondaryLeasePrefix() []byte { - return []byte{0x03, 0x01} -} diff --git a/go/node/market/v1beta3/lease.pb.go b/go/node/market/v1beta3/lease.pb.go deleted file mode 100644 index 31c5a327..00000000 --- a/go/node/market/v1beta3/lease.pb.go +++ /dev/null @@ -1,2134 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta3/lease.proto - -package v1beta3 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of lease -type Lease_State int32 - -const ( - // Prefix should start with 0 in enum. 
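
Note: the id.go and key.go removed above define the ID nesting used throughout the module — a deployment GroupID (owner/dseq/gseq) is extended to an OrderID by oseq, to a BidID by provider, and a LeaseID is the same five fields under another name. A small sketch of that chain against the pre-change packages (the owner string and provider bytes below are placeholders, not real addresses):

    package main

    import (
        "fmt"

        sdk "github.com/cosmos/cosmos-sdk/types"

        dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3"
        mtypes "github.com/akash-network/akash-api/go/node/market/v1beta3"
    )

    func main() {
        // GroupID (owner/dseq/gseq) -> OrderID (+oseq) -> BidID (+provider) -> LeaseID.
        gid := dtypes.GroupID{Owner: "akash1exampleowner", DSeq: 100, GSeq: 1} // illustrative owner
        provider := sdk.AccAddress(make([]byte, 20))                           // placeholder address bytes

        oid := mtypes.MakeOrderID(gid, 1)
        bid := mtypes.MakeBidID(oid, provider)
        lid := mtypes.MakeLeaseID(bid)

        // LeaseID is BidID under another name, so the round trip is lossless.
        fmt.Println(lid.BidID().Equals(bid), lid.OrderID().Equals(oid))
    }

BidID.Validate additionally rejects self-bids (owner equal to provider), which is why the constructor takes the provider as an account address rather than a free-form string.
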
So declaring dummy state - LeaseStateInvalid Lease_State = 0 - // LeaseActive denotes state for lease active - LeaseActive Lease_State = 1 - // LeaseInsufficientFunds denotes state for lease insufficient_funds - LeaseInsufficientFunds Lease_State = 2 - // LeaseClosed denotes state for lease closed - LeaseClosed Lease_State = 3 -) - -var Lease_State_name = map[int32]string{ - 0: "invalid", - 1: "active", - 2: "insufficient_funds", - 3: "closed", -} - -var Lease_State_value = map[string]int32{ - "invalid": 0, - "active": 1, - "insufficient_funds": 2, - "closed": 3, -} - -func (x Lease_State) String() string { - return proto.EnumName(Lease_State_name, int32(x)) -} - -func (Lease_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_2eca529d0bc380ae, []int{1, 0} -} - -// LeaseID stores bid details of lease -type LeaseID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` -} - -func (m *LeaseID) Reset() { *m = LeaseID{} } -func (*LeaseID) ProtoMessage() {} -func (*LeaseID) Descriptor() ([]byte, []int) { - return fileDescriptor_2eca529d0bc380ae, []int{0} -} -func (m *LeaseID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseID) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseID.Merge(m, src) -} -func (m *LeaseID) XXX_Size() int { - return m.Size() -} -func (m *LeaseID) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseID.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseID proto.InternalMessageInfo - -func (m *LeaseID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *LeaseID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *LeaseID) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *LeaseID) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *LeaseID) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -// Lease stores LeaseID, state of lease and price -type Lease struct { - LeaseID LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"id" yaml:"id"` - State Lease_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta3.Lease_State" json:"state" yaml:"state"` - Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - ClosedOn int64 `protobuf:"varint,5,opt,name=closed_on,json=closedOn,proto3" json:"closed_on,omitempty"` -} - -func (m *Lease) Reset() { *m = Lease{} } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { - return fileDescriptor_2eca529d0bc380ae, []int{1} -} -func (m *Lease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - 
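
Note: the Lease_State enum deleted here reserves the zero value as an explicit "invalid" sentinel so an unset state never collides with a real one; the remaining values map to the strings "active", "insufficient_funds" and "closed". A tiny sketch printing that mapping via the pre-change package:

    package main

    import (
        "fmt"

        mtypes "github.com/akash-network/akash-api/go/node/market/v1beta3"
    )

    func main() {
        for _, s := range []mtypes.Lease_State{
            mtypes.LeaseStateInvalid,
            mtypes.LeaseActive,
            mtypes.LeaseInsufficientFunds,
            mtypes.LeaseClosed,
        } {
            fmt.Println(int32(s), s.String()) // 0 invalid, 1 active, 2 insufficient_funds, 3 closed
        }
    }
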
return xxx_messageInfo_Lease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Lease) XXX_Merge(src proto.Message) { - xxx_messageInfo_Lease.Merge(m, src) -} -func (m *Lease) XXX_Size() int { - return m.Size() -} -func (m *Lease) XXX_DiscardUnknown() { - xxx_messageInfo_Lease.DiscardUnknown(m) -} - -var xxx_messageInfo_Lease proto.InternalMessageInfo - -func (m *Lease) GetLeaseID() LeaseID { - if m != nil { - return m.LeaseID - } - return LeaseID{} -} - -func (m *Lease) GetState() Lease_State { - if m != nil { - return m.State - } - return LeaseStateInvalid -} - -func (m *Lease) GetPrice() types.DecCoin { - if m != nil { - return m.Price - } - return types.DecCoin{} -} - -func (m *Lease) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -func (m *Lease) GetClosedOn() int64 { - if m != nil { - return m.ClosedOn - } - return 0 -} - -// LeaseFilters defines flags for lease list filter -type LeaseFilters struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` - State string `protobuf:"bytes,6,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *LeaseFilters) Reset() { *m = LeaseFilters{} } -func (m *LeaseFilters) String() string { return proto.CompactTextString(m) } -func (*LeaseFilters) ProtoMessage() {} -func (*LeaseFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_2eca529d0bc380ae, []int{2} -} -func (m *LeaseFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseFilters.Merge(m, src) -} -func (m *LeaseFilters) XXX_Size() int { - return m.Size() -} -func (m *LeaseFilters) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseFilters proto.InternalMessageInfo - -func (m *LeaseFilters) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *LeaseFilters) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *LeaseFilters) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *LeaseFilters) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *LeaseFilters) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *LeaseFilters) GetState() string { - if m != nil { - return m.State - } - return "" -} - -// MsgCreateLease is sent to create a lease -type MsgCreateLease struct { - BidID BidID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCreateLease) Reset() { *m = MsgCreateLease{} } -func (m *MsgCreateLease) String() string { return proto.CompactTextString(m) } -func (*MsgCreateLease) ProtoMessage() {} -func (*MsgCreateLease) 
Descriptor() ([]byte, []int) { - return fileDescriptor_2eca529d0bc380ae, []int{3} -} -func (m *MsgCreateLease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateLease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateLease) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateLease.Merge(m, src) -} -func (m *MsgCreateLease) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateLease) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateLease.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateLease proto.InternalMessageInfo - -func (m *MsgCreateLease) GetBidID() BidID { - if m != nil { - return m.BidID - } - return BidID{} -} - -// MsgCreateLeaseResponse is the response from creating a lease -type MsgCreateLeaseResponse struct { -} - -func (m *MsgCreateLeaseResponse) Reset() { *m = MsgCreateLeaseResponse{} } -func (m *MsgCreateLeaseResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateLeaseResponse) ProtoMessage() {} -func (*MsgCreateLeaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2eca529d0bc380ae, []int{4} -} -func (m *MsgCreateLeaseResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateLeaseResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateLeaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateLeaseResponse.Merge(m, src) -} -func (m *MsgCreateLeaseResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateLeaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateLeaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateLeaseResponse proto.InternalMessageInfo - -// MsgWithdrawLease defines an SDK message for closing bid -type MsgWithdrawLease struct { - LeaseID LeaseID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` -} - -func (m *MsgWithdrawLease) Reset() { *m = MsgWithdrawLease{} } -func (m *MsgWithdrawLease) String() string { return proto.CompactTextString(m) } -func (*MsgWithdrawLease) ProtoMessage() {} -func (*MsgWithdrawLease) Descriptor() ([]byte, []int) { - return fileDescriptor_2eca529d0bc380ae, []int{5} -} -func (m *MsgWithdrawLease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgWithdrawLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgWithdrawLease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgWithdrawLease) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgWithdrawLease.Merge(m, src) -} -func (m *MsgWithdrawLease) XXX_Size() int { - return m.Size() -} -func (m *MsgWithdrawLease) XXX_DiscardUnknown() { - xxx_messageInfo_MsgWithdrawLease.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgWithdrawLease proto.InternalMessageInfo - -func (m *MsgWithdrawLease) GetLeaseID() LeaseID { - if m != nil { - return m.LeaseID - } - return LeaseID{} -} - -// MsgWithdrawLeaseResponse 
defines the Msg/WithdrawLease response type. -type MsgWithdrawLeaseResponse struct { -} - -func (m *MsgWithdrawLeaseResponse) Reset() { *m = MsgWithdrawLeaseResponse{} } -func (m *MsgWithdrawLeaseResponse) String() string { return proto.CompactTextString(m) } -func (*MsgWithdrawLeaseResponse) ProtoMessage() {} -func (*MsgWithdrawLeaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2eca529d0bc380ae, []int{6} -} -func (m *MsgWithdrawLeaseResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgWithdrawLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgWithdrawLeaseResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgWithdrawLeaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgWithdrawLeaseResponse.Merge(m, src) -} -func (m *MsgWithdrawLeaseResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgWithdrawLeaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgWithdrawLeaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgWithdrawLeaseResponse proto.InternalMessageInfo - -// MsgCloseLease defines an SDK message for closing order -type MsgCloseLease struct { - LeaseID LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCloseLease) Reset() { *m = MsgCloseLease{} } -func (m *MsgCloseLease) String() string { return proto.CompactTextString(m) } -func (*MsgCloseLease) ProtoMessage() {} -func (*MsgCloseLease) Descriptor() ([]byte, []int) { - return fileDescriptor_2eca529d0bc380ae, []int{7} -} -func (m *MsgCloseLease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseLease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseLease) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseLease.Merge(m, src) -} -func (m *MsgCloseLease) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseLease) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseLease.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseLease proto.InternalMessageInfo - -func (m *MsgCloseLease) GetLeaseID() LeaseID { - if m != nil { - return m.LeaseID - } - return LeaseID{} -} - -// MsgCloseLeaseResponse defines the Msg/CloseLease response type. 
-type MsgCloseLeaseResponse struct { -} - -func (m *MsgCloseLeaseResponse) Reset() { *m = MsgCloseLeaseResponse{} } -func (m *MsgCloseLeaseResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCloseLeaseResponse) ProtoMessage() {} -func (*MsgCloseLeaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_2eca529d0bc380ae, []int{8} -} -func (m *MsgCloseLeaseResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseLeaseResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseLeaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseLeaseResponse.Merge(m, src) -} -func (m *MsgCloseLeaseResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseLeaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseLeaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseLeaseResponse proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("akash.market.v1beta3.Lease_State", Lease_State_name, Lease_State_value) - proto.RegisterType((*LeaseID)(nil), "akash.market.v1beta3.LeaseID") - proto.RegisterType((*Lease)(nil), "akash.market.v1beta3.Lease") - proto.RegisterType((*LeaseFilters)(nil), "akash.market.v1beta3.LeaseFilters") - proto.RegisterType((*MsgCreateLease)(nil), "akash.market.v1beta3.MsgCreateLease") - proto.RegisterType((*MsgCreateLeaseResponse)(nil), "akash.market.v1beta3.MsgCreateLeaseResponse") - proto.RegisterType((*MsgWithdrawLease)(nil), "akash.market.v1beta3.MsgWithdrawLease") - proto.RegisterType((*MsgWithdrawLeaseResponse)(nil), "akash.market.v1beta3.MsgWithdrawLeaseResponse") - proto.RegisterType((*MsgCloseLease)(nil), "akash.market.v1beta3.MsgCloseLease") - proto.RegisterType((*MsgCloseLeaseResponse)(nil), "akash.market.v1beta3.MsgCloseLeaseResponse") -} - -func init() { proto.RegisterFile("akash/market/v1beta3/lease.proto", fileDescriptor_2eca529d0bc380ae) } - -var fileDescriptor_2eca529d0bc380ae = []byte{ - // 755 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xcf, 0x4e, 0xdb, 0x4e, - 0x10, 0xb6, 0x93, 0x38, 0x24, 0x1b, 0xfe, 0xe4, 0x67, 0x01, 0xbf, 0xd4, 0x14, 0xdb, 0xf5, 0x89, - 0x4b, 0x6d, 0x11, 0x4e, 0xa5, 0x27, 0x42, 0x44, 0x15, 0xa9, 0x08, 0xd5, 0x54, 0x6a, 0x55, 0x55, - 0x8a, 0x1c, 0xef, 0x62, 0x56, 0x24, 0xde, 0xe0, 0x35, 0x41, 0x7d, 0x83, 0x8a, 0x53, 0x8f, 0xbd, - 0xa0, 0x22, 0xf5, 0x65, 0x38, 0xa2, 0x9e, 0x7a, 0xb2, 0xaa, 0x70, 0xa9, 0x72, 0x8c, 0xfa, 0x00, - 0xd5, 0xee, 0x3a, 0x24, 0x41, 0x11, 0xa7, 0xaa, 0xa7, 0x9e, 0x92, 0xf9, 0x66, 0xbe, 0x99, 0xf1, - 0x37, 0x33, 0x36, 0x30, 0xbd, 0x13, 0x8f, 0x1e, 0x3b, 0x1d, 0x2f, 0x3a, 0x41, 0xb1, 0xd3, 0xdb, - 0x6c, 0xa1, 0xd8, 0xdb, 0x72, 0xda, 0xc8, 0xa3, 0xc8, 0xee, 0x46, 0x24, 0x26, 0xea, 0x32, 0x8f, - 0xb0, 0x45, 0x84, 0x9d, 0x46, 0x68, 0xcb, 0x01, 0x09, 0x08, 0x0f, 0x70, 0xd8, 0x3f, 0x11, 0xab, - 0xe9, 0x3e, 0xa1, 0x1d, 0x42, 0x9d, 0x96, 0x47, 0x51, 0x9a, 0x6c, 0xd3, 0xf1, 0x09, 0x0e, 0x47, - 0xfe, 0x99, 0xd5, 0x5a, 0x18, 0x0a, 0xbf, 0x75, 0x95, 0x01, 0x73, 0x2f, 0x59, 0xed, 0x46, 0x5d, - 0x75, 0x80, 0x42, 0xce, 0x43, 0x14, 0x55, 0x64, 0x53, 0xde, 0x28, 0xd6, 0x1e, 0x0d, 0x12, 0x43, - 0x00, 0xc3, 0xc4, 0x98, 0xff, 0xe0, 0x75, 0xda, 0xdb, 0x16, 0x37, 0x2d, 0x57, 0xc0, 0xea, 0x16, - 0xc8, 0x41, 0x8a, 0x4e, 
0x2b, 0x19, 0x53, 0xde, 0xc8, 0xd5, 0x8c, 0x7e, 0x62, 0xe4, 0xea, 0x87, - 0xe8, 0x74, 0x90, 0x18, 0x1c, 0x1f, 0x26, 0x46, 0x49, 0xd0, 0x98, 0x65, 0xb9, 0x1c, 0x64, 0xa4, - 0x80, 0x91, 0xb2, 0xa6, 0xbc, 0xb1, 0x20, 0x48, 0x2f, 0x52, 0x52, 0x30, 0x45, 0x0a, 0x04, 0x29, - 0x48, 0x49, 0x84, 0x91, 0x72, 0x63, 0xd2, 0x41, 0x4a, 0x22, 0x53, 0x24, 0x22, 0x48, 0xec, 0x47, - 0x7d, 0x0e, 0x0a, 0xdd, 0x88, 0xf4, 0x30, 0x44, 0x51, 0x45, 0xe1, 0x8f, 0x64, 0x0c, 0x12, 0xe3, - 0x0e, 0x1b, 0x26, 0xc6, 0x92, 0x20, 0x8d, 0x10, 0xcb, 0xbd, 0x73, 0x6e, 0x17, 0x3e, 0x5f, 0x19, - 0xd2, 0xcf, 0x2b, 0x43, 0xb2, 0x7e, 0x65, 0x81, 0xc2, 0x25, 0x52, 0xdf, 0x83, 0x02, 0x9f, 0x53, - 0x13, 0x43, 0xae, 0x51, 0xa9, 0xba, 0x6e, 0xcf, 0x9a, 0x95, 0x9d, 0x2a, 0x5a, 0xb3, 0xae, 0x13, - 0x43, 0xea, 0x27, 0xc6, 0x48, 0xe2, 0x41, 0x62, 0x64, 0x30, 0x1c, 0x26, 0x46, 0x51, 0x14, 0xc6, - 0xd0, 0x72, 0xe7, 0x78, 0xca, 0x06, 0x54, 0x5d, 0xa0, 0xd0, 0xd8, 0x8b, 0x11, 0x97, 0x73, 0xb1, - 0xfa, 0xe4, 0x81, 0xd4, 0xf6, 0x21, 0x0b, 0x14, 0x13, 0xe2, 0x9c, 0xf1, 0x84, 0xb8, 0x69, 0xb9, - 0x02, 0x56, 0x5f, 0x01, 0xa5, 0x1b, 0x61, 0x1f, 0x71, 0xb5, 0x4b, 0xd5, 0xc7, 0xb6, 0x58, 0x17, - 0x9b, 0xad, 0x4b, 0x9a, 0x72, 0xd3, 0xae, 0x23, 0x7f, 0x97, 0xe0, 0xb0, 0xb6, 0xce, 0xba, 0x65, - 0x29, 0x39, 0x65, 0x9c, 0x92, 0x9b, 0x96, 0x2b, 0x60, 0x75, 0x1d, 0x00, 0x3f, 0x42, 0x5e, 0x8c, - 0x60, 0xd3, 0x8b, 0xf9, 0x40, 0xb2, 0x6e, 0x31, 0x45, 0x76, 0x62, 0x75, 0x0d, 0x14, 0xfd, 0x36, - 0xa1, 0x08, 0x36, 0x49, 0xc8, 0x55, 0xcf, 0xba, 0x05, 0x01, 0x1c, 0x84, 0xd6, 0x17, 0x19, 0x28, - 0xbc, 0x75, 0xd5, 0x02, 0x73, 0x38, 0xec, 0x79, 0x6d, 0x0c, 0xcb, 0x92, 0xb6, 0x72, 0x71, 0x69, - 0xfe, 0xc7, 0x1f, 0x8c, 0x3b, 0x1b, 0xc2, 0xa1, 0xae, 0x81, 0xbc, 0xe7, 0xc7, 0xb8, 0x87, 0xca, - 0xb2, 0xb6, 0x74, 0x71, 0x69, 0x96, 0x78, 0xc8, 0x0e, 0x87, 0xd4, 0x2a, 0x50, 0x71, 0x48, 0xcf, - 0x8e, 0x8e, 0xb0, 0x8f, 0x51, 0x18, 0x37, 0x8f, 0xce, 0x42, 0x48, 0xcb, 0x19, 0x4d, 0xbb, 0xb8, - 0x34, 0x57, 0x85, 0xdc, 0x13, 0xee, 0x3d, 0xe6, 0x65, 0x09, 0x45, 0x2b, 0xe5, 0xec, 0x44, 0xc2, - 0x5d, 0x0e, 0x69, 0xb9, 0x8f, 0x5f, 0x75, 0x69, 0x62, 0xec, 0xdf, 0x32, 0x60, 0x9e, 0xfb, 0xf7, - 0x70, 0x3b, 0x46, 0x11, 0xfd, 0x77, 0x1e, 0x13, 0xe7, 0xc1, 0xc4, 0x10, 0xcb, 0x9a, 0x1f, 0x8b, - 0xf1, 0xd0, 0x26, 0x6e, 0xe7, 0xb8, 0xa8, 0x6d, 0xb0, 0xb8, 0x4f, 0x83, 0x5d, 0xbe, 0x2d, 0xe2, - 0xa6, 0x5e, 0x83, 0x7c, 0x0b, 0xc3, 0xf1, 0x45, 0xad, 0xcd, 0x5e, 0xfb, 0x1a, 0x86, 0x8d, 0x7a, - 0xcd, 0x4c, 0xef, 0x49, 0xe1, 0xe6, 0xac, 0x6b, 0x52, 0x5a, 0x18, 0x36, 0x60, 0x5a, 0xad, 0x02, - 0x56, 0xa7, 0xab, 0xb9, 0x88, 0x76, 0x49, 0x48, 0x91, 0x15, 0x81, 0xf2, 0x3e, 0x0d, 0xde, 0xe0, - 0xf8, 0x18, 0x46, 0xde, 0xb9, 0xe8, 0xe4, 0xed, 0xbd, 0x4e, 0xfe, 0xc0, 0x6d, 0x4f, 0x75, 0xa3, - 0x81, 0xca, 0xfd, 0x9a, 0x77, 0xfd, 0x50, 0xb0, 0xc0, 0x3a, 0x65, 0x9b, 0xf8, 0x17, 0x5e, 0x35, - 0x69, 0x43, 0xff, 0x83, 0x95, 0xa9, 0xa2, 0xa3, 0x6e, 0x6a, 0x87, 0xd7, 0x7d, 0x5d, 0xbe, 0xe9, - 0xeb, 0xf2, 0x8f, 0xbe, 0x2e, 0x7f, 0xba, 0xd5, 0xa5, 0x9b, 0x5b, 0x5d, 0xfa, 0x7e, 0xab, 0x4b, - 0xef, 0x9e, 0x05, 0x38, 0x3e, 0x3e, 0x6b, 0xd9, 0x3e, 0xe9, 0x38, 0xbc, 0x9d, 0xa7, 0x21, 0x8a, - 0xcf, 0x49, 0x74, 0x92, 0x5a, 0x5e, 0x17, 0x3b, 0x01, 0x71, 0x42, 0x02, 0xd1, 0xbd, 0x6f, 0x4e, - 0x2b, 0xcf, 0x3f, 0x38, 0x5b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x98, 0xd5, 0x37, 0x96, 0x00, - 0x07, 0x00, 0x00, -} - -func (m *LeaseID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - 
-func (m *LeaseID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintLease(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintLease(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Lease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ClosedOn != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.ClosedOn)) - i-- - dAtA[i] = 0x28 - } - if m.CreatedAt != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.State != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.LeaseID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *LeaseFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintLease(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x32 - } - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintLease(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintLease(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateLease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateLease) MarshalTo(dAtA 
[]byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.BidID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCreateLeaseResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateLeaseResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgWithdrawLease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgWithdrawLease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgWithdrawLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.LeaseID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgWithdrawLeaseResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgWithdrawLeaseResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgWithdrawLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgCloseLease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseLease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.LeaseID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCloseLeaseResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseLeaseResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintLease(dAtA []byte, offset int, v uint64) int { - offset -= sovLease(v) - base := offset - for v >= 1<<7 { 
- dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *LeaseID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovLease(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovLease(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovLease(uint64(m.OSeq)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - return n -} - -func (m *Lease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LeaseID.Size() - n += 1 + l + sovLease(uint64(l)) - if m.State != 0 { - n += 1 + sovLease(uint64(m.State)) - } - l = m.Price.Size() - n += 1 + l + sovLease(uint64(l)) - if m.CreatedAt != 0 { - n += 1 + sovLease(uint64(m.CreatedAt)) - } - if m.ClosedOn != 0 { - n += 1 + sovLease(uint64(m.ClosedOn)) - } - return n -} - -func (m *LeaseFilters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovLease(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovLease(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovLease(uint64(m.OSeq)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - return n -} - -func (m *MsgCreateLease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BidID.Size() - n += 1 + l + sovLease(uint64(l)) - return n -} - -func (m *MsgCreateLeaseResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgWithdrawLease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LeaseID.Size() - n += 1 + l + sovLease(uint64(l)) - return n -} - -func (m *MsgWithdrawLeaseResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgCloseLease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LeaseID.Size() - n += 1 + l + sovLease(uint64(l)) - return n -} - -func (m *MsgCloseLeaseResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovLease(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozLease(x uint64) (n int) { - return sovLease(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *LeaseID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - 
if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Lease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Lease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - if err := m.LeaseID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Lease_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClosedOn", wireType) - } - m.ClosedOn = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ClosedOn |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner 
= string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateLease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateLease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateLease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field BidID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BidID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateLeaseResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateLeaseResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgWithdrawLease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgWithdrawLease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgWithdrawLease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LeaseID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if 
(iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgWithdrawLeaseResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgWithdrawLeaseResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgWithdrawLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseLease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseLease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseLease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LeaseID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseLeaseResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseLeaseResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseLeaseResponse: illegal tag %d (wire type %d)", fieldNum, 
wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipLease(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthLease - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupLease - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthLease - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthLease = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowLease = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupLease = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta3/migrate/v1beta2.go b/go/node/market/v1beta3/migrate/v1beta2.go deleted file mode 100644 index 57fb883c..00000000 --- a/go/node/market/v1beta3/migrate/v1beta2.go +++ /dev/null @@ -1,16 +0,0 @@ -package migrate - -import ( - "github.com/akash-network/akash-api/go/node/market/v1beta2" - "github.com/akash-network/akash-api/go/node/market/v1beta3" -) - -func LeaseIDFromV1beta2(from v1beta2.LeaseID) v1beta3.LeaseID { - return v1beta3.LeaseID{ - Owner: from.Owner, - DSeq: from.DSeq, - GSeq: from.GSeq, - OSeq: from.OSeq, - Provider: from.Provider, - } -} diff --git a/go/node/market/v1beta3/msgs.go b/go/node/market/v1beta3/msgs.go deleted file mode 100644 index 1dfe91a9..00000000 --- a/go/node/market/v1beta3/msgs.go +++ /dev/null @@ -1,215 +0,0 @@ -package v1beta3 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" -) - -const ( - MsgTypeCreateBid = "create-bid" - MsgTypeCloseBid = "close-bid" - MsgTypeCreateLease = "create-lease" - MsgTypeWithdrawLease = "withdraw-lease" - MsgTypeCloseLease = "close-lease" -) - -var ( - _ sdk.Msg = &MsgCreateBid{} - _ sdk.Msg = &MsgCloseBid{} - _ sdk.Msg = &MsgCreateLease{} - _ sdk.Msg = &MsgWithdrawLease{} - _ sdk.Msg = &MsgCloseLease{} -) - -// NewMsgCreateBid creates a new MsgCreateBid instance -func NewMsgCreateBid(id OrderID, provider sdk.AccAddress, price sdk.DecCoin, deposit sdk.Coin) *MsgCreateBid { - return &MsgCreateBid{ - Order: id, - Provider: provider.String(), - Price: price, - Deposit: deposit, - } -} - -// Route implements the sdk.Msg interface -func (msg 
MsgCreateBid) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCreateBid) Type() string { return MsgTypeCreateBid } - -// GetSignBytes encodes the message for signing -func (msg MsgCreateBid) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCreateBid) GetSigners() []sdk.AccAddress { - provider, err := sdk.AccAddressFromBech32(msg.Provider) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{provider} -} - -// ValidateBasic does basic validation of provider and order -func (msg MsgCreateBid) ValidateBasic() error { - if err := msg.Order.Validate(); err != nil { - return err - } - - provider, err := sdk.AccAddressFromBech32(msg.Provider) - if err != nil { - return ErrEmptyProvider - } - - owner, err := sdk.AccAddressFromBech32(msg.Order.Owner) - if err != nil { - return errors.Wrap(ErrInvalidBid, "empty owner") - } - - if provider.Equals(owner) { - return ErrSameAccount - } - - if msg.Price.IsZero() { - return ErrBidZeroPrice - } - - return nil -} - -// NewMsgWithdrawLease creates a new MsgWithdrawLease instance -func NewMsgWithdrawLease(id LeaseID) *MsgWithdrawLease { - return &MsgWithdrawLease{ - LeaseID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgWithdrawLease) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgWithdrawLease) Type() string { return MsgTypeWithdrawLease } - -// GetSignBytes encodes the message for signing -func (msg MsgWithdrawLease) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgWithdrawLease) GetSigners() []sdk.AccAddress { - provider, err := sdk.AccAddressFromBech32(msg.GetLeaseID().Provider) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{provider} -} - -// ValidateBasic does basic validation of provider and order -func (msg MsgWithdrawLease) ValidateBasic() error { - if err := msg.LeaseID.Validate(); err != nil { - return err - } - return nil -} - -// NewMsgCreateLease creates a new MsgCreateLease instance -func NewMsgCreateLease(id BidID) *MsgCreateLease { - return &MsgCreateLease{ - BidID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCreateLease) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCreateLease) Type() string { return MsgTypeCreateLease } - -// GetSignBytes encodes the message for signing -func (msg MsgCreateLease) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCreateLease) GetSigners() []sdk.AccAddress { - provider, err := sdk.AccAddressFromBech32(msg.BidID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{provider} -} - -// ValidateBasic method for MsgCreateLease -func (msg MsgCreateLease) ValidateBasic() error { - return msg.BidID.Validate() -} - -// NewMsgCloseBid creates a new MsgCloseBid instance -func NewMsgCloseBid(id BidID) *MsgCloseBid { - return &MsgCloseBid{ - BidID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCloseBid) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCloseBid) Type() string { return MsgTypeCloseBid } - -// GetSignBytes encodes the message for signing -func (msg MsgCloseBid) GetSignBytes() []byte { - return 
sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCloseBid) GetSigners() []sdk.AccAddress { - provider, err := sdk.AccAddressFromBech32(msg.BidID.Provider) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{provider} -} - -// ValidateBasic method for MsgCloseBid -func (msg MsgCloseBid) ValidateBasic() error { - return msg.BidID.Validate() -} - -// NewMsgCloseLease creates a new MsgCloseLease instance -func NewMsgCloseLease(id LeaseID) *MsgCloseLease { - return &MsgCloseLease{ - LeaseID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCloseLease) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCloseLease) Type() string { return MsgTypeCloseLease } - -// GetSignBytes encodes the message for signing -func (msg MsgCloseLease) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCloseLease) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.LeaseID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// ValidateBasic method for MsgCloseLease -func (msg MsgCloseLease) ValidateBasic() error { - return msg.LeaseID.Validate() -} diff --git a/go/node/market/v1beta3/order.pb.go b/go/node/market/v1beta3/order.pb.go deleted file mode 100644 index 1d2f1958..00000000 --- a/go/node/market/v1beta3/order.pb.go +++ /dev/null @@ -1,1107 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta3/order.proto - -package v1beta3 - -import ( - fmt "fmt" - v1beta3 "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of order -type Order_State int32 - -const ( - // Prefix should start with 0 in enum. 
So declaring dummy state - OrderStateInvalid Order_State = 0 - // OrderOpen denotes state for order open - OrderOpen Order_State = 1 - // OrderMatched denotes state for order matched - OrderActive Order_State = 2 - // OrderClosed denotes state for order lost - OrderClosed Order_State = 3 -) - -var Order_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "active", - 3: "closed", -} - -var Order_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "active": 2, - "closed": 3, -} - -func (x Order_State) String() string { - return proto.EnumName(Order_State_name, int32(x)) -} - -func (Order_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_cffb7c6e7c4cdcbf, []int{1, 0} -} - -// OrderID stores owner and all other seq numbers -type OrderID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` -} - -func (m *OrderID) Reset() { *m = OrderID{} } -func (*OrderID) ProtoMessage() {} -func (*OrderID) Descriptor() ([]byte, []int) { - return fileDescriptor_cffb7c6e7c4cdcbf, []int{0} -} -func (m *OrderID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OrderID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_OrderID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *OrderID) XXX_Merge(src proto.Message) { - xxx_messageInfo_OrderID.Merge(m, src) -} -func (m *OrderID) XXX_Size() int { - return m.Size() -} -func (m *OrderID) XXX_DiscardUnknown() { - xxx_messageInfo_OrderID.DiscardUnknown(m) -} - -var xxx_messageInfo_OrderID proto.InternalMessageInfo - -func (m *OrderID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *OrderID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *OrderID) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *OrderID) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -// Order stores orderID, state of order and other details -type Order struct { - OrderID OrderID `protobuf:"bytes,1,opt,name=order_id,json=orderId,proto3" json:"id" yaml:"id"` - State Order_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta3.Order_State" json:"state" yaml:"state"` - Spec v1beta3.GroupSpec `protobuf:"bytes,3,opt,name=spec,proto3" json:"spec" yaml:"spec"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` -} - -func (m *Order) Reset() { *m = Order{} } -func (*Order) ProtoMessage() {} -func (*Order) Descriptor() ([]byte, []int) { - return fileDescriptor_cffb7c6e7c4cdcbf, []int{1} -} -func (m *Order) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Order.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Order) XXX_Merge(src proto.Message) { - xxx_messageInfo_Order.Merge(m, src) -} -func (m *Order) XXX_Size() int { - return m.Size() -} -func (m *Order) 
XXX_DiscardUnknown() { - xxx_messageInfo_Order.DiscardUnknown(m) -} - -var xxx_messageInfo_Order proto.InternalMessageInfo - -func (m *Order) GetOrderID() OrderID { - if m != nil { - return m.OrderID - } - return OrderID{} -} - -func (m *Order) GetState() Order_State { - if m != nil { - return m.State - } - return OrderStateInvalid -} - -func (m *Order) GetSpec() v1beta3.GroupSpec { - if m != nil { - return m.Spec - } - return v1beta3.GroupSpec{} -} - -func (m *Order) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -// OrderFilters defines flags for order list filter -type OrderFilters struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *OrderFilters) Reset() { *m = OrderFilters{} } -func (m *OrderFilters) String() string { return proto.CompactTextString(m) } -func (*OrderFilters) ProtoMessage() {} -func (*OrderFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_cffb7c6e7c4cdcbf, []int{2} -} -func (m *OrderFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OrderFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_OrderFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *OrderFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_OrderFilters.Merge(m, src) -} -func (m *OrderFilters) XXX_Size() int { - return m.Size() -} -func (m *OrderFilters) XXX_DiscardUnknown() { - xxx_messageInfo_OrderFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_OrderFilters proto.InternalMessageInfo - -func (m *OrderFilters) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *OrderFilters) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *OrderFilters) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *OrderFilters) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *OrderFilters) GetState() string { - if m != nil { - return m.State - } - return "" -} - -func init() { - proto.RegisterEnum("akash.market.v1beta3.Order_State", Order_State_name, Order_State_value) - proto.RegisterType((*OrderID)(nil), "akash.market.v1beta3.OrderID") - proto.RegisterType((*Order)(nil), "akash.market.v1beta3.Order") - proto.RegisterType((*OrderFilters)(nil), "akash.market.v1beta3.OrderFilters") -} - -func init() { proto.RegisterFile("akash/market/v1beta3/order.proto", fileDescriptor_cffb7c6e7c4cdcbf) } - -var fileDescriptor_cffb7c6e7c4cdcbf = []byte{ - // 582 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0x31, 0x6f, 0xd3, 0x40, - 0x14, 0xb6, 0x13, 0xa7, 0x6d, 0x2e, 0x2d, 0x04, 0xab, 0x88, 0xe2, 0xaa, 0x3e, 0x63, 0x96, 0x2c, - 0xd8, 0xa2, 0x9d, 0xc8, 0xd6, 0x50, 0x51, 0x65, 0x8a, 0xe4, 0x30, 0x21, 0xa4, 0xca, 0xf1, 0x9d, - 0x5c, 0x2b, 0x89, 0xcf, 0xb5, 0xaf, 0xa9, 0xba, 0x33, 0xa0, 0x4c, 0x2c, 0x48, 0x2c, 0x91, 0x2a, - 0xf1, 0x43, 0x58, 0x3b, 0x76, 0x64, 0xb2, 0x50, 0xb2, 0xa0, 0x8c, 0xf9, 0x05, 0xe8, 0xde, 0x19, - 
0xdc, 0x22, 0xd4, 0x1f, 0xc0, 0x64, 0xbf, 0xef, 0x7d, 0xdf, 0xbb, 0x7b, 0xdf, 0x7b, 0x3a, 0x64, - 0xf9, 0x43, 0x3f, 0x3b, 0x75, 0xc7, 0x7e, 0x3a, 0xa4, 0xdc, 0x9d, 0xbc, 0x1c, 0x50, 0xee, 0x1f, - 0xb8, 0x2c, 0x25, 0x34, 0x75, 0x92, 0x94, 0x71, 0xa6, 0x6f, 0x03, 0xc3, 0x91, 0x0c, 0xa7, 0x60, - 0x18, 0xdb, 0x21, 0x0b, 0x19, 0x10, 0x5c, 0xf1, 0x27, 0xb9, 0x46, 0x4b, 0x56, 0x23, 0x34, 0x19, - 0xb1, 0xcb, 0x31, 0x8d, 0xcb, 0x8a, 0x61, 0xca, 0xce, 0x93, 0x2c, 0xa1, 0x81, 0x64, 0xda, 0x0b, - 0x15, 0xad, 0xf7, 0xc4, 0x29, 0xdd, 0x23, 0xdd, 0x45, 0x35, 0x76, 0x11, 0xd3, 0x74, 0x47, 0xb5, - 0xd4, 0x56, 0xbd, 0xf3, 0x74, 0x99, 0x63, 0x09, 0xac, 0x72, 0xbc, 0x79, 0xe9, 0x8f, 0x47, 0x6d, - 0x1b, 0x42, 0xdb, 0x93, 0xb0, 0x7e, 0x80, 0x34, 0x92, 0xd1, 0xb3, 0x9d, 0x8a, 0xa5, 0xb6, 0xb4, - 0x0e, 0x9e, 0xe7, 0x58, 0x3b, 0xea, 0xd3, 0xb3, 0x65, 0x8e, 0x01, 0x5f, 0xe5, 0xb8, 0x21, 0x65, - 0x22, 0xb2, 0x3d, 0x00, 0x85, 0x28, 0x14, 0xa2, 0xaa, 0xa5, 0xb6, 0xb6, 0xa4, 0xe8, 0xb8, 0x10, - 0x85, 0x77, 0x44, 0xa1, 0x14, 0x85, 0x85, 0x88, 0x09, 0x91, 0x56, 0x8a, 0x7a, 0x85, 0x88, 0xdd, - 0x11, 0x31, 0x29, 0x12, 0x9f, 0xf6, 0xc6, 0x97, 0x2b, 0xac, 0xfc, 0xbc, 0xc2, 0x8a, 0xfd, 0xad, - 0x8a, 0x6a, 0xd0, 0xa5, 0xfe, 0x1e, 0x6d, 0x80, 0xa9, 0x27, 0x11, 0x81, 0x36, 0x1b, 0xfb, 0x7b, - 0xce, 0xbf, 0x8c, 0x75, 0x0a, 0x53, 0x3a, 0xf6, 0x75, 0x8e, 0x95, 0x79, 0x8e, 0x7f, 0xbb, 0xb4, - 0xcc, 0x71, 0x25, 0x22, 0xab, 0x1c, 0xd7, 0xe5, 0x81, 0x11, 0xb1, 0xbd, 0x75, 0x28, 0xd9, 0x25, - 0xba, 0x87, 0x6a, 0x19, 0xf7, 0x39, 0x05, 0x47, 0x1e, 0xec, 0x3f, 0xbb, 0xa7, 0xb4, 0xd3, 0x17, - 0x44, 0x69, 0x32, 0x68, 0x4a, 0x93, 0x21, 0xb4, 0x3d, 0x09, 0xeb, 0x6f, 0x91, 0x26, 0xe6, 0x05, - 0x7e, 0x35, 0xf6, 0x9f, 0x17, 0x25, 0xcb, 0xd1, 0xfe, 0x29, 0x7b, 0x2c, 0x46, 0xdb, 0x4f, 0x68, - 0xd0, 0xd9, 0x15, 0x77, 0x16, 0xde, 0x08, 0x61, 0xe9, 0x8d, 0x88, 0x6c, 0x0f, 0x40, 0x7d, 0x0f, - 0xa1, 0x20, 0xa5, 0x3e, 0xa7, 0xe4, 0xc4, 0xe7, 0x60, 0x6b, 0xd5, 0xab, 0x17, 0xc8, 0x21, 0xb7, - 0x3f, 0xa8, 0xa8, 0x06, 0x17, 0xd4, 0x6d, 0xb4, 0x1e, 0xc5, 0x13, 0x7f, 0x14, 0x91, 0xa6, 0x62, - 0x3c, 0x9e, 0xce, 0xac, 0x47, 0x70, 0x7d, 0x48, 0x76, 0x65, 0x42, 0x7f, 0x82, 0x34, 0x96, 0xd0, - 0xb8, 0xa9, 0x1a, 0x5b, 0xd3, 0x99, 0x55, 0x07, 0x42, 0x2f, 0xa1, 0xb1, 0xbe, 0x8b, 0xd6, 0xfc, - 0x80, 0x47, 0x13, 0xda, 0xac, 0x18, 0x0f, 0xa7, 0x33, 0xab, 0x01, 0xa9, 0x43, 0x80, 0x44, 0x32, - 0x18, 0xb1, 0x8c, 0x92, 0x66, 0xf5, 0x56, 0xf2, 0x35, 0x40, 0x86, 0xf6, 0xf1, 0xab, 0xa9, 0xdc, - 0x9a, 0xe0, 0xe7, 0x0a, 0xda, 0x84, 0xfc, 0x9b, 0x68, 0xc4, 0x69, 0x9a, 0xfd, 0x6f, 0xcb, 0x2a, - 0xfa, 0x91, 0xab, 0x53, 0x2b, 0xfb, 0xb9, 0x6f, 0x2f, 0xda, 0x9a, 0xf0, 0xa5, 0xd3, 0xbf, 0x9e, - 0x9b, 0xea, 0xcd, 0xdc, 0x54, 0x7f, 0xcc, 0x4d, 0xf5, 0xd3, 0xc2, 0x54, 0x6e, 0x16, 0xa6, 0xf2, - 0x7d, 0x61, 0x2a, 0xef, 0x5e, 0x85, 0x11, 0x3f, 0x3d, 0x1f, 0x38, 0x01, 0x1b, 0xbb, 0xb0, 0x33, - 0x2f, 0x62, 0xca, 0x2f, 0x58, 0x3a, 0x2c, 0x22, 0x3f, 0x89, 0xdc, 0x90, 0xb9, 0x31, 0x23, 0xf4, - 0xaf, 0x67, 0x67, 0xb0, 0x06, 0x6f, 0xc3, 0xc1, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x14, 0x36, - 0xa2, 0x46, 0x95, 0x04, 0x00, 0x00, -} - -func (m *OrderID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OrderID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OrderID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.OSeq != 0 
{ - i = encodeVarintOrder(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintOrder(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Order) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Order) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Order) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CreatedAt != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOrder(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.State != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.OrderID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOrder(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *OrderFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OrderFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OrderFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintOrder(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintOrder(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintOrder(dAtA []byte, offset int, v uint64) int { - offset -= sovOrder(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *OrderID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovOrder(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovOrder(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovOrder(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovOrder(uint64(m.OSeq)) - } - return n -} - -func (m *Order) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.OrderID.Size() - n += 1 + l + sovOrder(uint64(l)) - if m.State != 0 { - n += 1 + sovOrder(uint64(m.State)) - } - l = m.Spec.Size() - n += 1 + l + sovOrder(uint64(l)) - if m.CreatedAt != 0 { - n += 1 + sovOrder(uint64(m.CreatedAt)) - } - return n -} - -func (m *OrderFilters) Size() 
(n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovOrder(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovOrder(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovOrder(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovOrder(uint64(m.OSeq)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovOrder(uint64(l)) - } - return n -} - -func sovOrder(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozOrder(x uint64) (n int) { - return sovOrder(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *OrderID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OrderID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OrderID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOrder(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOrder - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Order) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Order: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Order: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OrderID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.OrderID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Order_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOrder(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOrder - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OrderFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OrderFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OrderFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOrder(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOrder - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipOrder(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOrder - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOrder - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOrder - } - if iNdEx 
>= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if length < 0 {
- return 0, ErrInvalidLengthOrder
- }
- iNdEx += length
- case 3:
- depth++
- case 4:
- if depth == 0 {
- return 0, ErrUnexpectedEndOfGroupOrder
- }
- depth--
- case 5:
- iNdEx += 4
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- if iNdEx < 0 {
- return 0, ErrInvalidLengthOrder
- }
- if depth == 0 {
- return iNdEx, nil
- }
- }
- return 0, io.ErrUnexpectedEOF
-}
-
-var (
- ErrInvalidLengthOrder = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowOrder = fmt.Errorf("proto: integer overflow")
- ErrUnexpectedEndOfGroupOrder = fmt.Errorf("proto: unexpected end of group")
-)
diff --git a/go/node/market/v1beta3/params.go b/go/node/market/v1beta3/params.go
deleted file mode 100644
index 4dbe0ddd..00000000
--- a/go/node/market/v1beta3/params.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package v1beta3
-
-import (
- sdk "github.com/cosmos/cosmos-sdk/types"
- paramtypes "github.com/cosmos/cosmos-sdk/x/params/types"
- "github.com/pkg/errors"
-)
-
-var _ paramtypes.ParamSet = (*Params)(nil)
-
-var (
- DefaultBidMinDeposit = sdk.NewCoin("uakt", sdk.NewInt(5000000))
- defaultOrderMaxBids uint32 = 20
- maxOrderMaxBids uint32 = 500
-)
-
-const (
- keyBidMinDeposit = "BidMinDeposit"
- keyOrderMaxBids = "OrderMaxBids"
-)
-
-func ParamKeyTable() paramtypes.KeyTable {
- return paramtypes.NewKeyTable().RegisterParamSet(&Params{})
-}
-
-func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs {
- return paramtypes.ParamSetPairs{
- paramtypes.NewParamSetPair([]byte(keyBidMinDeposit), &p.BidMinDeposit, validateCoin),
- paramtypes.NewParamSetPair([]byte(keyOrderMaxBids), &p.OrderMaxBids, validateOrderMaxBids),
- }
-}
-
-func DefaultParams() Params {
- return Params{
- BidMinDeposit: DefaultBidMinDeposit,
- OrderMaxBids: defaultOrderMaxBids,
- }
-}
-
-func (p Params) Validate() error {
- if err := validateCoin(p.BidMinDeposit); err != nil {
- return err
- }
-
- if err := validateOrderMaxBids(p.OrderMaxBids); err != nil {
- return err
- }
- return nil
-}
-
-func validateCoin(i interface{}) error {
- _, ok := i.(sdk.Coin)
- if !ok {
- return errors.Wrapf(ErrInvalidParam, "invalid type %T", i)
- }
-
- return nil
-}
-
-func validateOrderMaxBids(i interface{}) error {
- val, ok := i.(uint32)
-
- if !ok {
- return errors.Wrapf(ErrInvalidParam, "invalid type %T", i)
- }
-
- if val == 0 {
- return errors.Wrap(ErrInvalidParam, "order max bids too low")
- }
-
- if val > maxOrderMaxBids {
- return errors.Wrap(ErrInvalidParam, "order max bids too high")
- }
-
- return nil
-}
diff --git a/go/node/market/v1beta3/params.pb.go b/go/node/market/v1beta3/params.pb.go
deleted file mode 100644
index a7e88138..00000000
--- a/go/node/market/v1beta3/params.pb.go
+++ /dev/null
@@ -1,365 +0,0 @@
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
-// source: akash/market/v1beta3/params.proto
-
-package v1beta3
-
-import (
- fmt "fmt"
- types "github.com/cosmos/cosmos-sdk/types"
- _ "github.com/gogo/protobuf/gogoproto"
- proto "github.com/gogo/protobuf/proto"
- io "io"
- math "math"
- math_bits "math/bits"
-)
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Params is the params for the x/market module -type Params struct { - BidMinDeposit types.Coin `protobuf:"bytes,1,opt,name=bid_min_deposit,json=bidMinDeposit,proto3" json:"bid_min_deposit" yaml:"bid_min_deposit"` - OrderMaxBids uint32 `protobuf:"varint,2,opt,name=order_max_bids,json=orderMaxBids,proto3" json:"order_max_bids" yaml:"order_max_bids"` -} - -func (m *Params) Reset() { *m = Params{} } -func (m *Params) String() string { return proto.CompactTextString(m) } -func (*Params) ProtoMessage() {} -func (*Params) Descriptor() ([]byte, []int) { - return fileDescriptor_ee47153ff137a530, []int{0} -} -func (m *Params) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Params.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Params) XXX_Merge(src proto.Message) { - xxx_messageInfo_Params.Merge(m, src) -} -func (m *Params) XXX_Size() int { - return m.Size() -} -func (m *Params) XXX_DiscardUnknown() { - xxx_messageInfo_Params.DiscardUnknown(m) -} - -var xxx_messageInfo_Params proto.InternalMessageInfo - -func (m *Params) GetBidMinDeposit() types.Coin { - if m != nil { - return m.BidMinDeposit - } - return types.Coin{} -} - -func (m *Params) GetOrderMaxBids() uint32 { - if m != nil { - return m.OrderMaxBids - } - return 0 -} - -func init() { - proto.RegisterType((*Params)(nil), "akash.market.v1beta3.Params") -} - -func init() { proto.RegisterFile("akash/market/v1beta3/params.proto", fileDescriptor_ee47153ff137a530) } - -var fileDescriptor_ee47153ff137a530 = []byte{ - // 321 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0x31, 0x4f, 0xe3, 0x30, - 0x18, 0x86, 0xe3, 0x1b, 0x3a, 0xe4, 0xda, 0x3b, 0xa9, 0xea, 0x9d, 0x4a, 0x07, 0xbb, 0x64, 0xea, - 0x82, 0xad, 0xd2, 0x09, 0xd8, 0x02, 0x6b, 0x05, 0x2a, 0x1b, 0x4b, 0x64, 0xd7, 0x56, 0x6a, 0x15, - 0xe7, 0x8b, 0xe2, 0x00, 0xe5, 0x07, 0xb0, 0xf3, 0xb3, 0x3a, 0x76, 0x64, 0xb2, 0x50, 0xba, 0x75, - 0x2c, 0x7f, 0x00, 0x35, 0xc9, 0x50, 0xb2, 0xd9, 0x7e, 0x9f, 0xf7, 0xf9, 0xa4, 0xcf, 0xfe, 0x29, - 0x5f, 0x72, 0xbb, 0x60, 0x86, 0x67, 0x4b, 0x95, 0xb3, 0xe7, 0xb1, 0x50, 0x39, 0x9f, 0xb0, 0x94, - 0x67, 0xdc, 0x58, 0x9a, 0x66, 0x90, 0x43, 0xb7, 0x57, 0x22, 0xb4, 0x42, 0x68, 0x8d, 0x0c, 0x7a, - 0x31, 0xc4, 0x50, 0x02, 0xec, 0x70, 0xaa, 0xd8, 0x01, 0x9e, 0x83, 0x35, 0x60, 0x99, 0xe0, 0x56, - 0xd5, 0xb6, 0x31, 0x9b, 0x83, 0x4e, 0xaa, 0x3c, 0xf8, 0x42, 0x7e, 0xeb, 0xae, 0x94, 0x77, 0xdf, - 0x90, 0xff, 0x57, 0x68, 0x19, 0x19, 0x9d, 0x44, 0x52, 0xa5, 0x60, 0x75, 0xde, 0x47, 0x43, 0x34, - 0xfa, 0x7d, 0x7e, 0x42, 0x2b, 0x0b, 0x3d, 0x58, 0xea, 0x81, 0x63, 0x7a, 0x0d, 0x3a, 0x09, 0xc3, - 0xb5, 0x23, 0x5e, 0xe1, 0x48, 0x27, 0xd4, 0x72, 0xaa, 0x93, 0x9b, 0xaa, 0xb7, 0x73, 0xa4, 0xa9, - 0xda, 0x3b, 0xf2, 0xff, 0x95, 0x9b, 0xc7, 0xcb, 0xa0, 0x11, 0x04, 0xb3, 0x8e, 0x38, 0xee, 0x76, - 0xb9, 0xff, 0x07, 0x32, 0xa9, 0xb2, 0xc8, 0xf0, 0x55, 0x24, 0xb4, 0xb4, 0xfd, 0x5f, 0x43, 0x34, - 0xea, 0x84, 0x57, 0x85, 0x23, 0xed, 0xdb, 0x43, 0x32, 0xe5, 0xab, 0x50, 0x4b, 0xbb, 0x73, 0xa4, - 0x41, 0xee, 0x1d, 0xf9, 0x57, 0x0d, 0xf9, 0xf9, 0x1e, 0xcc, 0xda, 0x70, 0x54, 0x0c, 0xef, 0xd7, - 0x05, 
0x46, 0x9b, 0x02, 0xa3, 0xcf, 0x02, 0xa3, 0xf7, 0x2d, 0xf6, 0x36, 0x5b, 0xec, 0x7d, 0x6c, - 0xb1, 0xf7, 0x70, 0x11, 0xeb, 0x7c, 0xf1, 0x24, 0xe8, 0x1c, 0x0c, 0x2b, 0xd7, 0x7c, 0x96, 0xa8, - 0xfc, 0x05, 0xb2, 0x65, 0x7d, 0xe3, 0xa9, 0x66, 0x31, 0xb0, 0x04, 0xa4, 0x6a, 0xfc, 0x91, 0x68, - 0x95, 0x1b, 0x9d, 0x7c, 0x07, 0x00, 0x00, 0xff, 0xff, 0x04, 0xe1, 0x11, 0x23, 0xc2, 0x01, 0x00, - 0x00, -} - -func (m *Params) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Params) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.OrderMaxBids != 0 { - i = encodeVarintParams(dAtA, i, uint64(m.OrderMaxBids)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.BidMinDeposit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintParams(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintParams(dAtA []byte, offset int, v uint64) int { - offset -= sovParams(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Params) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BidMinDeposit.Size() - n += 1 + l + sovParams(uint64(l)) - if m.OrderMaxBids != 0 { - n += 1 + sovParams(uint64(m.OrderMaxBids)) - } - return n -} - -func sovParams(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozParams(x uint64) (n int) { - return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Params) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Params: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BidMinDeposit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthParams - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthParams - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BidMinDeposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OrderMaxBids", wireType) - } - m.OrderMaxBids = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OrderMaxBids |= uint32(b&0x7F) << shift - if b < 0x80 { 
- break - } - } - default: - iNdEx = preIndex - skippy, err := skipParams(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipParams(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthParams - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupParams - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthParams - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta3/query.pb.go b/go/node/market/v1beta3/query.pb.go deleted file mode 100644 index 14409581..00000000 --- a/go/node/market/v1beta3/query.pb.go +++ /dev/null @@ -1,3034 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta3/query.proto - -package v1beta3 - -import ( - context "context" - fmt "fmt" - v1beta3 "github.com/akash-network/akash-api/go/node/escrow/v1beta3" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryOrdersRequest is request type for the Query/Orders RPC method -type QueryOrdersRequest struct { - Filters OrderFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryOrdersRequest) Reset() { *m = QueryOrdersRequest{} } -func (m *QueryOrdersRequest) String() string { return proto.CompactTextString(m) } -func (*QueryOrdersRequest) ProtoMessage() {} -func (*QueryOrdersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0aae55c1068e12dc, []int{0} -} -func (m *QueryOrdersRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOrdersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOrdersRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOrdersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOrdersRequest.Merge(m, src) -} -func (m *QueryOrdersRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryOrdersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOrdersRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOrdersRequest proto.InternalMessageInfo - -func (m *QueryOrdersRequest) GetFilters() OrderFilters { - if m != nil { - return m.Filters - } - return OrderFilters{} -} - -func (m *QueryOrdersRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryOrdersResponse is response type for the Query/Orders RPC method -type QueryOrdersResponse struct { - Orders Orders `protobuf:"bytes,1,rep,name=orders,proto3,castrepeated=Orders" json:"orders"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryOrdersResponse) Reset() { *m = QueryOrdersResponse{} } -func (m *QueryOrdersResponse) String() string { return proto.CompactTextString(m) } -func (*QueryOrdersResponse) ProtoMessage() {} -func (*QueryOrdersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0aae55c1068e12dc, []int{1} -} -func (m *QueryOrdersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOrdersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOrdersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOrdersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOrdersResponse.Merge(m, src) -} -func (m *QueryOrdersResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryOrdersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOrdersResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOrdersResponse proto.InternalMessageInfo - -func (m *QueryOrdersResponse) GetOrders() Orders { - if m != nil { - return m.Orders - } - return nil -} - -func (m *QueryOrdersResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryOrderRequest is request type for the Query/Order RPC method -type QueryOrderRequest struct { - ID OrderID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m 
*QueryOrderRequest) Reset() { *m = QueryOrderRequest{} } -func (m *QueryOrderRequest) String() string { return proto.CompactTextString(m) } -func (*QueryOrderRequest) ProtoMessage() {} -func (*QueryOrderRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0aae55c1068e12dc, []int{2} -} -func (m *QueryOrderRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOrderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOrderRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOrderRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOrderRequest.Merge(m, src) -} -func (m *QueryOrderRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryOrderRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOrderRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOrderRequest proto.InternalMessageInfo - -func (m *QueryOrderRequest) GetID() OrderID { - if m != nil { - return m.ID - } - return OrderID{} -} - -// QueryOrderResponse is response type for the Query/Order RPC method -type QueryOrderResponse struct { - Order Order `protobuf:"bytes,1,opt,name=order,proto3" json:"order"` -} - -func (m *QueryOrderResponse) Reset() { *m = QueryOrderResponse{} } -func (m *QueryOrderResponse) String() string { return proto.CompactTextString(m) } -func (*QueryOrderResponse) ProtoMessage() {} -func (*QueryOrderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0aae55c1068e12dc, []int{3} -} -func (m *QueryOrderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOrderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOrderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOrderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOrderResponse.Merge(m, src) -} -func (m *QueryOrderResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryOrderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOrderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOrderResponse proto.InternalMessageInfo - -func (m *QueryOrderResponse) GetOrder() Order { - if m != nil { - return m.Order - } - return Order{} -} - -// QueryBidsRequest is request type for the Query/Bids RPC method -type QueryBidsRequest struct { - Filters BidFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryBidsRequest) Reset() { *m = QueryBidsRequest{} } -func (m *QueryBidsRequest) String() string { return proto.CompactTextString(m) } -func (*QueryBidsRequest) ProtoMessage() {} -func (*QueryBidsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0aae55c1068e12dc, []int{4} -} -func (m *QueryBidsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryBidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryBidsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryBidsRequest) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBidsRequest.Merge(m, src) -} -func (m *QueryBidsRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryBidsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBidsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryBidsRequest proto.InternalMessageInfo - -func (m *QueryBidsRequest) GetFilters() BidFilters { - if m != nil { - return m.Filters - } - return BidFilters{} -} - -func (m *QueryBidsRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryBidsResponse is response type for the Query/Bids RPC method -type QueryBidsResponse struct { - Bids []QueryBidResponse `protobuf:"bytes,1,rep,name=bids,proto3" json:"bids"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryBidsResponse) Reset() { *m = QueryBidsResponse{} } -func (m *QueryBidsResponse) String() string { return proto.CompactTextString(m) } -func (*QueryBidsResponse) ProtoMessage() {} -func (*QueryBidsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0aae55c1068e12dc, []int{5} -} -func (m *QueryBidsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryBidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryBidsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryBidsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBidsResponse.Merge(m, src) -} -func (m *QueryBidsResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryBidsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBidsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryBidsResponse proto.InternalMessageInfo - -func (m *QueryBidsResponse) GetBids() []QueryBidResponse { - if m != nil { - return m.Bids - } - return nil -} - -func (m *QueryBidsResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryBidRequest is request type for the Query/Bid RPC method -type QueryBidRequest struct { - ID BidID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m *QueryBidRequest) Reset() { *m = QueryBidRequest{} } -func (m *QueryBidRequest) String() string { return proto.CompactTextString(m) } -func (*QueryBidRequest) ProtoMessage() {} -func (*QueryBidRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0aae55c1068e12dc, []int{6} -} -func (m *QueryBidRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryBidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryBidRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryBidRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBidRequest.Merge(m, src) -} -func (m *QueryBidRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryBidRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBidRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryBidRequest proto.InternalMessageInfo - -func (m *QueryBidRequest) GetID() BidID { - if m != nil { - return m.ID - } - return BidID{} -} - -// QueryBidResponse is response type for the Query/Bid RPC method -type 
QueryBidResponse struct { - Bid Bid `protobuf:"bytes,1,opt,name=bid,proto3" json:"bid"` - EscrowAccount v1beta3.Account `protobuf:"bytes,2,opt,name=escrow_account,json=escrowAccount,proto3" json:"escrow_account"` -} - -func (m *QueryBidResponse) Reset() { *m = QueryBidResponse{} } -func (m *QueryBidResponse) String() string { return proto.CompactTextString(m) } -func (*QueryBidResponse) ProtoMessage() {} -func (*QueryBidResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0aae55c1068e12dc, []int{7} -} -func (m *QueryBidResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryBidResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryBidResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBidResponse.Merge(m, src) -} -func (m *QueryBidResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryBidResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBidResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryBidResponse proto.InternalMessageInfo - -func (m *QueryBidResponse) GetBid() Bid { - if m != nil { - return m.Bid - } - return Bid{} -} - -func (m *QueryBidResponse) GetEscrowAccount() v1beta3.Account { - if m != nil { - return m.EscrowAccount - } - return v1beta3.Account{} -} - -// QueryLeasesRequest is request type for the Query/Leases RPC method -type QueryLeasesRequest struct { - Filters LeaseFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryLeasesRequest) Reset() { *m = QueryLeasesRequest{} } -func (m *QueryLeasesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryLeasesRequest) ProtoMessage() {} -func (*QueryLeasesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0aae55c1068e12dc, []int{8} -} -func (m *QueryLeasesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLeasesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLeasesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLeasesRequest.Merge(m, src) -} -func (m *QueryLeasesRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryLeasesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLeasesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLeasesRequest proto.InternalMessageInfo - -func (m *QueryLeasesRequest) GetFilters() LeaseFilters { - if m != nil { - return m.Filters - } - return LeaseFilters{} -} - -func (m *QueryLeasesRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryLeasesResponse is response type for the Query/Leases RPC method -type QueryLeasesResponse struct { - Leases []QueryLeaseResponse `protobuf:"bytes,1,rep,name=leases,proto3" json:"leases"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryLeasesResponse) Reset() { *m = QueryLeasesResponse{} } -func (m 
*QueryLeasesResponse) String() string { return proto.CompactTextString(m) } -func (*QueryLeasesResponse) ProtoMessage() {} -func (*QueryLeasesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0aae55c1068e12dc, []int{9} -} -func (m *QueryLeasesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLeasesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLeasesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLeasesResponse.Merge(m, src) -} -func (m *QueryLeasesResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryLeasesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLeasesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLeasesResponse proto.InternalMessageInfo - -func (m *QueryLeasesResponse) GetLeases() []QueryLeaseResponse { - if m != nil { - return m.Leases - } - return nil -} - -func (m *QueryLeasesResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryLeaseRequest is request type for the Query/Lease RPC method -type QueryLeaseRequest struct { - ID LeaseID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m *QueryLeaseRequest) Reset() { *m = QueryLeaseRequest{} } -func (m *QueryLeaseRequest) String() string { return proto.CompactTextString(m) } -func (*QueryLeaseRequest) ProtoMessage() {} -func (*QueryLeaseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0aae55c1068e12dc, []int{10} -} -func (m *QueryLeaseRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLeaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLeaseRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLeaseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLeaseRequest.Merge(m, src) -} -func (m *QueryLeaseRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryLeaseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLeaseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLeaseRequest proto.InternalMessageInfo - -func (m *QueryLeaseRequest) GetID() LeaseID { - if m != nil { - return m.ID - } - return LeaseID{} -} - -// QueryLeaseResponse is response type for the Query/Lease RPC method -type QueryLeaseResponse struct { - Lease Lease `protobuf:"bytes,1,opt,name=lease,proto3" json:"lease"` - EscrowPayment v1beta3.FractionalPayment `protobuf:"bytes,2,opt,name=escrow_payment,json=escrowPayment,proto3" json:"escrow_payment"` -} - -func (m *QueryLeaseResponse) Reset() { *m = QueryLeaseResponse{} } -func (m *QueryLeaseResponse) String() string { return proto.CompactTextString(m) } -func (*QueryLeaseResponse) ProtoMessage() {} -func (*QueryLeaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0aae55c1068e12dc, []int{11} -} -func (m *QueryLeaseResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLeaseResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLeaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLeaseResponse.Merge(m, src) -} -func (m *QueryLeaseResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryLeaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLeaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLeaseResponse proto.InternalMessageInfo - -func (m *QueryLeaseResponse) GetLease() Lease { - if m != nil { - return m.Lease - } - return Lease{} -} - -func (m *QueryLeaseResponse) GetEscrowPayment() v1beta3.FractionalPayment { - if m != nil { - return m.EscrowPayment - } - return v1beta3.FractionalPayment{} -} - -func init() { - proto.RegisterType((*QueryOrdersRequest)(nil), "akash.market.v1beta3.QueryOrdersRequest") - proto.RegisterType((*QueryOrdersResponse)(nil), "akash.market.v1beta3.QueryOrdersResponse") - proto.RegisterType((*QueryOrderRequest)(nil), "akash.market.v1beta3.QueryOrderRequest") - proto.RegisterType((*QueryOrderResponse)(nil), "akash.market.v1beta3.QueryOrderResponse") - proto.RegisterType((*QueryBidsRequest)(nil), "akash.market.v1beta3.QueryBidsRequest") - proto.RegisterType((*QueryBidsResponse)(nil), "akash.market.v1beta3.QueryBidsResponse") - proto.RegisterType((*QueryBidRequest)(nil), "akash.market.v1beta3.QueryBidRequest") - proto.RegisterType((*QueryBidResponse)(nil), "akash.market.v1beta3.QueryBidResponse") - proto.RegisterType((*QueryLeasesRequest)(nil), "akash.market.v1beta3.QueryLeasesRequest") - proto.RegisterType((*QueryLeasesResponse)(nil), "akash.market.v1beta3.QueryLeasesResponse") - proto.RegisterType((*QueryLeaseRequest)(nil), "akash.market.v1beta3.QueryLeaseRequest") - proto.RegisterType((*QueryLeaseResponse)(nil), "akash.market.v1beta3.QueryLeaseResponse") -} - -func init() { proto.RegisterFile("akash/market/v1beta3/query.proto", fileDescriptor_0aae55c1068e12dc) } - -var fileDescriptor_0aae55c1068e12dc = []byte{ - // 795 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x4f, 0x13, 0x41, - 0x14, 0xee, 0x16, 0xa8, 0xc9, 0x10, 0x51, 0x47, 0x0e, 0x58, 0xb4, 0x85, 0x1a, 0x69, 0x31, 0x71, - 0x37, 0xc0, 0x81, 0x70, 0xc3, 0x95, 0xd4, 0x40, 0xfc, 0x81, 0xd5, 0x93, 0x17, 0x33, 0xdb, 0x1d, - 0x96, 0x09, 0xed, 0x4e, 0xd9, 0xd9, 0x4a, 0x38, 0x98, 0x18, 0x13, 0x13, 0x8f, 0x1a, 0xaf, 0xc6, - 0x98, 0x90, 0x78, 0xf0, 0xe2, 0xbf, 0xc1, 0x91, 0xc4, 0x8b, 0x27, 0x34, 0xc5, 0x3f, 0xc4, 0xcc, - 0x8f, 0xdd, 0x76, 0xc9, 0x76, 0x77, 0x49, 0xe0, 0xd6, 0xee, 0x7e, 0xef, 0xbd, 0xef, 0xbd, 0xf7, - 0xcd, 0xb7, 0x03, 0x66, 0xd0, 0x0e, 0x62, 0xdb, 0x46, 0x1b, 0x79, 0x3b, 0xd8, 0x37, 0x5e, 0x2f, - 0x58, 0xd8, 0x47, 0x4b, 0xc6, 0x6e, 0x17, 0x7b, 0xfb, 0x7a, 0xc7, 0xa3, 0x3e, 0x85, 0x93, 0x02, - 0xa1, 0x4b, 0x84, 0xae, 0x10, 0xc5, 0x49, 0x87, 0x3a, 0x54, 0x00, 0x0c, 0xfe, 0x4b, 0x62, 0x8b, - 0x37, 0x1d, 0x4a, 0x9d, 0x16, 0x36, 0x50, 0x87, 0x18, 0xc8, 0x75, 0xa9, 0x8f, 0x7c, 0x42, 0x5d, - 0xa6, 0xde, 0xde, 0x6d, 0x52, 0xd6, 0xa6, 0xcc, 0xb0, 0x10, 0xc3, 0xb2, 0x84, 0x2a, 0xb8, 0x60, - 0x74, 0x90, 0x43, 0x5c, 0x01, 0x56, 0xd8, 0x78, 0x5e, 0xd4, 0xb3, 0xb1, 0xa7, 0x10, 0xa5, 0x58, - 0x84, 0x45, 0xec, 0xc4, 0x0c, 0x2d, 0x8c, 0x18, 0x8e, 0x22, 0x30, 0x6b, 0x7a, 0x74, 0x2f, 0x44, - 0xf8, 0xfb, 0x1d, 0xac, 0x18, 0x57, 0xbe, 0x69, 0x00, 0x3e, 0xe3, 0x44, 0x9f, 0xf2, 0xc2, 0xac, - 0x81, 0x77, 0xbb, 0x98, 0xf9, 0xd0, 0x04, 0x97, 0xb6, 0x48, 0xcb, 0xc7, 0x1e, 0x9b, 0xd2, 0x66, - 0xb4, 0xda, 0xf8, 0x62, 0x45, 0x8f, 0x1b, 
0x92, 0x2e, 0xa2, 0xea, 0x12, 0x69, 0x8e, 0x1e, 0x1e, - 0x97, 0x73, 0x8d, 0x20, 0x10, 0xd6, 0x01, 0xe8, 0x37, 0x3d, 0x95, 0x17, 0x69, 0xe6, 0x74, 0x39, - 0x21, 0x9d, 0x4f, 0x48, 0x97, 0x4b, 0x50, 0x13, 0xd2, 0x37, 0x91, 0x83, 0x55, 0xfd, 0xc6, 0x40, - 0x64, 0xe5, 0x40, 0x03, 0xd7, 0x23, 0x14, 0x59, 0x87, 0xba, 0x0c, 0xc3, 0x07, 0xa0, 0x20, 0xa6, - 0xc5, 0x29, 0x8e, 0xd4, 0xc6, 0x17, 0xa7, 0x13, 0x28, 0x9a, 0x13, 0x9c, 0xdb, 0x8f, 0x3f, 0xe5, - 0x82, 0x4a, 0xa2, 0x42, 0xe1, 0xc3, 0x18, 0x92, 0xd5, 0x54, 0x92, 0x92, 0x41, 0x84, 0xe5, 0x13, - 0x70, 0xad, 0x4f, 0x32, 0x18, 0xe3, 0x0a, 0xc8, 0x13, 0x5b, 0x4d, 0xf0, 0x56, 0x02, 0xbd, 0xf5, - 0x35, 0x13, 0x70, 0x82, 0xbd, 0xe3, 0x72, 0x7e, 0x7d, 0xad, 0x91, 0x27, 0x76, 0xe5, 0xf1, 0xe0, - 0x5e, 0xc2, 0x9e, 0x97, 0xc1, 0x98, 0x20, 0xae, 0x72, 0x26, 0xb6, 0x2c, 0xd7, 0x21, 0xf1, 0x95, - 0x2f, 0x1a, 0xb8, 0x2a, 0xf2, 0x99, 0xc4, 0x0e, 0xb7, 0xbc, 0x7a, 0x7a, 0xcb, 0x33, 0xf1, 0xf9, - 0x4c, 0x62, 0x5f, 0xf0, 0x8e, 0xbf, 0x6a, 0x6a, 0x7c, 0x92, 0x9e, 0xea, 0x76, 0x15, 0x8c, 0x5a, - 0xc4, 0x0e, 0xf6, 0x3b, 0x17, 0x4f, 0x2e, 0x08, 0x0b, 0xa2, 0x14, 0x45, 0x11, 0x79, 0x7e, 0xeb, - 0xdd, 0x00, 0x57, 0xfa, 0x85, 0xe4, 0xf4, 0x96, 0x07, 0x96, 0x3b, 0x3d, 0x74, 0x70, 0x31, 0xab, - 0xfd, 0x34, 0xb0, 0x8b, 0xb0, 0xd7, 0x05, 0x30, 0x62, 0x85, 0xe9, 0x6e, 0x0c, 0x4d, 0xa7, 0xba, - 0xe3, 0x58, 0xb8, 0x01, 0x26, 0xe4, 0xc9, 0x7e, 0x85, 0x9a, 0x4d, 0xda, 0x75, 0x7d, 0xd5, 0x60, - 0xa0, 0x34, 0xf9, 0x32, 0x8c, 0xbe, 0x2f, 0x41, 0x2a, 0xc3, 0x65, 0xf9, 0x56, 0x3d, 0xec, 0xfb, - 0xc0, 0x23, 0x6e, 0x1f, 0x67, 0xf6, 0x01, 0x11, 0x75, 0xc1, 0x1a, 0xf9, 0x1e, 0xf8, 0x40, 0x40, - 0x51, 0x4d, 0xae, 0x0e, 0x0a, 0xc2, 0xf3, 0x02, 0x9d, 0xd4, 0x12, 0x74, 0x22, 0x42, 0x4f, 0x29, - 0x45, 0x45, 0x9f, 0xbf, 0x15, 0xa8, 0x62, 0x99, 0xad, 0x40, 0xe0, 0x63, 0xf4, 0x72, 0x10, 0xd9, - 0xcd, 0xa0, 0x17, 0x08, 0xe6, 0xc9, 0x12, 0x14, 0x31, 0x81, 0x17, 0x08, 0x3c, 0x7c, 0x11, 0xea, - 0xa6, 0x83, 0xf6, 0xdb, 0x38, 0xd4, 0x4d, 0x35, 0x5e, 0x37, 0x75, 0x0f, 0x35, 0x79, 0x5f, 0xa8, - 0xb5, 0x29, 0xe1, 0x51, 0x05, 0xa9, 0x87, 0x8b, 0x3f, 0x0b, 0x60, 0x4c, 0xb0, 0x84, 0x1f, 0x34, - 0xa0, 0x6c, 0x16, 0x26, 0xed, 0x22, 0xf2, 0xc5, 0x29, 0xce, 0x67, 0x40, 0xca, 0xc6, 0x2b, 0xf3, - 0xef, 0x7e, 0xfd, 0xfb, 0x9c, 0xbf, 0x0d, 0x67, 0x8d, 0xe1, 0x9f, 0x50, 0x66, 0xb4, 0x08, 0xf3, - 0xe1, 0x7b, 0x0d, 0x8c, 0x89, 0x68, 0x58, 0x4d, 0xcb, 0x1f, 0x10, 0xa9, 0xa5, 0x03, 0xcf, 0xc4, - 0x83, 0xb8, 0x5b, 0x14, 0xbe, 0xd5, 0xc0, 0x28, 0xb7, 0x36, 0x98, 0x62, 0x62, 0xe1, 0x38, 0xaa, - 0xa9, 0x38, 0x45, 0xa2, 0x2a, 0x48, 0xcc, 0xc2, 0xb2, 0x31, 0xec, 0xb6, 0xa0, 0x46, 0xf1, 0x06, - 0x8c, 0x98, 0xc4, 0x86, 0x77, 0xd2, 0x5c, 0x54, 0xd6, 0xcf, 0x68, 0xb6, 0x99, 0xca, 0x8b, 0x09, - 0x70, 0x51, 0xc8, 0x83, 0x0b, 0x53, 0x0f, 0x68, 0x26, 0x51, 0x44, 0x5d, 0x20, 0x6d, 0x19, 0xf2, - 0x8c, 0xf7, 0x45, 0x21, 0xa2, 0x13, 0x45, 0x31, 0x78, 0x7a, 0x8b, 0x99, 0x3d, 0x25, 0x23, 0x0f, - 0x3e, 0x12, 0xf3, 0xf9, 0x61, 0xaf, 0xa4, 0x1d, 0xf5, 0x4a, 0xda, 0xdf, 0x5e, 0x49, 0xfb, 0x78, - 0x52, 0xca, 0x1d, 0x9d, 0x94, 0x72, 0xbf, 0x4f, 0x4a, 0xb9, 0x97, 0x2b, 0x0e, 0xf1, 0xb7, 0xbb, - 0x96, 0xde, 0xa4, 0x6d, 0x99, 0xe6, 0x9e, 0x8b, 0xfd, 0x3d, 0xea, 0xed, 0xa8, 0x7f, 0xfc, 0xf6, - 0xe9, 0x50, 0xc3, 0xa5, 0x36, 0x3e, 0x55, 0xc0, 0x2a, 0x88, 0x7b, 0xdd, 0xd2, 0xff, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x9f, 0x56, 0xf0, 0xe7, 0xf7, 0x0a, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // Orders queries orders with filters - Orders(ctx context.Context, in *QueryOrdersRequest, opts ...grpc.CallOption) (*QueryOrdersResponse, error) - // Order queries order details - Order(ctx context.Context, in *QueryOrderRequest, opts ...grpc.CallOption) (*QueryOrderResponse, error) - // Bids queries bids with filters - Bids(ctx context.Context, in *QueryBidsRequest, opts ...grpc.CallOption) (*QueryBidsResponse, error) - // Bid queries bid details - Bid(ctx context.Context, in *QueryBidRequest, opts ...grpc.CallOption) (*QueryBidResponse, error) - // Leases queries leases with filters - Leases(ctx context.Context, in *QueryLeasesRequest, opts ...grpc.CallOption) (*QueryLeasesResponse, error) - // Lease queries lease details - Lease(ctx context.Context, in *QueryLeaseRequest, opts ...grpc.CallOption) (*QueryLeaseResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Orders(ctx context.Context, in *QueryOrdersRequest, opts ...grpc.CallOption) (*QueryOrdersResponse, error) { - out := new(QueryOrdersResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta3.Query/Orders", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Order(ctx context.Context, in *QueryOrderRequest, opts ...grpc.CallOption) (*QueryOrderResponse, error) { - out := new(QueryOrderResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta3.Query/Order", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Bids(ctx context.Context, in *QueryBidsRequest, opts ...grpc.CallOption) (*QueryBidsResponse, error) { - out := new(QueryBidsResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta3.Query/Bids", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Bid(ctx context.Context, in *QueryBidRequest, opts ...grpc.CallOption) (*QueryBidResponse, error) { - out := new(QueryBidResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta3.Query/Bid", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Leases(ctx context.Context, in *QueryLeasesRequest, opts ...grpc.CallOption) (*QueryLeasesResponse, error) { - out := new(QueryLeasesResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta3.Query/Leases", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Lease(ctx context.Context, in *QueryLeaseRequest, opts ...grpc.CallOption) (*QueryLeaseResponse, error) { - out := new(QueryLeaseResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta3.Query/Lease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. 
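// queryOpenOrders is a hypothetical illustration of how the Query client above
// is consumed; it is not part of the generated API. The empty filter literal
// (assuming the package's OrderFilters type) and the page size are placeholder
// assumptions; any *grpc.ClientConn satisfies the grpc1.ClientConn parameter.
func queryOpenOrders(ctx context.Context, conn *grpc.ClientConn) ([]Order, error) {
	qc := NewQueryClient(conn)
	res, err := qc.Orders(ctx, &QueryOrdersRequest{
		Filters:    OrderFilters{},                // narrow by owner, state, etc. as needed
		Pagination: &query.PageRequest{Limit: 10}, // cosmos-sdk pagination
	})
	if err != nil {
		return nil, err
	}
	return res.Orders, nil
}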
-type QueryServer interface { - // Orders queries orders with filters - Orders(context.Context, *QueryOrdersRequest) (*QueryOrdersResponse, error) - // Order queries order details - Order(context.Context, *QueryOrderRequest) (*QueryOrderResponse, error) - // Bids queries bids with filters - Bids(context.Context, *QueryBidsRequest) (*QueryBidsResponse, error) - // Bid queries bid details - Bid(context.Context, *QueryBidRequest) (*QueryBidResponse, error) - // Leases queries leases with filters - Leases(context.Context, *QueryLeasesRequest) (*QueryLeasesResponse, error) - // Lease queries lease details - Lease(context.Context, *QueryLeaseRequest) (*QueryLeaseResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. -type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Orders(ctx context.Context, req *QueryOrdersRequest) (*QueryOrdersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Orders not implemented") -} -func (*UnimplementedQueryServer) Order(ctx context.Context, req *QueryOrderRequest) (*QueryOrderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Order not implemented") -} -func (*UnimplementedQueryServer) Bids(ctx context.Context, req *QueryBidsRequest) (*QueryBidsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Bids not implemented") -} -func (*UnimplementedQueryServer) Bid(ctx context.Context, req *QueryBidRequest) (*QueryBidResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Bid not implemented") -} -func (*UnimplementedQueryServer) Leases(ctx context.Context, req *QueryLeasesRequest) (*QueryLeasesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Leases not implemented") -} -func (*UnimplementedQueryServer) Lease(ctx context.Context, req *QueryLeaseRequest) (*QueryLeaseResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Lease not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Orders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryOrdersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Orders(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta3.Query/Orders", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Orders(ctx, req.(*QueryOrdersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Order_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryOrderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Order(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta3.Query/Order", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Order(ctx, req.(*QueryOrderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Bids_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(QueryBidsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Bids(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta3.Query/Bids", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Bids(ctx, req.(*QueryBidsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Bid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryBidRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Bid(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta3.Query/Bid", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Bid(ctx, req.(*QueryBidRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Leases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryLeasesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Leases(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta3.Query/Leases", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Leases(ctx, req.(*QueryLeasesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Lease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryLeaseRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Lease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta3.Query/Lease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Lease(ctx, req.(*QueryLeaseRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.market.v1beta3.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Orders", - Handler: _Query_Orders_Handler, - }, - { - MethodName: "Order", - Handler: _Query_Order_Handler, - }, - { - MethodName: "Bids", - Handler: _Query_Bids_Handler, - }, - { - MethodName: "Bid", - Handler: _Query_Bid_Handler, - }, - { - MethodName: "Leases", - Handler: _Query_Leases_Handler, - }, - { - MethodName: "Lease", - Handler: _Query_Lease_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/market/v1beta3/query.proto", -} - -func (m *QueryOrdersRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOrdersRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOrdersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size 
- i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryOrdersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOrdersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOrdersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Orders) > 0 { - for iNdEx := len(m.Orders) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Orders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryOrderRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOrderRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOrderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryOrderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOrderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOrderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Order.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryBidsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryBidsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryBidsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryBidsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryBidsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryBidsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Bids) > 0 { - for iNdEx := len(m.Bids) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Bids[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryBidRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryBidRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryBidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryBidResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryBidResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.EscrowAccount.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.Bid.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryLeasesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLeasesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryLeasesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLeasesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Leases) > 0 { - for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryLeaseRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLeaseRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLeaseRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryLeaseResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLeaseResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.EscrowPayment.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.Lease.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryOrdersRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filters.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryOrdersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Orders) > 0 { - for _, e := range m.Orders { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryOrderRequest) Size() (n int) { - 
if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryOrderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Order.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryBidsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filters.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryBidsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Bids) > 0 { - for _, e := range m.Bids { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryBidRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryBidResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Bid.Size() - n += 1 + l + sovQuery(uint64(l)) - l = m.EscrowAccount.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryLeasesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filters.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryLeasesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Leases) > 0 { - for _, e := range m.Leases { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryLeaseRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryLeaseResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Lease.Size() - n += 1 + l + sovQuery(uint64(l)) - l = m.EscrowPayment.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryOrdersRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOrdersRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOrdersRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := 
iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryOrdersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOrdersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOrdersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Orders", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Orders = append(m.Orders, Order{}) - if err := m.Orders[len(m.Orders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { 
- return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryOrderRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOrderRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOrderRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryOrderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOrderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOrderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Order.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } 
- } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryBidsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryBidsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBidsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryBidsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryBidsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBidsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bids", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - 
} - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bids = append(m.Bids, QueryBidResponse{}) - if err := m.Bids[len(m.Bids)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryBidRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryBidRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBidRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryBidResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 
0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryBidResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bid", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Bid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EscrowAccount", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EscrowAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryLeasesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryLeasesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - 
} - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryLeasesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryLeasesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Leases = append(m.Leases, QueryLeaseResponse{}) - if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryLeaseRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryLeaseRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLeaseRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryLeaseResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryLeaseResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Lease.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EscrowPayment", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EscrowPayment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err 
!= nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta3/query.pb.gw.go b/go/node/market/v1beta3/query.pb.gw.go deleted file mode 100644 index 64ecf0c3..00000000 --- a/go/node/market/v1beta3/query.pb.gw.go +++ /dev/null @@ -1,586 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/market/v1beta3/query.proto - -/* -Package v1beta3 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package v1beta3 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_Orders_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Orders_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryOrdersRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Orders_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Orders(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Orders_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryOrdersRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Orders_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Orders(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Order_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Order_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryOrderRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Order_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Order(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Order_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryOrderRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Order_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := 
server.Order(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Bids_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Bids_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBidsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bids_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Bids(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Bids_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBidsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bids_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Bids(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Bid_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Bid_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBidRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bid_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Bid(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Bid_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBidRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bid_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Bid(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Leases_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Leases_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLeasesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := 
runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Leases_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Leases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Leases_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLeasesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Leases_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Leases(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Lease_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Lease_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLeaseRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Lease_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Lease(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Lease_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLeaseRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Lease_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Lease(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
-func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_Orders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Orders_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Orders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Order_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Order_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Order_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Bids_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Bids_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Bids_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_Bid_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Bid_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Bid_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Leases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Leases_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Leases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Lease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Lease_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Lease_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_Orders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Orders_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Orders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Order_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Order_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Order_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_Bids_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Bids_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Bids_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Bid_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Bid_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Bid_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Leases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Leases_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Leases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Lease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Lease_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Lease_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Query_Orders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta3", "orders", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Order_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta3", "orders", "info"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Bids_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta3", "bids", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Bid_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta3", "bids", "info"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Leases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta3", "leases", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Lease_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta3", "leases", "info"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_Orders_0 = runtime.ForwardResponseMessage - - forward_Query_Order_0 = runtime.ForwardResponseMessage - - forward_Query_Bids_0 = runtime.ForwardResponseMessage - - forward_Query_Bid_0 = runtime.ForwardResponseMessage - - forward_Query_Leases_0 = runtime.ForwardResponseMessage - - forward_Query_Lease_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/market/v1beta3/service.pb.go b/go/node/market/v1beta3/service.pb.go deleted file mode 100644 index 1c01a3cd..00000000 --- a/go/node/market/v1beta3/service.pb.go +++ /dev/null @@ -1,287 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta3/service.proto - -package v1beta3 - -import ( - context "context" - fmt "fmt" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func init() { - proto.RegisterFile("akash/market/v1beta3/service.proto", fileDescriptor_0637a2f85fdb6b87) -} - -var fileDescriptor_0637a2f85fdb6b87 = []byte{ - // 292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4a, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0xcf, 0x4d, 0x2c, 0xca, 0x4e, 0x2d, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, - 0xd6, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, - 0x01, 0xab, 0xd1, 0x83, 0xa8, 0xd1, 0x83, 0xaa, 0x91, 0x92, 0xc3, 0xaa, 0x33, 0x29, 0x33, 0x05, - 0xa2, 0x4b, 0x4a, 0x01, 0xab, 0x7c, 0x4e, 0x6a, 0x62, 0x31, 0xd4, 0x5c, 0xa3, 0x17, 0xcc, 0x5c, - 0xcc, 0xbe, 0xc5, 0xe9, 0x42, 0xd1, 0x5c, 0x9c, 0xce, 0x45, 0xa9, 0x89, 0x25, 0xa9, 0x4e, 0x99, - 0x29, 0x42, 0x4a, 0x7a, 0xd8, 0x6c, 0xd3, 0xf3, 0x2d, 0x4e, 0x87, 0xab, 0x91, 0xd2, 0x22, 0xac, - 0x26, 0x28, 0xb5, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0x28, 0x82, 0x8b, 0xc3, 0x39, 0x27, 0xbf, - 0x18, 0x6c, 0xb6, 0x22, 0x6e, 0x7d, 0x50, 0x25, 0x52, 0x9a, 0x04, 0x95, 0xc0, 0x4d, 0x4e, 0xe7, - 0xe2, 0x0d, 0xcf, 0x2c, 0xc9, 0x48, 0x29, 0x4a, 0x2c, 0xf7, 0x01, 0xf9, 0x4a, 0x48, 0x0d, 0xa7, - 0x5e, 0x14, 0x75, 0x52, 0x7a, 0xc4, 0xa9, 0x83, 0x5b, 0x94, 0xc8, 0xc5, 0x0d, 0xf1, 0x17, 0xc4, - 0x1a, 0x15, 0x02, 0xbe, 0x87, 0x58, 0xa2, 0x43, 0x8c, 0x2a, 0xb8, 0x15, 0x71, 0x5c, 0x5c, 0x60, - 0xff, 0x41, 0x6c, 0x50, 0xc6, 0x1f, 0x08, 0x10, 0x0b, 0xb4, 0x89, 0x50, 0x04, 0x33, 0xdf, 0x29, - 0xf8, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, - 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x2c, 0xd3, 0x33, 0x4b, 0x32, - 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xc1, 0x06, 0xea, 0xe6, 0xa5, 0x96, 0x94, 0xe7, 0x17, - 0x65, 0x43, 0x79, 0x89, 0x05, 0x99, 0xfa, 0xe9, 0xf9, 0xfa, 0x79, 0xf9, 0x29, 0xa9, 0x68, 0x69, - 0x29, 0x89, 0x0d, 0x9c, 0x8c, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc4, 0x0e, 0xeb, 0xba, - 0xc4, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MsgClient interface { - // CreateBid defines a method to create a bid given proper inputs. - CreateBid(ctx context.Context, in *MsgCreateBid, opts ...grpc.CallOption) (*MsgCreateBidResponse, error) - // CloseBid defines a method to close a bid given proper inputs. - CloseBid(ctx context.Context, in *MsgCloseBid, opts ...grpc.CallOption) (*MsgCloseBidResponse, error) - // WithdrawLease withdraws accrued funds from the lease payment - WithdrawLease(ctx context.Context, in *MsgWithdrawLease, opts ...grpc.CallOption) (*MsgWithdrawLeaseResponse, error) - // CreateLease creates a new lease - CreateLease(ctx context.Context, in *MsgCreateLease, opts ...grpc.CallOption) (*MsgCreateLeaseResponse, error) - // CloseLease defines a method to close an order given proper inputs. 
- CloseLease(ctx context.Context, in *MsgCloseLease, opts ...grpc.CallOption) (*MsgCloseLeaseResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) CreateBid(ctx context.Context, in *MsgCreateBid, opts ...grpc.CallOption) (*MsgCreateBidResponse, error) { - out := new(MsgCreateBidResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta3.Msg/CreateBid", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CloseBid(ctx context.Context, in *MsgCloseBid, opts ...grpc.CallOption) (*MsgCloseBidResponse, error) { - out := new(MsgCloseBidResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta3.Msg/CloseBid", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) WithdrawLease(ctx context.Context, in *MsgWithdrawLease, opts ...grpc.CallOption) (*MsgWithdrawLeaseResponse, error) { - out := new(MsgWithdrawLeaseResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta3.Msg/WithdrawLease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CreateLease(ctx context.Context, in *MsgCreateLease, opts ...grpc.CallOption) (*MsgCreateLeaseResponse, error) { - out := new(MsgCreateLeaseResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta3.Msg/CreateLease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CloseLease(ctx context.Context, in *MsgCloseLease, opts ...grpc.CallOption) (*MsgCloseLeaseResponse, error) { - out := new(MsgCloseLeaseResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta3.Msg/CloseLease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. -type MsgServer interface { - // CreateBid defines a method to create a bid given proper inputs. - CreateBid(context.Context, *MsgCreateBid) (*MsgCreateBidResponse, error) - // CloseBid defines a method to close a bid given proper inputs. - CloseBid(context.Context, *MsgCloseBid) (*MsgCloseBidResponse, error) - // WithdrawLease withdraws accrued funds from the lease payment - WithdrawLease(context.Context, *MsgWithdrawLease) (*MsgWithdrawLeaseResponse, error) - // CreateLease creates a new lease - CreateLease(context.Context, *MsgCreateLease) (*MsgCreateLeaseResponse, error) - // CloseLease defines a method to close an order given proper inputs. - CloseLease(context.Context, *MsgCloseLease) (*MsgCloseLeaseResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
-type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) CreateBid(ctx context.Context, req *MsgCreateBid) (*MsgCreateBidResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateBid not implemented") -} -func (*UnimplementedMsgServer) CloseBid(ctx context.Context, req *MsgCloseBid) (*MsgCloseBidResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseBid not implemented") -} -func (*UnimplementedMsgServer) WithdrawLease(ctx context.Context, req *MsgWithdrawLease) (*MsgWithdrawLeaseResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method WithdrawLease not implemented") -} -func (*UnimplementedMsgServer) CreateLease(ctx context.Context, req *MsgCreateLease) (*MsgCreateLeaseResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateLease not implemented") -} -func (*UnimplementedMsgServer) CloseLease(ctx context.Context, req *MsgCloseLease) (*MsgCloseLeaseResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseLease not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_CreateBid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateBid) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateBid(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta3.Msg/CreateBid", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CreateBid(ctx, req.(*MsgCreateBid)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CloseBid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCloseBid) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CloseBid(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta3.Msg/CloseBid", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CloseBid(ctx, req.(*MsgCloseBid)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_WithdrawLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgWithdrawLease) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).WithdrawLease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta3.Msg/WithdrawLease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).WithdrawLease(ctx, req.(*MsgWithdrawLease)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CreateLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateLease) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateLease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta3.Msg/CreateLease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { - return srv.(MsgServer).CreateLease(ctx, req.(*MsgCreateLease)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CloseLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCloseLease) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CloseLease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta3.Msg/CloseLease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CloseLease(ctx, req.(*MsgCloseLease)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.market.v1beta3.Msg", - HandlerType: (*MsgServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateBid", - Handler: _Msg_CreateBid_Handler, - }, - { - MethodName: "CloseBid", - Handler: _Msg_CloseBid_Handler, - }, - { - MethodName: "WithdrawLease", - Handler: _Msg_WithdrawLease_Handler, - }, - { - MethodName: "CreateLease", - Handler: _Msg_CreateLease_Handler, - }, - { - MethodName: "CloseLease", - Handler: _Msg_CloseLease_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/market/v1beta3/service.proto", -} diff --git a/go/node/market/v1beta3/types.go b/go/node/market/v1beta3/types.go deleted file mode 100644 index b67ea4e6..00000000 --- a/go/node/market/v1beta3/types.go +++ /dev/null @@ -1,241 +0,0 @@ -package v1beta3 - -import ( - "strings" - - sdk "github.com/cosmos/cosmos-sdk/types" - - types "github.com/akash-network/akash-api/go/node/types/v1beta3" - - atypes "github.com/akash-network/akash-api/go/node/audit/v1beta3" - - "gopkg.in/yaml.v3" -) - -// ID method returns OrderID details of specific order -func (o Order) ID() OrderID { - return o.OrderID -} - -// String implements the Stringer interface for a Order object. -func (o Order) String() string { - out, _ := yaml.Marshal(o) - return string(out) -} - -// Orders is a collection of Order -type Orders []Order - -// String implements the Stringer interface for a Orders object. 
-func (o Orders) String() string { - var out string - for _, order := range o { - out += order.String() + "\n" - } - - return strings.TrimSpace(out) -} - -// ValidateCanBid method validates whether order is open or not and -// returns error if not -func (o Order) ValidateCanBid() error { - switch o.State { - case OrderOpen: - return nil - case OrderActive: - return ErrOrderActive - default: - return ErrOrderClosed - } -} - -// ValidateInactive method validates whether order is open or not and -// returns error if not -func (o Order) ValidateInactive() error { - switch o.State { - case OrderClosed: - return nil - case OrderActive: - return ErrOrderActive - default: - return ErrOrderClosed - } -} - -// Price method returns price of specific order -func (o Order) Price() sdk.DecCoin { - return o.Spec.Price() -} - -// MatchAttributes method compares provided attributes with specific order attributes -func (o Order) MatchAttributes(attrs []types.Attribute) bool { - return o.Spec.MatchAttributes(attrs) -} - -// MatchRequirements method compares provided attributes with specific order attributes -func (o Order) MatchRequirements(prov []atypes.Provider) bool { - return o.Spec.MatchRequirements(prov) -} - -// MatchResourcesRequirements method compares provider capabilities with specific order resources attributes -func (o Order) MatchResourcesRequirements(attr types.Attributes) bool { - return o.Spec.MatchResourcesRequirements(attr) -} - -// Accept returns whether order filters valid or not -func (filters OrderFilters) Accept(obj Order, stateVal Order_State) bool { - // Checking owner filter - if filters.Owner != "" && filters.Owner != obj.OrderID.Owner { - return false - } - - // Checking dseq filter - if filters.DSeq != 0 && filters.DSeq != obj.OrderID.DSeq { - return false - } - - // Checking gseq filter - if filters.GSeq != 0 && filters.GSeq != obj.OrderID.GSeq { - return false - } - - // Checking oseq filter - if filters.OSeq != 0 && filters.OSeq != obj.OrderID.OSeq { - return false - } - - // Checking state filter - if stateVal != 0 && stateVal != obj.State { - return false - } - - return true -} - -// ID method returns BidID details of specific bid -func (obj Bid) ID() BidID { - return obj.BidID -} - -// String implements the Stringer interface for a Bid object. -func (obj Bid) String() string { - out, _ := yaml.Marshal(obj) - return string(out) -} - -// Bids is a collection of Bid -type Bids []Bid - -// String implements the Stringer interface for a Bids object. 
-func (b Bids) String() string { - var out string - for _, bid := range b { - out += bid.String() + "\n" - } - - return strings.TrimSpace(out) -} - -// Accept returns whether bid filters valid or not -func (filters BidFilters) Accept(obj Bid, stateVal Bid_State) bool { - // Checking owner filter - if filters.Owner != "" && filters.Owner != obj.BidID.Owner { - return false - } - - // Checking dseq filter - if filters.DSeq != 0 && filters.DSeq != obj.BidID.DSeq { - return false - } - - // Checking gseq filter - if filters.GSeq != 0 && filters.GSeq != obj.BidID.GSeq { - return false - } - - // Checking oseq filter - if filters.OSeq != 0 && filters.OSeq != obj.BidID.OSeq { - return false - } - - // Checking provider filter - if filters.Provider != "" && filters.Provider != obj.BidID.Provider { - return false - } - - // Checking state filter - if stateVal != 0 && stateVal != obj.State { - return false - } - - return true -} - -// ID method returns LeaseID details of specific lease -func (obj Lease) ID() LeaseID { - return obj.LeaseID -} - -// String implements the Stringer interface for a Lease object. -func (obj Lease) String() string { - out, _ := yaml.Marshal(obj) - return string(out) -} - -// Leases is a collection of Lease -type Leases []Lease - -// String implements the Stringer interface for a Leases object. -func (l Leases) String() string { - var out string - for _, order := range l { - out += order.String() + "\n" - } - - return strings.TrimSpace(out) -} - -// Accept returns whether lease filters valid or not -func (filters LeaseFilters) Accept(obj Lease, stateVal Lease_State) bool { - // Checking owner filter - if filters.Owner != "" && filters.Owner != obj.LeaseID.Owner { - return false - } - - // Checking dseq filter - if filters.DSeq != 0 && filters.DSeq != obj.LeaseID.DSeq { - return false - } - - // Checking gseq filter - if filters.GSeq != 0 && filters.GSeq != obj.LeaseID.GSeq { - return false - } - - // Checking oseq filter - if filters.OSeq != 0 && filters.OSeq != obj.LeaseID.OSeq { - return false - } - - // Checking provider filter - if filters.Provider != "" && filters.Provider != obj.LeaseID.Provider { - return false - } - - // Checking state filter - if stateVal != 0 && stateVal != obj.State { - return false - } - - return true -} - -func (m QueryLeasesResponse) TotalPriceAmount() sdk.Dec { - total := sdk.NewDec(0) - - for _, lease := range m.Leases { - total = total.Add(lease.Lease.Price.Amount) - } - - return total -} diff --git a/go/node/market/v1beta4/bid.go b/go/node/market/v1beta4/bid.go deleted file mode 100644 index deaba01a..00000000 --- a/go/node/market/v1beta4/bid.go +++ /dev/null @@ -1,82 +0,0 @@ -package v1beta4 - -import ( - "sort" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" -) - -type ResourcesOffer []ResourceOffer - -var _ sort.Interface = (*ResourcesOffer)(nil) - -func (s ResourcesOffer) MatchGSpec(gspec dtypes.GroupSpec) bool { - if len(s) == 0 { - return true - } - - ru := make(map[uint32]*dtypes.ResourceUnit) - - for idx := range gspec.Resources { - ru[gspec.Resources[idx].ID] = &gspec.Resources[idx] - } - - for _, ro := range s { - res, exists := ru[ro.Resources.ID] - if !exists { - return false - } - - ru[ro.Resources.ID] = nil - - if res.Count != ro.Count { - return false - } - - // TODO @troian check resources boundaries - } - - return true -} - -func (r *ResourceOffer) Dup() ResourceOffer { - return ResourceOffer{ - Resources: r.Resources.Dup(), - Count: r.Count, - } -} - -func (s ResourcesOffer) Len() int { - return 
len(s) -} - -func (s ResourcesOffer) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (s ResourcesOffer) Less(i, j int) bool { - return s[i].Resources.ID < s[j].Resources.ID -} - -func (s ResourcesOffer) Dup() ResourcesOffer { - res := make(ResourcesOffer, 0, len(s)) - - for _, ru := range s { - res = append(res, ru.Dup()) - } - - return res -} - -func ResourceOfferFromRU(ru dtypes.ResourceUnits) ResourcesOffer { - res := make(ResourcesOffer, 0, len(ru)) - - for _, r := range ru { - res = append(res, ResourceOffer{ - Resources: r.Resources, - Count: r.Count, - }) - } - - return res -} diff --git a/go/node/market/v1beta4/bid.pb.go b/go/node/market/v1beta4/bid.pb.go deleted file mode 100644 index 37d581df..00000000 --- a/go/node/market/v1beta4/bid.pb.go +++ /dev/null @@ -1,2337 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta4/bid.proto - -package v1beta4 - -import ( - fmt "fmt" - v1beta3 "github.com/akash-network/akash-api/go/node/types/v1beta3" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of bid -type Bid_State int32 - -const ( - // Prefix should start with 0 in enum. So declaring dummy state - BidStateInvalid Bid_State = 0 - // BidOpen denotes state for bid open - BidOpen Bid_State = 1 - // BidMatched denotes state for bid open - BidActive Bid_State = 2 - // BidLost denotes state for bid lost - BidLost Bid_State = 3 - // BidClosed denotes state for bid closed - BidClosed Bid_State = 4 -) - -var Bid_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "active", - 3: "lost", - 4: "closed", -} - -var Bid_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "active": 2, - "lost": 3, - "closed": 4, -} - -func (x Bid_State) String() string { - return proto.EnumName(Bid_State_name, int32(x)) -} - -func (Bid_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_c19fff42684bf143, []int{6, 0} -} - -// ResourceOffer describes resources that provider is offering -// for deployment -type ResourceOffer struct { - Resources v1beta3.Resources `protobuf:"bytes,1,opt,name=resources,proto3" json:"resources" yaml:"resources"` - Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count" yaml:"count"` -} - -func (m *ResourceOffer) Reset() { *m = ResourceOffer{} } -func (m *ResourceOffer) String() string { return proto.CompactTextString(m) } -func (*ResourceOffer) ProtoMessage() {} -func (*ResourceOffer) Descriptor() ([]byte, []int) { - return fileDescriptor_c19fff42684bf143, []int{0} -} -func (m *ResourceOffer) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceOffer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResourceOffer.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m 
*ResourceOffer) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceOffer.Merge(m, src) -} -func (m *ResourceOffer) XXX_Size() int { - return m.Size() -} -func (m *ResourceOffer) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceOffer.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceOffer proto.InternalMessageInfo - -func (m *ResourceOffer) GetResources() v1beta3.Resources { - if m != nil { - return m.Resources - } - return v1beta3.Resources{} -} - -func (m *ResourceOffer) GetCount() uint32 { - if m != nil { - return m.Count - } - return 0 -} - -// MsgCreateBid defines an SDK message for creating Bid -type MsgCreateBid struct { - Order OrderID `protobuf:"bytes,1,opt,name=order,proto3" json:"order" yaml:"order"` - Provider string `protobuf:"bytes,2,opt,name=provider,proto3" json:"provider" yaml:"provider"` - Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` - Deposit types.Coin `protobuf:"bytes,4,opt,name=deposit,proto3" json:"deposit" yaml:"deposit"` - ResourcesOffer ResourcesOffer `protobuf:"bytes,5,rep,name=resources_offer,json=resourcesOffer,proto3,castrepeated=ResourcesOffer" json:"resources_offer" yaml:"resources_offer"` -} - -func (m *MsgCreateBid) Reset() { *m = MsgCreateBid{} } -func (m *MsgCreateBid) String() string { return proto.CompactTextString(m) } -func (*MsgCreateBid) ProtoMessage() {} -func (*MsgCreateBid) Descriptor() ([]byte, []int) { - return fileDescriptor_c19fff42684bf143, []int{1} -} -func (m *MsgCreateBid) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateBid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateBid.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateBid) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateBid.Merge(m, src) -} -func (m *MsgCreateBid) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateBid) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateBid.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateBid proto.InternalMessageInfo - -func (m *MsgCreateBid) GetOrder() OrderID { - if m != nil { - return m.Order - } - return OrderID{} -} - -func (m *MsgCreateBid) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *MsgCreateBid) GetPrice() types.DecCoin { - if m != nil { - return m.Price - } - return types.DecCoin{} -} - -func (m *MsgCreateBid) GetDeposit() types.Coin { - if m != nil { - return m.Deposit - } - return types.Coin{} -} - -func (m *MsgCreateBid) GetResourcesOffer() ResourcesOffer { - if m != nil { - return m.ResourcesOffer - } - return nil -} - -// MsgCreateBidResponse defines the Msg/CreateBid response type. 
-type MsgCreateBidResponse struct { -} - -func (m *MsgCreateBidResponse) Reset() { *m = MsgCreateBidResponse{} } -func (m *MsgCreateBidResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateBidResponse) ProtoMessage() {} -func (*MsgCreateBidResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c19fff42684bf143, []int{2} -} -func (m *MsgCreateBidResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateBidResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateBidResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateBidResponse.Merge(m, src) -} -func (m *MsgCreateBidResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateBidResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateBidResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateBidResponse proto.InternalMessageInfo - -// MsgCloseBid defines an SDK message for closing bid -type MsgCloseBid struct { - BidID BidID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCloseBid) Reset() { *m = MsgCloseBid{} } -func (m *MsgCloseBid) String() string { return proto.CompactTextString(m) } -func (*MsgCloseBid) ProtoMessage() {} -func (*MsgCloseBid) Descriptor() ([]byte, []int) { - return fileDescriptor_c19fff42684bf143, []int{3} -} -func (m *MsgCloseBid) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseBid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseBid.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseBid) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseBid.Merge(m, src) -} -func (m *MsgCloseBid) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseBid) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseBid.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseBid proto.InternalMessageInfo - -func (m *MsgCloseBid) GetBidID() BidID { - if m != nil { - return m.BidID - } - return BidID{} -} - -// MsgCloseBidResponse defines the Msg/CloseBid response type. 
-type MsgCloseBidResponse struct { -} - -func (m *MsgCloseBidResponse) Reset() { *m = MsgCloseBidResponse{} } -func (m *MsgCloseBidResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCloseBidResponse) ProtoMessage() {} -func (*MsgCloseBidResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_c19fff42684bf143, []int{4} -} -func (m *MsgCloseBidResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseBidResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseBidResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseBidResponse.Merge(m, src) -} -func (m *MsgCloseBidResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseBidResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseBidResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseBidResponse proto.InternalMessageInfo - -// BidID stores owner and all other seq numbers -// A successful bid becomes a Lease(ID). -type BidID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` -} - -func (m *BidID) Reset() { *m = BidID{} } -func (*BidID) ProtoMessage() {} -func (*BidID) Descriptor() ([]byte, []int) { - return fileDescriptor_c19fff42684bf143, []int{5} -} -func (m *BidID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BidID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BidID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BidID) XXX_Merge(src proto.Message) { - xxx_messageInfo_BidID.Merge(m, src) -} -func (m *BidID) XXX_Size() int { - return m.Size() -} -func (m *BidID) XXX_DiscardUnknown() { - xxx_messageInfo_BidID.DiscardUnknown(m) -} - -var xxx_messageInfo_BidID proto.InternalMessageInfo - -func (m *BidID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *BidID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *BidID) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *BidID) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *BidID) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -// Bid stores BidID, state of bid and price -type Bid struct { - BidID BidID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` - State Bid_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta4.Bid_State" json:"state" yaml:"state"` - Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - ResourcesOffer ResourcesOffer 
`protobuf:"bytes,5,rep,name=resources_offer,json=resourcesOffer,proto3,castrepeated=ResourcesOffer" json:"resources_offer" yaml:"resources_offer"` -} - -func (m *Bid) Reset() { *m = Bid{} } -func (*Bid) ProtoMessage() {} -func (*Bid) Descriptor() ([]byte, []int) { - return fileDescriptor_c19fff42684bf143, []int{6} -} -func (m *Bid) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Bid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Bid.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Bid) XXX_Merge(src proto.Message) { - xxx_messageInfo_Bid.Merge(m, src) -} -func (m *Bid) XXX_Size() int { - return m.Size() -} -func (m *Bid) XXX_DiscardUnknown() { - xxx_messageInfo_Bid.DiscardUnknown(m) -} - -var xxx_messageInfo_Bid proto.InternalMessageInfo - -func (m *Bid) GetBidID() BidID { - if m != nil { - return m.BidID - } - return BidID{} -} - -func (m *Bid) GetState() Bid_State { - if m != nil { - return m.State - } - return BidStateInvalid -} - -func (m *Bid) GetPrice() types.DecCoin { - if m != nil { - return m.Price - } - return types.DecCoin{} -} - -func (m *Bid) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -func (m *Bid) GetResourcesOffer() ResourcesOffer { - if m != nil { - return m.ResourcesOffer - } - return nil -} - -// BidFilters defines flags for bid list filter -type BidFilters struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` - State string `protobuf:"bytes,6,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *BidFilters) Reset() { *m = BidFilters{} } -func (m *BidFilters) String() string { return proto.CompactTextString(m) } -func (*BidFilters) ProtoMessage() {} -func (*BidFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_c19fff42684bf143, []int{7} -} -func (m *BidFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BidFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_BidFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *BidFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_BidFilters.Merge(m, src) -} -func (m *BidFilters) XXX_Size() int { - return m.Size() -} -func (m *BidFilters) XXX_DiscardUnknown() { - xxx_messageInfo_BidFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_BidFilters proto.InternalMessageInfo - -func (m *BidFilters) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *BidFilters) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *BidFilters) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *BidFilters) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *BidFilters) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *BidFilters) GetState() string { - if m 
!= nil { - return m.State - } - return "" -} - -func init() { - proto.RegisterEnum("akash.market.v1beta4.Bid_State", Bid_State_name, Bid_State_value) - proto.RegisterType((*ResourceOffer)(nil), "akash.market.v1beta4.ResourceOffer") - proto.RegisterType((*MsgCreateBid)(nil), "akash.market.v1beta4.MsgCreateBid") - proto.RegisterType((*MsgCreateBidResponse)(nil), "akash.market.v1beta4.MsgCreateBidResponse") - proto.RegisterType((*MsgCloseBid)(nil), "akash.market.v1beta4.MsgCloseBid") - proto.RegisterType((*MsgCloseBidResponse)(nil), "akash.market.v1beta4.MsgCloseBidResponse") - proto.RegisterType((*BidID)(nil), "akash.market.v1beta4.BidID") - proto.RegisterType((*Bid)(nil), "akash.market.v1beta4.Bid") - proto.RegisterType((*BidFilters)(nil), "akash.market.v1beta4.BidFilters") -} - -func init() { proto.RegisterFile("akash/market/v1beta4/bid.proto", fileDescriptor_c19fff42684bf143) } - -var fileDescriptor_c19fff42684bf143 = []byte{ - // 891 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x96, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xc7, 0xe3, 0xc4, 0x6e, 0x37, 0x93, 0x6d, 0x1b, 0x79, 0xbb, 0xab, 0x34, 0x50, 0x8f, 0x19, - 0x04, 0xea, 0x05, 0x5b, 0x6d, 0xb9, 0x50, 0x4e, 0xeb, 0xad, 0x40, 0x95, 0x40, 0x05, 0x17, 0x38, - 0xc0, 0xa1, 0x72, 0x3c, 0xb3, 0xde, 0x51, 0x53, 0x4f, 0xea, 0x71, 0xbb, 0xe2, 0x3f, 0x40, 0x3d, - 0x21, 0x71, 0xe1, 0x52, 0xb4, 0xd2, 0xde, 0xf6, 0x2f, 0xd9, 0x63, 0x8f, 0x9c, 0x06, 0x94, 0x0a, - 0x09, 0xe5, 0x98, 0x7f, 0x00, 0x34, 0x3f, 0xe2, 0xfc, 0x50, 0x41, 0xe2, 0xd0, 0x03, 0xd2, 0x9e, - 0xdc, 0xf9, 0xbe, 0xf9, 0xbc, 0x79, 0x7e, 0xf3, 0x7d, 0x8d, 0x81, 0x97, 0x9c, 0x24, 0xfc, 0x59, - 0x78, 0x9a, 0x14, 0x27, 0xa4, 0x0c, 0x2f, 0xb6, 0x7b, 0xa4, 0x4c, 0x3e, 0x0c, 0x7b, 0x14, 0x07, - 0x83, 0x82, 0x95, 0xcc, 0x5d, 0x57, 0xf1, 0x40, 0xc7, 0x03, 0x13, 0xef, 0xae, 0x67, 0x2c, 0x63, - 0x6a, 0x43, 0x28, 0xff, 0xd2, 0x7b, 0xbb, 0x5e, 0xca, 0xf8, 0x29, 0xe3, 0x61, 0x2f, 0xe1, 0xc4, - 0xa4, 0xda, 0x0e, 0x53, 0x46, 0x73, 0x13, 0x47, 0xfa, 0xac, 0x99, 0xf0, 0x6e, 0x58, 0x10, 0xce, - 0xce, 0x8b, 0x94, 0x70, 0xb3, 0xc7, 0xbf, 0xb5, 0x1e, 0x56, 0x60, 0x52, 0xe8, 0x1d, 0xe8, 0x95, - 0x05, 0x56, 0x62, 0x43, 0x1d, 0x3e, 0x7d, 0x4a, 0x0a, 0xb7, 0x07, 0x9a, 0x55, 0x9a, 0x8e, 0xe5, - 0x5b, 0x5b, 0xad, 0x9d, 0xcd, 0x40, 0xd7, 0x2d, 0xcf, 0x32, 0x55, 0xef, 0x06, 0x13, 0x8a, 0x47, - 0xef, 0xbd, 0x16, 0xb0, 0x36, 0x12, 0x70, 0xca, 0x8d, 0x05, 0x6c, 0x7f, 0x9f, 0x9c, 0xf6, 0xf7, - 0x50, 0x25, 0xa1, 0x78, 0x1a, 0x76, 0x43, 0xe0, 0xa4, 0xec, 0x3c, 0x2f, 0x3b, 0x75, 0xdf, 0xda, - 0x5a, 0x89, 0x36, 0x46, 0x02, 0x6a, 0x61, 0x2c, 0xe0, 0x7d, 0x0d, 0xaa, 0x25, 0x8a, 0xb5, 0xbc, - 0x67, 0xff, 0xf9, 0x02, 0x5a, 0xe8, 0xaf, 0x06, 0xb8, 0xff, 0x39, 0xcf, 0x9e, 0x14, 0x24, 0x29, - 0x49, 0x44, 0xb1, 0xfb, 0x1d, 0x70, 0xd4, 0xcb, 0x2c, 0xd4, 0x39, 0xdf, 0xdf, 0xe0, 0x50, 0x6e, - 0x39, 0xd8, 0x8f, 0xde, 0x97, 0x75, 0x0e, 0x05, 0x74, 0x94, 0x20, 0xcf, 0x54, 0xf0, 0xf4, 0x4c, - 0xb5, 0x44, 0xb1, 0x96, 0xdd, 0x8f, 0xc1, 0xbd, 0x41, 0xc1, 0x2e, 0xa8, 0xcc, 0x2f, 0xeb, 0x6c, - 0x46, 0x70, 0x24, 0x60, 0xa5, 0x8d, 0x05, 0x5c, 0xd3, 0xd8, 0x44, 0x41, 0x71, 0x15, 0x74, 0xbf, - 0x04, 0xce, 0xa0, 0xa0, 0x29, 0xe9, 0x34, 0x54, 0x65, 0x6f, 0x07, 0xfa, 0x36, 0x67, 0x5b, 0xb8, - 0x1d, 0xec, 0x93, 0xf4, 0x09, 0xa3, 0x79, 0xb4, 0x69, 0x1a, 0xa8, 0x91, 0x69, 0x3d, 0x6a, 0x89, - 0x62, 0x2d, 0xbb, 0xdf, 0x80, 0x65, 0x4c, 0x06, 0x8c, 0xd3, 0xb2, 0x63, 0xab, 0xa4, 0x1b, 0xb7, - 0x26, 0x55, 0x19, 0xdf, 0x31, 0x19, 0x27, 0xc4, 0x58, 0xc0, 0x55, 0x9d, 0xd3, 0x08, 0x28, 0x9e, - 0x84, 0xdc, 
0x97, 0x16, 0x58, 0xab, 0xae, 0xe6, 0x98, 0x49, 0x13, 0x74, 0x1c, 0xbf, 0xb1, 0xd5, - 0xda, 0x79, 0xf7, 0xf6, 0x7e, 0xce, 0xf9, 0x25, 0xfa, 0xda, 0x74, 0x75, 0xb5, 0x32, 0x84, 0xd2, - 0x47, 0x02, 0x2e, 0x66, 0x1d, 0x0b, 0xf8, 0x68, 0xc1, 0x15, 0x3a, 0x80, 0x5e, 0xfd, 0xb6, 0x88, - 0xc7, 0xab, 0xc5, 0xdc, 0x5a, 0x39, 0xa0, 0x86, 0x1e, 0x81, 0xf5, 0x59, 0x03, 0xc4, 0x84, 0x0f, - 0x58, 0xce, 0x09, 0xa2, 0xa0, 0x25, 0xf5, 0x3e, 0xe3, 0xca, 0x17, 0x5f, 0x81, 0xa5, 0x1e, 0xc5, - 0xc7, 0x14, 0x1b, 0x63, 0xbc, 0x75, 0xfb, 0x8b, 0x44, 0x14, 0x1f, 0xec, 0x47, 0xfe, 0xc4, 0x16, - 0x6a, 0x39, 0x12, 0xb0, 0x4e, 0xf1, 0x58, 0xc0, 0xa6, 0x2e, 0x95, 0x62, 0x14, 0x3b, 0x3d, 0x8a, - 0x0f, 0xb0, 0x29, 0xe1, 0x21, 0x78, 0x30, 0x73, 0x54, 0x55, 0xc1, 0x2f, 0x75, 0xa0, 0x13, 0x48, - 0x73, 0xb3, 0xe7, 0xb9, 0x31, 0x65, 0x53, 0x9b, 0x5b, 0x09, 0x33, 0x46, 0x93, 0x4b, 0x69, 0x34, - 0xf9, 0x74, 0x77, 0x81, 0x8d, 0x39, 0x39, 0x53, 0x26, 0xb3, 0x23, 0x38, 0x14, 0xd0, 0xde, 0x3f, - 0x22, 0x67, 0x23, 0x01, 0x95, 0x3e, 0x16, 0xb0, 0x65, 0xee, 0x8e, 0x93, 0x33, 0x14, 0x2b, 0x51, - 0x42, 0x99, 0x84, 0x1a, 0x6a, 0x82, 0x14, 0xf4, 0xa9, 0x81, 0xb2, 0x39, 0x28, 0xd3, 0x50, 0x66, - 0x20, 0x26, 0x21, 0x7b, 0x0a, 0x1d, 0x1a, 0x88, 0xcd, 0x41, 0x4c, 0x43, 0xf2, 0x31, 0x37, 0x07, - 0xce, 0x7f, 0x9c, 0x83, 0xbd, 0x7b, 0x3f, 0xbf, 0x80, 0x35, 0xd5, 0xb7, 0x3f, 0x6c, 0xd0, 0xb8, - 0xb3, 0xbb, 0x71, 0xbf, 0x00, 0x0e, 0x2f, 0x93, 0x92, 0xa8, 0x26, 0xae, 0xee, 0xc0, 0x7f, 0x4c, - 0x1a, 0x1c, 0xc9, 0x6d, 0xfa, 0x56, 0x14, 0x31, 0xbd, 0x15, 0xb5, 0x44, 0xb1, 0x96, 0xef, 0x62, - 0x82, 0x37, 0x01, 0x48, 0x95, 0x75, 0xf1, 0x71, 0xa2, 0x87, 0xb8, 0x11, 0x37, 0x8d, 0xf2, 0xf8, - 0x7f, 0x32, 0x88, 0xe8, 0x27, 0x0b, 0x38, 0xaa, 0x87, 0xae, 0x0f, 0x96, 0x69, 0x7e, 0x91, 0xf4, - 0x29, 0x6e, 0xd7, 0xba, 0x0f, 0x2e, 0xaf, 0xfc, 0xb5, 0x88, 0x62, 0x15, 0x3a, 0xd0, 0xb2, 0xfb, - 0x10, 0xd8, 0x6c, 0x40, 0xf2, 0xb6, 0xd5, 0x6d, 0x5d, 0x5e, 0xf9, 0xcb, 0x11, 0xc5, 0x87, 0x03, - 0x92, 0xbb, 0x1b, 0x60, 0x29, 0x49, 0x4b, 0x7a, 0x41, 0xda, 0xf5, 0xee, 0xca, 0xe5, 0x95, 0xdf, - 0x8c, 0x28, 0x7e, 0xac, 0x04, 0x49, 0xf4, 0x19, 0x2f, 0xdb, 0x8d, 0x8a, 0xf8, 0x8c, 0xf1, 0x52, - 0x12, 0xa9, 0x9c, 0x38, 0xdc, 0xb6, 0x2b, 0x42, 0x8d, 0x20, 0xee, 0xda, 0x3f, 0xbc, 0xf4, 0x6a, - 0x33, 0x3e, 0xbb, 0xae, 0x03, 0x10, 0x51, 0xfc, 0x09, 0xed, 0x97, 0xa4, 0xe0, 0x6f, 0xa6, 0x71, - 0xf6, 0x57, 0x29, 0x9c, 0x4c, 0xc9, 0xd2, 0xb4, 0x19, 0xff, 0x36, 0x04, 0xfa, 0x5f, 0x5e, 0x74, - 0xf4, 0x7a, 0xe8, 0x59, 0xd7, 0x43, 0xcf, 0xfa, 0x7d, 0xe8, 0x59, 0x3f, 0xde, 0x78, 0xb5, 0xeb, - 0x1b, 0xaf, 0xf6, 0xeb, 0x8d, 0x57, 0xfb, 0xf6, 0xa3, 0x8c, 0x96, 0xcf, 0xce, 0x7b, 0x41, 0xca, - 0x4e, 0x43, 0x65, 0xd1, 0x0f, 0x72, 0x52, 0x3e, 0x67, 0xc5, 0x89, 0x59, 0x25, 0x03, 0x1a, 0x66, - 0x2c, 0xcc, 0x19, 0x26, 0x0b, 0x5f, 0x21, 0xbd, 0x25, 0xf5, 0x01, 0xb2, 0xfb, 0x77, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x32, 0x11, 0x67, 0xc4, 0x34, 0x09, 0x00, 0x00, -} - -func (this *ResourceOffer) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResourceOffer) - if !ok { - that2, ok := that.(ResourceOffer) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Resources.Equal(&that1.Resources) { - return false - } - if this.Count != that1.Count { - return false - } - return true -} -func (m *ResourceOffer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - 
return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceOffer) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceOffer) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Count != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.Count)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCreateBid) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateBid) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateBid) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourcesOffer) > 0 { - for iNdEx := len(m.ResourcesOffer) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourcesOffer[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - { - size, err := m.Deposit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintBid(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Order.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCreateBidResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateBidResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgCloseBid) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseBid) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseBid) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.BidID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCloseBidResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], 
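Note: the generated MarshalToSizedBuffer methods being removed here all fill the buffer from the end towards the front: the caller allocates Size() bytes, fields are written in reverse field order, and the index i walks backwards. A toy version of that pattern for a single string field (names and the demo value are illustrative, not from the generated code):

package main

import "fmt"

// marshalToSizedBuffer writes "field 1: owner" back-to-front and returns
// the index where the encoded data starts.
func marshalToSizedBuffer(buf []byte, owner string) int {
	i := len(buf)
	if len(owner) > 0 {
		i -= len(owner)
		copy(buf[i:], owner)
		i--
		buf[i] = uint8(len(owner)) // varint length; single byte suffices for short strings
		i--
		buf[i] = 0xa // tag: field 1, wire type 2 (length-delimited)
	}
	return i
}

func main() {
	owner := "akash1owner"
	buf := make([]byte, 2+len(owner))
	start := marshalToSizedBuffer(buf, owner)
	fmt.Printf("% x\n", buf[start:]) // 0a 0b followed by the owner bytes
}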
nil -} - -func (m *MsgCloseBidResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *BidID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BidID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BidID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintBid(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintBid(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Bid) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Bid) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Bid) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.ResourcesOffer) > 0 { - for iNdEx := len(m.ResourcesOffer) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourcesOffer[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if m.CreatedAt != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.State != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.BidID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintBid(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *BidFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BidFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BidFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintBid(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x32 - } - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintBid(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintBid(dAtA, i, 
uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintBid(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintBid(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintBid(dAtA []byte, offset int, v uint64) int { - offset -= sovBid(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ResourceOffer) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Resources.Size() - n += 1 + l + sovBid(uint64(l)) - if m.Count != 0 { - n += 1 + sovBid(uint64(m.Count)) - } - return n -} - -func (m *MsgCreateBid) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Order.Size() - n += 1 + l + sovBid(uint64(l)) - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - l = m.Price.Size() - n += 1 + l + sovBid(uint64(l)) - l = m.Deposit.Size() - n += 1 + l + sovBid(uint64(l)) - if len(m.ResourcesOffer) > 0 { - for _, e := range m.ResourcesOffer { - l = e.Size() - n += 1 + l + sovBid(uint64(l)) - } - } - return n -} - -func (m *MsgCreateBidResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgCloseBid) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BidID.Size() - n += 1 + l + sovBid(uint64(l)) - return n -} - -func (m *MsgCloseBidResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *BidID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovBid(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovBid(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovBid(uint64(m.OSeq)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - return n -} - -func (m *Bid) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BidID.Size() - n += 1 + l + sovBid(uint64(l)) - if m.State != 0 { - n += 1 + sovBid(uint64(m.State)) - } - l = m.Price.Size() - n += 1 + l + sovBid(uint64(l)) - if m.CreatedAt != 0 { - n += 1 + sovBid(uint64(m.CreatedAt)) - } - if len(m.ResourcesOffer) > 0 { - for _, e := range m.ResourcesOffer { - l = e.Size() - n += 1 + l + sovBid(uint64(l)) - } - } - return n -} - -func (m *BidFilters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovBid(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovBid(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovBid(uint64(m.OSeq)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovBid(uint64(l)) - } - return n -} - -func sovBid(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozBid(x uint64) (n int) { - return sovBid(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ResourceOffer) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
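Note: encodeVarintBid and sovBid above implement the standard protobuf base-128 varint layout. A self-contained sketch of the same two helpers plus a tiny demo (package and function names here are illustrative):

package main

import (
	"fmt"
	"math/bits"
)

// sov reports how many bytes the varint encoding of x occupies,
// using the same formula as the generated sovBid helper.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

// encodeVarint writes v immediately before offset in buf and returns the
// new start position, mirroring encodeVarintBid's back-to-front filling.
func encodeVarint(buf []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return base
}

func main() {
	buf := make([]byte, 10)
	start := encodeVarint(buf, len(buf), 300)
	fmt.Printf("% x\n", buf[start:]) // ac 02 — 300 as little-endian base-128 groups
}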
dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceOffer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceOffer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateBid) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateBid: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateBid: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Order.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - 
postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Deposit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Deposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourcesOffer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourcesOffer = append(m.ResourcesOffer, ResourceOffer{}) - if err := m.ResourcesOffer[len(m.ResourcesOffer)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateBidResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateBidResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } 
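Note: every Unmarshal method in this file walks the wire format by reading a varint tag and splitting it into a field number and a wire type, exactly as the generated code does with fieldNum := int32(wire >> 3) and wireType := int(wire & 0x7). A minimal illustration (the demo tag 0x2a is the one the marshaler above writes for field 5, resources_offer):

package main

import "fmt"

// splitTag separates a protobuf tag into field number and wire type.
func splitTag(wire uint64) (fieldNum int32, wireType int) {
	return int32(wire >> 3), int(wire & 0x7)
}

func main() {
	f, w := splitTag(0x2a)
	fmt.Println(f, w) // 5 2 — field 5, length-delimited
}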
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseBid) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseBid: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseBid: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BidID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BidID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseBidResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseBidResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BidID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BidID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BidID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var 
stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Bid) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Bid: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Bid: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BidID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF 
- } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BidID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Bid_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourcesOffer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourcesOffer = append(m.ResourcesOffer, ResourceOffer{}) - if err := m.ResourcesOffer[len(m.ResourcesOffer)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BidFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BidFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BidFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - 
case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowBid - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthBid - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthBid - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipBid(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthBid - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipBid(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 
- for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBid - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBid - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowBid - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthBid - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupBid - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthBid - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthBid = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowBid = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupBid = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta4/bid_test.go b/go/node/market/v1beta4/bid_test.go deleted file mode 100644 index 5c64979a..00000000 --- a/go/node/market/v1beta4/bid_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package v1beta4_test - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/akash-network/akash-api/go/node/market/v1beta4" - testutil "github.com/akash-network/akash-api/go/testutil/v1beta3" -) - -func TestBid_GSpecMatch_Valid(t *testing.T) { - gspec := testutil.GroupSpec(t) - - rOffer := v1beta4.ResourceOfferFromRU(gspec.Resources) - - require.True(t, rOffer.MatchGSpec(gspec)) -} - -func TestBid_GSpecMatch_Valid2(t *testing.T) { - gspec := testutil.GroupSpec(t) - - if len(gspec.Resources) == 1 { - rl := testutil.ResourcesList(t, 2) - rl[0].Count = 4 - gspec.Resources = append(gspec.Resources, rl...) - } - - rOffer := v1beta4.ResourceOfferFromRU(gspec.Resources) - - require.True(t, rOffer.MatchGSpec(gspec)) -} - -func TestBid_GSpecMatch_InvalidCount(t *testing.T) { - gspec := testutil.GroupSpec(t) - - if len(gspec.Resources) == 1 { - rl := testutil.ResourcesList(t, 2) - gspec.Resources = append(gspec.Resources, rl...) - } - - rOffer := v1beta4.ResourceOfferFromRU(gspec.Resources) - - gspec.Resources[0].Count = 2 - - require.False(t, rOffer.MatchGSpec(gspec)) -} diff --git a/go/node/market/v1beta4/codec.go b/go/node/market/v1beta4/codec.go deleted file mode 100644 index b7ccd985..00000000 --- a/go/node/market/v1beta4/codec.go +++ /dev/null @@ -1,50 +0,0 @@ -package v1beta4 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/msgservice" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/market module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. 
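Note: skipBid above advances past unknown fields by looking only at the wire type — varints are consumed byte by byte, fixed64/fixed32 skip 8/4 bytes, and length-delimited fields skip their length prefix plus payload. A trimmed sketch of that dispatch (group wire types and the overflow checks are omitted, and a one-byte length prefix is assumed for brevity):

package main

import (
	"errors"
	"fmt"
)

// skipField returns the index just past an unknown field's value.
func skipField(buf []byte, i int, wireType int) (int, error) {
	switch wireType {
	case 0: // varint
		for i < len(buf) {
			i++
			if buf[i-1] < 0x80 {
				return i, nil
			}
		}
	case 1: // fixed64
		return i + 8, nil
	case 2: // length-delimited
		if i < len(buf) {
			return i + 1 + int(buf[i]), nil
		}
	case 5: // fixed32
		return i + 4, nil
	}
	return 0, errors.New("cannot skip field")
}

func main() {
	// 0x08 0x96 0x01 encodes field 1 = varint 150; skip the value starting at index 1.
	buf := []byte{0x08, 0x96, 0x01}
	next, err := skipField(buf, 1, 0)
	fmt.Println(next, err) // 3 <nil>
}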
- // - // The actual codec used for serialization should be provided to x/market and - // defined at the application level. - ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterCodec registers the necessary x/market interfaces and concrete types -// on the provided Amino codec. These types are used for Amino JSON serialization. -func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { - cdc.RegisterConcrete(&MsgCreateBid{}, ModuleName+"/"+MsgTypeCreateBid, nil) - cdc.RegisterConcrete(&MsgCloseBid{}, ModuleName+"/"+MsgTypeCloseBid, nil) - cdc.RegisterConcrete(&MsgCreateLease{}, ModuleName+"/"+MsgTypeCreateLease, nil) - cdc.RegisterConcrete(&MsgWithdrawLease{}, ModuleName+"/"+MsgTypeWithdrawLease, nil) - cdc.RegisterConcrete(&MsgCloseLease{}, ModuleName+"/"+MsgTypeCloseLease, nil) -} - -// RegisterInterfaces registers the x/market interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil), - &MsgCreateBid{}, - &MsgCloseBid{}, - &MsgCreateLease{}, - &MsgWithdrawLease{}, - &MsgCloseLease{}, - ) - - msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) -} diff --git a/go/node/market/v1beta4/errors.go b/go/node/market/v1beta4/errors.go deleted file mode 100644 index 21cd9950..00000000 --- a/go/node/market/v1beta4/errors.go +++ /dev/null @@ -1,107 +0,0 @@ -package v1beta4 - -import ( - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - errCodeEmptyProvider uint32 = iota + 1 - errCodeSameAccount - errCodeInternal - errCodeOverOrder - errCodeAttributeMismatch - errCodeUnknownBid - errCodeUnknownLease - errCodeUnknownLeaseForOrder - errCodeUnknownOrderForBid - errCodeLeaseNotActive - errCodeBidNotActive - errCodeBidNotOpen - errCodeOrderNotOpen - errCodeNoLeaseForOrder - errCodeOrderNotFound - errCodeGroupNotFound - errCodeGroupNotOpen - errCodeBidNotFound - errCodeBidZeroPrice - errCodeLeaseNotFound - errCodeBidExists - errCodeInvalidPrice - errCodeOrderActive - errCodeOrderClosed - errCodeOrderExists - errCodeOrderDurationExceeded - errCodeOrderTooEarly - errInvalidDeposit - errInvalidParam - errUnknownProvider - errInvalidBid - errCodeCapabilitiesMismatch -) - -var ( - // ErrEmptyProvider is the error when provider is empty - ErrEmptyProvider = sdkerrors.Register(ModuleName, errCodeEmptyProvider, "empty provider") - // ErrSameAccount is the error when owner and provider are the same account - ErrSameAccount = sdkerrors.Register(ModuleName, errCodeSameAccount, "owner and provider are the same account") - // ErrInternal is the error for internal error - ErrInternal = sdkerrors.Register(ModuleName, errCodeInternal, "internal error") - // ErrBidOverOrder is the error when bid price is above max order price - ErrBidOverOrder = sdkerrors.Register(ModuleName, errCodeOverOrder, "bid price above max order price") - // ErrAttributeMismatch is the error for attribute mismatch - ErrAttributeMismatch = sdkerrors.Register(ModuleName, errCodeAttributeMismatch, "attribute mismatch") - // ErrCapabilitiesMismatch is the error for capabilities mismatch - ErrCapabilitiesMismatch = sdkerrors.Register(ModuleName, errCodeCapabilitiesMismatch, "capabilities mismatch") - // ErrUnknownBid is the error for unknown bid - ErrUnknownBid = sdkerrors.Register(ModuleName, errCodeUnknownBid, "unknown bid") - // ErrUnknownLease is the error for unknown bid - 
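Note: the deleted errors.go assigns each registered error a stable numeric code via an iota block, so existing codes keep their values as long as new entries are only appended at the end. A stripped-down illustration of that enumeration pattern (sdkerrors.Register itself is not reproduced here):

package main

import "fmt"

const (
	errCodeEmptyProvider uint32 = iota + 1 // 1
	errCodeSameAccount                     // 2
	errCodeInternal                        // 3
)

func main() {
	fmt.Println(errCodeEmptyProvider, errCodeSameAccount, errCodeInternal) // 1 2 3
}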
ErrUnknownLease = sdkerrors.Register(ModuleName, errCodeUnknownLease, "unknown lease") - // ErrUnknownLeaseForBid is the error when lease is unknown for bid - ErrUnknownLeaseForBid = sdkerrors.Register(ModuleName, errCodeUnknownLeaseForOrder, "unknown lease for bid") - // ErrUnknownOrderForBid is the error when order is unknown for bid - ErrUnknownOrderForBid = sdkerrors.Register(ModuleName, errCodeUnknownOrderForBid, "unknown order for bid") - // ErrLeaseNotActive is the error when lease is not active - ErrLeaseNotActive = sdkerrors.Register(ModuleName, errCodeLeaseNotActive, "lease not active") - // ErrBidNotActive is the error when bid is not matched - ErrBidNotActive = sdkerrors.Register(ModuleName, errCodeBidNotActive, "bid not active") - // ErrBidNotOpen is the error when bid is not matched - ErrBidNotOpen = sdkerrors.Register(ModuleName, errCodeBidNotOpen, "bid not open") - // ErrNoLeaseForOrder is the error when there is no lease for order - ErrNoLeaseForOrder = sdkerrors.Register(ModuleName, errCodeNoLeaseForOrder, "no lease for order") - // ErrOrderNotFound order not found - ErrOrderNotFound = sdkerrors.Register(ModuleName, errCodeOrderNotFound, "invalid order: order not found") - // ErrGroupNotFound order not found - ErrGroupNotFound = sdkerrors.Register(ModuleName, errCodeGroupNotFound, "order not found") - // ErrGroupNotOpen order not found - ErrGroupNotOpen = sdkerrors.Register(ModuleName, errCodeGroupNotOpen, "order not open") - // ErrOrderNotOpen order not found - ErrOrderNotOpen = sdkerrors.Register(ModuleName, errCodeOrderNotOpen, "bid: order not open") - // ErrBidNotFound bid not found - ErrBidNotFound = sdkerrors.Register(ModuleName, errCodeBidNotFound, "invalid bid: bid not found") - // ErrBidZeroPrice zero price - ErrBidZeroPrice = sdkerrors.Register(ModuleName, errCodeBidZeroPrice, "invalid bid: zero price") - // ErrLeaseNotFound lease not found - ErrLeaseNotFound = sdkerrors.Register(ModuleName, errCodeLeaseNotFound, "invalid lease: lease not found") - // ErrBidExists bid exists - ErrBidExists = sdkerrors.Register(ModuleName, errCodeBidExists, "invalid bid: bid exists from provider") - // ErrBidInvalidPrice bid invalid price - ErrBidInvalidPrice = sdkerrors.Register(ModuleName, errCodeInvalidPrice, "bid price is invalid") - // ErrOrderActive order active - ErrOrderActive = sdkerrors.New(ModuleName, errCodeOrderActive, "order active") - // ErrOrderClosed order closed - ErrOrderClosed = sdkerrors.New(ModuleName, errCodeOrderClosed, "order closed") - // ErrOrderExists indicates a new order was proposed overwrite the existing store key - ErrOrderExists = sdkerrors.New(ModuleName, errCodeOrderExists, "order already exists in store") - // ErrOrderTooEarly to match bid - ErrOrderTooEarly = sdkerrors.New(ModuleName, errCodeOrderTooEarly, "order: chain height to low for bidding") - // ErrOrderDurationExceeded order should be closed - ErrOrderDurationExceeded = sdkerrors.New(ModuleName, errCodeOrderDurationExceeded, "order duration has exceeded the bidding duration") - // ErrInvalidDeposit indicates an invalid deposit - ErrInvalidDeposit = sdkerrors.Register(ModuleName, errInvalidDeposit, "Deposit invalid") - // ErrInvalidParam indicates an invalid chain parameter - ErrInvalidParam = sdkerrors.Register(ModuleName, errInvalidParam, "parameter invalid") - // ErrUnknownProvider indicates an invalid chain parameter - ErrUnknownProvider = sdkerrors.Register(ModuleName, errUnknownProvider, "unknown provider") - // ErrInvalidBid indicates an invalid chain parameter - ErrInvalidBid = 
sdkerrors.Register(ModuleName, errInvalidBid, "unknown provider") -) diff --git a/go/node/market/v1beta4/escrow.go b/go/node/market/v1beta4/escrow.go deleted file mode 100644 index eebb2edf..00000000 --- a/go/node/market/v1beta4/escrow.go +++ /dev/null @@ -1,61 +0,0 @@ -package v1beta4 - -import ( - "fmt" - "strconv" - "strings" - - sdk "github.com/cosmos/cosmos-sdk/types" - - etypes "github.com/akash-network/akash-api/go/node/escrow/v1beta3" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" -) - -const ( - bidEscrowScope = "bid" -) - -func EscrowAccountForBid(id BidID) etypes.AccountID { - return etypes.AccountID{ - Scope: bidEscrowScope, - XID: id.String(), - } -} - -func EscrowPaymentForLease(id LeaseID) string { - return fmt.Sprintf("%v/%v/%s", id.GSeq, id.OSeq, id.Provider) -} - -func LeaseIDFromEscrowAccount(id etypes.AccountID, pid string) (LeaseID, bool) { - did, ok := dtypes.DeploymentIDFromEscrowAccount(id) - if !ok { - return LeaseID{}, false - } - - parts := strings.Split(pid, "/") - if len(parts) != 3 { - return LeaseID{}, false - } - - gseq, err := strconv.ParseUint(parts[0], 10, 32) - if err != nil { - return LeaseID{}, false - } - - oseq, err := strconv.ParseUint(parts[1], 10, 32) - if err != nil { - return LeaseID{}, false - } - - owner, err := sdk.AccAddressFromBech32(parts[2]) - if err != nil { - return LeaseID{}, false - } - - return MakeLeaseID( - MakeBidID( - MakeOrderID( - dtypes.MakeGroupID( - did, uint32(gseq)), uint32(oseq)), owner)), true -} diff --git a/go/node/market/v1beta4/event.go b/go/node/market/v1beta4/event.go deleted file mode 100644 index a5ddc150..00000000 --- a/go/node/market/v1beta4/event.go +++ /dev/null @@ -1,359 +0,0 @@ -package v1beta4 - -import ( - "errors" - "strconv" - - sdk "github.com/cosmos/cosmos-sdk/types" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - "github.com/akash-network/akash-api/go/sdkutil" -) - -const ( - evActionOrderCreated = "order-created" - evActionOrderClosed = "order-closed" - evActionBidCreated = "bid-created" - evActionBidClosed = "bid-closed" - evActionLeaseCreated = "lease-created" - evActionLeaseClosed = "lease-closed" - - evOSeqKey = "oseq" - evProviderKey = "provider" - evPriceDenomKey = "price-denom" - evPriceAmountKey = "price-amount" -) - -var ( - ErrParsingPrice = errors.New("error parsing price") -) - -// EventOrderCreated struct -type EventOrderCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID OrderID `json:"id"` -} - -func NewEventOrderCreated(id OrderID) EventOrderCreated { - return EventOrderCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionOrderCreated, - }, - ID: id, - } -} - -// ToSDKEvent method creates new sdk event for EventOrderCreated struct -func (e EventOrderCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionOrderCreated), - }, orderIDEVAttributes(e.ID)...)..., - ) -} - -// EventOrderClosed struct -type EventOrderClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID OrderID `json:"id"` -} - -func NewEventOrderClosed(id OrderID) EventOrderClosed { - return EventOrderClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionOrderClosed, - }, - ID: id, - } -} - -// ToSDKEvent method creates new sdk event for EventOrderClosed struct -func (e EventOrderClosed) ToSDKEvent() sdk.Event { 
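Note: the deleted escrow.go encodes a lease's position inside an escrow payment ID as "<gseq>/<oseq>/<provider>" and parses it back in LeaseIDFromEscrowAccount. A minimal sketch of that round trip, without the cosmos-sdk bech32 address validation (helper names here are illustrative):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// paymentID builds the "<gseq>/<oseq>/<provider>" form used for lease payments.
func paymentID(gseq, oseq uint32, provider string) string {
	return fmt.Sprintf("%v/%v/%s", gseq, oseq, provider)
}

// parsePaymentID reverses paymentID; ok is false on any malformed input.
func parsePaymentID(pid string) (gseq, oseq uint32, provider string, ok bool) {
	parts := strings.Split(pid, "/")
	if len(parts) != 3 {
		return 0, 0, "", false
	}
	g, err := strconv.ParseUint(parts[0], 10, 32)
	if err != nil {
		return 0, 0, "", false
	}
	o, err := strconv.ParseUint(parts[1], 10, 32)
	if err != nil {
		return 0, 0, "", false
	}
	return uint32(g), uint32(o), parts[2], true
}

func main() {
	pid := paymentID(2, 1, "akash1provider")
	fmt.Println(pid)                  // 2/1/akash1provider
	fmt.Println(parsePaymentID(pid))  // 2 1 akash1provider true
}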
- return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionOrderClosed), - }, orderIDEVAttributes(e.ID)...)..., - ) -} - -// EventBidCreated struct -type EventBidCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID BidID `json:"id"` - Price sdk.DecCoin `json:"price"` -} - -func NewEventBidCreated(id BidID, price sdk.DecCoin) EventBidCreated { - return EventBidCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionBidCreated, - }, - ID: id, - Price: price, - } -} - -// ToSDKEvent method creates new sdk event for EventBidCreated struct -func (e EventBidCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append( - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionBidCreated), - }, bidIDEVAttributes(e.ID)...), - priceEVAttributes(e.Price)...)..., - ) -} - -// EventBidClosed struct -type EventBidClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID BidID `json:"id"` - Price sdk.DecCoin `json:"price"` -} - -func NewEventBidClosed(id BidID, price sdk.DecCoin) EventBidClosed { - return EventBidClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionBidClosed, - }, - ID: id, - Price: price, - } -} - -// ToSDKEvent method creates new sdk event for EventBidClosed struct -func (e EventBidClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append( - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionBidClosed), - }, bidIDEVAttributes(e.ID)...), - priceEVAttributes(e.Price)...)..., - ) -} - -// EventLeaseCreated struct -type EventLeaseCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID LeaseID `json:"id"` - Price sdk.DecCoin `json:"price"` -} - -func NewEventLeaseCreated(id LeaseID, price sdk.DecCoin) EventLeaseCreated { - return EventLeaseCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionLeaseCreated, - }, - ID: id, - Price: price, - } -} - -// ToSDKEvent method creates new sdk event for EventLeaseCreated struct -func (e EventLeaseCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append( - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionLeaseCreated), - }, leaseIDEVAttributes(e.ID)...), - priceEVAttributes(e.Price)...)...) -} - -// EventLeaseClosed struct -type EventLeaseClosed struct { - Context sdkutil.BaseModuleEvent `json:"context"` - ID LeaseID `json:"id"` - Price sdk.DecCoin `json:"price"` -} - -func NewEventLeaseClosed(id LeaseID, price sdk.DecCoin) EventLeaseClosed { - return EventLeaseClosed{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionLeaseClosed, - }, - ID: id, - Price: price, - } -} - -// ToSDKEvent method creates new sdk event for EventLeaseClosed struct -func (e EventLeaseClosed) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append( - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionLeaseClosed), - }, leaseIDEVAttributes(e.ID)...), - priceEVAttributes(e.Price)...)...) 
-} - -// orderIDEVAttributes returns event attribues for given orderID -func orderIDEVAttributes(id OrderID) []sdk.Attribute { - return append(dtypes.GroupIDEVAttributes(id.GroupID()), - sdk.NewAttribute(evOSeqKey, strconv.FormatUint(uint64(id.OSeq), 10))) -} - -// parseEVOrderID returns orderID for given event attributes -func parseEVOrderID(attrs []sdk.Attribute) (OrderID, error) { - gid, err := dtypes.ParseEVGroupID(attrs) - if err != nil { - return OrderID{}, err - } - oseq, err := sdkutil.GetUint64(attrs, evOSeqKey) - if err != nil { - return OrderID{}, err - } - - return OrderID{ - Owner: gid.Owner, - DSeq: gid.DSeq, - GSeq: gid.GSeq, - OSeq: uint32(oseq), - }, nil - -} - -// bidIDEVAttributes returns event attribues for given bidID -func bidIDEVAttributes(id BidID) []sdk.Attribute { - return append(orderIDEVAttributes(id.OrderID()), - sdk.NewAttribute(evProviderKey, id.Provider)) -} - -// parseEVBidID returns bidID for given event attributes -func parseEVBidID(attrs []sdk.Attribute) (BidID, error) { - oid, err := parseEVOrderID(attrs) - if err != nil { - return BidID{}, err - } - - provider, err := sdkutil.GetAccAddress(attrs, evProviderKey) - if err != nil { - return BidID{}, err - } - - return BidID{ - Owner: oid.Owner, - DSeq: oid.DSeq, - GSeq: oid.GSeq, - OSeq: oid.OSeq, - Provider: provider.String(), - }, nil -} - -// leaseIDEVAttributes returns event attribues for given LeaseID -func leaseIDEVAttributes(id LeaseID) []sdk.Attribute { - return append(orderIDEVAttributes(id.OrderID()), - sdk.NewAttribute(evProviderKey, id.Provider)) -} - -// parseEVLeaseID returns leaseID for given event attributes -func parseEVLeaseID(attrs []sdk.Attribute) (LeaseID, error) { - bid, err := parseEVBidID(attrs) - if err != nil { - return LeaseID{}, err - } - return LeaseID(bid), nil -} - -func priceEVAttributes(price sdk.DecCoin) []sdk.Attribute { - return []sdk.Attribute{ - sdk.NewAttribute(evPriceDenomKey, price.Denom), - sdk.NewAttribute(evPriceAmountKey, price.Amount.String()), - } -} - -func parseEVPriceAttributes(attrs []sdk.Attribute) (sdk.DecCoin, error) { - denom, err := sdkutil.GetString(attrs, evPriceDenomKey) - if err != nil { - return sdk.DecCoin{}, err - } - - amounts, err := sdkutil.GetString(attrs, evPriceAmountKey) - if err != nil { - return sdk.DecCoin{}, err - } - - amount, err := sdk.NewDecFromStr(amounts) - if err != nil { - return sdk.DecCoin{}, ErrParsingPrice - } - - return sdk.NewDecCoinFromDec(denom, amount), nil -} - -// ParseEvent parses event and returns details of event and error if occurred -func ParseEvent(ev sdkutil.Event) (sdkutil.ModuleEvent, error) { - if ev.Type != sdkutil.EventTypeMessage { - return nil, sdkutil.ErrUnknownType - } - if ev.Module != ModuleName { - return nil, sdkutil.ErrUnknownModule - } - switch ev.Action { - - case evActionOrderCreated: - id, err := parseEVOrderID(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventOrderCreated(id), nil - case evActionOrderClosed: - id, err := parseEVOrderID(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventOrderClosed(id), nil - - case evActionBidCreated: - id, err := parseEVBidID(ev.Attributes) - if err != nil { - return nil, err - } - price, err := parseEVPriceAttributes(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventBidCreated(id, price), nil - case evActionBidClosed: - id, err := parseEVBidID(ev.Attributes) - if err != nil { - return nil, err - } - // optional price - price, _ := parseEVPriceAttributes(ev.Attributes) - return 
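Note: the attribute helpers above flatten typed IDs into string key/value pairs and reconstruct them when parsing events. A small sketch of that idea using a plain map in place of sdk.Attribute (an assumed simplification, not the sdkutil API):

package main

import (
	"errors"
	"fmt"
	"strconv"
)

type attrs map[string]string

// getUint32 mirrors the "look up a key, parse the value" step used by
// parseEVOrderID and friends.
func (a attrs) getUint32(key string) (uint32, error) {
	v, ok := a[key]
	if !ok {
		return 0, errors.New("missing attribute: " + key)
	}
	u, err := strconv.ParseUint(v, 10, 32)
	if err != nil {
		return 0, err
	}
	return uint32(u), nil
}

func main() {
	ev := attrs{"oseq": "5", "gseq": "2"}
	oseq, err := ev.getUint32("oseq")
	fmt.Println(oseq, err) // 5 <nil>
}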
NewEventBidClosed(id, price), nil - - case evActionLeaseCreated: - id, err := parseEVLeaseID(ev.Attributes) - if err != nil { - return nil, err - } - price, err := parseEVPriceAttributes(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventLeaseCreated(id, price), nil - case evActionLeaseClosed: - id, err := parseEVLeaseID(ev.Attributes) - if err != nil { - return nil, err - } - // optional price - price, _ := parseEVPriceAttributes(ev.Attributes) - return NewEventLeaseClosed(id, price), nil - - default: - return nil, sdkutil.ErrUnknownAction - } -} diff --git a/go/node/market/v1beta4/events_test.go b/go/node/market/v1beta4/events_test.go deleted file mode 100644 index eab6ad81..00000000 --- a/go/node/market/v1beta4/events_test.go +++ /dev/null @@ -1,460 +0,0 @@ -package v1beta4 - -import ( - "fmt" - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/pkg/errors" - - "github.com/stretchr/testify/require" - - "github.com/akash-network/akash-api/go/sdkutil" -) - -var ( - errWildcard = errors.New("wildcard string error can't be matched") - evOwnerKey = "owner" - evDSeqKey = "dseq" - evGSeqKey = "gseq" -) - -type testEventParsing struct { - msg sdkutil.Event - expErr error -} - -func (tep testEventParsing) testMessageType() func(t *testing.T) { - _, err := ParseEvent(tep.msg) - return func(t *testing.T) { - // if the error expected is errWildcard to catch untyped errors, don't fail the test, the error was expected. - if errors.Is(tep.expErr, errWildcard) { - require.Error(t, err) - } else { - require.Equal(t, tep.expErr, err) - } - } -} - -var TEPS = []testEventParsing{ - { - msg: sdkutil.Event{ - Type: "nil", - }, - expErr: sdkutil.ErrUnknownType, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - }, - expErr: sdkutil.ErrUnknownModule, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - }, - expErr: sdkutil.ErrUnknownAction, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: "nil", - }, - expErr: sdkutil.ErrUnknownModule, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: "nil", - }, - expErr: sdkutil.ErrUnknownAction, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionOrderCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionOrderCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "nooo", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionOrderCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "abc", - }, - }, - }, - expErr: errWildcard, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionOrderClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: 
"akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - }, - }, - expErr: nil, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionBidCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionBidCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "yesss", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionBidCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "hello", - }, - }, - }, - expErr: errWildcard, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionBidClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: nil, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionLeaseCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionLeaseCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "hello", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: 
errWildcard, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionLeaseClosed, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evDSeqKey, - Value: "5", - }, - { - Key: evGSeqKey, - Value: "2", - }, - { - Key: evOSeqKey, - Value: "5", - }, - { - Key: evProviderKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - { - Key: evPriceDenomKey, - Value: "uakt", - }, - { - Key: evPriceAmountKey, - Value: "23", - }, - }, - }, - expErr: nil, - }, -} - -func TestEventParsing(t *testing.T) { - for i, test := range TEPS { - t.Run(fmt.Sprintf("%d", i), - test.testMessageType()) - } -} diff --git a/go/node/market/v1beta4/genesis.pb.go b/go/node/market/v1beta4/genesis.pb.go deleted file mode 100644 index 55e4deb4..00000000 --- a/go/node/market/v1beta4/genesis.pb.go +++ /dev/null @@ -1,518 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta4/genesis.proto - -package v1beta4 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisState defines the basic genesis state used by market module -type GenesisState struct { - Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params" yaml:"params"` - Orders []Order `protobuf:"bytes,2,rep,name=orders,proto3" json:"orders" yaml:"orders"` - Leases []Lease `protobuf:"bytes,3,rep,name=leases,proto3" json:"leases" yaml:"leases"` - Bids []Bid `protobuf:"bytes,4,rep,name=bids,proto3" json:"bids" yaml:"bids"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_e52179524daaace5, []int{0} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetParams() Params { - if m != nil { - return m.Params - } - return Params{} -} - -func (m *GenesisState) GetOrders() []Order { - if m != nil { - return m.Orders - } - return nil -} - -func (m *GenesisState) GetLeases() []Lease { - if m != nil { - return m.Leases - } - return nil -} - -func (m *GenesisState) GetBids() []Bid { - if m != nil { - return m.Bids - } - return nil -} - -func init() { - 
proto.RegisterType((*GenesisState)(nil), "akash.market.v1beta4.GenesisState") -} - -func init() { - proto.RegisterFile("akash/market/v1beta4/genesis.proto", fileDescriptor_e52179524daaace5) -} - -var fileDescriptor_e52179524daaace5 = []byte{ - // 339 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xb1, 0x4e, 0xeb, 0x30, - 0x14, 0x86, 0x93, 0xb6, 0xea, 0x90, 0xde, 0xbb, 0x44, 0x1d, 0x42, 0x8b, 0x9c, 0xe2, 0xa9, 0x0b, - 0xb6, 0x28, 0x2c, 0x30, 0x66, 0x41, 0x42, 0x48, 0xa0, 0x14, 0x16, 0x36, 0x87, 0x58, 0xa9, 0xd5, - 0xa6, 0xae, 0x6c, 0x03, 0xe2, 0x2d, 0x78, 0xac, 0x8e, 0x1d, 0x59, 0x88, 0x50, 0xbb, 0x31, 0xf6, - 0x09, 0x50, 0x6c, 0x4b, 0x91, 0x90, 0xd5, 0x2d, 0x7f, 0xfe, 0xef, 0x7c, 0xc9, 0xb1, 0x03, 0x48, - 0xe6, 0x44, 0xce, 0x70, 0x49, 0xc4, 0x9c, 0x2a, 0xfc, 0x7a, 0x96, 0x51, 0x45, 0x2e, 0x70, 0x41, - 0x97, 0x54, 0x32, 0x89, 0x56, 0x82, 0x2b, 0x1e, 0xf6, 0x35, 0x83, 0x0c, 0x83, 0x2c, 0x33, 0xe8, - 0x17, 0xbc, 0xe0, 0x1a, 0xc0, 0xf5, 0x93, 0x61, 0x07, 0x23, 0xa7, 0x8f, 0x8b, 0x9c, 0x8a, 0x83, - 0xc4, 0x82, 0x12, 0x49, 0x2d, 0x01, 0x9c, 0x44, 0xc6, 0x72, 0xdb, 0x9f, 0x38, 0xfb, 0x15, 0x11, - 0xa4, 0xb4, 0xbf, 0x0c, 0xbf, 0x5a, 0xc1, 0xbf, 0x6b, 0xb3, 0xc4, 0x54, 0x11, 0x45, 0xc3, 0xc7, - 0xa0, 0x6b, 0x80, 0xc8, 0x1f, 0xf9, 0xe3, 0xde, 0xe4, 0x18, 0xb9, 0x96, 0x42, 0xf7, 0x9a, 0x49, - 0xe2, 0x75, 0x15, 0x7b, 0x3f, 0x55, 0x6c, 0x67, 0xf6, 0x55, 0xfc, 0xff, 0x9d, 0x94, 0x8b, 0x2b, - 0x68, 0x32, 0x4c, 0x6d, 0x11, 0x3e, 0x04, 0x5d, 0xbd, 0x9b, 0x8c, 0x5a, 0xa3, 0xf6, 0xb8, 0x37, - 0x19, 0xba, 0xb5, 0x77, 0x35, 0xd3, 0x58, 0xcd, 0x48, 0x63, 0x35, 0x19, 0xa6, 0xb6, 0xa8, 0xad, - 0xfa, 0x3c, 0x64, 0xd4, 0x3e, 0x64, 0xbd, 0xad, 0x99, 0xc6, 0x6a, 0x46, 0x1a, 0xab, 0xc9, 0x30, - 0xb5, 0x45, 0x78, 0x13, 0x74, 0x32, 0x96, 0xcb, 0xa8, 0xa3, 0x9d, 0x47, 0x6e, 0x67, 0xc2, 0xf2, - 0x64, 0x68, 0x8d, 0x1a, 0xdf, 0x57, 0x71, 0xcf, 0xf8, 0xea, 0x04, 0x53, 0xfd, 0x32, 0x99, 0xae, - 0xb7, 0xc0, 0xdf, 0x6c, 0x81, 0xff, 0xbd, 0x05, 0xfe, 0xc7, 0x0e, 0x78, 0x9b, 0x1d, 0xf0, 0x3e, - 0x77, 0xc0, 0x7b, 0xba, 0x2c, 0x98, 0x9a, 0xbd, 0x64, 0xe8, 0x99, 0x97, 0x58, 0x7f, 0xe1, 0x74, - 0x49, 0xd5, 0x1b, 0x17, 0x73, 0x9b, 0xc8, 0x8a, 0xe1, 0x82, 0xe3, 0x25, 0xcf, 0xe9, 0x9f, 0x1b, - 0xcc, 0xba, 0xfa, 0xee, 0xce, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xd2, 0xfd, 0x9d, 0x36, 0x94, - 0x02, 0x00, 0x00, -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Bids) > 0 { - for iNdEx := len(m.Bids) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Bids[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Leases) > 0 { - for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Orders) > 0 { - for iNdEx := len(m.Orders) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Orders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if 
err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Params.Size() - n += 1 + l + sovGenesis(uint64(l)) - if len(m.Orders) > 0 { - for _, e := range m.Orders { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - if len(m.Leases) > 0 { - for _, e := range m.Leases { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - if len(m.Bids) > 0 { - for _, e := range m.Bids { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Orders", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Orders = append(m.Orders, Order{}) - if err := m.Orders[len(m.Orders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Leases = append(m.Leases, Lease{}) - if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bids", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bids = append(m.Bids, Bid{}) - if err := m.Bids[len(m.Bids)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta4/id.go b/go/node/market/v1beta4/id.go deleted file mode 100644 index c2f7a010..00000000 --- a/go/node/market/v1beta4/id.go +++ /dev/null @@ -1,154 +0,0 @@ -package v1beta4 - -import ( - "fmt" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - - dtypes 
"github.com/akash-network/akash-api/go/node/deployment/v1beta3" -) - -// MakeOrderID returns OrderID instance with provided groupID details and oseq -func MakeOrderID(id dtypes.GroupID, oseq uint32) OrderID { - return OrderID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: id.GSeq, - OSeq: oseq, - } -} - -// GroupID method returns groupID details for specific order -func (id OrderID) GroupID() dtypes.GroupID { - return dtypes.GroupID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: id.GSeq, - } -} - -// Equals method compares specific order with provided order -func (id OrderID) Equals(other OrderID) bool { - return id.GroupID().Equals(other.GroupID()) && id.OSeq == other.OSeq -} - -// Validate method for OrderID and returns nil -func (id OrderID) Validate() error { - if err := id.GroupID().Validate(); err != nil { - return sdkerrors.Wrap(err, "OrderID: Invalid GroupID") - } - if id.OSeq == 0 { - return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "OrderID: Invalid Order Sequence") - } - return nil -} - -// String provides stringer interface to save reflected formatting. -func (id OrderID) String() string { - return fmt.Sprintf("%s/%v", id.GroupID(), id.OSeq) -} - -// MakeBidID returns BidID instance with provided order details and provider -func MakeBidID(id OrderID, provider sdk.AccAddress) BidID { - return BidID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: id.GSeq, - OSeq: id.OSeq, - Provider: provider.String(), - } -} - -// Equals method compares specific bid with provided bid -func (id BidID) Equals(other BidID) bool { - return id.OrderID().Equals(other.OrderID()) && - id.Provider == other.Provider -} - -// LeaseID method returns lease details of bid -func (id BidID) LeaseID() LeaseID { - return LeaseID(id) -} - -// OrderID method returns OrderID details with specific bid details -func (id BidID) OrderID() OrderID { - return OrderID{ - Owner: id.Owner, - DSeq: id.DSeq, - GSeq: id.GSeq, - OSeq: id.OSeq, - } -} - -// String method for consistent output. -func (id BidID) String() string { - return fmt.Sprintf("%s/%v", id.OrderID(), id.Provider) -} - -// GroupID method returns GroupID details with specific bid details -func (id BidID) GroupID() dtypes.GroupID { - return id.OrderID().GroupID() -} - -// DeploymentID method returns deployment details with specific bid details -func (id BidID) DeploymentID() dtypes.DeploymentID { - return id.GroupID().DeploymentID() -} - -// Validate validates bid instance and returns nil -func (id BidID) Validate() error { - if err := id.OrderID().Validate(); err != nil { - return sdkerrors.Wrap(err, "BidID: Invalid OrderID") - } - if _, err := sdk.AccAddressFromBech32(id.Provider); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "BidID: Invalid Provider Address") - } - if id.Owner == id.Provider { - return sdkerrors.Wrap(sdkerrors.ErrConflict, "BidID: self-bid") - } - return nil -} - -// MakeLeaseID returns LeaseID instance with provided bid details -func MakeLeaseID(id BidID) LeaseID { - return LeaseID(id) -} - -// Equals method compares specific lease with provided lease -func (id LeaseID) Equals(other LeaseID) bool { - return id.BidID().Equals(other.BidID()) -} - -// Validate calls the BidID's validator and returns any error. 
-func (id LeaseID) Validate() error { - if err := id.BidID().Validate(); err != nil { - return sdkerrors.Wrap(err, "LeaseID: Invalid BidID") - } - return nil -} - -// BidID method returns BidID details with specific LeaseID -func (id LeaseID) BidID() BidID { - return BidID(id) -} - -// OrderID method returns OrderID details with specific lease details -func (id LeaseID) OrderID() OrderID { - return id.BidID().OrderID() -} - -// GroupID method returns GroupID details with specific lease details -func (id LeaseID) GroupID() dtypes.GroupID { - return id.OrderID().GroupID() -} - -// DeploymentID method returns deployment details with specific lease details -func (id LeaseID) DeploymentID() dtypes.DeploymentID { - return id.GroupID().DeploymentID() -} - -// String method provides human readable representation of LeaseID. -func (id LeaseID) String() string { - return id.BidID().String() -} diff --git a/go/node/market/v1beta4/key.go b/go/node/market/v1beta4/key.go deleted file mode 100644 index 75026acf..00000000 --- a/go/node/market/v1beta4/key.go +++ /dev/null @@ -1,28 +0,0 @@ -package v1beta4 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "market" - - // StoreKey is the store key string for market - StoreKey = ModuleName - - // RouterKey is the message route for market - RouterKey = ModuleName -) - -func OrderPrefix() []byte { - return []byte{0x01, 0x00} -} - -func BidPrefix() []byte { - return []byte{0x02, 0x00} -} - -func LeasePrefix() []byte { - return []byte{0x03, 0x00} -} - -func SecondaryLeasePrefix() []byte { - return []byte{0x03, 0x01} -} diff --git a/go/node/market/v1beta4/lease.pb.go b/go/node/market/v1beta4/lease.pb.go deleted file mode 100644 index c5cbfe3a..00000000 --- a/go/node/market/v1beta4/lease.pb.go +++ /dev/null @@ -1,2134 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta4/lease.proto - -package v1beta4 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of lease -type Lease_State int32 - -const ( - // Prefix should start with 0 in enum. 
So declaring dummy state - LeaseStateInvalid Lease_State = 0 - // LeaseActive denotes state for lease active - LeaseActive Lease_State = 1 - // LeaseInsufficientFunds denotes state for lease insufficient_funds - LeaseInsufficientFunds Lease_State = 2 - // LeaseClosed denotes state for lease closed - LeaseClosed Lease_State = 3 -) - -var Lease_State_name = map[int32]string{ - 0: "invalid", - 1: "active", - 2: "insufficient_funds", - 3: "closed", -} - -var Lease_State_value = map[string]int32{ - "invalid": 0, - "active": 1, - "insufficient_funds": 2, - "closed": 3, -} - -func (x Lease_State) String() string { - return proto.EnumName(Lease_State_name, int32(x)) -} - -func (Lease_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_4ab6faa2f1861aa1, []int{1, 0} -} - -// LeaseID stores bid details of lease -type LeaseID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` -} - -func (m *LeaseID) Reset() { *m = LeaseID{} } -func (*LeaseID) ProtoMessage() {} -func (*LeaseID) Descriptor() ([]byte, []int) { - return fileDescriptor_4ab6faa2f1861aa1, []int{0} -} -func (m *LeaseID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseID) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseID.Merge(m, src) -} -func (m *LeaseID) XXX_Size() int { - return m.Size() -} -func (m *LeaseID) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseID.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseID proto.InternalMessageInfo - -func (m *LeaseID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *LeaseID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *LeaseID) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *LeaseID) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *LeaseID) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -// Lease stores LeaseID, state of lease and price -type Lease struct { - LeaseID LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"id" yaml:"id"` - State Lease_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta4.Lease_State" json:"state" yaml:"state"` - Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - ClosedOn int64 `protobuf:"varint,5,opt,name=closed_on,json=closedOn,proto3" json:"closed_on,omitempty"` -} - -func (m *Lease) Reset() { *m = Lease{} } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { - return fileDescriptor_4ab6faa2f1861aa1, []int{1} -} -func (m *Lease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - 
return xxx_messageInfo_Lease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Lease) XXX_Merge(src proto.Message) { - xxx_messageInfo_Lease.Merge(m, src) -} -func (m *Lease) XXX_Size() int { - return m.Size() -} -func (m *Lease) XXX_DiscardUnknown() { - xxx_messageInfo_Lease.DiscardUnknown(m) -} - -var xxx_messageInfo_Lease proto.InternalMessageInfo - -func (m *Lease) GetLeaseID() LeaseID { - if m != nil { - return m.LeaseID - } - return LeaseID{} -} - -func (m *Lease) GetState() Lease_State { - if m != nil { - return m.State - } - return LeaseStateInvalid -} - -func (m *Lease) GetPrice() types.DecCoin { - if m != nil { - return m.Price - } - return types.DecCoin{} -} - -func (m *Lease) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -func (m *Lease) GetClosedOn() int64 { - if m != nil { - return m.ClosedOn - } - return 0 -} - -// LeaseFilters defines flags for lease list filter -type LeaseFilters struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` - State string `protobuf:"bytes,6,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *LeaseFilters) Reset() { *m = LeaseFilters{} } -func (m *LeaseFilters) String() string { return proto.CompactTextString(m) } -func (*LeaseFilters) ProtoMessage() {} -func (*LeaseFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_4ab6faa2f1861aa1, []int{2} -} -func (m *LeaseFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_LeaseFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *LeaseFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseFilters.Merge(m, src) -} -func (m *LeaseFilters) XXX_Size() int { - return m.Size() -} -func (m *LeaseFilters) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseFilters proto.InternalMessageInfo - -func (m *LeaseFilters) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *LeaseFilters) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *LeaseFilters) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *LeaseFilters) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *LeaseFilters) GetProvider() string { - if m != nil { - return m.Provider - } - return "" -} - -func (m *LeaseFilters) GetState() string { - if m != nil { - return m.State - } - return "" -} - -// MsgCreateLease is sent to create a lease -type MsgCreateLease struct { - BidID BidID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCreateLease) Reset() { *m = MsgCreateLease{} } -func (m *MsgCreateLease) String() string { return proto.CompactTextString(m) } -func (*MsgCreateLease) ProtoMessage() {} -func (*MsgCreateLease) 
Descriptor() ([]byte, []int) { - return fileDescriptor_4ab6faa2f1861aa1, []int{3} -} -func (m *MsgCreateLease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateLease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateLease) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateLease.Merge(m, src) -} -func (m *MsgCreateLease) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateLease) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateLease.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateLease proto.InternalMessageInfo - -func (m *MsgCreateLease) GetBidID() BidID { - if m != nil { - return m.BidID - } - return BidID{} -} - -// MsgCreateLeaseResponse is the response from creating a lease -type MsgCreateLeaseResponse struct { -} - -func (m *MsgCreateLeaseResponse) Reset() { *m = MsgCreateLeaseResponse{} } -func (m *MsgCreateLeaseResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateLeaseResponse) ProtoMessage() {} -func (*MsgCreateLeaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_4ab6faa2f1861aa1, []int{4} -} -func (m *MsgCreateLeaseResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateLeaseResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateLeaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateLeaseResponse.Merge(m, src) -} -func (m *MsgCreateLeaseResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateLeaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateLeaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateLeaseResponse proto.InternalMessageInfo - -// MsgWithdrawLease defines an SDK message for closing bid -type MsgWithdrawLease struct { - LeaseID LeaseID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` -} - -func (m *MsgWithdrawLease) Reset() { *m = MsgWithdrawLease{} } -func (m *MsgWithdrawLease) String() string { return proto.CompactTextString(m) } -func (*MsgWithdrawLease) ProtoMessage() {} -func (*MsgWithdrawLease) Descriptor() ([]byte, []int) { - return fileDescriptor_4ab6faa2f1861aa1, []int{5} -} -func (m *MsgWithdrawLease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgWithdrawLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgWithdrawLease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgWithdrawLease) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgWithdrawLease.Merge(m, src) -} -func (m *MsgWithdrawLease) XXX_Size() int { - return m.Size() -} -func (m *MsgWithdrawLease) XXX_DiscardUnknown() { - xxx_messageInfo_MsgWithdrawLease.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgWithdrawLease proto.InternalMessageInfo - -func (m *MsgWithdrawLease) GetLeaseID() LeaseID { - if m != nil { - return m.LeaseID - } - return LeaseID{} -} - -// MsgWithdrawLeaseResponse 
defines the Msg/WithdrawLease response type. -type MsgWithdrawLeaseResponse struct { -} - -func (m *MsgWithdrawLeaseResponse) Reset() { *m = MsgWithdrawLeaseResponse{} } -func (m *MsgWithdrawLeaseResponse) String() string { return proto.CompactTextString(m) } -func (*MsgWithdrawLeaseResponse) ProtoMessage() {} -func (*MsgWithdrawLeaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_4ab6faa2f1861aa1, []int{6} -} -func (m *MsgWithdrawLeaseResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgWithdrawLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgWithdrawLeaseResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgWithdrawLeaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgWithdrawLeaseResponse.Merge(m, src) -} -func (m *MsgWithdrawLeaseResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgWithdrawLeaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgWithdrawLeaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgWithdrawLeaseResponse proto.InternalMessageInfo - -// MsgCloseLease defines an SDK message for closing order -type MsgCloseLease struct { - LeaseID LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"id" yaml:"id"` -} - -func (m *MsgCloseLease) Reset() { *m = MsgCloseLease{} } -func (m *MsgCloseLease) String() string { return proto.CompactTextString(m) } -func (*MsgCloseLease) ProtoMessage() {} -func (*MsgCloseLease) Descriptor() ([]byte, []int) { - return fileDescriptor_4ab6faa2f1861aa1, []int{7} -} -func (m *MsgCloseLease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseLease.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseLease) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseLease.Merge(m, src) -} -func (m *MsgCloseLease) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseLease) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseLease.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseLease proto.InternalMessageInfo - -func (m *MsgCloseLease) GetLeaseID() LeaseID { - if m != nil { - return m.LeaseID - } - return LeaseID{} -} - -// MsgCloseLeaseResponse defines the Msg/CloseLease response type. 
-type MsgCloseLeaseResponse struct { -} - -func (m *MsgCloseLeaseResponse) Reset() { *m = MsgCloseLeaseResponse{} } -func (m *MsgCloseLeaseResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCloseLeaseResponse) ProtoMessage() {} -func (*MsgCloseLeaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_4ab6faa2f1861aa1, []int{8} -} -func (m *MsgCloseLeaseResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCloseLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCloseLeaseResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCloseLeaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCloseLeaseResponse.Merge(m, src) -} -func (m *MsgCloseLeaseResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCloseLeaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCloseLeaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCloseLeaseResponse proto.InternalMessageInfo - -func init() { - proto.RegisterEnum("akash.market.v1beta4.Lease_State", Lease_State_name, Lease_State_value) - proto.RegisterType((*LeaseID)(nil), "akash.market.v1beta4.LeaseID") - proto.RegisterType((*Lease)(nil), "akash.market.v1beta4.Lease") - proto.RegisterType((*LeaseFilters)(nil), "akash.market.v1beta4.LeaseFilters") - proto.RegisterType((*MsgCreateLease)(nil), "akash.market.v1beta4.MsgCreateLease") - proto.RegisterType((*MsgCreateLeaseResponse)(nil), "akash.market.v1beta4.MsgCreateLeaseResponse") - proto.RegisterType((*MsgWithdrawLease)(nil), "akash.market.v1beta4.MsgWithdrawLease") - proto.RegisterType((*MsgWithdrawLeaseResponse)(nil), "akash.market.v1beta4.MsgWithdrawLeaseResponse") - proto.RegisterType((*MsgCloseLease)(nil), "akash.market.v1beta4.MsgCloseLease") - proto.RegisterType((*MsgCloseLeaseResponse)(nil), "akash.market.v1beta4.MsgCloseLeaseResponse") -} - -func init() { proto.RegisterFile("akash/market/v1beta4/lease.proto", fileDescriptor_4ab6faa2f1861aa1) } - -var fileDescriptor_4ab6faa2f1861aa1 = []byte{ - // 756 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4f, 0x4f, 0xdb, 0x4a, - 0x10, 0xb7, 0x93, 0x38, 0x24, 0x1b, 0xfe, 0xe4, 0x59, 0xc0, 0xcb, 0x33, 0x0f, 0xdb, 0xf5, 0x89, - 0x4b, 0x6d, 0x11, 0x7a, 0x29, 0x3d, 0x11, 0x22, 0xaa, 0x48, 0x45, 0xa8, 0xa6, 0x52, 0xab, 0xaa, - 0x52, 0xe4, 0x78, 0x17, 0xb3, 0x22, 0xf1, 0x06, 0xaf, 0x09, 0xea, 0x37, 0xa8, 0x38, 0xf5, 0xd8, - 0x0b, 0x2a, 0x52, 0xbf, 0x0c, 0x47, 0xd4, 0x53, 0x4f, 0x56, 0x15, 0x2e, 0x55, 0x8e, 0x51, 0x3f, - 0x40, 0xb5, 0xbb, 0x0e, 0x49, 0x50, 0xc4, 0xa9, 0xea, 0xa9, 0xa7, 0x64, 0x7e, 0x33, 0xbf, 0x99, - 0xf1, 0x6f, 0x66, 0x6c, 0x60, 0x7a, 0x27, 0x1e, 0x3d, 0x76, 0x3a, 0x5e, 0x74, 0x82, 0x62, 0xa7, - 0xb7, 0xd9, 0x42, 0xb1, 0xf7, 0xc4, 0x69, 0x23, 0x8f, 0x22, 0xbb, 0x1b, 0x91, 0x98, 0xa8, 0xcb, - 0x3c, 0xc2, 0x16, 0x11, 0x76, 0x1a, 0xa1, 0x2d, 0x07, 0x24, 0x20, 0x3c, 0xc0, 0x61, 0xff, 0x44, - 0xac, 0xa6, 0xfb, 0x84, 0x76, 0x08, 0x75, 0x5a, 0x1e, 0x45, 0x69, 0xb2, 0x4d, 0xc7, 0x27, 0x38, - 0x1c, 0xf9, 0x67, 0x56, 0x6b, 0x61, 0x28, 0xfc, 0xd6, 0x55, 0x06, 0xcc, 0xbd, 0x60, 0xb5, 0x1b, - 0x75, 0xd5, 0x01, 0x0a, 0x39, 0x0f, 0x51, 0x54, 0x91, 0x4d, 0x79, 0xa3, 0x58, 0xfb, 0x6f, 0x90, - 0x18, 0x02, 0x18, 0x26, 0xc6, 0xfc, 0x7b, 0xaf, 0xd3, 0xde, 0xb6, 0xb8, 0x69, 0xb9, 0x02, 0x56, - 0xb7, 0x40, 0x0e, 0x52, 
0x74, 0x5a, 0xc9, 0x98, 0xf2, 0x46, 0xae, 0x66, 0xf4, 0x13, 0x23, 0x57, - 0x3f, 0x44, 0xa7, 0x83, 0xc4, 0xe0, 0xf8, 0x30, 0x31, 0x4a, 0x82, 0xc6, 0x2c, 0xcb, 0xe5, 0x20, - 0x23, 0x05, 0x8c, 0x94, 0x35, 0xe5, 0x8d, 0x05, 0x41, 0x7a, 0x9e, 0x92, 0x82, 0x29, 0x52, 0x20, - 0x48, 0x41, 0x4a, 0x22, 0x8c, 0x94, 0x1b, 0x93, 0x0e, 0x52, 0x12, 0x99, 0x22, 0x11, 0x41, 0x62, - 0x3f, 0xea, 0x33, 0x50, 0xe8, 0x46, 0xa4, 0x87, 0x21, 0x8a, 0x2a, 0x0a, 0x7f, 0x24, 0x63, 0x90, - 0x18, 0x77, 0xd8, 0x30, 0x31, 0x96, 0x04, 0x69, 0x84, 0x58, 0xee, 0x9d, 0x73, 0xbb, 0xf0, 0xe9, - 0xca, 0x90, 0x7e, 0x5c, 0x19, 0x92, 0xf5, 0x33, 0x0b, 0x14, 0x2e, 0x91, 0xfa, 0x0e, 0x14, 0xf8, - 0x9c, 0x9a, 0x18, 0x72, 0x8d, 0x4a, 0xd5, 0x75, 0x7b, 0xd6, 0xac, 0xec, 0x54, 0xd1, 0x9a, 0x75, - 0x9d, 0x18, 0x52, 0x3f, 0x31, 0x46, 0x12, 0x0f, 0x12, 0x23, 0x83, 0xe1, 0x30, 0x31, 0x8a, 0xa2, - 0x30, 0x86, 0x96, 0x3b, 0xc7, 0x53, 0x36, 0xa0, 0xea, 0x02, 0x85, 0xc6, 0x5e, 0x8c, 0xb8, 0x9c, - 0x8b, 0xd5, 0x47, 0x0f, 0xa4, 0xb6, 0x0f, 0x59, 0xa0, 0x98, 0x10, 0xe7, 0x8c, 0x27, 0xc4, 0x4d, - 0xcb, 0x15, 0xb0, 0xfa, 0x12, 0x28, 0xdd, 0x08, 0xfb, 0x88, 0xab, 0x5d, 0xaa, 0xfe, 0x6f, 0x8b, - 0x75, 0xb1, 0xd9, 0xba, 0xa4, 0x29, 0x37, 0xed, 0x3a, 0xf2, 0x77, 0x09, 0x0e, 0x6b, 0xeb, 0xac, - 0x5b, 0x96, 0x92, 0x53, 0xc6, 0x29, 0xb9, 0x69, 0xb9, 0x02, 0x56, 0xd7, 0x01, 0xf0, 0x23, 0xe4, - 0xc5, 0x08, 0x36, 0xbd, 0x98, 0x0f, 0x24, 0xeb, 0x16, 0x53, 0x64, 0x27, 0x56, 0xd7, 0x40, 0xd1, - 0x6f, 0x13, 0x8a, 0x60, 0x93, 0x84, 0x5c, 0xf5, 0xac, 0x5b, 0x10, 0xc0, 0x41, 0x68, 0x7d, 0x96, - 0x81, 0xc2, 0x5b, 0x57, 0x2d, 0x30, 0x87, 0xc3, 0x9e, 0xd7, 0xc6, 0xb0, 0x2c, 0x69, 0x2b, 0x17, - 0x97, 0xe6, 0x3f, 0xfc, 0xc1, 0xb8, 0xb3, 0x21, 0x1c, 0xea, 0x1a, 0xc8, 0x7b, 0x7e, 0x8c, 0x7b, - 0xa8, 0x2c, 0x6b, 0x4b, 0x17, 0x97, 0x66, 0x89, 0x87, 0xec, 0x70, 0x48, 0xad, 0x02, 0x15, 0x87, - 0xf4, 0xec, 0xe8, 0x08, 0xfb, 0x18, 0x85, 0x71, 0xf3, 0xe8, 0x2c, 0x84, 0xb4, 0x9c, 0xd1, 0xb4, - 0x8b, 0x4b, 0x73, 0x55, 0xc8, 0x3d, 0xe1, 0xde, 0x63, 0x5e, 0x96, 0x50, 0xb4, 0x52, 0xce, 0x4e, - 0x24, 0xdc, 0xe5, 0x90, 0x96, 0xfb, 0xf0, 0x45, 0x97, 0x26, 0xc6, 0xfe, 0x35, 0x03, 0xe6, 0xb9, - 0x7f, 0x0f, 0xb7, 0x63, 0x14, 0xd1, 0xbf, 0xe7, 0x31, 0x71, 0x1e, 0x4c, 0x0c, 0xb1, 0xac, 0xf9, - 0xb1, 0x18, 0x0f, 0x6d, 0xe2, 0x76, 0x8e, 0x8b, 0xda, 0x06, 0x8b, 0xfb, 0x34, 0xd8, 0xe5, 0xdb, - 0x22, 0x6e, 0xea, 0x15, 0xc8, 0xb7, 0x30, 0x1c, 0x5f, 0xd4, 0xda, 0xec, 0xb5, 0xaf, 0x61, 0xd8, - 0xa8, 0xd7, 0xcc, 0xf4, 0x9e, 0x14, 0x6e, 0xce, 0xba, 0x26, 0xa5, 0x85, 0x61, 0x03, 0xa6, 0xd5, - 0x2a, 0x60, 0x75, 0xba, 0x9a, 0x8b, 0x68, 0x97, 0x84, 0x14, 0x59, 0x11, 0x28, 0xef, 0xd3, 0xe0, - 0x35, 0x8e, 0x8f, 0x61, 0xe4, 0x9d, 0x8b, 0x4e, 0xde, 0xdc, 0xeb, 0xe4, 0x37, 0xdc, 0xf6, 0x54, - 0x37, 0x1a, 0xa8, 0xdc, 0xaf, 0x79, 0xd7, 0x0f, 0x05, 0x0b, 0xac, 0x53, 0xb6, 0x89, 0x7f, 0xe0, - 0x55, 0x93, 0x36, 0xf4, 0x2f, 0x58, 0x99, 0x2a, 0x3a, 0xea, 0xa6, 0x76, 0x78, 0xdd, 0xd7, 0xe5, - 0x9b, 0xbe, 0x2e, 0x7f, 0xef, 0xeb, 0xf2, 0xc7, 0x5b, 0x5d, 0xba, 0xb9, 0xd5, 0xa5, 0x6f, 0xb7, - 0xba, 0xf4, 0xf6, 0x69, 0x80, 0xe3, 0xe3, 0xb3, 0x96, 0xed, 0x93, 0x8e, 0xc3, 0xdb, 0x79, 0x1c, - 0xa2, 0xf8, 0x9c, 0x44, 0x27, 0xa9, 0xe5, 0x75, 0xb1, 0x13, 0x10, 0x27, 0x24, 0x10, 0xdd, 0xfb, - 0xe6, 0xb4, 0xf2, 0xfc, 0x83, 0xb3, 0xf5, 0x2b, 0x00, 0x00, 0xff, 0xff, 0xcd, 0xc4, 0x50, 0x62, - 0x00, 0x07, 0x00, 0x00, -} - -func (m *LeaseID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} 
- -func (m *LeaseID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintLease(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintLease(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Lease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ClosedOn != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.ClosedOn)) - i-- - dAtA[i] = 0x28 - } - if m.CreatedAt != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - { - size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.State != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.LeaseID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *LeaseFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintLease(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x32 - } - if len(m.Provider) > 0 { - i -= len(m.Provider) - copy(dAtA[i:], m.Provider) - i = encodeVarintLease(dAtA, i, uint64(len(m.Provider))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintLease(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintLease(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateLease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateLease) MarshalTo(dAtA 
[]byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.BidID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCreateLeaseResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateLeaseResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgWithdrawLease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgWithdrawLease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgWithdrawLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.LeaseID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgWithdrawLeaseResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgWithdrawLeaseResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgWithdrawLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgCloseLease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseLease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.LeaseID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintLease(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MsgCloseLeaseResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCloseLeaseResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCloseLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func encodeVarintLease(dAtA []byte, offset int, v uint64) int { - offset -= sovLease(v) - base := offset - for v >= 1<<7 { 
- dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *LeaseID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovLease(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovLease(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovLease(uint64(m.OSeq)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - return n -} - -func (m *Lease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LeaseID.Size() - n += 1 + l + sovLease(uint64(l)) - if m.State != 0 { - n += 1 + sovLease(uint64(m.State)) - } - l = m.Price.Size() - n += 1 + l + sovLease(uint64(l)) - if m.CreatedAt != 0 { - n += 1 + sovLease(uint64(m.CreatedAt)) - } - if m.ClosedOn != 0 { - n += 1 + sovLease(uint64(m.ClosedOn)) - } - return n -} - -func (m *LeaseFilters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovLease(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovLease(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovLease(uint64(m.OSeq)) - } - l = len(m.Provider) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovLease(uint64(l)) - } - return n -} - -func (m *MsgCreateLease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BidID.Size() - n += 1 + l + sovLease(uint64(l)) - return n -} - -func (m *MsgCreateLeaseResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgWithdrawLease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LeaseID.Size() - n += 1 + l + sovLease(uint64(l)) - return n -} - -func (m *MsgWithdrawLeaseResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgCloseLease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LeaseID.Size() - n += 1 + l + sovLease(uint64(l)) - return n -} - -func (m *MsgCloseLeaseResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func sovLease(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozLease(x uint64) (n int) { - return sovLease(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *LeaseID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - 
if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Lease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Lease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - if err := m.LeaseID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Lease_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ClosedOn", wireType) - } - m.ClosedOn = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ClosedOn |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner 
= string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provider = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateLease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateLease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateLease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field BidID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BidID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateLeaseResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateLeaseResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgWithdrawLease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgWithdrawLease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgWithdrawLease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LeaseID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if 
(iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgWithdrawLeaseResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgWithdrawLeaseResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgWithdrawLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseLease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseLease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseLease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthLease - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthLease - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LeaseID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCloseLeaseResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowLease - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCloseLeaseResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCloseLeaseResponse: illegal tag %d (wire type %d)", fieldNum, 
wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipLease(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthLease - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipLease(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowLease - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthLease - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupLease - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthLease - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthLease = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowLease = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupLease = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta4/migrate/v1beta3.go b/go/node/market/v1beta4/migrate/v1beta3.go deleted file mode 100644 index f87f9b94..00000000 --- a/go/node/market/v1beta4/migrate/v1beta3.go +++ /dev/null @@ -1,40 +0,0 @@ -package migrate - -import ( - "github.com/akash-network/akash-api/go/node/market/v1beta3" - "github.com/akash-network/akash-api/go/node/market/v1beta4" -) - -func BidStateFromV1beta3(from v1beta3.Bid_State) v1beta4.Bid_State { - return v1beta4.Bid_State(from) -} - -func LeaseIDFromV1beta3(from v1beta3.LeaseID) v1beta4.LeaseID { - return v1beta4.LeaseID{ - Owner: from.Owner, - DSeq: from.DSeq, - GSeq: from.GSeq, - OSeq: from.OSeq, - Provider: from.Provider, - } -} - -func BidIDFromV1beta3(from v1beta3.BidID) v1beta4.BidID { - return v1beta4.BidID{ - Owner: from.Owner, - DSeq: from.DSeq, - GSeq: from.GSeq, - OSeq: from.OSeq, - Provider: from.Provider, - } -} - -func BidFromV1beta3(from v1beta3.Bid) v1beta4.Bid { - return v1beta4.Bid{ - BidID: BidIDFromV1beta3(from.BidID), - State: BidStateFromV1beta3(from.State), - Price: from.Price, - CreatedAt: from.CreatedAt, - ResourcesOffer: v1beta4.ResourcesOffer{}, - } -} diff --git a/go/node/market/v1beta4/msgs.go b/go/node/market/v1beta4/msgs.go deleted file mode 100644 index 9aa2ef63..00000000 --- a/go/node/market/v1beta4/msgs.go +++ /dev/null @@ -1,216 +0,0 @@ -package v1beta4 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" -) - -const ( - MsgTypeCreateBid = "create-bid" - MsgTypeCloseBid = "close-bid" - MsgTypeCreateLease = "create-lease" - MsgTypeWithdrawLease = 
"withdraw-lease" - MsgTypeCloseLease = "close-lease" -) - -var ( - _ sdk.Msg = &MsgCreateBid{} - _ sdk.Msg = &MsgCloseBid{} - _ sdk.Msg = &MsgCreateLease{} - _ sdk.Msg = &MsgWithdrawLease{} - _ sdk.Msg = &MsgCloseLease{} -) - -// NewMsgCreateBid creates a new MsgCreateBid instance -func NewMsgCreateBid(id OrderID, provider sdk.AccAddress, price sdk.DecCoin, deposit sdk.Coin, roffer ResourcesOffer) *MsgCreateBid { - return &MsgCreateBid{ - Order: id, - Provider: provider.String(), - Price: price, - Deposit: deposit, - ResourcesOffer: roffer, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCreateBid) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCreateBid) Type() string { return MsgTypeCreateBid } - -// GetSignBytes encodes the message for signing -func (msg MsgCreateBid) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCreateBid) GetSigners() []sdk.AccAddress { - provider, err := sdk.AccAddressFromBech32(msg.Provider) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{provider} -} - -// ValidateBasic does basic validation of provider and order -func (msg MsgCreateBid) ValidateBasic() error { - if err := msg.Order.Validate(); err != nil { - return err - } - - provider, err := sdk.AccAddressFromBech32(msg.Provider) - if err != nil { - return ErrEmptyProvider - } - - owner, err := sdk.AccAddressFromBech32(msg.Order.Owner) - if err != nil { - return errors.Wrap(ErrInvalidBid, "empty owner") - } - - if provider.Equals(owner) { - return ErrSameAccount - } - - if msg.Price.IsZero() { - return ErrBidZeroPrice - } - - return nil -} - -// NewMsgWithdrawLease creates a new MsgWithdrawLease instance -func NewMsgWithdrawLease(id LeaseID) *MsgWithdrawLease { - return &MsgWithdrawLease{ - LeaseID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgWithdrawLease) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgWithdrawLease) Type() string { return MsgTypeWithdrawLease } - -// GetSignBytes encodes the message for signing -func (msg MsgWithdrawLease) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgWithdrawLease) GetSigners() []sdk.AccAddress { - provider, err := sdk.AccAddressFromBech32(msg.GetLeaseID().Provider) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{provider} -} - -// ValidateBasic does basic validation of provider and order -func (msg MsgWithdrawLease) ValidateBasic() error { - if err := msg.LeaseID.Validate(); err != nil { - return err - } - return nil -} - -// NewMsgCreateLease creates a new MsgCreateLease instance -func NewMsgCreateLease(id BidID) *MsgCreateLease { - return &MsgCreateLease{ - BidID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCreateLease) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCreateLease) Type() string { return MsgTypeCreateLease } - -// GetSignBytes encodes the message for signing -func (msg MsgCreateLease) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCreateLease) GetSigners() []sdk.AccAddress { - provider, err := sdk.AccAddressFromBech32(msg.BidID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{provider} -} 
- -// ValidateBasic method for MsgCreateLease -func (msg MsgCreateLease) ValidateBasic() error { - return msg.BidID.Validate() -} - -// NewMsgCloseBid creates a new MsgCloseBid instance -func NewMsgCloseBid(id BidID) *MsgCloseBid { - return &MsgCloseBid{ - BidID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCloseBid) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCloseBid) Type() string { return MsgTypeCloseBid } - -// GetSignBytes encodes the message for signing -func (msg MsgCloseBid) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCloseBid) GetSigners() []sdk.AccAddress { - provider, err := sdk.AccAddressFromBech32(msg.BidID.Provider) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{provider} -} - -// ValidateBasic method for MsgCloseBid -func (msg MsgCloseBid) ValidateBasic() error { - return msg.BidID.Validate() -} - -// NewMsgCloseLease creates a new MsgCloseLease instance -func NewMsgCloseLease(id LeaseID) *MsgCloseLease { - return &MsgCloseLease{ - LeaseID: id, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCloseLease) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCloseLease) Type() string { return MsgTypeCloseLease } - -// GetSignBytes encodes the message for signing -func (msg MsgCloseLease) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCloseLease) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.LeaseID.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// ValidateBasic method for MsgCloseLease -func (msg MsgCloseLease) ValidateBasic() error { - return msg.LeaseID.Validate() -} diff --git a/go/node/market/v1beta4/order.pb.go b/go/node/market/v1beta4/order.pb.go deleted file mode 100644 index fbf5abc5..00000000 --- a/go/node/market/v1beta4/order.pb.go +++ /dev/null @@ -1,1107 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta4/order.proto - -package v1beta4 - -import ( - fmt "fmt" - v1beta3 "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// State is an enum which refers to state of order -type Order_State int32 - -const ( - // Prefix should start with 0 in enum. 
So declaring dummy state - OrderStateInvalid Order_State = 0 - // OrderOpen denotes state for order open - OrderOpen Order_State = 1 - // OrderMatched denotes state for order matched - OrderActive Order_State = 2 - // OrderClosed denotes state for order lost - OrderClosed Order_State = 3 -) - -var Order_State_name = map[int32]string{ - 0: "invalid", - 1: "open", - 2: "active", - 3: "closed", -} - -var Order_State_value = map[string]int32{ - "invalid": 0, - "open": 1, - "active": 2, - "closed": 3, -} - -func (x Order_State) String() string { - return proto.EnumName(Order_State_name, int32(x)) -} - -func (Order_State) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_f7500a2881510f8f, []int{1, 0} -} - -// OrderID stores owner and all other seq numbers -type OrderID struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` -} - -func (m *OrderID) Reset() { *m = OrderID{} } -func (*OrderID) ProtoMessage() {} -func (*OrderID) Descriptor() ([]byte, []int) { - return fileDescriptor_f7500a2881510f8f, []int{0} -} -func (m *OrderID) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OrderID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_OrderID.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *OrderID) XXX_Merge(src proto.Message) { - xxx_messageInfo_OrderID.Merge(m, src) -} -func (m *OrderID) XXX_Size() int { - return m.Size() -} -func (m *OrderID) XXX_DiscardUnknown() { - xxx_messageInfo_OrderID.DiscardUnknown(m) -} - -var xxx_messageInfo_OrderID proto.InternalMessageInfo - -func (m *OrderID) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *OrderID) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *OrderID) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *OrderID) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -// Order stores orderID, state of order and other details -type Order struct { - OrderID OrderID `protobuf:"bytes,1,opt,name=order_id,json=orderId,proto3" json:"id" yaml:"id"` - State Order_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta4.Order_State" json:"state" yaml:"state"` - Spec v1beta3.GroupSpec `protobuf:"bytes,3,opt,name=spec,proto3" json:"spec" yaml:"spec"` - CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` -} - -func (m *Order) Reset() { *m = Order{} } -func (*Order) ProtoMessage() {} -func (*Order) Descriptor() ([]byte, []int) { - return fileDescriptor_f7500a2881510f8f, []int{1} -} -func (m *Order) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Order.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Order) XXX_Merge(src proto.Message) { - xxx_messageInfo_Order.Merge(m, src) -} -func (m *Order) XXX_Size() int { - return m.Size() -} -func (m *Order) 
XXX_DiscardUnknown() { - xxx_messageInfo_Order.DiscardUnknown(m) -} - -var xxx_messageInfo_Order proto.InternalMessageInfo - -func (m *Order) GetOrderID() OrderID { - if m != nil { - return m.OrderID - } - return OrderID{} -} - -func (m *Order) GetState() Order_State { - if m != nil { - return m.State - } - return OrderStateInvalid -} - -func (m *Order) GetSpec() v1beta3.GroupSpec { - if m != nil { - return m.Spec - } - return v1beta3.GroupSpec{} -} - -func (m *Order) GetCreatedAt() int64 { - if m != nil { - return m.CreatedAt - } - return 0 -} - -// OrderFilters defines flags for order list filter -type OrderFilters struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` - GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` - OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` - State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state" yaml:"state"` -} - -func (m *OrderFilters) Reset() { *m = OrderFilters{} } -func (m *OrderFilters) String() string { return proto.CompactTextString(m) } -func (*OrderFilters) ProtoMessage() {} -func (*OrderFilters) Descriptor() ([]byte, []int) { - return fileDescriptor_f7500a2881510f8f, []int{2} -} -func (m *OrderFilters) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OrderFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_OrderFilters.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *OrderFilters) XXX_Merge(src proto.Message) { - xxx_messageInfo_OrderFilters.Merge(m, src) -} -func (m *OrderFilters) XXX_Size() int { - return m.Size() -} -func (m *OrderFilters) XXX_DiscardUnknown() { - xxx_messageInfo_OrderFilters.DiscardUnknown(m) -} - -var xxx_messageInfo_OrderFilters proto.InternalMessageInfo - -func (m *OrderFilters) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *OrderFilters) GetDSeq() uint64 { - if m != nil { - return m.DSeq - } - return 0 -} - -func (m *OrderFilters) GetGSeq() uint32 { - if m != nil { - return m.GSeq - } - return 0 -} - -func (m *OrderFilters) GetOSeq() uint32 { - if m != nil { - return m.OSeq - } - return 0 -} - -func (m *OrderFilters) GetState() string { - if m != nil { - return m.State - } - return "" -} - -func init() { - proto.RegisterEnum("akash.market.v1beta4.Order_State", Order_State_name, Order_State_value) - proto.RegisterType((*OrderID)(nil), "akash.market.v1beta4.OrderID") - proto.RegisterType((*Order)(nil), "akash.market.v1beta4.Order") - proto.RegisterType((*OrderFilters)(nil), "akash.market.v1beta4.OrderFilters") -} - -func init() { proto.RegisterFile("akash/market/v1beta4/order.proto", fileDescriptor_f7500a2881510f8f) } - -var fileDescriptor_f7500a2881510f8f = []byte{ - // 587 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0xb1, 0x6b, 0xdb, 0x4e, - 0x14, 0x96, 0x6c, 0x39, 0x89, 0xcf, 0xc9, 0xef, 0xe7, 0x8a, 0x94, 0xa6, 0x0a, 0xd1, 0xa9, 0xea, - 0xe2, 0xa5, 0x12, 0xb5, 0xbb, 0xd4, 0x5b, 0xdc, 0xd0, 0xe0, 0xc9, 0x20, 0x77, 0x2a, 0x85, 0x20, - 0xeb, 0x0e, 0x45, 0xd8, 0xd6, 0x29, 0xd2, 0xc5, 0x21, 0x7b, 0x87, 0xe2, 0xa9, 0x4b, 0xa1, 0x8b, - 0x21, 0xd0, 0x3f, 0xa4, 0x6b, 0xc6, 0x8c, 0x9d, 0x44, 0xb1, 0x97, 0xe2, 0xd1, 0x7f, 0x41, 0xb9, - 
0x77, 0x0a, 0x4e, 0x4a, 0xc9, 0x1f, 0xd0, 0x49, 0x7a, 0xdf, 0xfb, 0xbe, 0x77, 0xf7, 0xbe, 0xf7, - 0x38, 0x64, 0xf9, 0x43, 0x3f, 0x3b, 0x75, 0xc7, 0x7e, 0x3a, 0xa4, 0xdc, 0x9d, 0xbc, 0x1c, 0x50, - 0xee, 0xbf, 0x72, 0x59, 0x4a, 0x68, 0xea, 0x24, 0x29, 0xe3, 0x4c, 0xdf, 0x05, 0x86, 0x23, 0x19, - 0x4e, 0xc1, 0x30, 0x76, 0x43, 0x16, 0x32, 0x20, 0xb8, 0xe2, 0x4f, 0x72, 0x8d, 0x86, 0xac, 0x46, - 0x68, 0x32, 0x62, 0x97, 0x63, 0x1a, 0xdf, 0x56, 0x6c, 0xb9, 0x61, 0xca, 0xce, 0x93, 0x2c, 0xa1, - 0x81, 0x64, 0xda, 0x0b, 0x15, 0x6d, 0xf6, 0xc4, 0x29, 0xdd, 0x23, 0xdd, 0x45, 0x15, 0x76, 0x11, - 0xd3, 0x74, 0x4f, 0xb5, 0xd4, 0x46, 0xb5, 0xf3, 0x74, 0x99, 0x63, 0x09, 0xac, 0x72, 0xbc, 0x7d, - 0xe9, 0x8f, 0x47, 0x6d, 0x1b, 0x42, 0xdb, 0x93, 0xb0, 0xde, 0x42, 0x1a, 0xc9, 0xe8, 0xd9, 0x5e, - 0xc9, 0x52, 0x1b, 0x5a, 0x07, 0xcf, 0x73, 0xac, 0x1d, 0xf5, 0xe9, 0xd9, 0x32, 0xc7, 0x80, 0xaf, - 0x72, 0x5c, 0x93, 0x32, 0x11, 0xd9, 0x1e, 0x80, 0x42, 0x14, 0x0a, 0x51, 0xd9, 0x52, 0x1b, 0x3b, - 0x52, 0x74, 0x5c, 0x88, 0xc2, 0x7b, 0xa2, 0x50, 0x8a, 0xc2, 0x42, 0xc4, 0x84, 0x48, 0x5b, 0x8b, - 0x7a, 0x85, 0x88, 0xdd, 0x13, 0x31, 0x29, 0x12, 0x9f, 0xf6, 0xd6, 0xd7, 0x2b, 0xac, 0xfc, 0xba, - 0xc2, 0x8a, 0xfd, 0xbd, 0x8c, 0x2a, 0xd0, 0xa5, 0xfe, 0x01, 0x6d, 0x81, 0xa9, 0x27, 0x11, 0x81, - 0x36, 0x6b, 0xcd, 0x03, 0xe7, 0x6f, 0xc6, 0x3a, 0x85, 0x29, 0x1d, 0xfb, 0x3a, 0xc7, 0xca, 0x3c, - 0xc7, 0xb7, 0x2e, 0x2d, 0x73, 0x5c, 0x8a, 0xc8, 0x2a, 0xc7, 0x55, 0x79, 0x60, 0x44, 0x6c, 0x6f, - 0x13, 0x4a, 0x76, 0x89, 0xee, 0xa1, 0x4a, 0xc6, 0x7d, 0x4e, 0xc1, 0x91, 0xff, 0x9a, 0xcf, 0x1e, - 0x28, 0xed, 0xf4, 0x05, 0x51, 0x9a, 0x0c, 0x9a, 0xb5, 0xc9, 0x10, 0xda, 0x9e, 0x84, 0xf5, 0x77, - 0x48, 0x13, 0xf3, 0x02, 0xbf, 0x6a, 0xcd, 0xe7, 0x45, 0xc9, 0xf5, 0x68, 0x8b, 0xb2, 0x2d, 0xe7, - 0x58, 0x8c, 0xb6, 0x9f, 0xd0, 0xa0, 0xb3, 0x2f, 0xee, 0x2c, 0xbc, 0x11, 0xc2, 0xb5, 0x37, 0x22, - 0xb2, 0x3d, 0x00, 0xf5, 0x03, 0x84, 0x82, 0x94, 0xfa, 0x9c, 0x92, 0x13, 0x9f, 0x83, 0xad, 0x65, - 0xaf, 0x5a, 0x20, 0x87, 0xdc, 0xfe, 0xa8, 0xa2, 0x0a, 0x5c, 0x50, 0xb7, 0xd1, 0x66, 0x14, 0x4f, - 0xfc, 0x51, 0x44, 0xea, 0x8a, 0xf1, 0x78, 0x3a, 0xb3, 0x1e, 0xc1, 0xf5, 0x21, 0xd9, 0x95, 0x09, - 0xfd, 0x09, 0xd2, 0x58, 0x42, 0xe3, 0xba, 0x6a, 0xec, 0x4c, 0x67, 0x56, 0x15, 0x08, 0xbd, 0x84, - 0xc6, 0xfa, 0x3e, 0xda, 0xf0, 0x03, 0x1e, 0x4d, 0x68, 0xbd, 0x64, 0xfc, 0x3f, 0x9d, 0x59, 0x35, - 0x48, 0x1d, 0x02, 0x24, 0x92, 0xc1, 0x88, 0x65, 0x94, 0xd4, 0xcb, 0x77, 0x92, 0x6f, 0x00, 0x32, - 0xb4, 0x4f, 0xdf, 0x4c, 0xe5, 0xce, 0x04, 0xbf, 0x94, 0xd0, 0x36, 0xe4, 0xdf, 0x46, 0x23, 0x4e, - 0xd3, 0xec, 0x5f, 0x5b, 0x56, 0xd1, 0x8f, 0x5c, 0x9d, 0xca, 0xba, 0x9f, 0x87, 0xf6, 0xa2, 0xad, - 0x09, 0x5f, 0x3a, 0xfd, 0xeb, 0xb9, 0xa9, 0xde, 0xcc, 0x4d, 0xf5, 0xe7, 0xdc, 0x54, 0x3f, 0x2f, - 0x4c, 0xe5, 0x66, 0x61, 0x2a, 0x3f, 0x16, 0xa6, 0xf2, 0xfe, 0x75, 0x18, 0xf1, 0xd3, 0xf3, 0x81, - 0x13, 0xb0, 0xb1, 0x0b, 0x3b, 0xf3, 0x22, 0xa6, 0xfc, 0x82, 0xa5, 0xc3, 0x22, 0xf2, 0x93, 0xc8, - 0x0d, 0x99, 0x1b, 0x33, 0x42, 0xff, 0x78, 0x76, 0x06, 0x1b, 0xf0, 0x36, 0xb4, 0x7e, 0x07, 0x00, - 0x00, 0xff, 0xff, 0x9c, 0xe7, 0x69, 0x2b, 0x95, 0x04, 0x00, 0x00, -} - -func (m *OrderID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OrderID) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OrderID) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l 
int - _ = l - if m.OSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintOrder(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Order) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Order) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Order) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CreatedAt != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.CreatedAt)) - i-- - dAtA[i] = 0x20 - } - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOrder(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.State != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.State)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.OrderID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintOrder(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *OrderFilters) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *OrderFilters) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OrderFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.State) > 0 { - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintOrder(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x2a - } - if m.OSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.OSeq)) - i-- - dAtA[i] = 0x20 - } - if m.GSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.GSeq)) - i-- - dAtA[i] = 0x18 - } - if m.DSeq != 0 { - i = encodeVarintOrder(dAtA, i, uint64(m.DSeq)) - i-- - dAtA[i] = 0x10 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintOrder(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintOrder(dAtA []byte, offset int, v uint64) int { - offset -= sovOrder(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *OrderID) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovOrder(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovOrder(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovOrder(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovOrder(uint64(m.OSeq)) - } - return n -} - -func (m *Order) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.OrderID.Size() - n += 1 + l + sovOrder(uint64(l)) - if m.State != 0 { - n += 1 + sovOrder(uint64(m.State)) - } - l = m.Spec.Size() - n += 1 + l + sovOrder(uint64(l)) - if m.CreatedAt != 0 { - n += 1 + sovOrder(uint64(m.CreatedAt)) - } - return n -} - 
-func (m *OrderFilters) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovOrder(uint64(l)) - } - if m.DSeq != 0 { - n += 1 + sovOrder(uint64(m.DSeq)) - } - if m.GSeq != 0 { - n += 1 + sovOrder(uint64(m.GSeq)) - } - if m.OSeq != 0 { - n += 1 + sovOrder(uint64(m.OSeq)) - } - l = len(m.State) - if l > 0 { - n += 1 + l + sovOrder(uint64(l)) - } - return n -} - -func sovOrder(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozOrder(x uint64) (n int) { - return sovOrder(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *OrderID) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OrderID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OrderID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOrder(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOrder - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Order) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Order: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Order: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OrderID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.OrderID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - m.State = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.State |= Order_State(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) - } - m.CreatedAt = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CreatedAt |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipOrder(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOrder - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OrderFilters) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OrderFilters: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OrderFilters: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - 
case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) - } - m.DSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DSeq |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) - } - m.GSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.GSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) - } - m.OSeq = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OSeq |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOrder - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOrder - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOrder - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOrder(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOrder - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipOrder(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOrder - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOrder - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowOrder - 
} - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthOrder - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupOrder - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthOrder - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthOrder = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowOrder = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupOrder = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta4/params.go b/go/node/market/v1beta4/params.go deleted file mode 100644 index e154e735..00000000 --- a/go/node/market/v1beta4/params.go +++ /dev/null @@ -1,76 +0,0 @@ -package v1beta4 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" - "github.com/pkg/errors" -) - -var _ paramtypes.ParamSet = (*Params)(nil) - -var ( - DefaultBidMinDeposit = sdk.NewCoin("uakt", sdk.NewInt(500000)) - defaultOrderMaxBids uint32 = 20 - maxOrderMaxBids uint32 = 500 -) - -const ( - keyBidMinDeposit = "BidMinDeposit" - keyOrderMaxBids = "OrderMaxBids" -) - -func ParamKeyTable() paramtypes.KeyTable { - return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) -} - -func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { - return paramtypes.ParamSetPairs{ - paramtypes.NewParamSetPair([]byte(keyBidMinDeposit), &p.BidMinDeposit, validateCoin), - paramtypes.NewParamSetPair([]byte(keyOrderMaxBids), &p.OrderMaxBids, validateOrderMaxBids), - } -} - -func DefaultParams() Params { - return Params{ - BidMinDeposit: DefaultBidMinDeposit, - OrderMaxBids: defaultOrderMaxBids, - } -} - -func (p Params) Validate() error { - if err := validateCoin(p.BidMinDeposit); err != nil { - return err - } - - if err := validateOrderMaxBids(p.OrderMaxBids); err != nil { - return err - } - return nil -} - -func validateCoin(i interface{}) error { - _, ok := i.(sdk.Coin) - if !ok { - return errors.Wrapf(ErrInvalidParam, "invalid type %T", i) - } - - return nil -} - -func validateOrderMaxBids(i interface{}) error { - val, ok := i.(uint32) - - if !ok { - return errors.Wrapf(ErrInvalidParam, "invalid type %T", i) - } - - if val == 0 { - return errors.Wrap(ErrInvalidParam, "order max bids too low") - } - - if val > maxOrderMaxBids { - return errors.Wrap(ErrInvalidParam, "order max bids too high") - } - - return nil -} diff --git a/go/node/market/v1beta4/params.pb.go b/go/node/market/v1beta4/params.pb.go deleted file mode 100644 index a454cdf4..00000000 --- a/go/node/market/v1beta4/params.pb.go +++ /dev/null @@ -1,365 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta4/params.proto - -package v1beta4 - -import ( - fmt "fmt" - types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Params is the params for the x/market module -type Params struct { - BidMinDeposit types.Coin `protobuf:"bytes,1,opt,name=bid_min_deposit,json=bidMinDeposit,proto3" json:"bid_min_deposit" yaml:"bid_min_deposit"` - OrderMaxBids uint32 `protobuf:"varint,2,opt,name=order_max_bids,json=orderMaxBids,proto3" json:"order_max_bids" yaml:"order_max_bids"` -} - -func (m *Params) Reset() { *m = Params{} } -func (m *Params) String() string { return proto.CompactTextString(m) } -func (*Params) ProtoMessage() {} -func (*Params) Descriptor() ([]byte, []int) { - return fileDescriptor_80f1e7d9604b0c57, []int{0} -} -func (m *Params) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Params.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Params) XXX_Merge(src proto.Message) { - xxx_messageInfo_Params.Merge(m, src) -} -func (m *Params) XXX_Size() int { - return m.Size() -} -func (m *Params) XXX_DiscardUnknown() { - xxx_messageInfo_Params.DiscardUnknown(m) -} - -var xxx_messageInfo_Params proto.InternalMessageInfo - -func (m *Params) GetBidMinDeposit() types.Coin { - if m != nil { - return m.BidMinDeposit - } - return types.Coin{} -} - -func (m *Params) GetOrderMaxBids() uint32 { - if m != nil { - return m.OrderMaxBids - } - return 0 -} - -func init() { - proto.RegisterType((*Params)(nil), "akash.market.v1beta4.Params") -} - -func init() { proto.RegisterFile("akash/market/v1beta4/params.proto", fileDescriptor_80f1e7d9604b0c57) } - -var fileDescriptor_80f1e7d9604b0c57 = []byte{ - // 321 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0x31, 0x4f, 0xe3, 0x30, - 0x18, 0x86, 0xe3, 0x1b, 0x3a, 0xe4, 0xda, 0x3b, 0xa9, 0xea, 0x9d, 0x4a, 0x07, 0xbb, 0x64, 0xea, - 0x82, 0xad, 0x02, 0x0b, 0xb0, 0x05, 0xd6, 0x0a, 0x54, 0x36, 0x96, 0xc8, 0xae, 0xad, 0xd4, 0x2a, - 0xce, 0x17, 0xc5, 0x01, 0xca, 0x0f, 0x60, 0xe7, 0x67, 0x75, 0xec, 0xc8, 0x64, 0xa1, 0x74, 0xeb, - 0x58, 0xfe, 0x00, 0x6a, 0x92, 0xa1, 0x64, 0xb3, 0xfd, 0x3e, 0xef, 0xf3, 0x49, 0x9f, 0xfd, 0x63, - 0xbe, 0xe0, 0x76, 0xce, 0x0c, 0xcf, 0x16, 0x2a, 0x67, 0xcf, 0x63, 0xa1, 0x72, 0x7e, 0xce, 0x52, - 0x9e, 0x71, 0x63, 0x69, 0x9a, 0x41, 0x0e, 0xdd, 0x5e, 0x89, 0xd0, 0x0a, 0xa1, 0x35, 0x32, 0xe8, - 0xc5, 0x10, 0x43, 0x09, 0xb0, 0xfd, 0xa9, 0x62, 0x07, 0x78, 0x06, 0xd6, 0x80, 0x65, 0x82, 0x5b, - 0x55, 0xdb, 0xc6, 0x6c, 0x06, 0x3a, 0xa9, 0xf2, 0xe0, 0x0b, 0xf9, 0xad, 0xbb, 0x52, 0xde, 0x7d, - 0x43, 0xfe, 0x5f, 0xa1, 0x65, 0x64, 0x74, 0x12, 0x49, 0x95, 0x82, 0xd5, 0x79, 0x1f, 0x0d, 0xd1, - 0xe8, 0xf7, 0xe9, 0x11, 0xad, 0x2c, 0x74, 0x6f, 0xa9, 0x07, 0x8e, 0xe9, 0x35, 0xe8, 0x24, 0x0c, - 0x57, 0x8e, 0x78, 0x85, 0x23, 0x9d, 0x50, 0xcb, 0x89, 0x4e, 0x6e, 0xaa, 0xde, 0xd6, 0x91, 0xa6, - 0x6a, 0xe7, 0xc8, 0xff, 0x57, 0x6e, 0x1e, 0x2f, 0x83, 0x46, 0x10, 0x4c, 0x3b, 0xe2, 0xb0, 0xdb, - 0xe5, 0xfe, 0x1f, 0xc8, 0xa4, 0xca, 0x22, 0xc3, 0x97, 0x91, 0xd0, 0xd2, 0xf6, 0x7f, 0x0d, 0xd1, - 0xa8, 0x13, 0x5e, 0x15, 0x8e, 0xb4, 0x6f, 0xf7, 0xc9, 0x84, 0x2f, 0x43, 0x2d, 0xed, 0xd6, 0x91, - 0x06, 0xb9, 0x73, 0xe4, 0x5f, 0x35, 0xe4, 0xe7, 0x7b, 0x30, 0x6d, 0xc3, 0x41, 0x31, 0xbc, 0x5f, - 0x15, 
0x18, 0xad, 0x0b, 0x8c, 0x3e, 0x0b, 0x8c, 0xde, 0x37, 0xd8, 0x5b, 0x6f, 0xb0, 0xf7, 0xb1, - 0xc1, 0xde, 0xc3, 0x45, 0xac, 0xf3, 0xf9, 0x93, 0xa0, 0x33, 0x30, 0xac, 0x5c, 0xf3, 0x49, 0xa2, - 0xf2, 0x17, 0xc8, 0x16, 0xf5, 0x8d, 0xa7, 0x9a, 0xc5, 0xc0, 0x12, 0x90, 0xaa, 0xf1, 0x47, 0xa2, - 0x55, 0x6e, 0xf4, 0xec, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x64, 0x8e, 0xc0, 0xcc, 0xc2, 0x01, 0x00, - 0x00, -} - -func (m *Params) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Params) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.OrderMaxBids != 0 { - i = encodeVarintParams(dAtA, i, uint64(m.OrderMaxBids)) - i-- - dAtA[i] = 0x10 - } - { - size, err := m.BidMinDeposit.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintParams(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintParams(dAtA []byte, offset int, v uint64) int { - offset -= sovParams(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Params) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.BidMinDeposit.Size() - n += 1 + l + sovParams(uint64(l)) - if m.OrderMaxBids != 0 { - n += 1 + sovParams(uint64(m.OrderMaxBids)) - } - return n -} - -func sovParams(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozParams(x uint64) (n int) { - return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Params) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Params: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BidMinDeposit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthParams - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthParams - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.BidMinDeposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field OrderMaxBids", wireType) - } - m.OrderMaxBids = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.OrderMaxBids |= uint32(b&0x7F) << shift - if b < 0x80 { 
- break - } - } - default: - iNdEx = preIndex - skippy, err := skipParams(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipParams(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthParams - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupParams - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthParams - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta4/query.pb.go b/go/node/market/v1beta4/query.pb.go deleted file mode 100644 index 87c4eac0..00000000 --- a/go/node/market/v1beta4/query.pb.go +++ /dev/null @@ -1,3035 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta4/query.proto - -package v1beta4 - -import ( - context "context" - fmt "fmt" - v1beta3 "github.com/akash-network/akash-api/go/node/escrow/v1beta3" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryOrdersRequest is request type for the Query/Orders RPC method -type QueryOrdersRequest struct { - Filters OrderFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryOrdersRequest) Reset() { *m = QueryOrdersRequest{} } -func (m *QueryOrdersRequest) String() string { return proto.CompactTextString(m) } -func (*QueryOrdersRequest) ProtoMessage() {} -func (*QueryOrdersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_18494cd3f4a720d6, []int{0} -} -func (m *QueryOrdersRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOrdersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOrdersRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOrdersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOrdersRequest.Merge(m, src) -} -func (m *QueryOrdersRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryOrdersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOrdersRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOrdersRequest proto.InternalMessageInfo - -func (m *QueryOrdersRequest) GetFilters() OrderFilters { - if m != nil { - return m.Filters - } - return OrderFilters{} -} - -func (m *QueryOrdersRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryOrdersResponse is response type for the Query/Orders RPC method -type QueryOrdersResponse struct { - Orders Orders `protobuf:"bytes,1,rep,name=orders,proto3,castrepeated=Orders" json:"orders"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryOrdersResponse) Reset() { *m = QueryOrdersResponse{} } -func (m *QueryOrdersResponse) String() string { return proto.CompactTextString(m) } -func (*QueryOrdersResponse) ProtoMessage() {} -func (*QueryOrdersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_18494cd3f4a720d6, []int{1} -} -func (m *QueryOrdersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOrdersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOrdersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOrdersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOrdersResponse.Merge(m, src) -} -func (m *QueryOrdersResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryOrdersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOrdersResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOrdersResponse proto.InternalMessageInfo - -func (m *QueryOrdersResponse) GetOrders() Orders { - if m != nil { - return m.Orders - } - return nil -} - -func (m *QueryOrdersResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryOrderRequest is request type for the Query/Order RPC method -type QueryOrderRequest struct { - ID OrderID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m 
*QueryOrderRequest) Reset() { *m = QueryOrderRequest{} } -func (m *QueryOrderRequest) String() string { return proto.CompactTextString(m) } -func (*QueryOrderRequest) ProtoMessage() {} -func (*QueryOrderRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_18494cd3f4a720d6, []int{2} -} -func (m *QueryOrderRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOrderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOrderRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOrderRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOrderRequest.Merge(m, src) -} -func (m *QueryOrderRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryOrderRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOrderRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOrderRequest proto.InternalMessageInfo - -func (m *QueryOrderRequest) GetID() OrderID { - if m != nil { - return m.ID - } - return OrderID{} -} - -// QueryOrderResponse is response type for the Query/Order RPC method -type QueryOrderResponse struct { - Order Order `protobuf:"bytes,1,opt,name=order,proto3" json:"order"` -} - -func (m *QueryOrderResponse) Reset() { *m = QueryOrderResponse{} } -func (m *QueryOrderResponse) String() string { return proto.CompactTextString(m) } -func (*QueryOrderResponse) ProtoMessage() {} -func (*QueryOrderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_18494cd3f4a720d6, []int{3} -} -func (m *QueryOrderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryOrderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryOrderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryOrderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryOrderResponse.Merge(m, src) -} -func (m *QueryOrderResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryOrderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryOrderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryOrderResponse proto.InternalMessageInfo - -func (m *QueryOrderResponse) GetOrder() Order { - if m != nil { - return m.Order - } - return Order{} -} - -// QueryBidsRequest is request type for the Query/Bids RPC method -type QueryBidsRequest struct { - Filters BidFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryBidsRequest) Reset() { *m = QueryBidsRequest{} } -func (m *QueryBidsRequest) String() string { return proto.CompactTextString(m) } -func (*QueryBidsRequest) ProtoMessage() {} -func (*QueryBidsRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_18494cd3f4a720d6, []int{4} -} -func (m *QueryBidsRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryBidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryBidsRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryBidsRequest) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBidsRequest.Merge(m, src) -} -func (m *QueryBidsRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryBidsRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBidsRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryBidsRequest proto.InternalMessageInfo - -func (m *QueryBidsRequest) GetFilters() BidFilters { - if m != nil { - return m.Filters - } - return BidFilters{} -} - -func (m *QueryBidsRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryBidsResponse is response type for the Query/Bids RPC method -type QueryBidsResponse struct { - Bids []QueryBidResponse `protobuf:"bytes,1,rep,name=bids,proto3" json:"bids"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryBidsResponse) Reset() { *m = QueryBidsResponse{} } -func (m *QueryBidsResponse) String() string { return proto.CompactTextString(m) } -func (*QueryBidsResponse) ProtoMessage() {} -func (*QueryBidsResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_18494cd3f4a720d6, []int{5} -} -func (m *QueryBidsResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryBidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryBidsResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryBidsResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBidsResponse.Merge(m, src) -} -func (m *QueryBidsResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryBidsResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBidsResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryBidsResponse proto.InternalMessageInfo - -func (m *QueryBidsResponse) GetBids() []QueryBidResponse { - if m != nil { - return m.Bids - } - return nil -} - -func (m *QueryBidsResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryBidRequest is request type for the Query/Bid RPC method -type QueryBidRequest struct { - ID BidID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m *QueryBidRequest) Reset() { *m = QueryBidRequest{} } -func (m *QueryBidRequest) String() string { return proto.CompactTextString(m) } -func (*QueryBidRequest) ProtoMessage() {} -func (*QueryBidRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_18494cd3f4a720d6, []int{6} -} -func (m *QueryBidRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryBidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryBidRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryBidRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBidRequest.Merge(m, src) -} -func (m *QueryBidRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryBidRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBidRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryBidRequest proto.InternalMessageInfo - -func (m *QueryBidRequest) GetID() BidID { - if m != nil { - return m.ID - } - return BidID{} -} - -// QueryBidResponse is response type for the Query/Bid RPC method -type 
QueryBidResponse struct { - Bid Bid `protobuf:"bytes,1,opt,name=bid,proto3" json:"bid"` - EscrowAccount v1beta3.Account `protobuf:"bytes,2,opt,name=escrow_account,json=escrowAccount,proto3" json:"escrow_account"` -} - -func (m *QueryBidResponse) Reset() { *m = QueryBidResponse{} } -func (m *QueryBidResponse) String() string { return proto.CompactTextString(m) } -func (*QueryBidResponse) ProtoMessage() {} -func (*QueryBidResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_18494cd3f4a720d6, []int{7} -} -func (m *QueryBidResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryBidResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryBidResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryBidResponse.Merge(m, src) -} -func (m *QueryBidResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryBidResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryBidResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryBidResponse proto.InternalMessageInfo - -func (m *QueryBidResponse) GetBid() Bid { - if m != nil { - return m.Bid - } - return Bid{} -} - -func (m *QueryBidResponse) GetEscrowAccount() v1beta3.Account { - if m != nil { - return m.EscrowAccount - } - return v1beta3.Account{} -} - -// QueryLeasesRequest is request type for the Query/Leases RPC method -type QueryLeasesRequest struct { - Filters LeaseFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` - Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryLeasesRequest) Reset() { *m = QueryLeasesRequest{} } -func (m *QueryLeasesRequest) String() string { return proto.CompactTextString(m) } -func (*QueryLeasesRequest) ProtoMessage() {} -func (*QueryLeasesRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_18494cd3f4a720d6, []int{8} -} -func (m *QueryLeasesRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLeasesRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLeasesRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLeasesRequest.Merge(m, src) -} -func (m *QueryLeasesRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryLeasesRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLeasesRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLeasesRequest proto.InternalMessageInfo - -func (m *QueryLeasesRequest) GetFilters() LeaseFilters { - if m != nil { - return m.Filters - } - return LeaseFilters{} -} - -func (m *QueryLeasesRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryLeasesResponse is response type for the Query/Leases RPC method -type QueryLeasesResponse struct { - Leases []QueryLeaseResponse `protobuf:"bytes,1,rep,name=leases,proto3" json:"leases"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryLeasesResponse) Reset() { *m = QueryLeasesResponse{} } -func (m 
*QueryLeasesResponse) String() string { return proto.CompactTextString(m) } -func (*QueryLeasesResponse) ProtoMessage() {} -func (*QueryLeasesResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_18494cd3f4a720d6, []int{9} -} -func (m *QueryLeasesResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLeasesResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLeasesResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLeasesResponse.Merge(m, src) -} -func (m *QueryLeasesResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryLeasesResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLeasesResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLeasesResponse proto.InternalMessageInfo - -func (m *QueryLeasesResponse) GetLeases() []QueryLeaseResponse { - if m != nil { - return m.Leases - } - return nil -} - -func (m *QueryLeasesResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryLeaseRequest is request type for the Query/Lease RPC method -type QueryLeaseRequest struct { - ID LeaseID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` -} - -func (m *QueryLeaseRequest) Reset() { *m = QueryLeaseRequest{} } -func (m *QueryLeaseRequest) String() string { return proto.CompactTextString(m) } -func (*QueryLeaseRequest) ProtoMessage() {} -func (*QueryLeaseRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_18494cd3f4a720d6, []int{10} -} -func (m *QueryLeaseRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLeaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLeaseRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLeaseRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLeaseRequest.Merge(m, src) -} -func (m *QueryLeaseRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryLeaseRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLeaseRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLeaseRequest proto.InternalMessageInfo - -func (m *QueryLeaseRequest) GetID() LeaseID { - if m != nil { - return m.ID - } - return LeaseID{} -} - -// QueryLeaseResponse is response type for the Query/Lease RPC method -type QueryLeaseResponse struct { - Lease Lease `protobuf:"bytes,1,opt,name=lease,proto3" json:"lease"` - EscrowPayment v1beta3.FractionalPayment `protobuf:"bytes,2,opt,name=escrow_payment,json=escrowPayment,proto3" json:"escrow_payment"` -} - -func (m *QueryLeaseResponse) Reset() { *m = QueryLeaseResponse{} } -func (m *QueryLeaseResponse) String() string { return proto.CompactTextString(m) } -func (*QueryLeaseResponse) ProtoMessage() {} -func (*QueryLeaseResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_18494cd3f4a720d6, []int{11} -} -func (m *QueryLeaseResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryLeaseResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryLeaseResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryLeaseResponse.Merge(m, src) -} -func (m *QueryLeaseResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryLeaseResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryLeaseResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryLeaseResponse proto.InternalMessageInfo - -func (m *QueryLeaseResponse) GetLease() Lease { - if m != nil { - return m.Lease - } - return Lease{} -} - -func (m *QueryLeaseResponse) GetEscrowPayment() v1beta3.FractionalPayment { - if m != nil { - return m.EscrowPayment - } - return v1beta3.FractionalPayment{} -} - -func init() { - proto.RegisterType((*QueryOrdersRequest)(nil), "akash.market.v1beta4.QueryOrdersRequest") - proto.RegisterType((*QueryOrdersResponse)(nil), "akash.market.v1beta4.QueryOrdersResponse") - proto.RegisterType((*QueryOrderRequest)(nil), "akash.market.v1beta4.QueryOrderRequest") - proto.RegisterType((*QueryOrderResponse)(nil), "akash.market.v1beta4.QueryOrderResponse") - proto.RegisterType((*QueryBidsRequest)(nil), "akash.market.v1beta4.QueryBidsRequest") - proto.RegisterType((*QueryBidsResponse)(nil), "akash.market.v1beta4.QueryBidsResponse") - proto.RegisterType((*QueryBidRequest)(nil), "akash.market.v1beta4.QueryBidRequest") - proto.RegisterType((*QueryBidResponse)(nil), "akash.market.v1beta4.QueryBidResponse") - proto.RegisterType((*QueryLeasesRequest)(nil), "akash.market.v1beta4.QueryLeasesRequest") - proto.RegisterType((*QueryLeasesResponse)(nil), "akash.market.v1beta4.QueryLeasesResponse") - proto.RegisterType((*QueryLeaseRequest)(nil), "akash.market.v1beta4.QueryLeaseRequest") - proto.RegisterType((*QueryLeaseResponse)(nil), "akash.market.v1beta4.QueryLeaseResponse") -} - -func init() { proto.RegisterFile("akash/market/v1beta4/query.proto", fileDescriptor_18494cd3f4a720d6) } - -var fileDescriptor_18494cd3f4a720d6 = []byte{ - // 803 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x4f, 0x13, 0x4d, - 0x18, 0xee, 0x16, 0xe8, 0x97, 0x0c, 0xf9, 0xf8, 0xbe, 0x6f, 0x3e, 0x0e, 0x58, 0xb4, 0x85, 0x1a, - 0x69, 0x31, 0x71, 0x37, 0x80, 0x09, 0xe1, 0x86, 0x2b, 0xa9, 0x81, 0xf8, 0x03, 0xab, 0x27, 0x2f, - 0x66, 0xda, 0x1d, 0x96, 0x09, 0xed, 0x4e, 0xd9, 0xd9, 0x4a, 0x38, 0x98, 0x18, 0x13, 0x13, 0x8f, - 0x1a, 0xaf, 0xc6, 0x98, 0x90, 0x78, 0xf0, 0xe2, 0xbf, 0xc1, 0x91, 0xc4, 0x8b, 0x27, 0x34, 0xc5, - 0x3f, 0xc4, 0xcc, 0xcc, 0xbb, 0xfd, 0x95, 0xed, 0x6e, 0x49, 0xe0, 0x06, 0xdd, 0xe7, 0x7d, 0xe7, - 0x79, 0x9f, 0xf7, 0x99, 0x67, 0x17, 0xcd, 0x91, 0x3d, 0x22, 0x76, 0xad, 0x06, 0xf1, 0xf7, 0x68, - 0x60, 0xbd, 0x58, 0xaa, 0xd2, 0x80, 0xdc, 0xb6, 0xf6, 0x5b, 0xd4, 0x3f, 0x34, 0x9b, 0x3e, 0x0f, - 0x38, 0x9e, 0x56, 0x08, 0x53, 0x23, 0x4c, 0x40, 0x64, 0xa7, 0x5d, 0xee, 0x72, 0x05, 0xb0, 0xe4, - 0x5f, 0x1a, 0x9b, 0xbd, 0xea, 0x72, 0xee, 0xd6, 0xa9, 0x45, 0x9a, 0xcc, 0x22, 0x9e, 0xc7, 0x03, - 0x12, 0x30, 0xee, 0x09, 0x78, 0x7a, 0xb3, 0xc6, 0x45, 0x83, 0x0b, 0xab, 0x4a, 0x04, 0xd5, 0x47, - 0xc0, 0x81, 0x4b, 0x56, 0x93, 0xb8, 0xcc, 0x53, 0x60, 0xc0, 0x02, 0x2f, 0x2a, 0x6a, 0x3e, 0x3f, - 0x00, 0xd8, 0x8a, 0x15, 0x1c, 0x36, 0xa9, 0xe8, 0x47, 0x0c, 0x30, 0xe7, 0xbe, 0x43, 0x7d, 0x40, - 0xe4, 0x22, 0x11, 0x55, 0xe6, 0xc4, 0x76, 0xa8, 0x53, 0x22, 0xa8, 0x46, 0x14, 0x3e, 0x1b, 0x08, - 0x3f, 0x96, 0x44, 0x1f, 0xc9, 0xb6, 0xa2, 0x42, 0xf7, 0x5b, 0x54, 0x04, 0xd8, 0x46, 0x7f, 0xed, - 0xb0, 0x7a, 0x40, 0x7d, 0x31, 0x63, 0xcc, 
0x19, 0xa5, 0xc9, 0xe5, 0x82, 0x19, 0x25, 0x92, 0xa9, - 0xaa, 0xca, 0x1a, 0x69, 0x8f, 0x1f, 0x9f, 0xe6, 0x53, 0x95, 0xb0, 0x10, 0x97, 0x11, 0xea, 0x0e, - 0x3d, 0x93, 0x56, 0x6d, 0x16, 0x4c, 0xad, 0x90, 0x29, 0x15, 0x32, 0xf5, 0x12, 0x40, 0x21, 0x73, - 0x9b, 0xb8, 0x14, 0xce, 0xaf, 0xf4, 0x54, 0x16, 0x8e, 0x0c, 0xf4, 0x7f, 0x1f, 0x45, 0xd1, 0xe4, - 0x9e, 0xa0, 0xf8, 0x2e, 0xca, 0x28, 0x2d, 0x24, 0xc5, 0xb1, 0xd2, 0xe4, 0xf2, 0x6c, 0x0c, 0x45, - 0x7b, 0x4a, 0x72, 0xfb, 0xfa, 0x33, 0x9f, 0x81, 0x26, 0x50, 0x8a, 0xef, 0x45, 0x90, 0x2c, 0x26, - 0x92, 0xd4, 0x0c, 0xfa, 0x58, 0x3e, 0x44, 0xff, 0x75, 0x49, 0x86, 0x32, 0xae, 0xa1, 0x34, 0x73, - 0x40, 0xc1, 0x6b, 0x31, 0xf4, 0x36, 0x37, 0x6c, 0x24, 0x09, 0xb6, 0x4f, 0xf3, 0xe9, 0xcd, 0x8d, - 0x4a, 0x9a, 0x39, 0x85, 0x07, 0xbd, 0x7b, 0xe9, 0xcc, 0xbc, 0x8a, 0x26, 0x14, 0x71, 0xe8, 0x19, - 0x3b, 0xb2, 0x5e, 0x87, 0xc6, 0x17, 0x3e, 0x1a, 0xe8, 0x5f, 0xd5, 0xcf, 0x66, 0x4e, 0x67, 0xcb, - 0xeb, 0x83, 0x5b, 0x9e, 0x8b, 0xee, 0x67, 0x33, 0xe7, 0x92, 0x77, 0xfc, 0xc9, 0x00, 0xf9, 0x34, - 0x3d, 0x98, 0x76, 0x1d, 0x8d, 0x57, 0x99, 0x13, 0xee, 0x77, 0x21, 0x9a, 0x5c, 0x58, 0x16, 0x56, - 0x01, 0x45, 0x55, 0x79, 0x71, 0xeb, 0xdd, 0x42, 0xff, 0x74, 0x0f, 0xd2, 0xea, 0xad, 0xf6, 0x2c, - 0x77, 0x76, 0xa8, 0x70, 0x11, 0xab, 0x7d, 0xdf, 0xb3, 0x8b, 0xce, 0xac, 0x4b, 0x68, 0xac, 0xda, - 0x69, 0x77, 0x65, 0x68, 0x3b, 0x98, 0x4e, 0x62, 0xf1, 0x16, 0x9a, 0xd2, 0xe9, 0xf1, 0x9c, 0xd4, - 0x6a, 0xbc, 0xe5, 0x05, 0x30, 0x60, 0xe8, 0x34, 0xfd, 0x10, 0xaa, 0x57, 0xcc, 0x3b, 0x1a, 0x04, - 0x1d, 0xfe, 0xd6, 0x4f, 0xe1, 0xc7, 0x6e, 0x0e, 0xdc, 0x97, 0xe1, 0x70, 0xee, 0x1c, 0x50, 0x55, - 0x97, 0xec, 0x91, 0x2f, 0x61, 0x0e, 0x84, 0x14, 0x41, 0xb9, 0x32, 0xca, 0xa8, 0x44, 0x0b, 0x7d, - 0x52, 0x8a, 0xf1, 0x89, 0x2a, 0x1d, 0x70, 0x0a, 0x54, 0x5f, 0x7c, 0x14, 0xc0, 0x61, 0x23, 0x47, - 0x81, 0xc2, 0x47, 0xf8, 0xe5, 0xa8, 0x6f, 0x37, 0xbd, 0x59, 0xa0, 0x98, 0xc7, 0x5b, 0x50, 0xd5, - 0x84, 0x59, 0xa0, 0xf0, 0xf8, 0x69, 0xc7, 0x37, 0x4d, 0x72, 0xd8, 0xa0, 0x1d, 0xdf, 0x14, 0xa3, - 0x7d, 0x53, 0xf6, 0x49, 0x4d, 0xce, 0x45, 0xea, 0xdb, 0x1a, 0xde, 0xef, 0x20, 0xf8, 0x71, 0xf9, - 0x5b, 0x06, 0x4d, 0x28, 0x96, 0xf8, 0xad, 0x81, 0x20, 0x66, 0x71, 0xdc, 0x2e, 0xfa, 0xde, 0x38, - 0xd9, 0xc5, 0x11, 0x90, 0x7a, 0xf0, 0xc2, 0xe2, 0xeb, 0xef, 0xbf, 0x3f, 0xa4, 0xaf, 0xe3, 0x79, - 0x6b, 0xf8, 0x0b, 0x52, 0x58, 0x75, 0x26, 0x02, 0xfc, 0xc6, 0x40, 0x13, 0xaa, 0x1a, 0x17, 0x93, - 0xfa, 0x87, 0x44, 0x4a, 0xc9, 0xc0, 0x73, 0xf1, 0x60, 0xde, 0x0e, 0xc7, 0xaf, 0x0c, 0x34, 0x2e, - 0xa3, 0x0d, 0x27, 0x84, 0x58, 0x47, 0x8e, 0x62, 0x22, 0x0e, 0x48, 0x14, 0x15, 0x89, 0x79, 0x9c, - 0xb7, 0x86, 0x7d, 0x0b, 0x80, 0x14, 0x2f, 0xd1, 0x98, 0xcd, 0x1c, 0x7c, 0x23, 0x29, 0x45, 0xf5, - 0xf9, 0x23, 0x86, 0xed, 0x48, 0xc7, 0x2b, 0x05, 0xa4, 0x29, 0xf4, 0xc5, 0xc5, 0x89, 0x17, 0x74, - 0x24, 0x53, 0xf4, 0xa7, 0x40, 0xd2, 0x32, 0xf4, 0x1d, 0xef, 0x9a, 0x42, 0x55, 0xc7, 0x9a, 0xa2, - 0xf7, 0xf6, 0x66, 0x47, 0xce, 0x94, 0x11, 0x79, 0x48, 0x49, 0xec, 0x27, 0xc7, 0xed, 0x9c, 0x71, - 0xd2, 0xce, 0x19, 0xbf, 0xda, 0x39, 0xe3, 0xdd, 0x59, 0x2e, 0x75, 0x72, 0x96, 0x4b, 0xfd, 0x38, - 0xcb, 0xa5, 0x9e, 0xad, 0xb9, 0x2c, 0xd8, 0x6d, 0x55, 0xcd, 0x1a, 0x6f, 0xe8, 0x36, 0xb7, 0x3c, - 0x1a, 0x1c, 0x70, 0x7f, 0x0f, 0xfe, 0x93, 0x5f, 0x9f, 0x2e, 0xb7, 0x3c, 0xee, 0xd0, 0x81, 0x03, - 0xaa, 0x19, 0xf5, 0x5d, 0xb7, 0xf2, 0x27, 0x00, 0x00, 0xff, 0xff, 0x2f, 0x13, 0x08, 0x1a, 0xf7, - 0x0a, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { - // Orders queries orders with filters - Orders(ctx context.Context, in *QueryOrdersRequest, opts ...grpc.CallOption) (*QueryOrdersResponse, error) - // Order queries order details - Order(ctx context.Context, in *QueryOrderRequest, opts ...grpc.CallOption) (*QueryOrderResponse, error) - // Bids queries bids with filters - Bids(ctx context.Context, in *QueryBidsRequest, opts ...grpc.CallOption) (*QueryBidsResponse, error) - // Bid queries bid details - Bid(ctx context.Context, in *QueryBidRequest, opts ...grpc.CallOption) (*QueryBidResponse, error) - // Leases queries leases with filters - Leases(ctx context.Context, in *QueryLeasesRequest, opts ...grpc.CallOption) (*QueryLeasesResponse, error) - // Lease queries lease details - Lease(ctx context.Context, in *QueryLeaseRequest, opts ...grpc.CallOption) (*QueryLeaseResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Orders(ctx context.Context, in *QueryOrdersRequest, opts ...grpc.CallOption) (*QueryOrdersResponse, error) { - out := new(QueryOrdersResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta4.Query/Orders", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Order(ctx context.Context, in *QueryOrderRequest, opts ...grpc.CallOption) (*QueryOrderResponse, error) { - out := new(QueryOrderResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta4.Query/Order", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Bids(ctx context.Context, in *QueryBidsRequest, opts ...grpc.CallOption) (*QueryBidsResponse, error) { - out := new(QueryBidsResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta4.Query/Bids", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Bid(ctx context.Context, in *QueryBidRequest, opts ...grpc.CallOption) (*QueryBidResponse, error) { - out := new(QueryBidResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta4.Query/Bid", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Leases(ctx context.Context, in *QueryLeasesRequest, opts ...grpc.CallOption) (*QueryLeasesResponse, error) { - out := new(QueryLeasesResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta4.Query/Leases", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Lease(ctx context.Context, in *QueryLeaseRequest, opts ...grpc.CallOption) (*QueryLeaseResponse, error) { - out := new(QueryLeaseResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta4.Query/Lease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. 
-type QueryServer interface { - // Orders queries orders with filters - Orders(context.Context, *QueryOrdersRequest) (*QueryOrdersResponse, error) - // Order queries order details - Order(context.Context, *QueryOrderRequest) (*QueryOrderResponse, error) - // Bids queries bids with filters - Bids(context.Context, *QueryBidsRequest) (*QueryBidsResponse, error) - // Bid queries bid details - Bid(context.Context, *QueryBidRequest) (*QueryBidResponse, error) - // Leases queries leases with filters - Leases(context.Context, *QueryLeasesRequest) (*QueryLeasesResponse, error) - // Lease queries lease details - Lease(context.Context, *QueryLeaseRequest) (*QueryLeaseResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. -type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Orders(ctx context.Context, req *QueryOrdersRequest) (*QueryOrdersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Orders not implemented") -} -func (*UnimplementedQueryServer) Order(ctx context.Context, req *QueryOrderRequest) (*QueryOrderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Order not implemented") -} -func (*UnimplementedQueryServer) Bids(ctx context.Context, req *QueryBidsRequest) (*QueryBidsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Bids not implemented") -} -func (*UnimplementedQueryServer) Bid(ctx context.Context, req *QueryBidRequest) (*QueryBidResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Bid not implemented") -} -func (*UnimplementedQueryServer) Leases(ctx context.Context, req *QueryLeasesRequest) (*QueryLeasesResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Leases not implemented") -} -func (*UnimplementedQueryServer) Lease(ctx context.Context, req *QueryLeaseRequest) (*QueryLeaseResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Lease not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Orders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryOrdersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Orders(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta4.Query/Orders", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Orders(ctx, req.(*QueryOrdersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Order_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryOrderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Order(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta4.Query/Order", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Order(ctx, req.(*QueryOrderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Bids_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(QueryBidsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Bids(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta4.Query/Bids", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Bids(ctx, req.(*QueryBidsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Bid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryBidRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Bid(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta4.Query/Bid", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Bid(ctx, req.(*QueryBidRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Leases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryLeasesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Leases(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta4.Query/Leases", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Leases(ctx, req.(*QueryLeasesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Lease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryLeaseRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Lease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta4.Query/Lease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Lease(ctx, req.(*QueryLeaseRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.market.v1beta4.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Orders", - Handler: _Query_Orders_Handler, - }, - { - MethodName: "Order", - Handler: _Query_Order_Handler, - }, - { - MethodName: "Bids", - Handler: _Query_Bids_Handler, - }, - { - MethodName: "Bid", - Handler: _Query_Bid_Handler, - }, - { - MethodName: "Leases", - Handler: _Query_Leases_Handler, - }, - { - MethodName: "Lease", - Handler: _Query_Lease_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/market/v1beta4/query.proto", -} - -func (m *QueryOrdersRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOrdersRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOrdersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size 
- i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryOrdersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOrdersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOrdersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Orders) > 0 { - for iNdEx := len(m.Orders) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Orders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryOrderRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOrderRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOrderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryOrderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryOrderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryOrderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Order.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryBidsRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryBidsRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryBidsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryBidsResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryBidsResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryBidsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Bids) > 0 { - for iNdEx := len(m.Bids) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Bids[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryBidRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryBidRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryBidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryBidResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryBidResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.EscrowAccount.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.Bid.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryLeasesRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLeasesRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - { - size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryLeasesResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLeasesResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Leases) > 0 { - for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryLeaseRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLeaseRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLeaseRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *QueryLeaseResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryLeaseResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.EscrowPayment.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.Lease.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryOrdersRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filters.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryOrdersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Orders) > 0 { - for _, e := range m.Orders { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryOrderRequest) Size() (n int) { - 
if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryOrderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Order.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryBidsRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filters.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryBidsResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Bids) > 0 { - for _, e := range m.Bids { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryBidRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryBidResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Bid.Size() - n += 1 + l + sovQuery(uint64(l)) - l = m.EscrowAccount.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryLeasesRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Filters.Size() - n += 1 + l + sovQuery(uint64(l)) - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryLeasesResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Leases) > 0 { - for _, e := range m.Leases { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryLeaseRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ID.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func (m *QueryLeaseResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Lease.Size() - n += 1 + l + sovQuery(uint64(l)) - l = m.EscrowPayment.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryOrdersRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOrdersRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOrdersRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := 
iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryOrdersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOrdersResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOrdersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Orders", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Orders = append(m.Orders, Order{}) - if err := m.Orders[len(m.Orders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { 
- return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryOrderRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOrderRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOrderRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryOrderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryOrderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryOrderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Order.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } 
- } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryBidsRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryBidsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBidsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryBidsResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryBidsResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBidsResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bids", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - 
} - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bids = append(m.Bids, QueryBidResponse{}) - if err := m.Bids[len(m.Bids)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryBidRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryBidRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBidRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryBidResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 
0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryBidResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bid", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Bid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EscrowAccount", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EscrowAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryLeasesRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryLeasesRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - 
} - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryLeasesResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryLeasesResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Leases = append(m.Leases, QueryLeaseResponse{}) - if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryLeaseRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryLeaseRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLeaseRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryLeaseResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryLeaseResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Lease.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EscrowPayment", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EscrowPayment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err 
!= nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/market/v1beta4/query.pb.gw.go b/go/node/market/v1beta4/query.pb.gw.go deleted file mode 100644 index ee29aa23..00000000 --- a/go/node/market/v1beta4/query.pb.gw.go +++ /dev/null @@ -1,586 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/market/v1beta4/query.proto - -/* -Package v1beta4 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. 
-*/ -package v1beta4 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_Orders_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Orders_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryOrdersRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Orders_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Orders(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Orders_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryOrdersRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Orders_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Orders(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Order_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Order_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryOrderRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Order_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Order(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Order_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryOrderRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Order_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := 
server.Order(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Bids_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Bids_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBidsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bids_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Bids(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Bids_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBidsRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bids_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Bids(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Bid_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Bid_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBidRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bid_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Bid(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Bid_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryBidRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bid_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Bid(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Leases_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Leases_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLeasesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := 
runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Leases_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Leases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Leases_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLeasesRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Leases_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Leases(ctx, &protoReq) - return msg, metadata, err - -} - -var ( - filter_Query_Lease_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Lease_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLeaseRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Lease_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Lease(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Lease_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryLeaseRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Lease_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Lease(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
-func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_Orders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Orders_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Orders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Order_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Order_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Order_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Bids_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Bids_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Bids_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_Bid_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Bid_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Bid_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Leases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Leases_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Leases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Lease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Lease_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Lease_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_Orders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Orders_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Orders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Order_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Order_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Order_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - mux.Handle("GET", pattern_Query_Bids_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Bids_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Bids_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Bid_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Bid_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Bid_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Leases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Leases_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Leases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Lease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Lease_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Lease_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Query_Orders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta4", "orders", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Order_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta4", "orders", "info"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Bids_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta4", "bids", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Bid_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta4", "bids", "info"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Leases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta4", "leases", "list"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Lease_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta4", "leases", "info"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_Orders_0 = runtime.ForwardResponseMessage - - forward_Query_Order_0 = runtime.ForwardResponseMessage - - forward_Query_Bids_0 = runtime.ForwardResponseMessage - - forward_Query_Bid_0 = runtime.ForwardResponseMessage - - forward_Query_Leases_0 = runtime.ForwardResponseMessage - - forward_Query_Lease_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/market/v1beta4/service.pb.go b/go/node/market/v1beta4/service.pb.go deleted file mode 100644 index f9273b07..00000000 --- a/go/node/market/v1beta4/service.pb.go +++ /dev/null @@ -1,287 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/market/v1beta4/service.proto - -package v1beta4 - -import ( - context "context" - fmt "fmt" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func init() { - proto.RegisterFile("akash/market/v1beta4/service.proto", fileDescriptor_3f002bb22b8d66bc) -} - -var fileDescriptor_3f002bb22b8d66bc = []byte{ - // 292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4a, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0xcf, 0x4d, 0x2c, 0xca, 0x4e, 0x2d, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, - 0xd1, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, - 0x01, 0xab, 0xd1, 0x83, 0xa8, 0xd1, 0x83, 0xaa, 0x91, 0x92, 0xc3, 0xaa, 0x33, 0x29, 0x33, 0x05, - 0xa2, 0x4b, 0x4a, 0x01, 0xab, 0x7c, 0x4e, 0x6a, 0x62, 0x31, 0xd4, 0x5c, 0xa3, 0x17, 0xcc, 0x5c, - 0xcc, 0xbe, 0xc5, 0xe9, 0x42, 0xd1, 0x5c, 0x9c, 0xce, 0x45, 0xa9, 0x89, 0x25, 0xa9, 0x4e, 0x99, - 0x29, 0x42, 0x4a, 0x7a, 0xd8, 0x6c, 0xd3, 0xf3, 0x2d, 0x4e, 0x87, 0xab, 0x91, 0xd2, 0x22, 0xac, - 0x26, 0x28, 0xb5, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0x28, 0x82, 0x8b, 0xc3, 0x39, 0x27, 0xbf, - 0x18, 0x6c, 0xb6, 0x22, 0x6e, 0x7d, 0x50, 0x25, 0x52, 0x9a, 0x04, 0x95, 0xc0, 0x4d, 0x4e, 0xe7, - 0xe2, 0x0d, 0xcf, 0x2c, 0xc9, 0x48, 0x29, 0x4a, 0x2c, 0xf7, 0x01, 0xf9, 0x4a, 0x48, 0x0d, 0xa7, - 0x5e, 0x14, 0x75, 0x52, 0x7a, 0xc4, 0xa9, 0x83, 0x5b, 0x94, 0xc8, 0xc5, 0x0d, 0xf1, 0x17, 0xc4, - 0x1a, 0x15, 0x02, 0xbe, 0x87, 0x58, 0xa2, 0x43, 0x8c, 0x2a, 0xb8, 0x15, 0x71, 0x5c, 0x5c, 0x60, - 0xff, 0x41, 0x6c, 0x50, 0xc6, 0x1f, 0x08, 0x10, 0x0b, 0xb4, 0x89, 0x50, 0x04, 0x33, 0xdf, 0x29, - 0xf8, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, - 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x2c, 0xd3, 0x33, 0x4b, 0x32, - 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xc1, 0x06, 0xea, 0xe6, 0xa5, 0x96, 0x94, 0xe7, 0x17, - 0x65, 0x43, 0x79, 0x89, 0x05, 0x99, 0xfa, 0xe9, 0xf9, 0xfa, 0x79, 0xf9, 0x29, 0xa9, 0x68, 0x69, - 0x29, 0x89, 0x0d, 0x9c, 0x8c, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x5a, 0x33, 0xac, 0x52, - 0xc4, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MsgClient interface { - // CreateBid defines a method to create a bid given proper inputs. - CreateBid(ctx context.Context, in *MsgCreateBid, opts ...grpc.CallOption) (*MsgCreateBidResponse, error) - // CloseBid defines a method to close a bid given proper inputs. - CloseBid(ctx context.Context, in *MsgCloseBid, opts ...grpc.CallOption) (*MsgCloseBidResponse, error) - // WithdrawLease withdraws accrued funds from the lease payment - WithdrawLease(ctx context.Context, in *MsgWithdrawLease, opts ...grpc.CallOption) (*MsgWithdrawLeaseResponse, error) - // CreateLease creates a new lease - CreateLease(ctx context.Context, in *MsgCreateLease, opts ...grpc.CallOption) (*MsgCreateLeaseResponse, error) - // CloseLease defines a method to close an order given proper inputs. 
- CloseLease(ctx context.Context, in *MsgCloseLease, opts ...grpc.CallOption) (*MsgCloseLeaseResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) CreateBid(ctx context.Context, in *MsgCreateBid, opts ...grpc.CallOption) (*MsgCreateBidResponse, error) { - out := new(MsgCreateBidResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta4.Msg/CreateBid", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CloseBid(ctx context.Context, in *MsgCloseBid, opts ...grpc.CallOption) (*MsgCloseBidResponse, error) { - out := new(MsgCloseBidResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta4.Msg/CloseBid", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) WithdrawLease(ctx context.Context, in *MsgWithdrawLease, opts ...grpc.CallOption) (*MsgWithdrawLeaseResponse, error) { - out := new(MsgWithdrawLeaseResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta4.Msg/WithdrawLease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CreateLease(ctx context.Context, in *MsgCreateLease, opts ...grpc.CallOption) (*MsgCreateLeaseResponse, error) { - out := new(MsgCreateLeaseResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta4.Msg/CreateLease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) CloseLease(ctx context.Context, in *MsgCloseLease, opts ...grpc.CallOption) (*MsgCloseLeaseResponse, error) { - out := new(MsgCloseLeaseResponse) - err := c.cc.Invoke(ctx, "/akash.market.v1beta4.Msg/CloseLease", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. -type MsgServer interface { - // CreateBid defines a method to create a bid given proper inputs. - CreateBid(context.Context, *MsgCreateBid) (*MsgCreateBidResponse, error) - // CloseBid defines a method to close a bid given proper inputs. - CloseBid(context.Context, *MsgCloseBid) (*MsgCloseBidResponse, error) - // WithdrawLease withdraws accrued funds from the lease payment - WithdrawLease(context.Context, *MsgWithdrawLease) (*MsgWithdrawLeaseResponse, error) - // CreateLease creates a new lease - CreateLease(context.Context, *MsgCreateLease) (*MsgCreateLeaseResponse, error) - // CloseLease defines a method to close an order given proper inputs. - CloseLease(context.Context, *MsgCloseLease) (*MsgCloseLeaseResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
-type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) CreateBid(ctx context.Context, req *MsgCreateBid) (*MsgCreateBidResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateBid not implemented") -} -func (*UnimplementedMsgServer) CloseBid(ctx context.Context, req *MsgCloseBid) (*MsgCloseBidResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseBid not implemented") -} -func (*UnimplementedMsgServer) WithdrawLease(ctx context.Context, req *MsgWithdrawLease) (*MsgWithdrawLeaseResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method WithdrawLease not implemented") -} -func (*UnimplementedMsgServer) CreateLease(ctx context.Context, req *MsgCreateLease) (*MsgCreateLeaseResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateLease not implemented") -} -func (*UnimplementedMsgServer) CloseLease(ctx context.Context, req *MsgCloseLease) (*MsgCloseLeaseResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CloseLease not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_CreateBid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateBid) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateBid(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta4.Msg/CreateBid", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CreateBid(ctx, req.(*MsgCreateBid)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CloseBid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCloseBid) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CloseBid(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta4.Msg/CloseBid", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CloseBid(ctx, req.(*MsgCloseBid)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_WithdrawLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgWithdrawLease) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).WithdrawLease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta4.Msg/WithdrawLease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).WithdrawLease(ctx, req.(*MsgWithdrawLease)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CreateLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateLease) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateLease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta4.Msg/CreateLease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { - return srv.(MsgServer).CreateLease(ctx, req.(*MsgCreateLease)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_CloseLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCloseLease) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CloseLease(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.market.v1beta4.Msg/CloseLease", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CloseLease(ctx, req.(*MsgCloseLease)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.market.v1beta4.Msg", - HandlerType: (*MsgServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "CreateBid", - Handler: _Msg_CreateBid_Handler, - }, - { - MethodName: "CloseBid", - Handler: _Msg_CloseBid_Handler, - }, - { - MethodName: "WithdrawLease", - Handler: _Msg_WithdrawLease_Handler, - }, - { - MethodName: "CreateLease", - Handler: _Msg_CreateLease_Handler, - }, - { - MethodName: "CloseLease", - Handler: _Msg_CloseLease_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/market/v1beta4/service.proto", -} diff --git a/go/node/market/v1beta4/types.go b/go/node/market/v1beta4/types.go deleted file mode 100644 index 9aa87d68..00000000 --- a/go/node/market/v1beta4/types.go +++ /dev/null @@ -1,245 +0,0 @@ -package v1beta4 - -import ( - "strings" - - sdk "github.com/cosmos/cosmos-sdk/types" - - types "github.com/akash-network/akash-api/go/node/types/v1beta3" - - atypes "github.com/akash-network/akash-api/go/node/audit/v1beta3" - - "gopkg.in/yaml.v3" -) - -const ( - APIVersion = "v1beta4" -) - -// ID method returns OrderID details of specific order -func (o Order) ID() OrderID { - return o.OrderID -} - -// String implements the Stringer interface for a Order object. -func (o Order) String() string { - out, _ := yaml.Marshal(o) - return string(out) -} - -// Orders is a collection of Order -type Orders []Order - -// String implements the Stringer interface for a Orders object. 
-func (o Orders) String() string { - var out string - for _, order := range o { - out += order.String() + "\n" - } - - return strings.TrimSpace(out) -} - -// ValidateCanBid method validates whether order is open or not and -// returns error if not -func (o Order) ValidateCanBid() error { - switch o.State { - case OrderOpen: - return nil - case OrderActive: - return ErrOrderActive - default: - return ErrOrderClosed - } -} - -// ValidateInactive method validates whether order is open or not and -// returns error if not -func (o Order) ValidateInactive() error { - switch o.State { - case OrderClosed: - return nil - case OrderActive: - return ErrOrderActive - default: - return ErrOrderClosed - } -} - -// Price method returns price of specific order -func (o Order) Price() sdk.DecCoin { - return o.Spec.Price() -} - -// MatchAttributes method compares provided attributes with specific order attributes -func (o Order) MatchAttributes(attrs []types.Attribute) bool { - return o.Spec.MatchAttributes(attrs) -} - -// MatchRequirements method compares provided attributes with specific order attributes -func (o Order) MatchRequirements(prov []atypes.Provider) bool { - return o.Spec.MatchRequirements(prov) -} - -// MatchResourcesRequirements method compares provider capabilities with specific order resources attributes -func (o Order) MatchResourcesRequirements(attr types.Attributes) bool { - return o.Spec.MatchResourcesRequirements(attr) -} - -// Accept returns whether order filters valid or not -func (filters OrderFilters) Accept(obj Order, stateVal Order_State) bool { - // Checking owner filter - if filters.Owner != "" && filters.Owner != obj.OrderID.Owner { - return false - } - - // Checking dseq filter - if filters.DSeq != 0 && filters.DSeq != obj.OrderID.DSeq { - return false - } - - // Checking gseq filter - if filters.GSeq != 0 && filters.GSeq != obj.OrderID.GSeq { - return false - } - - // Checking oseq filter - if filters.OSeq != 0 && filters.OSeq != obj.OrderID.OSeq { - return false - } - - // Checking state filter - if stateVal != 0 && stateVal != obj.State { - return false - } - - return true -} - -// ID method returns BidID details of specific bid -func (obj Bid) ID() BidID { - return obj.BidID -} - -// String implements the Stringer interface for a Bid object. -func (obj Bid) String() string { - out, _ := yaml.Marshal(obj) - return string(out) -} - -// Bids is a collection of Bid -type Bids []Bid - -// String implements the Stringer interface for a Bids object. 
-func (b Bids) String() string { - var out string - for _, bid := range b { - out += bid.String() + "\n" - } - - return strings.TrimSpace(out) -} - -// Accept returns whether bid filters valid or not -func (filters BidFilters) Accept(obj Bid, stateVal Bid_State) bool { - // Checking owner filter - if filters.Owner != "" && filters.Owner != obj.BidID.Owner { - return false - } - - // Checking dseq filter - if filters.DSeq != 0 && filters.DSeq != obj.BidID.DSeq { - return false - } - - // Checking gseq filter - if filters.GSeq != 0 && filters.GSeq != obj.BidID.GSeq { - return false - } - - // Checking oseq filter - if filters.OSeq != 0 && filters.OSeq != obj.BidID.OSeq { - return false - } - - // Checking provider filter - if filters.Provider != "" && filters.Provider != obj.BidID.Provider { - return false - } - - // Checking state filter - if stateVal != 0 && stateVal != obj.State { - return false - } - - return true -} - -// ID method returns LeaseID details of specific lease -func (obj Lease) ID() LeaseID { - return obj.LeaseID -} - -// String implements the Stringer interface for a Lease object. -func (obj Lease) String() string { - out, _ := yaml.Marshal(obj) - return string(out) -} - -// Leases is a collection of Lease -type Leases []Lease - -// String implements the Stringer interface for a Leases object. -func (l Leases) String() string { - var out string - for _, order := range l { - out += order.String() + "\n" - } - - return strings.TrimSpace(out) -} - -// Accept returns whether lease filters valid or not -func (filters LeaseFilters) Accept(obj Lease, stateVal Lease_State) bool { - // Checking owner filter - if filters.Owner != "" && filters.Owner != obj.LeaseID.Owner { - return false - } - - // Checking dseq filter - if filters.DSeq != 0 && filters.DSeq != obj.LeaseID.DSeq { - return false - } - - // Checking gseq filter - if filters.GSeq != 0 && filters.GSeq != obj.LeaseID.GSeq { - return false - } - - // Checking oseq filter - if filters.OSeq != 0 && filters.OSeq != obj.LeaseID.OSeq { - return false - } - - // Checking provider filter - if filters.Provider != "" && filters.Provider != obj.LeaseID.Provider { - return false - } - - // Checking state filter - if stateVal != 0 && stateVal != obj.State { - return false - } - - return true -} - -func (m QueryLeasesResponse) TotalPriceAmount() sdk.Dec { - total := sdk.NewDec(0) - - for _, lease := range m.Leases { - total = total.Add(lease.Lease.Price.Amount) - } - - return total -} diff --git a/go/node/market/v1beta5/bid.go b/go/node/market/v1beta5/bid.go new file mode 100644 index 00000000..e2f9b279 --- /dev/null +++ b/go/node/market/v1beta5/bid.go @@ -0,0 +1,139 @@ +package v1beta5 + +import ( + "sort" + "strings" + + "gopkg.in/yaml.v3" + + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" +) + +type ResourcesOffer []ResourceOffer + +// Bids is a collection of Bid +type Bids []Bid + +var _ sort.Interface = (*ResourcesOffer)(nil) + +// String implements the Stringer interface for a Bid object. +func (o *Bid) String() string { + out, _ := yaml.Marshal(o) + return string(out) +} + +// String implements the Stringer interface for a Bids object. 
+func (b Bids) String() string { + var out string + for _, bid := range b { + out += bid.String() + "\n" + } + + return strings.TrimSpace(out) +} + +// Filters returns whether bid filters valid or not +func (o *Bid) Filters(filters BidFilters, stateVal Bid_State) bool { + // Checking owner filter + if filters.Owner != "" && filters.Owner != o.ID.Owner { + return false + } + + // Checking dseq filter + if filters.DSeq != 0 && filters.DSeq != o.ID.DSeq { + return false + } + + // Checking gseq filter + if filters.GSeq != 0 && filters.GSeq != o.ID.GSeq { + return false + } + + // Checking oseq filter + if filters.OSeq != 0 && filters.OSeq != o.ID.OSeq { + return false + } + + // Checking provider filter + if filters.Provider != "" && filters.Provider != o.ID.Provider { + return false + } + + // Checking state filter + if stateVal != 0 && stateVal != o.State { + return false + } + + return true +} + +func (s ResourcesOffer) MatchGSpec(gspec dtypes.GroupSpec) bool { + if len(s) == 0 { + return true + } + + ru := make(map[uint32]*dtypes.ResourceUnit) + + for idx := range gspec.Resources { + ru[gspec.Resources[idx].ID] = &gspec.Resources[idx] + } + + for _, ro := range s { + res, exists := ru[ro.Resources.ID] + if !exists { + return false + } + + ru[ro.Resources.ID] = nil + + if res.Count != ro.Count { + return false + } + + // TODO @troian check resources boundaries + } + + return true +} + +func (r *ResourceOffer) Dup() ResourceOffer { + return ResourceOffer{ + Resources: r.Resources.Dup(), + Count: r.Count, + } +} + +func (s ResourcesOffer) Len() int { + return len(s) +} + +func (s ResourcesOffer) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s ResourcesOffer) Less(i, j int) bool { + return s[i].Resources.ID < s[j].Resources.ID +} + +func (s ResourcesOffer) Dup() ResourcesOffer { + res := make(ResourcesOffer, 0, len(s)) + + for _, ru := range s { + res = append(res, ru.Dup()) + } + + return res +} + +func ResourceOfferFromRU(ru dtypes.ResourceUnits) ResourcesOffer { + res := make(ResourcesOffer, 0, len(ru)) + + for _, r := range ru { + res = append(res, ResourceOffer{ + Resources: r.Resources, + Count: r.Count, + }) + } + + return res +} diff --git a/go/node/market/v1beta5/bid.pb.go b/go/node/market/v1beta5/bid.pb.go new file mode 100644 index 00000000..5949dd4a --- /dev/null +++ b/go/node/market/v1beta5/bid.pb.go @@ -0,0 +1,569 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1beta5/bid.proto + +package v1beta5 + +import ( + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + v1 "pkg.akt.dev/go/node/market/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// BidState is an enum which refers to state of bid +type Bid_State int32 + +const ( + // Prefix should start with 0 in enum. 
So declaring dummy state + BidStateInvalid Bid_State = 0 + // BidOpen denotes state for bid open + BidOpen Bid_State = 1 + // BidMatched denotes state for bid open + BidActive Bid_State = 2 + // BidLost denotes state for bid lost + BidLost Bid_State = 3 + // BidClosed denotes state for bid closed + BidClosed Bid_State = 4 +) + +var Bid_State_name = map[int32]string{ + 0: "invalid", + 1: "open", + 2: "active", + 3: "lost", + 4: "closed", +} + +var Bid_State_value = map[string]int32{ + "invalid": 0, + "open": 1, + "active": 2, + "lost": 3, + "closed": 4, +} + +func (x Bid_State) String() string { + return proto.EnumName(Bid_State_name, int32(x)) +} + +func (Bid_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9a051a9ee62f13b0, []int{0, 0} +} + +// Bid stores BidID, state of bid and price +type Bid struct { + ID v1.BidID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` + State Bid_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta5.Bid_State" json:"state" yaml:"state"` + Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"state" yaml:"created_at"` + ResourcesOffer ResourcesOffer `protobuf:"bytes,5,rep,name=resources_offer,json=resourcesOffer,proto3,castrepeated=ResourcesOffer" json:"resources_offer" yaml:"resources_offer"` +} + +func (m *Bid) Reset() { *m = Bid{} } +func (*Bid) ProtoMessage() {} +func (*Bid) Descriptor() ([]byte, []int) { + return fileDescriptor_9a051a9ee62f13b0, []int{0} +} +func (m *Bid) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Bid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Bid.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Bid) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bid.Merge(m, src) +} +func (m *Bid) XXX_Size() int { + return m.Size() +} +func (m *Bid) XXX_DiscardUnknown() { + xxx_messageInfo_Bid.DiscardUnknown(m) +} + +var xxx_messageInfo_Bid proto.InternalMessageInfo + +func (m *Bid) GetID() v1.BidID { + if m != nil { + return m.ID + } + return v1.BidID{} +} + +func (m *Bid) GetState() Bid_State { + if m != nil { + return m.State + } + return BidStateInvalid +} + +func (m *Bid) GetPrice() types.DecCoin { + if m != nil { + return m.Price + } + return types.DecCoin{} +} + +func (m *Bid) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *Bid) GetResourcesOffer() ResourcesOffer { + if m != nil { + return m.ResourcesOffer + } + return nil +} + +func init() { + proto.RegisterEnum("akash.market.v1beta5.Bid_State", Bid_State_name, Bid_State_value) + proto.RegisterType((*Bid)(nil), "akash.market.v1beta5.Bid") +} + +func init() { proto.RegisterFile("akash/market/v1beta5/bid.proto", fileDescriptor_9a051a9ee62f13b0) } + +var fileDescriptor_9a051a9ee62f13b0 = []byte{ + // 536 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x3f, 0x6f, 0xd3, 0x40, + 0x1c, 0xb5, 0x9d, 0xa4, 0xa5, 0x57, 0x48, 0x83, 0x29, 0x55, 0x62, 0x81, 0x6d, 0x99, 0x25, 0x2c, + 0x67, 0x25, 0x88, 0xa5, 0x82, 0xa1, 0x6e, 0x18, 0x22, 0x21, 0x15, 0x8c, 0x58, 0x58, 0xaa, 0x8b, + 0xef, 0x1a, 0x4e, 0x49, 0x7c, 0x91, 0xef, 0x88, 0xc4, 0x37, 0x40, 0x99, 0x90, 0x58, 0x58, 0x22, + 0x55, 0xea, 0xc6, 0x27, 0xe9, 0xd8, 0x91, 0xc9, 
0xa0, 0x64, 0x41, 0x19, 0x33, 0x30, 0x23, 0xdf, + 0x99, 0x04, 0xac, 0x6c, 0xf9, 0xbd, 0x7f, 0x79, 0x7e, 0x3a, 0x60, 0xa3, 0x01, 0xe2, 0xef, 0xfd, + 0x11, 0x4a, 0x06, 0x44, 0xf8, 0x93, 0x56, 0x8f, 0x08, 0xf4, 0xd4, 0xef, 0x51, 0x0c, 0xc7, 0x09, + 0x13, 0xcc, 0x3c, 0x94, 0x3c, 0x54, 0x3c, 0xcc, 0x79, 0xeb, 0xb0, 0xcf, 0xfa, 0x4c, 0x0a, 0xfc, + 0xec, 0x97, 0xd2, 0x5a, 0x76, 0xc4, 0xf8, 0x88, 0x71, 0xbf, 0x87, 0x38, 0xc9, 0xa3, 0x5a, 0x7e, + 0xc4, 0x68, 0x9c, 0xf3, 0x8f, 0xb7, 0xfe, 0x57, 0x42, 0x38, 0xfb, 0x90, 0x44, 0x84, 0xb3, 0x8b, + 0x0b, 0x92, 0xe4, 0xd2, 0x46, 0x41, 0xba, 0x69, 0xe4, 0xfd, 0x2e, 0x83, 0x52, 0x40, 0xb1, 0xf9, + 0x02, 0x18, 0x14, 0xd7, 0x75, 0x57, 0x6f, 0xee, 0xb7, 0x8f, 0x60, 0xa1, 0x26, 0x0c, 0x28, 0xee, + 0x76, 0x82, 0x87, 0xd7, 0xa9, 0xa3, 0xcd, 0x53, 0xc7, 0xe8, 0x76, 0x96, 0xa9, 0x63, 0x50, 0xbc, + 0x4a, 0x9d, 0xbd, 0x8f, 0x68, 0x34, 0x3c, 0xf6, 0x28, 0xf6, 0x42, 0x83, 0x62, 0xf3, 0x15, 0xa8, + 0x70, 0x81, 0x04, 0xa9, 0x1b, 0xae, 0xde, 0xac, 0xb6, 0x1d, 0xb8, 0xed, 0x83, 0xb3, 0x38, 0xf8, + 0x26, 0x93, 0x05, 0x8d, 0x65, 0xea, 0x28, 0xc7, 0x2a, 0x75, 0x6e, 0xab, 0x2c, 0x79, 0x7a, 0xa1, + 0x82, 0xcd, 0xd7, 0xa0, 0x32, 0x4e, 0x68, 0x44, 0xea, 0x25, 0xd9, 0xed, 0x01, 0x54, 0xb3, 0xc0, + 0x6c, 0x96, 0x3c, 0xb0, 0x05, 0x3b, 0x24, 0x3a, 0x65, 0x34, 0x56, 0x0d, 0xb3, 0x48, 0x69, 0xd9, + 0x44, 0xca, 0xd3, 0x0b, 0x15, 0x6c, 0x3e, 0x07, 0x20, 0x4a, 0x08, 0x12, 0x04, 0x9f, 0x23, 0x51, + 0x2f, 0xbb, 0x7a, 0xb3, 0x14, 0xd8, 0xff, 0x16, 0xb9, 0xab, 0x5c, 0x1b, 0x91, 0x17, 0xee, 0xe5, + 0xc7, 0x89, 0x30, 0xaf, 0x74, 0x70, 0xb0, 0x9e, 0xf9, 0x5c, 0xee, 0x5c, 0xaf, 0xb8, 0xa5, 0xe6, + 0x7e, 0xfb, 0xd1, 0xf6, 0xcf, 0x0d, 0x73, 0xf1, 0x59, 0x26, 0x0d, 0xde, 0xe6, 0x2b, 0x56, 0xff, + 0xc2, 0x5c, 0xe2, 0xcb, 0xd4, 0x29, 0xa6, 0xae, 0x52, 0xe7, 0x48, 0x35, 0x29, 0x10, 0xde, 0xb7, + 0x1f, 0x45, 0x7b, 0x58, 0x4d, 0xfe, 0xbb, 0xbd, 0x2f, 0x3a, 0xa8, 0xc8, 0x8d, 0x4d, 0x17, 0xec, + 0xd2, 0x78, 0x82, 0x86, 0x14, 0xd7, 0x34, 0xeb, 0xde, 0x74, 0xe6, 0x1e, 0x04, 0x14, 0x4b, 0xaa, + 0xab, 0x60, 0xf3, 0x3e, 0x28, 0xb3, 0x31, 0x89, 0x6b, 0xba, 0xb5, 0x3f, 0x9d, 0xb9, 0xbb, 0x01, + 0xc5, 0x67, 0x63, 0x12, 0x9b, 0x0d, 0xb0, 0x83, 0x22, 0x41, 0x27, 0xa4, 0x66, 0x58, 0x77, 0xa6, + 0x33, 0x77, 0x2f, 0xa0, 0xf8, 0x44, 0x02, 0x99, 0x63, 0xc8, 0xb8, 0xa8, 0x95, 0xd6, 0x8e, 0x97, + 0x8c, 0x8b, 0xcc, 0x11, 0x0d, 0x19, 0x27, 0xb8, 0x56, 0x5e, 0x3b, 0x4e, 0x25, 0x60, 0x95, 0x3f, + 0x5d, 0xd9, 0xda, 0xf1, 0xad, 0xaf, 0x97, 0x8e, 0xf6, 0xeb, 0xd2, 0xd1, 0x82, 0x67, 0xd7, 0x73, + 0x5b, 0xbf, 0x99, 0xdb, 0xfa, 0xcf, 0xb9, 0xad, 0x7f, 0x5e, 0xd8, 0xda, 0xcd, 0xc2, 0xd6, 0xbe, + 0x2f, 0x6c, 0xed, 0x9d, 0x37, 0x1e, 0xf4, 0x21, 0x1a, 0x08, 0x88, 0xc9, 0xc4, 0xef, 0x33, 0x3f, + 0x66, 0x98, 0x14, 0x9e, 0x79, 0x6f, 0x47, 0xbe, 0xde, 0x27, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, + 0x8e, 0x2a, 0xbe, 0xba, 0x71, 0x03, 0x00, 0x00, +} + +func (m *Bid) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bid) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Bid) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourcesOffer) > 0 { + for iNdEx := len(m.ResourcesOffer) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourcesOffer[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBid(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.CreatedAt != 0 { + i = encodeVarintBid(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x20 + } + { + size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBid(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.State != 0 { + i = encodeVarintBid(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBid(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintBid(dAtA []byte, offset int, v uint64) int { + offset -= sovBid(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Bid) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovBid(uint64(l)) + if m.State != 0 { + n += 1 + sovBid(uint64(m.State)) + } + l = m.Price.Size() + n += 1 + l + sovBid(uint64(l)) + if m.CreatedAt != 0 { + n += 1 + sovBid(uint64(m.CreatedAt)) + } + if len(m.ResourcesOffer) > 0 { + for _, e := range m.ResourcesOffer { + l = e.Size() + n += 1 + l + sovBid(uint64(l)) + } + } + return n +} + +func sovBid(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozBid(x uint64) (n int) { + return sovBid(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Bid) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Bid: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Bid: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= Bid_State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthBid + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourcesOffer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourcesOffer = append(m.ResourcesOffer, ResourceOffer{}) + if err := m.ResourcesOffer[len(m.ResourcesOffer)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBid(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBid + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipBid(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthBid + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupBid + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthBid + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthBid = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowBid = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupBid = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1beta5/bid_test.go b/go/node/market/v1beta5/bid_test.go new file mode 100644 index 00000000..3e952755 --- /dev/null +++ b/go/node/market/v1beta5/bid_test.go @@ -0,0 +1,48 @@ +package v1beta5_test + +// import ( +// 
"testing" +// +// "github.com/stretchr/testify/require" +// +// testutil "pkg.akt.dev/go/node/client/testutil/v1beta3" +// +// "pkg.akt.dev/go/node/market/v1beta5" +// ) +// +// func TestBid_GSpecMatch_Valid(t *testing.T) { +// gspec := testutil.GroupSpec(t) +// +// rOffer := v1beta5.ResourceOfferFromRU(gspec.Resources) +// +// require.True(t, rOffer.MatchGSpec(gspec)) +// } +// +// func TestBid_GSpecMatch_Valid2(t *testing.T) { +// gspec := testutil.GroupSpec(t) +// +// if len(gspec.Resources) == 1 { +// rl := testutil.ResourcesList(t, 2) +// rl[0].Count = 4 +// gspec.Resources = append(gspec.Resources, rl...) +// } +// +// rOffer := v1beta5.ResourceOfferFromRU(gspec.Resources) +// +// require.True(t, rOffer.MatchGSpec(gspec)) +// } +// +// func TestBid_GSpecMatch_InvalidCount(t *testing.T) { +// gspec := testutil.GroupSpec(t) +// +// if len(gspec.Resources) == 1 { +// rl := testutil.ResourcesList(t, 2) +// gspec.Resources = append(gspec.Resources, rl...) +// } +// +// rOffer := v1beta5.ResourceOfferFromRU(gspec.Resources) +// +// gspec.Resources[0].Count = 2 +// +// require.False(t, rOffer.MatchGSpec(gspec)) +// } diff --git a/go/node/market/v1beta5/bidmsg.pb.go b/go/node/market/v1beta5/bidmsg.pb.go new file mode 100644 index 00000000..775d70e5 --- /dev/null +++ b/go/node/market/v1beta5/bidmsg.pb.go @@ -0,0 +1,980 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1beta5/bidmsg.proto + +package v1beta5 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + v1 "pkg.akt.dev/go/node/market/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgCreateBid defines an SDK message for creating Bid +type MsgCreateBid struct { + OrderID v1.OrderID `protobuf:"bytes,1,opt,name=order_id,json=orderId,proto3" json:"order_id" yaml:"order_id"` + Provider string `protobuf:"bytes,2,opt,name=provider,proto3" json:"provider" yaml:"provider"` + Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` + Deposit types.Coin `protobuf:"bytes,4,opt,name=deposit,proto3" json:"deposit" yaml:"deposit"` + ResourcesOffer ResourcesOffer `protobuf:"bytes,5,rep,name=resources_offer,json=resourcesOffer,proto3,castrepeated=ResourcesOffer" json:"resources_offer" yaml:"resources_offer"` +} + +func (m *MsgCreateBid) Reset() { *m = MsgCreateBid{} } +func (m *MsgCreateBid) String() string { return proto.CompactTextString(m) } +func (*MsgCreateBid) ProtoMessage() {} +func (*MsgCreateBid) Descriptor() ([]byte, []int) { + return fileDescriptor_63a6438beb6a54f7, []int{0} +} +func (m *MsgCreateBid) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateBid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateBid.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateBid) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateBid.Merge(m, src) +} +func (m *MsgCreateBid) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateBid) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateBid.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateBid proto.InternalMessageInfo + +func (m *MsgCreateBid) GetOrderID() v1.OrderID { + if m != nil { + return m.OrderID + } + return v1.OrderID{} +} + +func (m *MsgCreateBid) GetProvider() string { + if m != nil { + return m.Provider + } + return "" +} + +func (m *MsgCreateBid) GetPrice() types.DecCoin { + if m != nil { + return m.Price + } + return types.DecCoin{} +} + +func (m *MsgCreateBid) GetDeposit() types.Coin { + if m != nil { + return m.Deposit + } + return types.Coin{} +} + +func (m *MsgCreateBid) GetResourcesOffer() ResourcesOffer { + if m != nil { + return m.ResourcesOffer + } + return nil +} + +// MsgCreateBidResponse defines the Msg/CreateBid response type. 
+type MsgCreateBidResponse struct { +} + +func (m *MsgCreateBidResponse) Reset() { *m = MsgCreateBidResponse{} } +func (m *MsgCreateBidResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateBidResponse) ProtoMessage() {} +func (*MsgCreateBidResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_63a6438beb6a54f7, []int{1} +} +func (m *MsgCreateBidResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateBidResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateBidResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateBidResponse.Merge(m, src) +} +func (m *MsgCreateBidResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateBidResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateBidResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateBidResponse proto.InternalMessageInfo + +// MsgCloseBid defines an SDK message for closing bid +type MsgCloseBid struct { + ID v1.BidID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *MsgCloseBid) Reset() { *m = MsgCloseBid{} } +func (m *MsgCloseBid) String() string { return proto.CompactTextString(m) } +func (*MsgCloseBid) ProtoMessage() {} +func (*MsgCloseBid) Descriptor() ([]byte, []int) { + return fileDescriptor_63a6438beb6a54f7, []int{2} +} +func (m *MsgCloseBid) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCloseBid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCloseBid.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCloseBid) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCloseBid.Merge(m, src) +} +func (m *MsgCloseBid) XXX_Size() int { + return m.Size() +} +func (m *MsgCloseBid) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCloseBid.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCloseBid proto.InternalMessageInfo + +func (m *MsgCloseBid) GetID() v1.BidID { + if m != nil { + return m.ID + } + return v1.BidID{} +} + +// MsgCloseBidResponse defines the Msg/CloseBid response type. 
+type MsgCloseBidResponse struct { +} + +func (m *MsgCloseBidResponse) Reset() { *m = MsgCloseBidResponse{} } +func (m *MsgCloseBidResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCloseBidResponse) ProtoMessage() {} +func (*MsgCloseBidResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_63a6438beb6a54f7, []int{3} +} +func (m *MsgCloseBidResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCloseBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCloseBidResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCloseBidResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCloseBidResponse.Merge(m, src) +} +func (m *MsgCloseBidResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCloseBidResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCloseBidResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCloseBidResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgCreateBid)(nil), "akash.market.v1beta5.MsgCreateBid") + proto.RegisterType((*MsgCreateBidResponse)(nil), "akash.market.v1beta5.MsgCreateBidResponse") + proto.RegisterType((*MsgCloseBid)(nil), "akash.market.v1beta5.MsgCloseBid") + proto.RegisterType((*MsgCloseBidResponse)(nil), "akash.market.v1beta5.MsgCloseBidResponse") +} + +func init() { proto.RegisterFile("akash/market/v1beta5/bidmsg.proto", fileDescriptor_63a6438beb6a54f7) } + +var fileDescriptor_63a6438beb6a54f7 = []byte{ + // 571 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x3f, 0x6f, 0xd3, 0x4e, + 0x18, 0x8e, 0xd3, 0xf6, 0xd7, 0xd6, 0xa9, 0x5a, 0xfd, 0x4c, 0x08, 0x6e, 0x00, 0x5f, 0x6a, 0x96, + 0x80, 0xc4, 0x59, 0x29, 0x42, 0x48, 0x11, 0x0b, 0x6e, 0x18, 0x32, 0xa0, 0x0a, 0x57, 0x30, 0xb0, + 0x44, 0x8e, 0xef, 0x6a, 0x8e, 0x34, 0x39, 0xeb, 0xce, 0x44, 0x62, 0xe5, 0x13, 0xf0, 0x11, 0x90, + 0xd8, 0x10, 0x03, 0x03, 0x1f, 0xa2, 0x63, 0xc5, 0xc4, 0x64, 0x50, 0x32, 0x80, 0x32, 0xe6, 0x13, + 0x20, 0xdf, 0x1f, 0x53, 0x4c, 0x36, 0xdf, 0xf3, 0x3c, 0xef, 0x7b, 0x8f, 0xdf, 0xf7, 0x39, 0xf3, + 0x20, 0x1c, 0x85, 0xfc, 0xa5, 0x37, 0x0e, 0xd9, 0x08, 0xa7, 0xde, 0xb4, 0x33, 0xc4, 0x69, 0x78, + 0xdf, 0x1b, 0x12, 0x34, 0xe6, 0x31, 0x4c, 0x18, 0x4d, 0xa9, 0x55, 0x17, 0x12, 0x28, 0x25, 0x50, + 0x49, 0x9a, 0xf5, 0x98, 0xc6, 0x54, 0x08, 0xbc, 0xfc, 0x4b, 0x6a, 0x9b, 0xfb, 0x11, 0xe5, 0x63, + 0xca, 0x07, 0x92, 0x90, 0x07, 0x45, 0x5d, 0x93, 0x27, 0x6f, 0xcc, 0x63, 0x6f, 0xda, 0xf1, 0x8a, + 0xfe, 0x4d, 0x47, 0x11, 0xc3, 0x90, 0x63, 0xe5, 0xa0, 0xe3, 0x45, 0x94, 0x4c, 0x14, 0x7f, 0x7b, + 0xa5, 0x45, 0x86, 0x39, 0x7d, 0xcd, 0x22, 0xcc, 0xe9, 0xe9, 0x29, 0x66, 0xfa, 0xfa, 0x92, 0x34, + 0xff, 0x11, 0x45, 0x5d, 0x2f, 0x53, 0x94, 0x21, 0x5d, 0xe7, 0x7e, 0x5a, 0x37, 0x77, 0x9e, 0xf0, + 0xf8, 0x88, 0xe1, 0x30, 0xc5, 0x3e, 0x41, 0x56, 0x64, 0x6e, 0x09, 0x7e, 0x40, 0x90, 0x6d, 0xb4, + 0x8c, 0x76, 0xed, 0xd0, 0x86, 0xa5, 0x31, 0xc0, 0xe3, 0x5c, 0xd0, 0xef, 0xf9, 0xf0, 0x3c, 0x03, + 0x95, 0x59, 0x06, 0x36, 0x15, 0xb0, 0xc8, 0x40, 0x51, 0xbc, 0xcc, 0xc0, 0xde, 0x9b, 0x70, 0x7c, + 0xd6, 0x75, 0x35, 0xe2, 0x06, 0x9b, 0xe2, 0xb3, 0x8f, 0xac, 0x13, 0x73, 0x2b, 0x61, 0x74, 0x4a, + 0x10, 0x66, 0x76, 0xb5, 0x65, 0xb4, 0xb7, 0xfd, 0x07, 0x79, 0xad, 0xc6, 0xfe, 0xd4, 0x6a, 0xc4, + 0xfd, 0xfa, 0xe5, 0x6e, 0x5d, 0x0d, 0xf5, 0x11, 0x42, 0x0c, 0x73, 0x7e, 0x92, 
0x32, 0x32, 0x89, + 0x83, 0xa2, 0xc8, 0x7a, 0x6a, 0x6e, 0x24, 0x8c, 0x44, 0xd8, 0x5e, 0x13, 0xb6, 0x6f, 0x40, 0xa5, + 0xcf, 0xa7, 0xab, 0x96, 0xd7, 0x81, 0x3d, 0x1c, 0x1d, 0x51, 0x32, 0xf1, 0x6f, 0xe6, 0xd6, 0x17, + 0x19, 0x90, 0x25, 0xcb, 0x0c, 0xec, 0xe8, 0x0b, 0x49, 0x84, 0xdd, 0x40, 0xc2, 0xd6, 0x73, 0x73, + 0x13, 0xe1, 0x84, 0x72, 0x92, 0xda, 0xeb, 0xa2, 0xe9, 0xfe, 0xca, 0xa6, 0xa2, 0xe3, 0x81, 0xea, + 0xa8, 0x2b, 0x96, 0x19, 0xd8, 0x95, 0x3d, 0x15, 0xe0, 0x06, 0x9a, 0xb2, 0x3e, 0x18, 0xe6, 0x5e, + 0xb1, 0xc6, 0x81, 0xd8, 0xa3, 0xbd, 0xd1, 0x5a, 0x6b, 0xd7, 0x0e, 0x6f, 0xc1, 0x55, 0x99, 0x83, + 0x81, 0x12, 0x1f, 0xe7, 0x52, 0xff, 0x99, 0x9a, 0xfb, 0xae, 0x86, 0xb9, 0xc0, 0x17, 0x19, 0x28, + 0x77, 0x5d, 0x66, 0xa0, 0x21, 0x4d, 0x94, 0x08, 0xf7, 0xe3, 0xf7, 0x72, 0x79, 0xb0, 0xcb, 0xfe, + 0x3a, 0x77, 0xff, 0xff, 0xf5, 0x1e, 0x54, 0xde, 0xfe, 0xfc, 0x7c, 0xa7, 0x98, 0xb1, 0xdb, 0x30, + 0xeb, 0x97, 0xd3, 0x12, 0x60, 0x9e, 0xd0, 0x09, 0xc7, 0xee, 0x2b, 0xb3, 0x96, 0xe3, 0x67, 0x94, + 0x8b, 0x10, 0x3d, 0x36, 0xab, 0x45, 0x7c, 0x1a, 0xff, 0xc4, 0xc7, 0x27, 0xa8, 0xdf, 0x93, 0x1b, + 0x98, 0x65, 0xa0, 0x2a, 0x72, 0x53, 0x15, 0x89, 0xd9, 0x96, 0x5e, 0xf3, 0xac, 0x54, 0x09, 0xea, + 0xd6, 0xb5, 0x81, 0x9a, 0x0c, 0xb3, 0xf4, 0x70, 0xd5, 0xbc, 0x72, 0xe9, 0x2e, 0x6d, 0xc1, 0x7f, + 0x78, 0x3e, 0x73, 0x8c, 0x8b, 0x99, 0x63, 0xfc, 0x98, 0x39, 0xc6, 0xbb, 0xb9, 0x53, 0xb9, 0x98, + 0x3b, 0x95, 0x6f, 0x73, 0xa7, 0xf2, 0xc2, 0x4d, 0x46, 0x31, 0x0c, 0x47, 0x29, 0x44, 0x78, 0xea, + 0xc5, 0xd4, 0x9b, 0x50, 0x84, 0x4b, 0x8f, 0x6a, 0xf8, 0x9f, 0x78, 0x0e, 0xf7, 0x7e, 0x07, 0x00, + 0x00, 0xff, 0xff, 0x61, 0x2f, 0xeb, 0xe1, 0x16, 0x04, 0x00, 0x00, +} + +func (m *MsgCreateBid) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateBid) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateBid) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourcesOffer) > 0 { + for iNdEx := len(m.ResourcesOffer) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourcesOffer[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBidmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + { + size, err := m.Deposit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBidmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBidmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Provider) > 0 { + i -= len(m.Provider) + copy(dAtA[i:], m.Provider) + i = encodeVarintBidmsg(dAtA, i, uint64(len(m.Provider))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.OrderID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBidmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgCreateBidResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateBidResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} 
+ +func (m *MsgCreateBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgCloseBid) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCloseBid) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCloseBid) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBidmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgCloseBidResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCloseBidResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCloseBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintBidmsg(dAtA []byte, offset int, v uint64) int { + offset -= sovBidmsg(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgCreateBid) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.OrderID.Size() + n += 1 + l + sovBidmsg(uint64(l)) + l = len(m.Provider) + if l > 0 { + n += 1 + l + sovBidmsg(uint64(l)) + } + l = m.Price.Size() + n += 1 + l + sovBidmsg(uint64(l)) + l = m.Deposit.Size() + n += 1 + l + sovBidmsg(uint64(l)) + if len(m.ResourcesOffer) > 0 { + for _, e := range m.ResourcesOffer { + l = e.Size() + n += 1 + l + sovBidmsg(uint64(l)) + } + } + return n +} + +func (m *MsgCreateBidResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgCloseBid) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovBidmsg(uint64(l)) + return n +} + +func (m *MsgCloseBidResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovBidmsg(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozBidmsg(x uint64) (n int) { + return sovBidmsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgCreateBid) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBidmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateBid: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateBid: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OrderID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowBidmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBidmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBidmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.OrderID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBidmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBidmsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBidmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provider = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBidmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBidmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBidmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deposit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBidmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBidmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBidmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Deposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourcesOffer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBidmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBidmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBidmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourcesOffer = append(m.ResourcesOffer, ResourceOffer{}) + if err := m.ResourcesOffer[len(m.ResourcesOffer)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBidmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBidmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + 
return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateBidResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBidmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateBidResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipBidmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBidmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCloseBid) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBidmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCloseBid: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCloseBid: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBidmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBidmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBidmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBidmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBidmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCloseBidResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBidmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCloseBidResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCloseBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipBidmsg(dAtA[iNdEx:]) + if err != nil { + 
return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBidmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipBidmsg(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBidmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBidmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBidmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthBidmsg + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupBidmsg + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthBidmsg + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthBidmsg = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowBidmsg = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupBidmsg = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1beta5/codec.go b/go/node/market/v1beta5/codec.go new file mode 100644 index 00000000..262bde1f --- /dev/null +++ b/go/node/market/v1beta5/codec.go @@ -0,0 +1,54 @@ +package v1beta5 + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +var ( + // amino = codec.NewLegacyAmino() + + // ModuleCdc references the global x/market module codec. Note, the codec should + // ONLY be used in certain instances of tests and for JSON encoding as Amino is + // still used for that purpose. + // + // The actual codec used for serialization should be provided to x/market and + // defined at the application level. + // + // Deprecated: ModuleCdc use is deprecated + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) + +// func init() { +// RegisterLegacyAminoCodec(amino) +// cryptocodec.RegisterCrypto(amino) +// amino.Seal() +// } + +// RegisterLegacyAminoCodec registers the necessary x/market interfaces and concrete types +// on the provided Amino codec. These types are used for Amino JSON serialization. 
+// +// Deprecated: RegisterLegacyAminoCodec is deprecated +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + cdc.RegisterConcrete(&MsgCreateBid{}, "akash-sdk/x/"+ModuleName+"/"+(&MsgCreateBid{}).Type(), nil) + cdc.RegisterConcrete(&MsgCloseBid{}, "akash-sdk/x/"+ModuleName+"/"+(&MsgCloseBid{}).Type(), nil) + cdc.RegisterConcrete(&MsgCreateLease{}, "akash-sdk/x/"+ModuleName+"/"+(&MsgCreateLease{}).Type(), nil) + cdc.RegisterConcrete(&MsgCloseLease{}, "akash-sdk/x/"+ModuleName+"/"+(&MsgCloseLease{}).Type(), nil) + cdc.RegisterConcrete(&MsgWithdrawLease{}, "akash-sdk/x/"+ModuleName+"/"+(&MsgWithdrawLease{}).Type(), nil) +} + +// RegisterInterfaces registers the x/market interface types with the interface registry +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), + &MsgCreateBid{}, + &MsgCloseBid{}, + &MsgCreateLease{}, + &MsgCloseLease{}, + &MsgWithdrawLease{}, + &MsgUpdateParams{}, + ) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} diff --git a/go/node/market/v1beta5/errors.go b/go/node/market/v1beta5/errors.go new file mode 100644 index 00000000..d00908c4 --- /dev/null +++ b/go/node/market/v1beta5/errors.go @@ -0,0 +1,107 @@ +package v1beta5 + +import ( + sdkerrors "cosmossdk.io/errors" +) + +const ( + errCodeEmptyProvider uint32 = iota + 1 + errCodeSameAccount + errCodeInternal + errCodeOverOrder + errCodeAttributeMismatch + errCodeUnknownBid + errCodeUnknownLease + errCodeUnknownLeaseForOrder + errCodeUnknownOrderForBid + errCodeLeaseNotActive + errCodeBidNotActive + errCodeBidNotOpen + errCodeOrderNotOpen + errCodeNoLeaseForOrder + errCodeOrderNotFound + errCodeGroupNotFound + errCodeGroupNotOpen + errCodeBidNotFound + errCodeBidZeroPrice + errCodeLeaseNotFound + errCodeBidExists + errCodeInvalidPrice + errCodeOrderActive + errCodeOrderClosed + errCodeOrderExists + errCodeOrderDurationExceeded + errCodeOrderTooEarly + errInvalidDeposit + errInvalidParam + errUnknownProvider + errInvalidBid + errCodeCapabilitiesMismatch +) + +var ( + // ErrEmptyProvider is the error when provider is empty + ErrEmptyProvider = sdkerrors.Register(ModuleName, errCodeEmptyProvider, "empty provider") + // ErrSameAccount is the error when owner and provider are the same account + ErrSameAccount = sdkerrors.Register(ModuleName, errCodeSameAccount, "owner and provider are the same account") + // ErrInternal is the error for internal error + ErrInternal = sdkerrors.Register(ModuleName, errCodeInternal, "internal error") + // ErrBidOverOrder is the error when bid price is above max order price + ErrBidOverOrder = sdkerrors.Register(ModuleName, errCodeOverOrder, "bid price above max order price") + // ErrAttributeMismatch is the error for attribute mismatch + ErrAttributeMismatch = sdkerrors.Register(ModuleName, errCodeAttributeMismatch, "attribute mismatch") + // ErrCapabilitiesMismatch is the error for capabilities mismatch + ErrCapabilitiesMismatch = sdkerrors.Register(ModuleName, errCodeCapabilitiesMismatch, "capabilities mismatch") + // ErrUnknownBid is the error for unknown bid + ErrUnknownBid = sdkerrors.Register(ModuleName, errCodeUnknownBid, "unknown bid") + // ErrUnknownLease is the error for unknown lease + ErrUnknownLease = sdkerrors.Register(ModuleName, errCodeUnknownLease, "unknown lease") + // ErrUnknownLeaseForBid is the error when lease is unknown for bid + ErrUnknownLeaseForBid = sdkerrors.Register(ModuleName, errCodeUnknownLeaseForOrder, "unknown lease for bid") + // ErrUnknownOrderForBid is the error 
when order is unknown for bid + ErrUnknownOrderForBid = sdkerrors.Register(ModuleName, errCodeUnknownOrderForBid, "unknown order for bid") + // ErrLeaseNotActive is the error when lease is not active + ErrLeaseNotActive = sdkerrors.Register(ModuleName, errCodeLeaseNotActive, "lease not active") + // ErrBidNotActive is the error when bid is not active + ErrBidNotActive = sdkerrors.Register(ModuleName, errCodeBidNotActive, "bid not active") + // ErrBidNotOpen is the error when bid is not open + ErrBidNotOpen = sdkerrors.Register(ModuleName, errCodeBidNotOpen, "bid not open") + // ErrNoLeaseForOrder is the error when there is no lease for order + ErrNoLeaseForOrder = sdkerrors.Register(ModuleName, errCodeNoLeaseForOrder, "no lease for order") + // ErrOrderNotFound order not found + ErrOrderNotFound = sdkerrors.Register(ModuleName, errCodeOrderNotFound, "invalid order: order not found") + // ErrGroupNotFound group not found + ErrGroupNotFound = sdkerrors.Register(ModuleName, errCodeGroupNotFound, "group not found") + // ErrGroupNotOpen group not open + ErrGroupNotOpen = sdkerrors.Register(ModuleName, errCodeGroupNotOpen, "group not open") + // ErrOrderNotOpen order not open + ErrOrderNotOpen = sdkerrors.Register(ModuleName, errCodeOrderNotOpen, "bid: order not open") + // ErrBidNotFound bid not found + ErrBidNotFound = sdkerrors.Register(ModuleName, errCodeBidNotFound, "invalid bid: bid not found") + // ErrBidZeroPrice zero price + ErrBidZeroPrice = sdkerrors.Register(ModuleName, errCodeBidZeroPrice, "invalid bid: zero price") + // ErrLeaseNotFound lease not found + ErrLeaseNotFound = sdkerrors.Register(ModuleName, errCodeLeaseNotFound, "invalid lease: lease not found") + // ErrBidExists bid exists + ErrBidExists = sdkerrors.Register(ModuleName, errCodeBidExists, "invalid bid: bid exists from provider") + // ErrBidInvalidPrice bid invalid price + ErrBidInvalidPrice = sdkerrors.Register(ModuleName, errCodeInvalidPrice, "bid price is invalid") + // ErrOrderActive order active + ErrOrderActive = sdkerrors.New(ModuleName, errCodeOrderActive, "order active") + // ErrOrderClosed order closed + ErrOrderClosed = sdkerrors.New(ModuleName, errCodeOrderClosed, "order closed") + // ErrOrderExists indicates a new order was proposed to overwrite the existing store key + ErrOrderExists = sdkerrors.New(ModuleName, errCodeOrderExists, "order already exists in store") + // ErrOrderTooEarly chain height too low to match bid + ErrOrderTooEarly = sdkerrors.New(ModuleName, errCodeOrderTooEarly, "order: chain height too low for bidding") + // ErrOrderDurationExceeded order should be closed + ErrOrderDurationExceeded = sdkerrors.New(ModuleName, errCodeOrderDurationExceeded, "order duration has exceeded the bidding duration") + // ErrInvalidDeposit indicates an invalid deposit + ErrInvalidDeposit = sdkerrors.Register(ModuleName, errInvalidDeposit, "Deposit invalid") + // ErrInvalidParam indicates an invalid chain parameter + ErrInvalidParam = sdkerrors.Register(ModuleName, errInvalidParam, "parameter invalid") + // ErrUnknownProvider indicates an unknown provider + ErrUnknownProvider = sdkerrors.Register(ModuleName, errUnknownProvider, "unknown provider") + // ErrInvalidBid indicates an invalid bid + ErrInvalidBid = sdkerrors.Register(ModuleName, errInvalidBid, "invalid bid") +) diff --git a/go/node/market/v1beta5/escrow.go b/go/node/market/v1beta5/escrow.go new file mode 100644 index 00000000..9a335094 --- /dev/null +++ b/go/node/market/v1beta5/escrow.go @@ -0,0 +1,62 @@ +package v1beta5 + +import ( + "fmt" + 
"strconv" + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" + + dtypesv1 "pkg.akt.dev/go/node/deployment/v1" + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + etypes "pkg.akt.dev/go/node/escrow/v1" + v1 "pkg.akt.dev/go/node/market/v1" +) + +const ( + bidEscrowScope = "bid" +) + +func EscrowAccountForBid(id v1.BidID) etypes.AccountID { + return etypes.AccountID{ + Scope: bidEscrowScope, + XID: id.String(), + } +} + +func EscrowPaymentForLease(id v1.LeaseID) string { + return fmt.Sprintf("%v/%v/%s", id.GSeq, id.OSeq, id.Provider) +} + +func LeaseIDFromEscrowAccount(id etypes.AccountID, pid string) (v1.LeaseID, bool) { + did, ok := dtypes.DeploymentIDFromEscrowAccount(id) + if !ok { + return v1.LeaseID{}, false + } + + parts := strings.Split(pid, "/") + if len(parts) != 3 { + return v1.LeaseID{}, false + } + + gseq, err := strconv.ParseUint(parts[0], 10, 32) + if err != nil { + return v1.LeaseID{}, false + } + + oseq, err := strconv.ParseUint(parts[1], 10, 32) + if err != nil { + return v1.LeaseID{}, false + } + + owner, err := sdk.AccAddressFromBech32(parts[2]) + if err != nil { + return v1.LeaseID{}, false + } + + return v1.MakeLeaseID( + v1.MakeBidID( + v1.MakeOrderID( + dtypesv1.MakeGroupID( + did, uint32(gseq)), uint32(oseq)), owner)), true // nolint: gosec +} diff --git a/go/node/market/v1beta5/filters.go b/go/node/market/v1beta5/filters.go new file mode 100644 index 00000000..91d02e41 --- /dev/null +++ b/go/node/market/v1beta5/filters.go @@ -0,0 +1,66 @@ +package v1beta5 + +// Accept returns whether order filters valid or not +func (filters *OrderFilters) Accept(obj Order, stateVal Order_State) bool { + // Checking owner filter + if filters.Owner != "" && filters.Owner != obj.ID.Owner { + return false + } + + // Checking dseq filter + if filters.DSeq != 0 && filters.DSeq != obj.ID.DSeq { + return false + } + + // Checking gseq filter + if filters.GSeq != 0 && filters.GSeq != obj.ID.GSeq { + return false + } + + // Checking oseq filter + if filters.OSeq != 0 && filters.OSeq != obj.ID.OSeq { + return false + } + + // Checking state filter + if stateVal != 0 && stateVal != obj.State { + return false + } + + return true +} + +// Accept returns whether bid filters valid or not +func (filters *BidFilters) Accept(obj Bid, stateVal Bid_State) bool { + // Checking owner filter + if filters.Owner != "" && filters.Owner != obj.ID.Owner { + return false + } + + // Checking dseq filter + if filters.DSeq != 0 && filters.DSeq != obj.ID.DSeq { + return false + } + + // Checking gseq filter + if filters.GSeq != 0 && filters.GSeq != obj.ID.GSeq { + return false + } + + // Checking oseq filter + if filters.OSeq != 0 && filters.OSeq != obj.ID.OSeq { + return false + } + + // Checking provider filter + if filters.Provider != "" && filters.Provider != obj.ID.Provider { + return false + } + + // Checking state filter + if stateVal != 0 && stateVal != obj.State { + return false + } + + return true +} diff --git a/go/node/market/v1beta5/filters.pb.go b/go/node/market/v1beta5/filters.pb.go new file mode 100644 index 00000000..07681336 --- /dev/null +++ b/go/node/market/v1beta5/filters.pb.go @@ -0,0 +1,868 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1beta5/filters.proto + +package v1beta5 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// BidFilters defines flags for bid list filter +type BidFilters struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` + GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` + OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` + Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` + State string `protobuf:"bytes,6,opt,name=state,proto3" json:"state" yaml:"state"` +} + +func (m *BidFilters) Reset() { *m = BidFilters{} } +func (m *BidFilters) String() string { return proto.CompactTextString(m) } +func (*BidFilters) ProtoMessage() {} +func (*BidFilters) Descriptor() ([]byte, []int) { + return fileDescriptor_a70a613b4758a7e8, []int{0} +} +func (m *BidFilters) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BidFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BidFilters.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BidFilters) XXX_Merge(src proto.Message) { + xxx_messageInfo_BidFilters.Merge(m, src) +} +func (m *BidFilters) XXX_Size() int { + return m.Size() +} +func (m *BidFilters) XXX_DiscardUnknown() { + xxx_messageInfo_BidFilters.DiscardUnknown(m) +} + +var xxx_messageInfo_BidFilters proto.InternalMessageInfo + +func (m *BidFilters) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *BidFilters) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *BidFilters) GetGSeq() uint32 { + if m != nil { + return m.GSeq + } + return 0 +} + +func (m *BidFilters) GetOSeq() uint32 { + if m != nil { + return m.OSeq + } + return 0 +} + +func (m *BidFilters) GetProvider() string { + if m != nil { + return m.Provider + } + return "" +} + +func (m *BidFilters) GetState() string { + if m != nil { + return m.State + } + return "" +} + +// OrderFilters defines flags for order list filter +type OrderFilters struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` + GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` + OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` + State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state" yaml:"state"` +} + +func (m *OrderFilters) Reset() { *m = OrderFilters{} } +func (m *OrderFilters) String() string { return proto.CompactTextString(m) } +func (*OrderFilters) ProtoMessage() {} +func (*OrderFilters) Descriptor() ([]byte, []int) { + return fileDescriptor_a70a613b4758a7e8, []int{1} +} +func (m *OrderFilters) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OrderFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_OrderFilters.Marshal(b, m, 
deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *OrderFilters) XXX_Merge(src proto.Message) { + xxx_messageInfo_OrderFilters.Merge(m, src) +} +func (m *OrderFilters) XXX_Size() int { + return m.Size() +} +func (m *OrderFilters) XXX_DiscardUnknown() { + xxx_messageInfo_OrderFilters.DiscardUnknown(m) +} + +var xxx_messageInfo_OrderFilters proto.InternalMessageInfo + +func (m *OrderFilters) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *OrderFilters) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *OrderFilters) GetGSeq() uint32 { + if m != nil { + return m.GSeq + } + return 0 +} + +func (m *OrderFilters) GetOSeq() uint32 { + if m != nil { + return m.OSeq + } + return 0 +} + +func (m *OrderFilters) GetState() string { + if m != nil { + return m.State + } + return "" +} + +func init() { + proto.RegisterType((*BidFilters)(nil), "akash.market.v1beta5.BidFilters") + proto.RegisterType((*OrderFilters)(nil), "akash.market.v1beta5.OrderFilters") +} + +func init() { + proto.RegisterFile("akash/market/v1beta5/filters.proto", fileDescriptor_a70a613b4758a7e8) +} + +var fileDescriptor_a70a613b4758a7e8 = []byte{ + // 389 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x93, 0xb1, 0x6a, 0xeb, 0x30, + 0x18, 0x85, 0xed, 0xc4, 0x0e, 0xf7, 0xea, 0xe6, 0x72, 0xc1, 0x64, 0x70, 0x32, 0x58, 0x41, 0x53, + 0x96, 0x6b, 0x13, 0x42, 0x29, 0x84, 0x2e, 0x35, 0xa5, 0x19, 0x03, 0xc9, 0xd6, 0xa5, 0x38, 0x95, + 0xaa, 0x1a, 0x27, 0x51, 0x22, 0x89, 0x94, 0x6e, 0x7d, 0x84, 0x3e, 0x42, 0x5f, 0xa0, 0x5b, 0x1f, + 0xa2, 0x63, 0xe8, 0xd4, 0xc9, 0x14, 0x67, 0x29, 0x19, 0xf3, 0x04, 0xc5, 0x92, 0x9b, 0x90, 0x42, + 0x87, 0xce, 0x9d, 0xec, 0xff, 0xe8, 0x7c, 0x07, 0xfe, 0x63, 0x0b, 0xa0, 0x28, 0x89, 0xc4, 0x55, + 0x30, 0x89, 0x78, 0x42, 0x64, 0xb0, 0x68, 0x8f, 0x88, 0x8c, 0x0e, 0x82, 0xcb, 0x78, 0x2c, 0x09, + 0x17, 0xfe, 0x8c, 0x33, 0xc9, 0x9c, 0x9a, 0xf2, 0xf8, 0xda, 0xe3, 0x17, 0x9e, 0x46, 0x8d, 0x32, + 0xca, 0x94, 0x21, 0xc8, 0xdf, 0xb4, 0xb7, 0x51, 0xbf, 0x60, 0x62, 0xc2, 0xc4, 0xb9, 0x3e, 0xd0, + 0x83, 0x3e, 0x42, 0xb7, 0x65, 0x00, 0xc2, 0x18, 0x9f, 0xea, 0x6c, 0xa7, 0x07, 0x6c, 0x76, 0x3d, + 0x25, 0xdc, 0x35, 0x9b, 0x66, 0xeb, 0x77, 0xd8, 0x5e, 0xa7, 0x50, 0x0b, 0x9b, 0x14, 0x56, 0x6f, + 0xa2, 0xc9, 0xb8, 0x8b, 0xd4, 0x88, 0x9e, 0x1f, 0xff, 0xd7, 0x8a, 0xa0, 0x63, 0x8c, 0x39, 0x11, + 0x62, 0x28, 0x79, 0x3c, 0xa5, 0x03, 0x6d, 0x77, 0x3a, 0xc0, 0xc2, 0x82, 0xcc, 0xdd, 0x52, 0xd3, + 0x6c, 0x59, 0x21, 0xcc, 0x52, 0x68, 0x9d, 0x0c, 0xc9, 0x7c, 0x9d, 0x42, 0xa5, 0x6f, 0x52, 0xf8, + 0x47, 0xc7, 0xe5, 0x13, 0x1a, 0x28, 0x31, 0x87, 0x68, 0x0e, 0x95, 0x9b, 0x66, 0xeb, 0xaf, 0x86, + 0x7a, 0x05, 0x44, 0xf7, 0x20, 0xaa, 0x21, 0x5a, 0x40, 0x2c, 0x87, 0xac, 0x1d, 0xd4, 0x2f, 0x20, + 0xb6, 0x07, 0x31, 0x0d, 0xe5, 0x0f, 0x67, 0x08, 0x7e, 0xcd, 0x38, 0x5b, 0xc4, 0x98, 0x70, 0xd7, + 0x56, 0xab, 0x1e, 0xae, 0x53, 0xb8, 0xd5, 0x36, 0x29, 0xfc, 0xa7, 0xa1, 0x0f, 0xe5, 0xeb, 0x85, + 0xb7, 0x90, 0x13, 0x00, 0x5b, 0xc8, 0x48, 0x12, 0xb7, 0xa2, 0x12, 0xeb, 0x79, 0x79, 0x4a, 0xd8, + 0x95, 0xa7, 0x46, 0x34, 0xd0, 0x72, 0xd7, 0x7a, 0xbb, 0x87, 0x06, 0x7a, 0x28, 0x81, 0x6a, 0x9f, + 0x63, 0xc2, 0x7f, 0xda, 0x47, 0xd8, 0xf6, 0x65, 0x7f, 0xa7, 0xaf, 0xf0, 0xe8, 0x29, 0xf3, 0xcc, + 0x65, 0xe6, 0x99, 0xaf, 0x99, 0x67, 0xde, 0xad, 0x3c, 0x63, 0xb9, 0xf2, 0x8c, 0x97, 0x95, 0x67, + 0x9c, 0xa1, 0x59, 0x42, 0xfd, 0x28, 0x91, 0x3e, 0x26, 
0x8b, 0x80, 0xb2, 0x60, 0xca, 0x30, 0xf9, + 0x74, 0x8b, 0x46, 0x15, 0xf5, 0xdf, 0x77, 0xde, 0x03, 0x00, 0x00, 0xff, 0xff, 0xff, 0x10, 0xc9, + 0xec, 0x64, 0x03, 0x00, 0x00, +} + +func (m *BidFilters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BidFilters) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BidFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintFilters(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x32 + } + if len(m.Provider) > 0 { + i -= len(m.Provider) + copy(dAtA[i:], m.Provider) + i = encodeVarintFilters(dAtA, i, uint64(len(m.Provider))) + i-- + dAtA[i] = 0x2a + } + if m.OSeq != 0 { + i = encodeVarintFilters(dAtA, i, uint64(m.OSeq)) + i-- + dAtA[i] = 0x20 + } + if m.GSeq != 0 { + i = encodeVarintFilters(dAtA, i, uint64(m.GSeq)) + i-- + dAtA[i] = 0x18 + } + if m.DSeq != 0 { + i = encodeVarintFilters(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintFilters(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *OrderFilters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OrderFilters) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OrderFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintFilters(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x2a + } + if m.OSeq != 0 { + i = encodeVarintFilters(dAtA, i, uint64(m.OSeq)) + i-- + dAtA[i] = 0x20 + } + if m.GSeq != 0 { + i = encodeVarintFilters(dAtA, i, uint64(m.GSeq)) + i-- + dAtA[i] = 0x18 + } + if m.DSeq != 0 { + i = encodeVarintFilters(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintFilters(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintFilters(dAtA []byte, offset int, v uint64) int { + offset -= sovFilters(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *BidFilters) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovFilters(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovFilters(uint64(m.DSeq)) + } + if m.GSeq != 0 { + n += 1 + sovFilters(uint64(m.GSeq)) + } + if m.OSeq != 0 { + n += 1 + sovFilters(uint64(m.OSeq)) + } + l = len(m.Provider) + if l > 0 { + n += 1 + l + sovFilters(uint64(l)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovFilters(uint64(l)) + } + return n +} + +func (m *OrderFilters) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovFilters(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovFilters(uint64(m.DSeq)) + } + if m.GSeq != 0 { + n += 1 + 
sovFilters(uint64(m.GSeq)) + } + if m.OSeq != 0 { + n += 1 + sovFilters(uint64(m.OSeq)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovFilters(uint64(l)) + } + return n +} + +func sovFilters(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozFilters(x uint64) (n int) { + return sovFilters(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *BidFilters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BidFilters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BidFilters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFilters + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFilters + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) + } + m.GSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) + } + m.OSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFilters + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFilters + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provider = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFilters + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFilters + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFilters(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthFilters + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OrderFilters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OrderFilters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OrderFilters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFilters + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFilters + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) + } + m.GSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) + } + m.OSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFilters + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFilters + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthFilters + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFilters(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthFilters + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFilters(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilters + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilters + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFilters + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthFilters + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupFilters + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthFilters + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthFilters = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFilters = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupFilters = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1beta5/genesis.pb.go b/go/node/market/v1beta5/genesis.pb.go new file mode 100644 index 00000000..76201ba6 --- /dev/null +++ b/go/node/market/v1beta5/genesis.pb.go @@ -0,0 +1,521 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1beta5/genesis.proto + +package v1beta5 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + pkg_akt_dev_go_node_market_v1 "pkg.akt.dev/go/node/market/v1" + v1 "pkg.akt.dev/go/node/market/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the basic genesis state used by market module +type GenesisState struct { + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params" yaml:"params"` + Orders Orders `protobuf:"bytes,2,rep,name=orders,proto3,castrepeated=Orders" json:"orders" yaml:"orders"` + Leases pkg_akt_dev_go_node_market_v1.Leases `protobuf:"bytes,3,rep,name=leases,proto3,castrepeated=pkg.akt.dev/go/node/market/v1.Leases" json:"leases" yaml:"leases"` + Bids Bids `protobuf:"bytes,4,rep,name=bids,proto3,castrepeated=Bids" json:"bids" yaml:"bids"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_73efc258394be6e9, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func (m *GenesisState) GetOrders() Orders { + if m != nil { + return m.Orders + } + return nil +} + +func (m *GenesisState) GetLeases() pkg_akt_dev_go_node_market_v1.Leases { + if m != nil { + return m.Leases + } + return nil +} + +func (m *GenesisState) GetBids() Bids { + if m != nil { + return m.Bids + } + return nil +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "akash.market.v1beta5.GenesisState") +} + +func init() { + proto.RegisterFile("akash/market/v1beta5/genesis.proto", fileDescriptor_73efc258394be6e9) +} + +var fileDescriptor_73efc258394be6e9 = []byte{ + // 361 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xb1, 0x4a, 0xc3, 0x40, + 0x18, 0xc7, 0x13, 0x5b, 0x32, 0xa4, 0xba, 0x84, 0x22, 0xb1, 0x95, 0x4b, 0x8d, 0x0e, 0x75, 0xb9, + 0xc3, 0x8a, 0x8b, 0x38, 0xdd, 0xd2, 0x45, 0x50, 0x22, 0x2e, 0x0e, 0xc2, 0x85, 0x3b, 0x62, 0x48, + 0xdb, 0x2b, 0xb9, 0x50, 0xf1, 0x2d, 0xc4, 0xc7, 0xf0, 0x49, 0x3a, 0x76, 0x74, 0x8a, 0xd2, 0x6e, + 0x8e, 0x7d, 0x02, 0xc9, 0x7d, 0x57, 0x0a, 0x25, 0xb8, 0xe5, 0xe3, 0xff, 0xbb, 0xdf, 0xff, 0xee, + 0x8b, 0x1b, 0xb2, 0x8c, 0xa9, 0x17, 0x32, 0x66, 0x79, 0x26, 0x0a, 0x32, 0xbb, 0x88, 0x45, 0xc1, + 0xae, 0x48, 0x22, 0x26, 0x42, 0xa5, 0x0a, 0x4f, 0x73, 0x59, 0x48, 0xaf, 0xad, 0x19, 0x0c, 0x0c, + 0x36, 0x4c, 0xa7, 0x9d, 0xc8, 0x44, 0x6a, 0x80, 0x54, 0x5f, 0xc0, 0x76, 0xba, 0x3b, 0x3e, 0x32, + 0x12, 0x4c, 0x09, 0x13, 0x9e, 0xd4, 0x96, 0x4d, 0x59, 0xce, 0xc6, 0xa6, 0xab, 0x83, 0x6a, 0x91, + 0x38, 0xe5, 0x26, 0xef, 0xd5, 0xe6, 0x32, 0xe7, 0x22, 0x07, 0x22, 0xfc, 0x68, 0xb8, 0xfb, 0x43, + 0xb8, 0xff, 0x43, 0xc1, 0x0a, 0xe1, 0x3d, 0xba, 0x0e, 0x54, 0xf8, 0x76, 0xcf, 0xee, 0xb7, 0x06, + 0xc7, 0xb8, 0xee, 0x3d, 0xf8, 0x5e, 0x33, 0x34, 0x98, 0x97, 0x81, 0xf5, 0x5b, 0x06, 0xe6, 
0xcc, + 0xba, 0x0c, 0x0e, 0xde, 0xd8, 0x78, 0x74, 0x1d, 0xc2, 0x1c, 0x46, 0x26, 0xf0, 0x9e, 0x5d, 0x47, + 0xd7, 0x2a, 0x7f, 0xaf, 0xd7, 0xe8, 0xb7, 0x06, 0xdd, 0x7a, 0xed, 0x5d, 0xc5, 0xd0, 0xf3, 0x8d, + 0x15, 0x8e, 0x6c, 0xad, 0x30, 0x87, 0x9f, 0xdf, 0x81, 0xa3, 0x49, 0x15, 0x19, 0xc4, 0x7b, 0x75, + 0x1d, 0xbd, 0x3b, 0xe5, 0x37, 0xb4, 0xff, 0x70, 0xd7, 0x8f, 0x6f, 0xab, 0x98, 0x0e, 0x37, 0x6a, + 0xa0, 0xb7, 0x6a, 0x98, 0x2b, 0xf5, 0xd9, 0x34, 0x4b, 0x30, 0xcb, 0x0a, 0xcc, 0xc5, 0x8c, 0x24, + 0x92, 0x4c, 0x24, 0x17, 0xdb, 0x15, 0x82, 0x47, 0x45, 0x46, 0xe0, 0x45, 0x6e, 0x33, 0x4e, 0xb9, + 0xf2, 0x9b, 0xba, 0xf6, 0xa8, 0xfe, 0x59, 0x34, 0xe5, 0xf4, 0xd4, 0x34, 0x6b, 0x7c, 0x5d, 0x06, + 0x2d, 0xe8, 0xad, 0xa6, 0xaa, 0xb5, 0x49, 0x53, 0xae, 0x22, 0x1d, 0xd2, 0x9b, 0xf9, 0x12, 0xd9, + 0x8b, 0x25, 0xb2, 0x7f, 0x96, 0xc8, 0x7e, 0x5f, 0x21, 0x6b, 0xb1, 0x42, 0xd6, 0xd7, 0x0a, 0x59, + 0x4f, 0xe1, 0xbf, 0x77, 0xd3, 0x65, 0xb1, 0xa3, 0xff, 0xec, 0xe5, 0x5f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x95, 0x94, 0x69, 0x84, 0xad, 0x02, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Bids) > 0 { + for iNdEx := len(m.Bids) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Bids[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Leases) > 0 { + for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Orders) > 0 { + for iNdEx := len(m.Orders) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Orders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.Orders) > 0 { + for _, e := range m.Orders { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.Leases) > 0 { + for _, e := range m.Leases { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.Bids) > 0 { + for _, e := range m.Bids { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Orders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Orders = append(m.Orders, Order{}) + if err := m.Orders[len(m.Orders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Leases = append(m.Leases, v1.Lease{}) + if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bids", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bids = append(m.Bids, Bid{}) + if err := m.Bids[len(m.Bids)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil 
{ + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1beta5/key.go b/go/node/market/v1beta5/key.go new file mode 100644 index 00000000..48a1eca6 --- /dev/null +++ b/go/node/market/v1beta5/key.go @@ -0,0 +1,32 @@ +package v1beta5 + +const ( + // ModuleName is the module name constant used in many places + ModuleName = "market" + + // StoreKey is the store key string for market + StoreKey = ModuleName + + // RouterKey is the message route for market + RouterKey = ModuleName +) + +func OrderPrefix() []byte { + return []byte{0x01, 0x00} +} + +func BidPrefix() []byte { + return []byte{0x02, 0x00} +} + +func LeasePrefix() []byte { + return []byte{0x03, 0x00} +} + +func SecondaryLeasePrefix() []byte { + return []byte{0x03, 0x01} +} + +func ParamsPrefix() []byte { + return []byte{0x04, 0x00} +} diff --git a/go/node/market/v1beta5/leasemsg.pb.go b/go/node/market/v1beta5/leasemsg.pb.go new file mode 100644 index 00000000..39bd3017 --- /dev/null +++ b/go/node/market/v1beta5/leasemsg.pb.go @@ -0,0 +1,1043 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1beta5/leasemsg.proto + +package v1beta5 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + v1 "pkg.akt.dev/go/node/market/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgCreateLease is sent to create a lease +type MsgCreateLease struct { + BidID v1.BidID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` +} + +func (m *MsgCreateLease) Reset() { *m = MsgCreateLease{} } +func (m *MsgCreateLease) String() string { return proto.CompactTextString(m) } +func (*MsgCreateLease) ProtoMessage() {} +func (*MsgCreateLease) Descriptor() ([]byte, []int) { + return fileDescriptor_394bd78777079a40, []int{0} +} +func (m *MsgCreateLease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateLease.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateLease) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateLease.Merge(m, src) +} +func (m *MsgCreateLease) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateLease) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateLease.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateLease proto.InternalMessageInfo + +func (m *MsgCreateLease) GetBidID() v1.BidID { + if m != nil { + return m.BidID + } + return v1.BidID{} +} + +// MsgCreateLeaseResponse is the response from creating a lease +type MsgCreateLeaseResponse struct { +} + +func (m *MsgCreateLeaseResponse) Reset() { *m = MsgCreateLeaseResponse{} } +func (m *MsgCreateLeaseResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateLeaseResponse) ProtoMessage() {} +func (*MsgCreateLeaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_394bd78777079a40, []int{1} +} +func (m *MsgCreateLeaseResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateLeaseResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateLeaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateLeaseResponse.Merge(m, src) +} +func (m *MsgCreateLeaseResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateLeaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateLeaseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateLeaseResponse proto.InternalMessageInfo + +// MsgWithdrawLease defines an SDK message for withdrawing lease funds +type MsgWithdrawLease struct { + ID v1.LeaseID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` +} + +func (m *MsgWithdrawLease) Reset() { *m = MsgWithdrawLease{} } +func (m *MsgWithdrawLease) String() string { return proto.CompactTextString(m) } +func (*MsgWithdrawLease) ProtoMessage() {} +func (*MsgWithdrawLease) Descriptor() ([]byte, []int) { + return fileDescriptor_394bd78777079a40, []int{2} +} +func (m *MsgWithdrawLease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgWithdrawLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgWithdrawLease.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgWithdrawLease) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithdrawLease.Merge(m, src) +} +func (m *MsgWithdrawLease) XXX_Size() int { + return m.Size() +} +func (m *MsgWithdrawLease) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithdrawLease.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithdrawLease proto.InternalMessageInfo + +func (m *MsgWithdrawLease) GetID() v1.LeaseID { + if m != nil { + return m.ID + } + return v1.LeaseID{} +} + +// MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. +type MsgWithdrawLeaseResponse struct { +} + +func (m *MsgWithdrawLeaseResponse) Reset() { *m = MsgWithdrawLeaseResponse{} } +func (m *MsgWithdrawLeaseResponse) String() string { return proto.CompactTextString(m) } +func (*MsgWithdrawLeaseResponse) ProtoMessage() {} +func (*MsgWithdrawLeaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_394bd78777079a40, []int{3} +} +func (m *MsgWithdrawLeaseResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgWithdrawLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgWithdrawLeaseResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgWithdrawLeaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithdrawLeaseResponse.Merge(m, src) +} +func (m *MsgWithdrawLeaseResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgWithdrawLeaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithdrawLeaseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithdrawLeaseResponse proto.InternalMessageInfo + +// MsgCloseLease defines an SDK message for closing order +type MsgCloseLease struct { + ID v1.LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"id" yaml:"id"` +} + +func (m *MsgCloseLease) Reset() { *m = MsgCloseLease{} } +func (m *MsgCloseLease) String() string { return proto.CompactTextString(m) } +func (*MsgCloseLease) ProtoMessage() {} +func (*MsgCloseLease) Descriptor() ([]byte, []int) { + return fileDescriptor_394bd78777079a40, []int{4} +} +func (m *MsgCloseLease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCloseLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCloseLease.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCloseLease) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCloseLease.Merge(m, src) +} +func (m *MsgCloseLease) XXX_Size() int { + return m.Size() +} +func (m *MsgCloseLease) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCloseLease.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCloseLease proto.InternalMessageInfo + +func (m *MsgCloseLease) GetID() v1.LeaseID { + if m != nil { + return m.ID + } + return v1.LeaseID{} +} + +// MsgCloseLeaseResponse defines the Msg/CloseLease response type. 
+type MsgCloseLeaseResponse struct { +} + +func (m *MsgCloseLeaseResponse) Reset() { *m = MsgCloseLeaseResponse{} } +func (m *MsgCloseLeaseResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCloseLeaseResponse) ProtoMessage() {} +func (*MsgCloseLeaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_394bd78777079a40, []int{5} +} +func (m *MsgCloseLeaseResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCloseLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCloseLeaseResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCloseLeaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCloseLeaseResponse.Merge(m, src) +} +func (m *MsgCloseLeaseResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCloseLeaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCloseLeaseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCloseLeaseResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgCreateLease)(nil), "akash.market.v1beta5.MsgCreateLease") + proto.RegisterType((*MsgCreateLeaseResponse)(nil), "akash.market.v1beta5.MsgCreateLeaseResponse") + proto.RegisterType((*MsgWithdrawLease)(nil), "akash.market.v1beta5.MsgWithdrawLease") + proto.RegisterType((*MsgWithdrawLeaseResponse)(nil), "akash.market.v1beta5.MsgWithdrawLeaseResponse") + proto.RegisterType((*MsgCloseLease)(nil), "akash.market.v1beta5.MsgCloseLease") + proto.RegisterType((*MsgCloseLeaseResponse)(nil), "akash.market.v1beta5.MsgCloseLeaseResponse") +} + +func init() { + proto.RegisterFile("akash/market/v1beta5/leasemsg.proto", fileDescriptor_394bd78777079a40) +} + +var fileDescriptor_394bd78777079a40 = []byte{ + // 390 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4e, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0xcf, 0x4d, 0x2c, 0xca, 0x4e, 0x2d, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, + 0xd5, 0xcf, 0x49, 0x4d, 0x2c, 0x4e, 0xcd, 0x2d, 0x4e, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0x12, 0x01, 0x2b, 0xd2, 0x83, 0x28, 0xd2, 0x83, 0x2a, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, + 0x2b, 0xd0, 0x07, 0xb1, 0x20, 0x6a, 0xa5, 0xc4, 0x93, 0xf3, 0x8b, 0x73, 0xf3, 0x8b, 0xf5, 0x73, + 0x8b, 0xd3, 0xf5, 0xcb, 0x0c, 0xf5, 0xe1, 0x86, 0x48, 0x49, 0xa2, 0xd9, 0xa4, 0x9f, 0x94, 0x99, + 0x02, 0x95, 0x92, 0x46, 0x97, 0x02, 0xdb, 0x0f, 0x91, 0x54, 0xaa, 0xe0, 0xe2, 0xf3, 0x2d, 0x4e, + 0x77, 0x2e, 0x4a, 0x4d, 0x2c, 0x49, 0xf5, 0x01, 0x89, 0x0b, 0xf9, 0x73, 0xb1, 0x25, 0x65, 0xa6, + 0xc4, 0x67, 0xa6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x1b, 0x89, 0xe9, 0xa1, 0xb9, 0x4f, 0xcf, + 0x29, 0x33, 0xc5, 0xd3, 0xc5, 0x49, 0xe1, 0xc4, 0x3d, 0x79, 0x86, 0x47, 0xf7, 0xe4, 0x59, 0xc1, + 0xdc, 0x57, 0xf7, 0xe4, 0x99, 0x32, 0x53, 0x3e, 0xdd, 0x93, 0xe7, 0xac, 0x4c, 0xcc, 0xcd, 0xb1, + 0x52, 0xca, 0x4c, 0x51, 0x0a, 0x62, 0x4d, 0xca, 0x4c, 0xf1, 0x4c, 0xb1, 0x12, 0x7d, 0xb1, 0x40, + 0x9e, 0xa1, 0xe9, 0xf9, 0x06, 0x2d, 0x1e, 0x88, 0xb9, 0x7a, 0xf9, 0xe5, 0x79, 0xa9, 0x45, 0x4a, + 0x12, 0x5c, 0x62, 0xa8, 0x36, 0x07, 0xa5, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x2a, 0xd5, 0x70, + 0x09, 0xf8, 0x16, 0xa7, 0x87, 0x67, 0x96, 0x64, 0xa4, 0x14, 0x25, 0x96, 0x43, 0x5c, 0xe5, 0x87, + 0xe6, 0x2a, 0x09, 0x0c, 0x57, 0x81, 0xd5, 0x79, 0xba, 0x38, 0xc9, 0x42, 0xdd, 0xc5, 0x84, 0xd7, + 0x51, 0x12, 0x30, 0x47, 0xf1, 0x43, 
0x1d, 0x55, 0x50, 0x94, 0x5f, 0x96, 0x99, 0x92, 0x5a, 0xa4, + 0x24, 0xc5, 0x25, 0x81, 0x6e, 0x3b, 0x92, 0xcb, 0x78, 0x41, 0x6e, 0xce, 0xc9, 0x2f, 0x86, 0x06, + 0x56, 0x10, 0x17, 0x07, 0x38, 0x34, 0xa9, 0xe0, 0x30, 0x76, 0xb0, 0x41, 0x9e, 0x29, 0x56, 0xe2, + 0x30, 0xa7, 0xf1, 0xc1, 0x8c, 0x86, 0x86, 0x98, 0x38, 0x97, 0x28, 0x8a, 0xed, 0x30, 0x67, 0x39, + 0xd9, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, + 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x52, 0x41, 0x76, 0xba, + 0x5e, 0x62, 0x76, 0x89, 0x5e, 0x4a, 0x6a, 0x99, 0x7e, 0x7a, 0xbe, 0x7e, 0x5e, 0x7e, 0x4a, 0x2a, + 0x5a, 0x72, 0x4c, 0x62, 0x03, 0xa7, 0x04, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x56, 0x40, + 0x98, 0x6d, 0xad, 0x02, 0x00, 0x00, +} + +func (m *MsgCreateLease) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateLease) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.BidID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLeasemsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgCreateLeaseResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateLeaseResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgWithdrawLease) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgWithdrawLease) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgWithdrawLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLeasemsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgWithdrawLeaseResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgWithdrawLeaseResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgWithdrawLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgCloseLease) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCloseLease) MarshalTo(dAtA []byte) (int, error) { + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCloseLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLeasemsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgCloseLeaseResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCloseLeaseResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCloseLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintLeasemsg(dAtA []byte, offset int, v uint64) int { + offset -= sovLeasemsg(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgCreateLease) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.BidID.Size() + n += 1 + l + sovLeasemsg(uint64(l)) + return n +} + +func (m *MsgCreateLeaseResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgWithdrawLease) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovLeasemsg(uint64(l)) + return n +} + +func (m *MsgWithdrawLeaseResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgCloseLease) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovLeasemsg(uint64(l)) + return n +} + +func (m *MsgCloseLeaseResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovLeasemsg(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozLeasemsg(x uint64) (n int) { + return sovLeasemsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgCreateLease) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeasemsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateLease: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateLease: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BidID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeasemsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLeasemsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLeasemsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BidID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLeasemsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLeasemsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateLeaseResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeasemsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateLeaseResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLeasemsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLeasemsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgWithdrawLease) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeasemsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgWithdrawLease: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgWithdrawLease: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeasemsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLeasemsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLeasemsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLeasemsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLeasemsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgWithdrawLeaseResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeasemsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgWithdrawLeaseResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgWithdrawLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLeasemsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLeasemsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCloseLease) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeasemsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCloseLease: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCloseLease: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeasemsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLeasemsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLeasemsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLeasemsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLeasemsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCloseLeaseResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLeasemsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCloseLeaseResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCloseLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLeasemsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLeasemsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipLeasemsg(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return 0, ErrIntOverflowLeasemsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLeasemsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowLeasemsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthLeasemsg + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupLeasemsg + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthLeasemsg + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthLeasemsg = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowLeasemsg = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupLeasemsg = fmt.Errorf("proto: unexpected end of group") +)
diff --git a/go/node/market/v1beta5/msgs.go b/go/node/market/v1beta5/msgs.go
new file mode 100644
index 00000000..a4555845
--- /dev/null
+++ b/go/node/market/v1beta5/msgs.go
@@ -0,0 +1,294 @@
+package v1beta5
+
+import (
+	"fmt"
+	"reflect"
+
+	cerrors "cosmossdk.io/errors"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+
+	v1 "pkg.akt.dev/go/node/market/v1"
+)
+
+var (
+	msgTypeCreateBid     = ""
+	msgTypeCloseBid      = ""
+	msgTypeCreateLease   = ""
+	msgTypeCloseLease    = ""
+	msgTypeWithdrawLease = ""
+	msgTypeUpdateParams  = ""
+)
+
+var (
+	_ sdk.Msg = &MsgCreateBid{}
+	_ sdk.Msg = &MsgCloseBid{}
+	_ sdk.Msg = &MsgCreateLease{}
+	_ sdk.Msg = &MsgWithdrawLease{}
+	_ sdk.Msg = &MsgCloseLease{}
+	_ sdk.Msg = &MsgUpdateParams{}
+)
+
+func init() {
+	msgTypeCreateBid = reflect.TypeOf(&MsgCreateBid{}).Elem().Name()
+	msgTypeCloseBid = reflect.TypeOf(&MsgCloseBid{}).Elem().Name()
+	msgTypeCreateLease = reflect.TypeOf(&MsgCreateLease{}).Elem().Name()
+	msgTypeCloseLease = reflect.TypeOf(&MsgCloseLease{}).Elem().Name()
+	msgTypeWithdrawLease = reflect.TypeOf(&MsgWithdrawLease{}).Elem().Name()
+	msgTypeUpdateParams = reflect.TypeOf(&MsgUpdateParams{}).Elem().Name()
+}
+
+// NewMsgCreateBid creates a new MsgCreateBid instance
+func NewMsgCreateBid(id v1.OrderID, provider sdk.AccAddress, price sdk.DecCoin, deposit sdk.Coin, roffer ResourcesOffer) *MsgCreateBid {
+	return &MsgCreateBid{
+		OrderID:        id,
+		Provider:       provider.String(),
+		Price:          price,
+		Deposit:        deposit,
+		ResourcesOffer: roffer,
+	}
+}
+
+// Type implements the sdk.Msg interface
+func (msg *MsgCreateBid) Type() string { return msgTypeCreateBid }
+
+// GetSigners defines whose signature is required
+func (msg *MsgCreateBid) GetSigners() []sdk.AccAddress {
+	provider, err := sdk.AccAddressFromBech32(msg.Provider)
+	if err != nil {
+		panic(err)
+	}
+
+	return []sdk.AccAddress{provider}
+}
+
+// ValidateBasic does basic validation of provider and order
+func (msg *MsgCreateBid) ValidateBasic() error {
+	if err := msg.OrderID.Validate(); err != nil {
+		return err
+	}
+
+	provider, err := sdk.AccAddressFromBech32(msg.Provider)
+	if err != nil {
+		return ErrEmptyProvider
+	}
+
+	owner, err := sdk.AccAddressFromBech32(msg.OrderID.Owner)
+	if err != nil {
+		return fmt.Errorf("%w: empty owner", ErrInvalidBid)
+	}
+
+	if provider.Equals(owner) {
+		return ErrSameAccount
+	}
+
+	if msg.Price.IsZero() {
+		return ErrBidZeroPrice
+	}
+
+	return nil
+}
+
+// NewMsgWithdrawLease creates a new MsgWithdrawLease instance
+func NewMsgWithdrawLease(id v1.LeaseID) *MsgWithdrawLease {
+	return &MsgWithdrawLease{
+		ID: id,
+	}
+}
+
+// Type implements the sdk.Msg interface
+func (msg *MsgWithdrawLease) Type() string { return msgTypeWithdrawLease }
+
+// GetSigners defines whose signature is required
+func (msg *MsgWithdrawLease) GetSigners() []sdk.AccAddress {
+	provider, err := sdk.AccAddressFromBech32(msg.GetID().Provider)
+	if err != nil {
+		panic(err)
+	}
+
+	return []sdk.AccAddress{provider}
+}
+
+// ValidateBasic does basic validation of the lease ID
+func (msg *MsgWithdrawLease) ValidateBasic() error {
+	if err := msg.ID.Validate(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// NewMsgCreateLease creates a new MsgCreateLease instance
+func NewMsgCreateLease(id v1.BidID) *MsgCreateLease {
+	return &MsgCreateLease{
+		BidID: id,
+	}
+}
+
+// Type implements the sdk.Msg interface
+func (msg *MsgCreateLease) Type() string { return msgTypeCreateLease }
+
+// GetSigners defines whose signature is required
+func (msg *MsgCreateLease) GetSigners() []sdk.AccAddress {
+	provider, err := sdk.AccAddressFromBech32(msg.BidID.Owner)
+	if err != nil {
+		panic(err)
+	}
+
+	return []sdk.AccAddress{provider}
+}
+
+// ValidateBasic method for MsgCreateLease
+func (msg *MsgCreateLease) ValidateBasic() error {
+	return msg.BidID.Validate()
+}
+
+// NewMsgCloseBid creates a new MsgCloseBid instance
+func NewMsgCloseBid(id v1.BidID) *MsgCloseBid {
+	return &MsgCloseBid{
+		ID: id,
+	}
+}
+
+// Type implements the sdk.Msg interface
+func (msg *MsgCloseBid) Type() string { return msgTypeCloseBid }
+
+// GetSigners defines whose signature is required
+func (msg *MsgCloseBid) GetSigners() []sdk.AccAddress {
+	provider, err := sdk.AccAddressFromBech32(msg.ID.Provider)
+	if err != nil {
+		panic(err)
+	}
+
+	return []sdk.AccAddress{provider}
+}
+
+// ValidateBasic method for MsgCloseBid
+func (msg *MsgCloseBid) ValidateBasic() error {
+	return msg.ID.Validate()
+}
+
+// NewMsgCloseLease creates a new MsgCloseLease instance
+func NewMsgCloseLease(id v1.LeaseID) *MsgCloseLease {
+	return &MsgCloseLease{
+		ID: id,
+	}
+}
+
+// Type implements the sdk.Msg interface
+func (msg *MsgCloseLease) Type() string { return msgTypeCloseLease }
+
+// GetSigners defines whose signature is required
+func (msg *MsgCloseLease) GetSigners() []sdk.AccAddress {
+	owner, err := sdk.AccAddressFromBech32(msg.ID.Owner)
+	if err != nil {
+		panic(err)
+	}
+
+	return []sdk.AccAddress{owner}
+}
+
+// ValidateBasic method for MsgCloseLease
+func (msg *MsgCloseLease) ValidateBasic() error {
+	return msg.ID.Validate()
+}
+
+// Type implements the sdk.Msg interface
+func (msg *MsgUpdateParams) Type() string { return msgTypeUpdateParams }
+
+// GetSigners returns the expected signers for a MsgUpdateParams message.
+func (msg *MsgUpdateParams) GetSigners() []sdk.AccAddress {
+	addr, _ := sdk.AccAddressFromBech32(msg.Authority)
+	return []sdk.AccAddress{addr}
+}
+
+// ValidateBasic does a sanity check on the provided data.
+func (msg *MsgUpdateParams) ValidateBasic() error {
+	if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil {
+		return cerrors.Wrap(err, "invalid authority address")
+	}
+
+	if err := msg.Params.Validate(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// ============= GetSignBytes =============
+// ModuleCdc is defined in codec.go
+// TODO @troian to check if we need them at all
+
+// GetSignBytes encodes the message for signing
+//
+// Deprecated: GetSignBytes is deprecated
+func (msg *MsgCreateBid) GetSignBytes() []byte {
+	return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg))
+}
+
+// GetSignBytes encodes the message for signing
+//
+// Deprecated: GetSignBytes is deprecated
+func (msg *MsgWithdrawLease) GetSignBytes() []byte {
+	return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg))
+}
+
+// GetSignBytes encodes the message for signing
+//
+// Deprecated: GetSignBytes is deprecated
+func (msg *MsgCreateLease) GetSignBytes() []byte {
+	return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg))
+}
+
+// GetSignBytes encodes the message for signing
+//
+// Deprecated: GetSignBytes is deprecated
+func (msg *MsgCloseBid) GetSignBytes() []byte {
+	return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg))
+}
+
+// GetSignBytes encodes the message for signing
+//
+// Deprecated: GetSignBytes is deprecated
+func (msg *MsgCloseLease) GetSignBytes() []byte {
+	return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg))
+}
+
+// GetSignBytes encodes the message for signing
+//
+// Deprecated: GetSignBytes is deprecated
+func (msg *MsgUpdateParams) GetSignBytes() []byte {
+	return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg))
+}
+
+// ============= Route =============
+// ModuleCdc is defined in codec.go
+// TODO @troian to check if we need them at all since sdk.Msg does not have Route defined anymore
+
+// Route implements the sdk.Msg interface
+//
+// Deprecated: Route is deprecated
+func (msg *MsgCreateBid) Route() string { return RouterKey }
+
+// Route implements the sdk.Msg interface
+//
+// Deprecated: Route is deprecated
+func (msg *MsgWithdrawLease) Route() string { return RouterKey }
+
+// Route implements the sdk.Msg interface
+//
+// Deprecated: Route is deprecated
+func (msg *MsgCreateLease) Route() string { return RouterKey }
+
+// Route implements the sdk.Msg interface
+//
+// Deprecated: Route is deprecated
+func (msg *MsgCloseBid) Route() string { return RouterKey }
+
+// Route implements the sdk.Msg interface
+//
+// Deprecated: Route is deprecated
+func (msg *MsgCloseLease) Route() string { return RouterKey }
+
+// Route implements the sdk.Msg interface
+//
+// Deprecated: Route is deprecated
+func (msg *MsgUpdateParams) Route() string { return RouterKey }
diff --git a/go/node/market/v1beta5/order.go b/go/node/market/v1beta5/order.go
new file mode 100644
index 00000000..ef901e2d
--- /dev/null
+++ b/go/node/market/v1beta5/order.go
@@ -0,0 +1,107 @@
+package v1beta5
+
+import (
+	"strings"
+
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"gopkg.in/yaml.v3"
+
+	atypes "pkg.akt.dev/go/node/audit/v1"
+
+	attr "pkg.akt.dev/go/node/types/attributes/v1"
+)
+
+// String implements the Stringer interface for an Order object.
+func (o *Order) String() string {
+	out, _ := yaml.Marshal(o)
+	return string(out)
+}
+
+// Orders is a collection of Order
+type Orders []Order
+
+// String implements the Stringer interface for an Orders object.
+func (o Orders) String() string {
+	var out string
+	for _, order := range o {
+		out += order.String() + "\n"
+	}
+
+	return strings.TrimSpace(out)
+}
+
+// ValidateCanBid method validates whether the order is open for bidding and
+// returns an error if not
+func (o *Order) ValidateCanBid() error {
+	switch o.State {
+	case OrderOpen:
+		return nil
+	case OrderActive:
+		return ErrOrderActive
+	default:
+		return ErrOrderClosed
+	}
+}
+
+// ValidateInactive method validates whether the order is closed and
+// returns an error if not
+func (o *Order) ValidateInactive() error {
+	switch o.State {
+	case OrderClosed:
+		return nil
+	case OrderActive:
+		return ErrOrderActive
+	default:
+		return ErrOrderClosed
+	}
+}
+
+// Price method returns price of specific order
+func (o *Order) Price() sdk.DecCoin {
+	return o.Spec.Price()
+}
+
+// MatchAttributes method compares provided attributes with specific order attributes
+func (o *Order) MatchAttributes(attrs attr.Attributes) bool {
+	return o.Spec.MatchAttributes(attrs)
+}
+
+// MatchRequirements method compares provided audited providers with specific order requirements
+func (o *Order) MatchRequirements(prov []atypes.AuditedProvider) bool {
+	return o.Spec.MatchRequirements(prov)
+}
+
+// MatchResourcesRequirements method compares provider capabilities with specific order resources attributes
+func (o *Order) MatchResourcesRequirements(attr attr.Attributes) bool {
+	return o.Spec.MatchResourcesRequirements(attr)
+}
+
+// Filters returns true if the order matches the provided filters and state
+func (o *Order) Filters(filters OrderFilters, stateVal Order_State) bool {
+	// Checking owner filter
+	if filters.Owner != "" && filters.Owner != o.ID.Owner {
+		return false
+	}
+
+	// Checking dseq filter
+	if filters.DSeq != 0 && filters.DSeq != o.ID.DSeq {
+		return false
+	}
+
+	// Checking gseq filter
+	if filters.GSeq != 0 && filters.GSeq != o.ID.GSeq {
+		return false
+	}
+
+	// Checking oseq filter
+	if filters.OSeq != 0 && filters.OSeq != o.ID.OSeq {
+		return false
+	}
+
+	// Checking state filter
+	if stateVal != 0 && stateVal != o.State {
+		return false
+	}
+
+	return true
+}
diff --git a/go/node/market/v1beta5/order.pb.go b/go/node/market/v1beta5/order.pb.go new file mode 100644 index 00000000..5f099d08 --- /dev/null +++ b/go/node/market/v1beta5/order.pb.go @@ -0,0 +1,498 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1beta5/order.proto + +package v1beta5 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + v1beta4 "pkg.akt.dev/go/node/deployment/v1beta4" + v1 "pkg.akt.dev/go/node/market/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// State is an enum which refers to state of order +type Order_State int32 + +const ( + // Prefix should start with 0 in enum.
So declaring dummy state + OrderStateInvalid Order_State = 0 + // OrderOpen denotes state for order open + OrderOpen Order_State = 1 + // OrderMatched denotes state for order matched + OrderActive Order_State = 2 + // OrderClosed denotes state for order lost + OrderClosed Order_State = 3 +) + +var Order_State_name = map[int32]string{ + 0: "invalid", + 1: "open", + 2: "active", + 3: "closed", +} + +var Order_State_value = map[string]int32{ + "invalid": 0, + "open": 1, + "active": 2, + "closed": 3, +} + +func (x Order_State) String() string { + return proto.EnumName(Order_State_name, int32(x)) +} + +func (Order_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_a72454f2c693d67f, []int{0, 0} +} + +// Order stores orderID, state of order and other details +type Order struct { + ID v1.OrderID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` + State Order_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta5.Order_State" json:"state" yaml:"state"` + Spec v1beta4.GroupSpec `protobuf:"bytes,3,opt,name=spec,proto3" json:"spec" yaml:"spec"` + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` +} + +func (m *Order) Reset() { *m = Order{} } +func (*Order) ProtoMessage() {} +func (*Order) Descriptor() ([]byte, []int) { + return fileDescriptor_a72454f2c693d67f, []int{0} +} +func (m *Order) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Order.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Order) XXX_Merge(src proto.Message) { + xxx_messageInfo_Order.Merge(m, src) +} +func (m *Order) XXX_Size() int { + return m.Size() +} +func (m *Order) XXX_DiscardUnknown() { + xxx_messageInfo_Order.DiscardUnknown(m) +} + +var xxx_messageInfo_Order proto.InternalMessageInfo + +func (m *Order) GetID() v1.OrderID { + if m != nil { + return m.ID + } + return v1.OrderID{} +} + +func (m *Order) GetState() Order_State { + if m != nil { + return m.State + } + return OrderStateInvalid +} + +func (m *Order) GetSpec() v1beta4.GroupSpec { + if m != nil { + return m.Spec + } + return v1beta4.GroupSpec{} +} + +func (m *Order) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func init() { + proto.RegisterEnum("akash.market.v1beta5.Order_State", Order_State_name, Order_State_value) + proto.RegisterType((*Order)(nil), "akash.market.v1beta5.Order") +} + +func init() { proto.RegisterFile("akash/market/v1beta5/order.proto", fileDescriptor_a72454f2c693d67f) } + +var fileDescriptor_a72454f2c693d67f = []byte{ + // 455 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x3f, 0x6f, 0xd3, 0x40, + 0x18, 0xc6, 0x7d, 0x4e, 0x52, 0xc8, 0x85, 0x3f, 0xc1, 0x2a, 0xc2, 0x38, 0xaa, 0xcf, 0x98, 0x25, + 0xd3, 0x59, 0x94, 0xb2, 0x54, 0x2c, 0x0d, 0x95, 0xaa, 0x4c, 0x95, 0x5c, 0x26, 0x16, 0x74, 0xf5, + 0xbd, 0x32, 0x56, 0x12, 0xdf, 0xc9, 0x39, 0x22, 0x75, 0x67, 0x40, 0x99, 0x18, 0x59, 0x22, 0x55, + 0x62, 0xe3, 0x93, 0x74, 0xec, 0xc8, 0x64, 0xa1, 0x64, 0x41, 0x19, 0xf3, 0x09, 0x90, 0xef, 0x8c, + 0x88, 0xa2, 0x6e, 0xbe, 0xe7, 0xf9, 0xdd, 0xf3, 0x3e, 0xaf, 0x7c, 0x38, 0x60, 0x23, 0x36, 0xfd, + 0x14, 0x4d, 0x58, 0x31, 0x02, 0x15, 0xcd, 0x5e, 0x5d, 0x82, 0x62, 0x6f, 0x22, 0x51, 0x70, 0x28, + 0xa8, 0x2c, 0x84, 0x12, 
0xce, 0xbe, 0x26, 0xa8, 0x21, 0x68, 0x4d, 0x78, 0xfb, 0xa9, 0x48, 0x85, + 0x06, 0xa2, 0xea, 0xcb, 0xb0, 0x5e, 0xdf, 0xa4, 0x71, 0x90, 0x63, 0x71, 0x35, 0x81, 0xfc, 0x5f, + 0xe2, 0x51, 0x94, 0x16, 0xe2, 0xb3, 0x9c, 0x4a, 0x48, 0x6a, 0xb2, 0xb7, 0x33, 0x77, 0x7b, 0x64, + 0xf8, 0xb3, 0x81, 0x5b, 0xe7, 0xd5, 0xd9, 0x39, 0xc3, 0x76, 0xc6, 0x5d, 0x14, 0xa0, 0x7e, 0xe7, + 0xd0, 0xa5, 0x3b, 0x4d, 0xa8, 0x66, 0x86, 0xa7, 0x83, 0x83, 0x9b, 0x92, 0x58, 0xcb, 0x92, 0xd8, + 0xc3, 0xd3, 0x75, 0x49, 0xec, 0x8c, 0x6f, 0x4a, 0xd2, 0xbe, 0x62, 0x93, 0xf1, 0x71, 0x98, 0xf1, + 0x30, 0xb6, 0x33, 0xee, 0xc4, 0xb8, 0x35, 0x55, 0x4c, 0x81, 0x6b, 0x07, 0xa8, 0xff, 0xe8, 0xf0, + 0x05, 0xbd, 0x6b, 0x2b, 0x13, 0x48, 0x2f, 0x2a, 0x70, 0xf0, 0x7c, 0x5d, 0x12, 0x73, 0x67, 0x53, + 0x92, 0x07, 0x26, 0x4d, 0x1f, 0xc3, 0xd8, 0xc8, 0xce, 0x7b, 0xdc, 0xac, 0x36, 0x72, 0x1b, 0xba, + 0xde, 0xcb, 0x3a, 0xf2, 0xff, 0xf2, 0x75, 0xec, 0x11, 0x3d, 0xab, 0x96, 0xbf, 0x90, 0x90, 0x0c, + 0x7a, 0x55, 0xd3, 0x75, 0x49, 0xf4, 0xc5, 0x4d, 0x49, 0x3a, 0x75, 0xae, 0x84, 0x24, 0x8c, 0xb5, + 0xe8, 0x1c, 0x60, 0x9c, 0x14, 0xc0, 0x14, 0xf0, 0x8f, 0x4c, 0xb9, 0xcd, 0x00, 0xf5, 0x1b, 0x71, + 0xbb, 0x56, 0x4e, 0x54, 0xf8, 0x05, 0xe1, 0x96, 0x2e, 0xe8, 0x84, 0xf8, 0x5e, 0x96, 0xcf, 0xd8, + 0x38, 0xe3, 0x5d, 0xcb, 0x7b, 0x3a, 0x5f, 0x04, 0x4f, 0x74, 0x7d, 0x6d, 0x0e, 0x8d, 0xe1, 0x3c, + 0xc3, 0x4d, 0x21, 0x21, 0xef, 0x22, 0xef, 0xe1, 0x7c, 0x11, 0xb4, 0x35, 0x70, 0x2e, 0x21, 0x77, + 0x7a, 0x78, 0x8f, 0x25, 0x2a, 0x9b, 0x41, 0xd7, 0xf6, 0x1e, 0xcf, 0x17, 0x41, 0x47, 0x5b, 0x27, + 0x5a, 0xaa, 0xcc, 0x64, 0x2c, 0xa6, 0xc0, 0xbb, 0x8d, 0x2d, 0xf3, 0x9d, 0x96, 0xbc, 0xe6, 0xd7, + 0x1f, 0xbe, 0x75, 0x7c, 0xff, 0xfb, 0x35, 0xb1, 0xfe, 0x5c, 0x13, 0x6b, 0xf0, 0xf6, 0x66, 0xe9, + 0xa3, 0xdb, 0xa5, 0x8f, 0x7e, 0x2f, 0x7d, 0xf4, 0x6d, 0xe5, 0x5b, 0xb7, 0x2b, 0xdf, 0xfa, 0xb5, + 0xf2, 0xad, 0x0f, 0xa1, 0x1c, 0xa5, 0x94, 0x8d, 0x14, 0xe5, 0x30, 0x8b, 0x52, 0x11, 0xe5, 0x82, + 0xc3, 0xce, 0x4b, 0xbb, 0xdc, 0xd3, 0x7f, 0xfc, 0xf5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2a, + 0xc1, 0x94, 0xc8, 0x88, 0x02, 0x00, 0x00, +} + +func (m *Order) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Order) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Order) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CreatedAt != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x20 + } + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOrder(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.State != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOrder(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintOrder(dAtA []byte, offset int, v uint64) int { + offset -= sovOrder(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Order) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovOrder(uint64(l)) + if m.State != 0 { + n += 1 + sovOrder(uint64(m.State)) + 
} + l = m.Spec.Size() + n += 1 + l + sovOrder(uint64(l)) + if m.CreatedAt != 0 { + n += 1 + sovOrder(uint64(m.CreatedAt)) + } + return n +} + +func sovOrder(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozOrder(x uint64) (n int) { + return sovOrder(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Order) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Order: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Order: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= Order_State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOrder(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOrder + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipOrder(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return 0, ErrIntOverflowOrder + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOrder + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOrder + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthOrder + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupOrder + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthOrder + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthOrder = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowOrder = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupOrder = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1beta5/params.go b/go/node/market/v1beta5/params.go new file mode 100644 index 00000000..23d4954d --- /dev/null +++ b/go/node/market/v1beta5/params.go @@ -0,0 +1,77 @@ +package v1beta5 + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" +) + +var _ paramtypes.ParamSet = (*Params)(nil) + +var ( + DefaultBidMinDeposit = sdk.NewCoin("uakt", sdk.NewInt(500000)) + defaultOrderMaxBids uint32 = 20 + maxOrderMaxBids uint32 = 500 +) + +const ( + keyBidMinDeposit = "BidMinDeposit" + keyOrderMaxBids = "OrderMaxBids" +) + +func ParamKeyTable() paramtypes.KeyTable { + return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) +} + +func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { + return paramtypes.ParamSetPairs{ + paramtypes.NewParamSetPair([]byte(keyBidMinDeposit), &p.BidMinDeposit, validateCoin), + paramtypes.NewParamSetPair([]byte(keyOrderMaxBids), &p.OrderMaxBids, validateOrderMaxBids), + } +} + +func DefaultParams() Params { + return Params{ + BidMinDeposit: DefaultBidMinDeposit, + OrderMaxBids: defaultOrderMaxBids, + } +} + +func (p Params) Validate() error { + if err := validateCoin(p.BidMinDeposit); err != nil { + return err + } + + if err := validateOrderMaxBids(p.OrderMaxBids); err != nil { + return err + } + return nil +} + +func validateCoin(i interface{}) error { + _, ok := i.(sdk.Coin) + if !ok { + return fmt.Errorf("%w: invalid type %T", ErrInvalidParam, i) + } + + return nil +} + +func validateOrderMaxBids(i interface{}) error { + val, ok := i.(uint32) + + if !ok { + return fmt.Errorf("%w: invalid type %T", ErrInvalidParam, i) + } + + if val == 0 { + return fmt.Errorf("%w: order max bids too low", ErrInvalidParam) + } + + if val > maxOrderMaxBids { + return fmt.Errorf("%w: order max bids too high", ErrInvalidParam) + } + + return nil +} diff --git a/go/node/market/v1beta5/params.pb.go b/go/node/market/v1beta5/params.pb.go new file mode 100644 index 00000000..2efe7cee --- /dev/null +++ b/go/node/market/v1beta5/params.pb.go @@ -0,0 +1,364 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: akash/market/v1beta5/params.proto + +package v1beta5 + +import ( + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params is the params for the x/market module +type Params struct { + BidMinDeposit types.Coin `protobuf:"bytes,1,opt,name=bid_min_deposit,json=bidMinDeposit,proto3" json:"bid_min_deposit" yaml:"bid_min_deposit"` + OrderMaxBids uint32 `protobuf:"varint,2,opt,name=order_max_bids,json=orderMaxBids,proto3" json:"order_max_bids" yaml:"order_max_bids"` +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_5db3b08f7b20cd98, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func (m *Params) GetBidMinDeposit() types.Coin { + if m != nil { + return m.BidMinDeposit + } + return types.Coin{} +} + +func (m *Params) GetOrderMaxBids() uint32 { + if m != nil { + return m.OrderMaxBids + } + return 0 +} + +func init() { + proto.RegisterType((*Params)(nil), "akash.market.v1beta5.Params") +} + +func init() { proto.RegisterFile("akash/market/v1beta5/params.proto", fileDescriptor_5db3b08f7b20cd98) } + +var fileDescriptor_5db3b08f7b20cd98 = []byte{ + // 307 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0xb1, 0x4e, 0xeb, 0x30, + 0x18, 0x85, 0xe3, 0x3b, 0x74, 0xc8, 0x6d, 0x41, 0xaa, 0x0a, 0x2a, 0x1d, 0xec, 0x92, 0xa9, 0x93, + 0xad, 0x82, 0x58, 0x80, 0x29, 0xb0, 0x56, 0xa0, 0x8e, 0x2c, 0xd1, 0x9f, 0xda, 0x0a, 0x56, 0x48, + 0xfe, 0x28, 0x8e, 0xaa, 0xf2, 0x00, 0xec, 0x3c, 0x56, 0xc7, 0x8e, 0x4c, 0x16, 0x4a, 0xb7, 0x8e, + 0xe5, 0x05, 0x50, 0x93, 0x0c, 0x25, 0x9b, 0xe5, 0xf3, 0x9d, 0xef, 0x48, 0xb6, 0x7b, 0x09, 0x31, + 0x98, 0x57, 0x91, 0x40, 0x1e, 0xab, 0x42, 0x2c, 0xa7, 0xa1, 0x2a, 0xe0, 0x46, 0x64, 0x90, 0x43, + 0x62, 0x78, 0x96, 0x63, 0x81, 0xfd, 0x41, 0x85, 0xf0, 0x1a, 0xe1, 0x0d, 0x32, 0x1a, 0x44, 0x18, + 0x61, 0x05, 0x88, 0xc3, 0xa9, 0x66, 0x47, 0x74, 0x81, 0x26, 0x41, 0x23, 0x42, 0x30, 0xaa, 0xb1, + 0x4d, 0xc5, 0x02, 0x75, 0x5a, 0xe7, 0xde, 0x0f, 0x71, 0x3b, 0xcf, 0x95, 0xbc, 0xff, 0x41, 0xdc, + 0xd3, 0x50, 0xcb, 0x20, 0xd1, 0x69, 0x20, 0x55, 0x86, 0x46, 0x17, 0x43, 0x32, 0x26, 0x93, 0xff, + 0x57, 0x17, 0xbc, 0xb6, 0xf0, 0x83, 0xa5, 
0x19, 0x9c, 0xf2, 0x07, 0xd4, 0xa9, 0xef, 0xaf, 0x2d, + 0x73, 0x4a, 0xcb, 0x7a, 0xbe, 0x96, 0x33, 0x9d, 0x3e, 0xd6, 0xbd, 0x9d, 0x65, 0x6d, 0xd5, 0xde, + 0xb2, 0xf3, 0x77, 0x48, 0xde, 0x6e, 0xbd, 0x56, 0xe0, 0xcd, 0x7b, 0xe1, 0x71, 0xb7, 0x0f, 0xee, + 0x09, 0xe6, 0x52, 0xe5, 0x41, 0x02, 0xab, 0x20, 0xd4, 0xd2, 0x0c, 0xff, 0x8d, 0xc9, 0xa4, 0xe7, + 0xdf, 0x95, 0x96, 0x75, 0x9f, 0x0e, 0xc9, 0x0c, 0x56, 0xbe, 0x96, 0x66, 0x67, 0x59, 0x8b, 0xdc, + 0x5b, 0x76, 0x56, 0x8f, 0xfc, 0xbd, 0xf7, 0xe6, 0x5d, 0x3c, 0x2a, 0xfa, 0xf7, 0xeb, 0x92, 0x92, + 0x4d, 0x49, 0xc9, 0x77, 0x49, 0xc9, 0xe7, 0x96, 0x3a, 0x9b, 0x2d, 0x75, 0xbe, 0xb6, 0xd4, 0x79, + 0xf1, 0xb2, 0x38, 0xe2, 0x10, 0x17, 0x5c, 0xaa, 0xa5, 0x88, 0x50, 0xa4, 0x28, 0x55, 0xeb, 0x33, + 0xc2, 0x4e, 0xf5, 0x74, 0xd7, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x74, 0x33, 0xbe, 0x8d, 0xab, + 0x01, 0x00, 0x00, +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OrderMaxBids != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.OrderMaxBids)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.BidMinDeposit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.BidMinDeposit.Size() + n += 1 + l + sovParams(uint64(l)) + if m.OrderMaxBids != 0 { + n += 1 + sovParams(uint64(m.OrderMaxBids)) + } + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BidMinDeposit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.BidMinDeposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OrderMaxBids", wireType) + } + m.OrderMaxBids = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OrderMaxBids |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1beta5/paramsmsg.pb.go b/go/node/market/v1beta5/paramsmsg.pb.go new file mode 100644 index 00000000..1a64bb00 --- /dev/null +++ b/go/node/market/v1beta5/paramsmsg.pb.go @@ -0,0 +1,510 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1beta5/paramsmsg.proto + +package v1beta5 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgUpdateParams is the Msg/UpdateParams request type. 
+// +// Since: akash v1.0.0 +type MsgUpdateParams struct { + // authority is the address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // params defines the x/deployment parameters to update. + // + // NOTE: All parameters must be supplied. + Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params"` +} + +func (m *MsgUpdateParams) Reset() { *m = MsgUpdateParams{} } +func (m *MsgUpdateParams) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParams) ProtoMessage() {} +func (*MsgUpdateParams) Descriptor() ([]byte, []int) { + return fileDescriptor_1fb52c43b568c966, []int{0} +} +func (m *MsgUpdateParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParams.Merge(m, src) +} +func (m *MsgUpdateParams) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParams) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParams.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParams proto.InternalMessageInfo + +func (m *MsgUpdateParams) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgUpdateParams) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +// MsgUpdateParamsResponse defines the response structure for executing a +// MsgUpdateParams message. +// +// Since: akash v1.0.0 +type MsgUpdateParamsResponse struct { +} + +func (m *MsgUpdateParamsResponse) Reset() { *m = MsgUpdateParamsResponse{} } +func (m *MsgUpdateParamsResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParamsResponse) ProtoMessage() {} +func (*MsgUpdateParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1fb52c43b568c966, []int{1} +} +func (m *MsgUpdateParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParamsResponse.Merge(m, src) +} +func (m *MsgUpdateParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParamsResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgUpdateParams)(nil), "akash.market.v1beta5.MsgUpdateParams") + proto.RegisterType((*MsgUpdateParamsResponse)(nil), "akash.market.v1beta5.MsgUpdateParamsResponse") +} + +func init() { + proto.RegisterFile("akash/market/v1beta5/paramsmsg.proto", fileDescriptor_1fb52c43b568c966) +} + +var fileDescriptor_1fb52c43b568c966 = []byte{ + // 288 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0xcf, 0x4d, 0x2c, 0xca, 0x4e, 0x2d, 0xd1, 
0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, + 0xd5, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xce, 0x2d, 0x4e, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, + 0x17, 0x12, 0x01, 0xab, 0xd2, 0x83, 0xa8, 0xd2, 0x83, 0xaa, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, + 0x07, 0x2b, 0xd0, 0x07, 0xb1, 0x20, 0x6a, 0xa5, 0xc4, 0x93, 0xf3, 0x8b, 0x73, 0xf3, 0x8b, 0xf5, + 0x73, 0x8b, 0xd3, 0xf5, 0xcb, 0x0c, 0xf5, 0xe1, 0x86, 0x48, 0x49, 0x42, 0x24, 0xe2, 0x21, 0x3a, + 0x20, 0x1c, 0xa8, 0x94, 0x22, 0x1e, 0x57, 0x40, 0x94, 0x28, 0x4d, 0x65, 0xe4, 0xe2, 0xf7, 0x2d, + 0x4e, 0x0f, 0x2d, 0x48, 0x49, 0x2c, 0x49, 0x0d, 0x00, 0xcb, 0x08, 0x99, 0x71, 0x71, 0x26, 0x96, + 0x96, 0x64, 0xe4, 0x17, 0x65, 0x96, 0x54, 0x4a, 0x30, 0x2a, 0x30, 0x6a, 0x70, 0x3a, 0x49, 0x5c, + 0xda, 0xa2, 0x2b, 0x02, 0x35, 0xdb, 0x31, 0x25, 0xa5, 0x28, 0xb5, 0xb8, 0x38, 0xb8, 0xa4, 0x28, + 0x33, 0x2f, 0x3d, 0x08, 0xa1, 0x54, 0xc8, 0x8a, 0x8b, 0x0d, 0x62, 0xb6, 0x04, 0x93, 0x02, 0xa3, + 0x06, 0xb7, 0x91, 0x8c, 0x1e, 0x36, 0xff, 0xe9, 0x41, 0x6c, 0x71, 0x62, 0x39, 0x71, 0x4f, 0x9e, + 0x21, 0x08, 0xaa, 0xc3, 0x8a, 0xaf, 0xe9, 0xf9, 0x06, 0x2d, 0x84, 0x59, 0x4a, 0x92, 0x5c, 0xe2, + 0x68, 0xce, 0x0a, 0x4a, 0x2d, 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, 0x75, 0xb2, 0x39, 0xf1, 0x48, 0x8e, + 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, + 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xa5, 0x82, 0xec, 0x74, 0xbd, 0xc4, 0xec, 0x12, 0xbd, + 0x94, 0xd4, 0x32, 0xfd, 0xf4, 0x7c, 0xfd, 0xbc, 0xfc, 0x94, 0x54, 0x34, 0xdf, 0x27, 0xb1, 0x81, + 0xfd, 0x6d, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x94, 0x7e, 0x82, 0xa2, 0x01, 0x00, 0x00, +} + +func (m *MsgUpdateParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParamsmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintParamsmsg(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintParamsmsg(dAtA []byte, offset int, v uint64) int { + offset -= sovParamsmsg(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgUpdateParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovParamsmsg(uint64(l)) + } + l = m.Params.Size() + n += 1 + l + sovParamsmsg(uint64(l)) + return n +} + +func (m *MsgUpdateParamsResponse) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovParamsmsg(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParamsmsg(x uint64) (n int) { + return sovParamsmsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgUpdateParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParamsmsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParamsmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParamsmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParamsmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParamsmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParamsmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipParamsmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthParamsmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParamsmsg(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParamsmsg + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParamsmsg + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParamsmsg + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParamsmsg = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParamsmsg = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParamsmsg = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1beta5/path.go b/go/node/market/v1beta5/path.go new file mode 100644 index 00000000..139e8e09 --- /dev/null +++ b/go/node/market/v1beta5/path.go @@ -0,0 +1,116 @@ +package v1beta5 + +import ( + "errors" + "fmt" + "strconv" + + sdk "github.com/cosmos/cosmos-sdk/types" + + dpath "pkg.akt.dev/go/node/deployment/v1beta4" + v1 "pkg.akt.dev/go/node/market/v1" +) + +const ( + ordersPath = "orders" + orderPath = "order" + bidsPath = "bids" + bidPath = "bid" + leasesPath = "leases" + leasePath = "lease" +) + +var ( + ErrInvalidPath = errors.New("query: invalid path") + ErrOwnerValue = errors.New("query: invalid owner value") + ErrStateValue = errors.New("query: invalid state value") +) + +// getOrdersPath returns orders path for queries +// nolint: unused +func getOrdersPath(ofilters OrderFilters) string { + return fmt.Sprintf("%s/%s/%v", ordersPath, ofilters.Owner, ofilters.State) +} + +// OrderPath return order path of given order id for queries +func OrderPath(id v1.OrderID) string { + return fmt.Sprintf("%s/%s", orderPath, orderParts(id)) +} + +// getBidsPath returns bids path for queries +// nolint: unused +func getBidsPath(bfilters BidFilters) string { + return fmt.Sprintf("%s/%s/%v", bidsPath, bfilters.Owner, bfilters.State) +} + +// getBidPath return bid path of given bid id for queries +// nolint: unused +func getBidPath(id v1.BidID) string { + return fmt.Sprintf("%s/%s/%s", bidPath, orderParts(id.OrderID()), id.Provider) +} + +// getLeasesPath returns leases path for queries +// nolint: unused +func getLeasesPath(lfilters v1.LeaseFilters) string { + return fmt.Sprintf("%s/%s/%v", leasesPath, lfilters.Owner, lfilters.State) +} + +// LeasePath return lease path of given lease id for 
queries +func LeasePath(id v1.LeaseID) string { + return fmt.Sprintf("%s/%s/%s", leasePath, orderParts(id.OrderID()), id.Provider) +} + +func orderParts(id v1.OrderID) string { + return fmt.Sprintf("%s/%v/%v/%v", id.Owner, id.DSeq, id.GSeq, id.OSeq) +} + +// parseOrderPath returns orderID details with provided queries, and return +// error if occurred due to wrong query +func parseOrderPath(parts []string) (v1.OrderID, error) { + if len(parts) < 4 { + return v1.OrderID{}, ErrInvalidPath + } + + did, err := dpath.ParseGroupPath(parts[0:3]) + if err != nil { + return v1.OrderID{}, err + } + + oseq, err := strconv.ParseUint(parts[3], 10, 32) + if err != nil { + return v1.OrderID{}, err + } + + return v1.MakeOrderID(did, uint32(oseq)), nil +} + +// parseBidPath returns bidID details with provided queries, and return +// error if occurred due to wrong query +func parseBidPath(parts []string) (v1.BidID, error) { + if len(parts) < 5 { + return v1.BidID{}, ErrInvalidPath + } + + oid, err := parseOrderPath(parts[0:4]) + if err != nil { + return v1.BidID{}, err + } + + provider, err := sdk.AccAddressFromBech32(parts[4]) + if err != nil { + return v1.BidID{}, err + } + + return v1.MakeBidID(oid, provider), nil +} + +// ParseLeasePath returns leaseID details with provided queries, and return +// error if occurred due to wrong query +func ParseLeasePath(parts []string) (v1.LeaseID, error) { + bid, err := parseBidPath(parts) + if err != nil { + return v1.LeaseID{}, err + } + + return v1.MakeLeaseID(bid), nil +} diff --git a/go/node/market/v1beta5/query.pb.go b/go/node/market/v1beta5/query.pb.go new file mode 100644 index 00000000..ba1fd095 --- /dev/null +++ b/go/node/market/v1beta5/query.pb.go @@ -0,0 +1,3376 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1beta5/query.proto + +package v1beta5 + +import ( + context "context" + fmt "fmt" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" + v11 "pkg.akt.dev/go/node/escrow/v1" + v1 "pkg.akt.dev/go/node/market/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryOrdersRequest is request type for the Query/Orders RPC method +type QueryOrdersRequest struct { + Filters OrderFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryOrdersRequest) Reset() { *m = QueryOrdersRequest{} } +func (m *QueryOrdersRequest) String() string { return proto.CompactTextString(m) } +func (*QueryOrdersRequest) ProtoMessage() {} +func (*QueryOrdersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{0} +} +func (m *QueryOrdersRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryOrdersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryOrdersRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryOrdersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryOrdersRequest.Merge(m, src) +} +func (m *QueryOrdersRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryOrdersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryOrdersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryOrdersRequest proto.InternalMessageInfo + +func (m *QueryOrdersRequest) GetFilters() OrderFilters { + if m != nil { + return m.Filters + } + return OrderFilters{} +} + +func (m *QueryOrdersRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryOrdersResponse is response type for the Query/Orders RPC method +type QueryOrdersResponse struct { + Orders Orders `protobuf:"bytes,1,rep,name=orders,proto3,castrepeated=Orders" json:"orders"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryOrdersResponse) Reset() { *m = QueryOrdersResponse{} } +func (m *QueryOrdersResponse) String() string { return proto.CompactTextString(m) } +func (*QueryOrdersResponse) ProtoMessage() {} +func (*QueryOrdersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{1} +} +func (m *QueryOrdersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryOrdersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryOrdersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryOrdersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryOrdersResponse.Merge(m, src) +} +func (m *QueryOrdersResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryOrdersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryOrdersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryOrdersResponse proto.InternalMessageInfo + +func (m *QueryOrdersResponse) GetOrders() Orders { + if m != nil { + return m.Orders + } + return nil +} + +func (m *QueryOrdersResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryOrderRequest is request type for the Query/Order RPC method +type QueryOrderRequest struct { + ID v1.OrderID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` +} + +func (m 
*QueryOrderRequest) Reset() { *m = QueryOrderRequest{} } +func (m *QueryOrderRequest) String() string { return proto.CompactTextString(m) } +func (*QueryOrderRequest) ProtoMessage() {} +func (*QueryOrderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{2} +} +func (m *QueryOrderRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryOrderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryOrderRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryOrderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryOrderRequest.Merge(m, src) +} +func (m *QueryOrderRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryOrderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryOrderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryOrderRequest proto.InternalMessageInfo + +func (m *QueryOrderRequest) GetID() v1.OrderID { + if m != nil { + return m.ID + } + return v1.OrderID{} +} + +// QueryOrderResponse is response type for the Query/Order RPC method +type QueryOrderResponse struct { + Order Order `protobuf:"bytes,1,opt,name=order,proto3" json:"order"` +} + +func (m *QueryOrderResponse) Reset() { *m = QueryOrderResponse{} } +func (m *QueryOrderResponse) String() string { return proto.CompactTextString(m) } +func (*QueryOrderResponse) ProtoMessage() {} +func (*QueryOrderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{3} +} +func (m *QueryOrderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryOrderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryOrderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryOrderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryOrderResponse.Merge(m, src) +} +func (m *QueryOrderResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryOrderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryOrderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryOrderResponse proto.InternalMessageInfo + +func (m *QueryOrderResponse) GetOrder() Order { + if m != nil { + return m.Order + } + return Order{} +} + +// QueryBidsRequest is request type for the Query/Bids RPC method +type QueryBidsRequest struct { + Filters BidFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryBidsRequest) Reset() { *m = QueryBidsRequest{} } +func (m *QueryBidsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryBidsRequest) ProtoMessage() {} +func (*QueryBidsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{4} +} +func (m *QueryBidsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBidsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m 
*QueryBidsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBidsRequest.Merge(m, src) +} +func (m *QueryBidsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryBidsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBidsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBidsRequest proto.InternalMessageInfo + +func (m *QueryBidsRequest) GetFilters() BidFilters { + if m != nil { + return m.Filters + } + return BidFilters{} +} + +func (m *QueryBidsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryBidsResponse is response type for the Query/Bids RPC method +type QueryBidsResponse struct { + Bids []QueryBidResponse `protobuf:"bytes,1,rep,name=bids,proto3" json:"bids"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryBidsResponse) Reset() { *m = QueryBidsResponse{} } +func (m *QueryBidsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryBidsResponse) ProtoMessage() {} +func (*QueryBidsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{5} +} +func (m *QueryBidsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBidsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBidsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBidsResponse.Merge(m, src) +} +func (m *QueryBidsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryBidsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBidsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBidsResponse proto.InternalMessageInfo + +func (m *QueryBidsResponse) GetBids() []QueryBidResponse { + if m != nil { + return m.Bids + } + return nil +} + +func (m *QueryBidsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryBidRequest is request type for the Query/Bid RPC method +type QueryBidRequest struct { + ID v1.BidID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` +} + +func (m *QueryBidRequest) Reset() { *m = QueryBidRequest{} } +func (m *QueryBidRequest) String() string { return proto.CompactTextString(m) } +func (*QueryBidRequest) ProtoMessage() {} +func (*QueryBidRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{6} +} +func (m *QueryBidRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBidRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBidRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBidRequest.Merge(m, src) +} +func (m *QueryBidRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryBidRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBidRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBidRequest proto.InternalMessageInfo + +func (m *QueryBidRequest) GetID() v1.BidID { + if m != nil { + return m.ID + } + return v1.BidID{} +} + +// QueryBidResponse is response type for the 
Query/Bid RPC method +type QueryBidResponse struct { + Bid Bid `protobuf:"bytes,1,opt,name=bid,proto3" json:"bid"` + EscrowAccount v11.Account `protobuf:"bytes,2,opt,name=escrow_account,json=escrowAccount,proto3" json:"escrow_account"` +} + +func (m *QueryBidResponse) Reset() { *m = QueryBidResponse{} } +func (m *QueryBidResponse) String() string { return proto.CompactTextString(m) } +func (*QueryBidResponse) ProtoMessage() {} +func (*QueryBidResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{7} +} +func (m *QueryBidResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBidResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBidResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBidResponse.Merge(m, src) +} +func (m *QueryBidResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryBidResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBidResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBidResponse proto.InternalMessageInfo + +func (m *QueryBidResponse) GetBid() Bid { + if m != nil { + return m.Bid + } + return Bid{} +} + +func (m *QueryBidResponse) GetEscrowAccount() v11.Account { + if m != nil { + return m.EscrowAccount + } + return v11.Account{} +} + +// QueryLeasesRequest is request type for the Query/Leases RPC method +type QueryLeasesRequest struct { + Filters v1.LeaseFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryLeasesRequest) Reset() { *m = QueryLeasesRequest{} } +func (m *QueryLeasesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryLeasesRequest) ProtoMessage() {} +func (*QueryLeasesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{8} +} +func (m *QueryLeasesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryLeasesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryLeasesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryLeasesRequest.Merge(m, src) +} +func (m *QueryLeasesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryLeasesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryLeasesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryLeasesRequest proto.InternalMessageInfo + +func (m *QueryLeasesRequest) GetFilters() v1.LeaseFilters { + if m != nil { + return m.Filters + } + return v1.LeaseFilters{} +} + +func (m *QueryLeasesRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryLeasesResponse is response type for the Query/Leases RPC method +type QueryLeasesResponse struct { + Leases []QueryLeaseResponse `protobuf:"bytes,1,rep,name=leases,proto3" json:"leases"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryLeasesResponse) Reset() { *m = QueryLeasesResponse{} } 
+func (m *QueryLeasesResponse) String() string { return proto.CompactTextString(m) } +func (*QueryLeasesResponse) ProtoMessage() {} +func (*QueryLeasesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{9} +} +func (m *QueryLeasesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryLeasesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryLeasesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryLeasesResponse.Merge(m, src) +} +func (m *QueryLeasesResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryLeasesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryLeasesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryLeasesResponse proto.InternalMessageInfo + +func (m *QueryLeasesResponse) GetLeases() []QueryLeaseResponse { + if m != nil { + return m.Leases + } + return nil +} + +func (m *QueryLeasesResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryLeaseRequest is request type for the Query/Lease RPC method +type QueryLeaseRequest struct { + ID v1.LeaseID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` +} + +func (m *QueryLeaseRequest) Reset() { *m = QueryLeaseRequest{} } +func (m *QueryLeaseRequest) String() string { return proto.CompactTextString(m) } +func (*QueryLeaseRequest) ProtoMessage() {} +func (*QueryLeaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{10} +} +func (m *QueryLeaseRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryLeaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryLeaseRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryLeaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryLeaseRequest.Merge(m, src) +} +func (m *QueryLeaseRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryLeaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryLeaseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryLeaseRequest proto.InternalMessageInfo + +func (m *QueryLeaseRequest) GetID() v1.LeaseID { + if m != nil { + return m.ID + } + return v1.LeaseID{} +} + +// QueryLeaseResponse is response type for the Query/Lease RPC method +type QueryLeaseResponse struct { + Lease v1.Lease `protobuf:"bytes,1,opt,name=lease,proto3" json:"lease"` + EscrowPayment v11.FractionalPayment `protobuf:"bytes,2,opt,name=escrow_payment,json=escrowPayment,proto3" json:"escrow_payment"` +} + +func (m *QueryLeaseResponse) Reset() { *m = QueryLeaseResponse{} } +func (m *QueryLeaseResponse) String() string { return proto.CompactTextString(m) } +func (*QueryLeaseResponse) ProtoMessage() {} +func (*QueryLeaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{11} +} +func (m *QueryLeaseResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryLeaseResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] 
+ n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryLeaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryLeaseResponse.Merge(m, src) +} +func (m *QueryLeaseResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryLeaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryLeaseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryLeaseResponse proto.InternalMessageInfo + +func (m *QueryLeaseResponse) GetLease() v1.Lease { + if m != nil { + return m.Lease + } + return v1.Lease{} +} + +func (m *QueryLeaseResponse) GetEscrowPayment() v11.FractionalPayment { + if m != nil { + return m.EscrowPayment + } + return v11.FractionalPayment{} +} + +// QueryParamsRequest is the request type for the Query/Params RPC method. +type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{12} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +// QueryParamsResponse is the response type for the Query/Params RPC method. +type QueryParamsResponse struct { + // params defines the parameters of the module. 
+ Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{13} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +func (m *QueryParamsResponse) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*QueryOrdersRequest)(nil), "akash.market.v1beta5.QueryOrdersRequest") + proto.RegisterType((*QueryOrdersResponse)(nil), "akash.market.v1beta5.QueryOrdersResponse") + proto.RegisterType((*QueryOrderRequest)(nil), "akash.market.v1beta5.QueryOrderRequest") + proto.RegisterType((*QueryOrderResponse)(nil), "akash.market.v1beta5.QueryOrderResponse") + proto.RegisterType((*QueryBidsRequest)(nil), "akash.market.v1beta5.QueryBidsRequest") + proto.RegisterType((*QueryBidsResponse)(nil), "akash.market.v1beta5.QueryBidsResponse") + proto.RegisterType((*QueryBidRequest)(nil), "akash.market.v1beta5.QueryBidRequest") + proto.RegisterType((*QueryBidResponse)(nil), "akash.market.v1beta5.QueryBidResponse") + proto.RegisterType((*QueryLeasesRequest)(nil), "akash.market.v1beta5.QueryLeasesRequest") + proto.RegisterType((*QueryLeasesResponse)(nil), "akash.market.v1beta5.QueryLeasesResponse") + proto.RegisterType((*QueryLeaseRequest)(nil), "akash.market.v1beta5.QueryLeaseRequest") + proto.RegisterType((*QueryLeaseResponse)(nil), "akash.market.v1beta5.QueryLeaseResponse") + proto.RegisterType((*QueryParamsRequest)(nil), "akash.market.v1beta5.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "akash.market.v1beta5.QueryParamsResponse") +} + +func init() { proto.RegisterFile("akash/market/v1beta5/query.proto", fileDescriptor_4fc8c96bdc37dc38) } + +var fileDescriptor_4fc8c96bdc37dc38 = []byte{ + // 923 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xc1, 0x6b, 0x3b, 0x55, + 0x10, 0xce, 0xa6, 0x4d, 0x8a, 0xaf, 0x58, 0xed, 0xb3, 0x48, 0x9b, 0xb6, 0x9b, 0x76, 0xd5, 0x26, + 0xed, 0x61, 0x1f, 0x89, 0x8a, 0x17, 0xc5, 0xba, 0xb6, 0x91, 0x82, 0xd2, 0x9a, 0x83, 0x07, 0x2f, + 0xe5, 0x25, 0xfb, 0xba, 0x2e, 0x49, 0xf6, 0xa5, 0xbb, 0xdb, 0x4a, 0x0f, 0x82, 0x28, 0x82, 0x07, + 0x0f, 0x82, 0x27, 0x51, 0x44, 0x10, 0x44, 0x3c, 0x79, 0xf1, 0x7f, 0xe8, 0xb1, 0xe0, 0xc5, 0x53, + 0x95, 0x54, 0xf0, 0xdf, 0xf8, 0xb1, 0xef, 0xcd, 0x6e, 0xb2, 0xdb, 0xcd, 0x6e, 0x7e, 0xd0, 0x5e, + 0x4a, 0xd9, 0xf9, 0x66, 0xe6, 0x9b, 0xf9, 0x66, 0xe6, 0x05, 0x6d, 0xd1, 0x1e, 0xf5, 0x3e, 0x21, + 0x03, 0xea, 0xf6, 0x98, 0x4f, 0x2e, 0x1b, 0x1d, 0xe6, 0xd3, 0xd7, 0xc9, 0xf9, 
0x05, 0x73, 0xaf, + 0xf4, 0xa1, 0xcb, 0x7d, 0x8e, 0x57, 0x04, 0x42, 0x97, 0x08, 0x1d, 0x10, 0x95, 0x15, 0x8b, 0x5b, + 0x5c, 0x00, 0x48, 0xf0, 0x9f, 0xc4, 0x56, 0x96, 0xe9, 0xc0, 0x76, 0x38, 0x11, 0x7f, 0xe1, 0xd3, + 0x86, 0xc5, 0xb9, 0xd5, 0x67, 0x84, 0x0e, 0x6d, 0x42, 0x1d, 0x87, 0xfb, 0xd4, 0xb7, 0xb9, 0xe3, + 0x81, 0x75, 0xaf, 0xcb, 0xbd, 0x01, 0xf7, 0x48, 0x87, 0x7a, 0x4c, 0x66, 0x05, 0x0e, 0x0d, 0x32, + 0xa4, 0x96, 0xed, 0x08, 0x30, 0x60, 0x37, 0x25, 0x55, 0xe6, 0x75, 0x5d, 0xfe, 0x29, 0xb9, 0x6c, + 0x10, 0xda, 0xed, 0xf2, 0x0b, 0xc7, 0x07, 0x73, 0x3d, 0x69, 0x3e, 0x73, 0x69, 0x37, 0x70, 0xa7, + 0xfd, 0xd3, 0x21, 0xbd, 0x1a, 0xb0, 0x08, 0xb9, 0x9e, 0xa8, 0x99, 0x70, 0xd7, 0x64, 0x2e, 0x18, + 0xd7, 0x92, 0xc6, 0x8e, 0x6d, 0x4e, 0xf3, 0xeb, 0x33, 0xea, 0xb1, 0x38, 0xbb, 0xb1, 0xf1, 0xcc, + 0xee, 0xfb, 0xcc, 0x0d, 0x0b, 0x4d, 0xef, 0xf3, 0x64, 0x62, 0x35, 0x15, 0x31, 0xce, 0xae, 0xa5, + 0xda, 0xe3, 0x59, 0xb6, 0x53, 0x31, 0x43, 0xea, 0xd2, 0x01, 0x40, 0xb4, 0x9f, 0x15, 0x84, 0x3f, + 0x0c, 0x1a, 0x7d, 0x1c, 0xe4, 0xf6, 0xda, 0xec, 0xfc, 0x82, 0x79, 0x3e, 0x36, 0xd0, 0x02, 0x84, + 0x5a, 0x55, 0xb6, 0x94, 0xfa, 0x62, 0x53, 0xd3, 0xd3, 0x74, 0xd7, 0x85, 0x57, 0x4b, 0x22, 0x8d, + 0xf9, 0xeb, 0xdb, 0x6a, 0xa1, 0x1d, 0x3a, 0xe2, 0x16, 0x42, 0x63, 0xd1, 0x56, 0x8b, 0x22, 0xcc, + 0x8e, 0x2e, 0x15, 0xd6, 0x03, 0x85, 0x75, 0x39, 0x57, 0xa0, 0xb0, 0x7e, 0x42, 0x2d, 0x06, 0xf9, + 0xdb, 0x13, 0x9e, 0xda, 0x2f, 0x0a, 0x7a, 0x21, 0x46, 0xd1, 0x1b, 0x72, 0xc7, 0x63, 0xf8, 0x5d, + 0x54, 0x16, 0x0d, 0x0b, 0x28, 0xce, 0xd5, 0x17, 0x9b, 0xeb, 0x19, 0x14, 0x8d, 0xa5, 0x80, 0xdb, + 0xef, 0xff, 0x54, 0xcb, 0x10, 0x04, 0x5c, 0xf1, 0x7b, 0x29, 0x24, 0x6b, 0xb9, 0x24, 0x25, 0x83, + 0x18, 0xcb, 0x23, 0xb4, 0x3c, 0x26, 0x19, 0xb6, 0xf1, 0x35, 0x54, 0xb4, 0x4d, 0xe8, 0xe0, 0x6a, + 0x92, 0x9e, 0x64, 0x76, 0x74, 0x60, 0xa0, 0x80, 0xdb, 0xe8, 0xb6, 0x5a, 0x3c, 0x3a, 0x68, 0x17, + 0x6d, 0x53, 0xfb, 0x60, 0x52, 0x92, 0xa8, 0xdc, 0x37, 0x50, 0x49, 0x70, 0x86, 0x70, 0x99, 0xd5, + 0x4a, 0x25, 0x24, 0x5e, 0xfb, 0x51, 0x41, 0xcf, 0x8b, 0x78, 0x86, 0x6d, 0x46, 0x02, 0xef, 0x27, + 0x05, 0xde, 0x4a, 0x8f, 0x67, 0xd8, 0xe6, 0x23, 0xcb, 0xfb, 0x93, 0x02, 0x9d, 0x93, 0xf4, 0xa0, + 0xda, 0x7d, 0x34, 0xdf, 0xb1, 0xcd, 0x50, 0xda, 0x9d, 0x74, 0x72, 0xa1, 0x5b, 0xe8, 0x05, 0x14, + 0x85, 0xe7, 0xc3, 0x29, 0x7b, 0x88, 0x9e, 0x1b, 0x27, 0x92, 0xdd, 0x6b, 0x4e, 0xe8, 0xfa, 0xe2, + 0x3d, 0x5d, 0x0d, 0xdb, 0x4c, 0x51, 0xf5, 0x9b, 0x09, 0x19, 0xa2, 0x32, 0x1b, 0x68, 0xae, 0x13, + 0x45, 0x5a, 0x9b, 0x2a, 0x01, 0x14, 0x16, 0x60, 0xf1, 0x21, 0x5a, 0x92, 0x47, 0xed, 0x14, 0x0e, + 0x1e, 0xd4, 0x16, 0xce, 0x97, 0x34, 0x06, 0x3c, 0xde, 0x91, 0x76, 0x70, 0x7e, 0x56, 0x1a, 0xe0, + 0xa3, 0xf6, 0x43, 0xb8, 0xf8, 0xef, 0x07, 0x57, 0x2b, 0x9a, 0x8b, 0xb7, 0x92, 0x73, 0xb1, 0x79, + 0xaf, 0x3c, 0xe1, 0xf0, 0xc8, 0x43, 0xf1, 0x6b, 0xb8, 0xf3, 0x21, 0x3b, 0xe8, 0x57, 0x0b, 0x95, + 0xc5, 0x95, 0x0d, 0x07, 0xa3, 0x9e, 0x31, 0x18, 0xc2, 0x35, 0x31, 0x1a, 0xe0, 0xfd, 0xf0, 0x6b, + 0x0f, 0xc9, 0x66, 0x59, 0x7b, 0x01, 0x4d, 0x19, 0x90, 0xef, 0x63, 0x8a, 0x44, 0x25, 0x37, 0x51, + 0x49, 0x90, 0x9e, 0x3a, 0x6e, 0x02, 0x1e, 0xae, 0xbc, 0x80, 0xe2, 0xe3, 0x68, 0x46, 0xe0, 0xa9, + 0x83, 0x12, 0xb5, 0x7b, 0x33, 0xd2, 0x8a, 0x5e, 0xc5, 0x13, 0x89, 0x8c, 0x4f, 0x0b, 0x7c, 0xd4, + 0x56, 0x80, 0xda, 0x89, 0x78, 0x3b, 0xa0, 0x4e, 0xed, 0x23, 0x10, 0x29, 0xfc, 0x0a, 0x8c, 0xdf, + 0x46, 0x65, 0xf9, 0xc6, 0x00, 0xe5, 0x8d, 0x74, 0x91, 0xa4, 0x97, 0xf1, 0x4c, 0x90, 0xef, 0xb7, + 0xff, 0xff, 0xd8, 0x53, 0xda, 0xe0, 0xd6, 0xfc, 0x73, 0x01, 0x95, 0x44, 0x60, 0xfc, 0xb5, 0x82, + 0xe0, 
0x62, 0xe3, 0x2c, 0xa9, 0x63, 0x8f, 0x57, 0x65, 0x77, 0x06, 0xa4, 0xa4, 0xaa, 0xed, 0x7e, + 0xf1, 0xd7, 0x7f, 0xdf, 0x15, 0x5f, 0xc2, 0xdb, 0x64, 0xfa, 0x83, 0xec, 0x91, 0xbe, 0xed, 0xf9, + 0xf8, 0x2b, 0x05, 0x95, 0x84, 0x37, 0xae, 0xe5, 0xc5, 0x0f, 0x89, 0xd4, 0xf3, 0x81, 0x4f, 0xc5, + 0xc3, 0x76, 0xce, 0x38, 0xfe, 0x5c, 0x41, 0xf3, 0xc1, 0xa9, 0xc4, 0x39, 0x47, 0x31, 0x6a, 0x47, + 0x2d, 0x17, 0x07, 0x24, 0x6a, 0x82, 0xc4, 0x36, 0xae, 0x92, 0x69, 0xbf, 0x3d, 0xa0, 0x15, 0x9f, + 0xa1, 0x39, 0xc3, 0x36, 0xf1, 0x2b, 0x79, 0x57, 0x59, 0xe6, 0x9f, 0xf1, 0x78, 0xcf, 0x94, 0x5e, + 0x74, 0x20, 0x18, 0x0a, 0x79, 0x17, 0x70, 0xee, 0xfe, 0xcf, 0x34, 0x14, 0xf1, 0x23, 0x93, 0x27, + 0x86, 0x3c, 0x21, 0xe3, 0xa1, 0x10, 0xde, 0x99, 0x43, 0x31, 0x79, 0x1c, 0x2a, 0x33, 0x9f, 0xac, + 0x19, 0x79, 0x88, 0x96, 0x7c, 0xa9, 0xa0, 0xb2, 0xdc, 0xa7, 0xcc, 0x96, 0xc4, 0xd6, 0x37, 0xb3, + 0x25, 0xf1, 0x95, 0xd6, 0x5e, 0x16, 0x54, 0x54, 0xbc, 0x41, 0x32, 0x7e, 0x52, 0x1a, 0x6f, 0x5e, + 0x8f, 0x54, 0xe5, 0x66, 0xa4, 0x2a, 0xff, 0x8e, 0x54, 0xe5, 0xdb, 0x3b, 0xb5, 0x70, 0x73, 0xa7, + 0x16, 0xfe, 0xbe, 0x53, 0x0b, 0x1f, 0x6b, 0xc3, 0x9e, 0xa5, 0xd3, 0x9e, 0xaf, 0x9b, 0xec, 0x92, + 0x58, 0x9c, 0x38, 0xdc, 0x64, 0x89, 0x20, 0x9d, 0xb2, 0xf8, 0x45, 0xfa, 0xea, 0x93, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xba, 0x96, 0x9c, 0x22, 0x84, 0x0c, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Orders queries orders with filters + Orders(ctx context.Context, in *QueryOrdersRequest, opts ...grpc.CallOption) (*QueryOrdersResponse, error) + // Order queries order details + Order(ctx context.Context, in *QueryOrderRequest, opts ...grpc.CallOption) (*QueryOrderResponse, error) + // Bids queries bids with filters + Bids(ctx context.Context, in *QueryBidsRequest, opts ...grpc.CallOption) (*QueryBidsResponse, error) + // Bid queries bid details + Bid(ctx context.Context, in *QueryBidRequest, opts ...grpc.CallOption) (*QueryBidResponse, error) + // Leases queries leases with filters + Leases(ctx context.Context, in *QueryLeasesRequest, opts ...grpc.CallOption) (*QueryLeasesResponse, error) + // Lease queries lease details + Lease(ctx context.Context, in *QueryLeaseRequest, opts ...grpc.CallOption) (*QueryLeaseResponse, error) + // Params returns the total set of minting parameters. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Orders(ctx context.Context, in *QueryOrdersRequest, opts ...grpc.CallOption) (*QueryOrdersResponse, error) { + out := new(QueryOrdersResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Query/Orders", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Order(ctx context.Context, in *QueryOrderRequest, opts ...grpc.CallOption) (*QueryOrderResponse, error) { + out := new(QueryOrderResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Query/Order", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Bids(ctx context.Context, in *QueryBidsRequest, opts ...grpc.CallOption) (*QueryBidsResponse, error) { + out := new(QueryBidsResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Query/Bids", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Bid(ctx context.Context, in *QueryBidRequest, opts ...grpc.CallOption) (*QueryBidResponse, error) { + out := new(QueryBidResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Query/Bid", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Leases(ctx context.Context, in *QueryLeasesRequest, opts ...grpc.CallOption) (*QueryLeasesResponse, error) { + out := new(QueryLeasesResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Query/Leases", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Lease(ctx context.Context, in *QueryLeaseRequest, opts ...grpc.CallOption) (*QueryLeaseResponse, error) { + out := new(QueryLeaseResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Query/Lease", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Query/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Orders queries orders with filters + Orders(context.Context, *QueryOrdersRequest) (*QueryOrdersResponse, error) + // Order queries order details + Order(context.Context, *QueryOrderRequest) (*QueryOrderResponse, error) + // Bids queries bids with filters + Bids(context.Context, *QueryBidsRequest) (*QueryBidsResponse, error) + // Bid queries bid details + Bid(context.Context, *QueryBidRequest) (*QueryBidResponse, error) + // Leases queries leases with filters + Leases(context.Context, *QueryLeasesRequest) (*QueryLeasesResponse, error) + // Lease queries lease details + Lease(context.Context, *QueryLeaseRequest) (*QueryLeaseResponse, error) + // Params returns the total set of minting parameters. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Orders(ctx context.Context, req *QueryOrdersRequest) (*QueryOrdersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Orders not implemented") +} +func (*UnimplementedQueryServer) Order(ctx context.Context, req *QueryOrderRequest) (*QueryOrderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Order not implemented") +} +func (*UnimplementedQueryServer) Bids(ctx context.Context, req *QueryBidsRequest) (*QueryBidsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Bids not implemented") +} +func (*UnimplementedQueryServer) Bid(ctx context.Context, req *QueryBidRequest) (*QueryBidResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Bid not implemented") +} +func (*UnimplementedQueryServer) Leases(ctx context.Context, req *QueryLeasesRequest) (*QueryLeasesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Leases not implemented") +} +func (*UnimplementedQueryServer) Lease(ctx context.Context, req *QueryLeaseRequest) (*QueryLeaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Lease not implemented") +} +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Orders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryOrdersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Orders(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Query/Orders", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Orders(ctx, req.(*QueryOrdersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Order_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryOrderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Order(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Query/Order", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Order(ctx, req.(*QueryOrderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Bids_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryBidsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Bids(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Query/Bids", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Bids(ctx, req.(*QueryBidsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Bid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(QueryBidRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Bid(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Query/Bid", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Bid(ctx, req.(*QueryBidRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Leases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryLeasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Leases(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Query/Leases", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Leases(ctx, req.(*QueryLeasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Lease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryLeaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Lease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Query/Lease", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Lease(ctx, req.(*QueryLeaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.market.v1beta5.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Orders", + Handler: _Query_Orders_Handler, + }, + { + MethodName: "Order", + Handler: _Query_Order_Handler, + }, + { + MethodName: "Bids", + Handler: _Query_Bids_Handler, + }, + { + MethodName: "Bid", + Handler: _Query_Bid_Handler, + }, + { + MethodName: "Leases", + Handler: _Query_Leases_Handler, + }, + { + MethodName: "Lease", + Handler: _Query_Lease_Handler, + }, + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/market/v1beta5/query.proto", +} + +func (m *QueryOrdersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryOrdersRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryOrdersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + 
{ + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryOrdersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryOrdersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryOrdersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Orders) > 0 { + for iNdEx := len(m.Orders) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Orders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryOrderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryOrderRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryOrderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryOrderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryOrderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryOrderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Order.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryBidsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBidsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBidsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := 
m.Filters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryBidsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBidsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBidsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Bids) > 0 { + for iNdEx := len(m.Bids) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Bids[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryBidRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBidRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryBidResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBidResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.EscrowAccount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Bid.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryLeasesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryLeasesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := 
m.Filters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryLeasesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryLeasesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Leases) > 0 { + for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryLeaseRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryLeaseRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryLeaseRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryLeaseResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryLeaseResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.EscrowPayment.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Lease.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryOrdersRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Filters.Size() + n += 1 + l + sovQuery(uint64(l)) + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryOrdersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Orders) > 0 { + for _, e := range m.Orders { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryOrderRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryOrderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Order.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryBidsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Filters.Size() + n += 1 + l + sovQuery(uint64(l)) + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryBidsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Bids) > 0 { + for _, e := range m.Bids { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryBidRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryBidResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Bid.Size() + n += 1 + l + sovQuery(uint64(l)) + l = m.EscrowAccount.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryLeasesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Filters.Size() + n += 1 + l + sovQuery(uint64(l)) + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryLeasesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Leases) > 0 { + for _, e := range m.Leases { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryLeaseRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryLeaseResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Lease.Size() + n += 1 + l + sovQuery(uint64(l)) + l = m.EscrowPayment.Size() + 
n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryOrdersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryOrdersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryOrdersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryOrdersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryOrdersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
QueryOrdersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Orders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Orders = append(m.Orders, Order{}) + if err := m.Orders[len(m.Orders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryOrderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryOrderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryOrderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF 
+ } + return nil +} +func (m *QueryOrderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryOrderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryOrderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Order.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBidsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryBidsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBidsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBidsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryBidsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBidsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bids", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bids = append(m.Bids, QueryBidResponse{}) + if err := m.Bids[len(m.Bids)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBidRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + 
return fmt.Errorf("proto: QueryBidRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBidRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBidResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryBidResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bid", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Bid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EscrowAccount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EscrowAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + 
} + return nil +} +func (m *QueryLeasesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryLeasesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryLeasesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryLeasesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Leases = append(m.Leases, QueryLeaseResponse{}) + if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryLeaseRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryLeaseRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryLeaseRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryLeaseResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: QueryLeaseResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Lease.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EscrowPayment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EscrowPayment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1beta5/query.pb.gw.go b/go/node/market/v1beta5/query.pb.gw.go new file mode 100644 index 00000000..2b22cc8c --- /dev/null +++ b/go/node/market/v1beta5/query.pb.gw.go @@ -0,0 +1,651 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: akash/market/v1beta5/query.proto + +/* +Package v1beta5 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package v1beta5 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +var ( + filter_Query_Orders_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Orders_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryOrdersRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Orders_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Orders(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Orders_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryOrdersRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Orders_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Orders(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Order_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Order_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryOrderRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Order_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Order(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Order_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryOrderRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Order_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := 
server.Order(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Bids_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Bids_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBidsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bids_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Bids(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Bids_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBidsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bids_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Bids(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Bid_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Bid_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBidRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bid_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Bid(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Bid_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBidRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bid_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Bid(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Leases_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Leases_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryLeasesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := 
runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Leases_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Leases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Leases_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryLeasesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Leases_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Leases(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Lease_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Lease_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryLeaseRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Lease_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Lease(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Lease_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryLeaseRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Lease_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Lease(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.Params(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. 
Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Orders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Orders_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Orders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Order_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Order_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Order_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Bids_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Bids_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Bids_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Bid_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Bid_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Bid_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Leases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Leases_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Leases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Lease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Lease_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Lease_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Orders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Orders_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Orders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Order_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Order_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Order_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Bids_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Bids_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Bids_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Bid_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Bid_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Bid_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Leases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Leases_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Leases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Lease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Lease_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Lease_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Query_Orders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta5", "orders", "list"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_Order_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta5", "orders", "info"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_Bids_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta5", "bids", "list"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_Bid_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta5", "bids", "info"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_Leases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta5", "leases", "list"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_Lease_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta5", "leases", "info"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"akash", "market", "v1beta5", "params"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_Orders_0 = runtime.ForwardResponseMessage + + forward_Query_Order_0 = runtime.ForwardResponseMessage + + forward_Query_Bids_0 = runtime.ForwardResponseMessage + + forward_Query_Bid_0 = runtime.ForwardResponseMessage + + forward_Query_Leases_0 = runtime.ForwardResponseMessage + + forward_Query_Lease_0 = runtime.ForwardResponseMessage + + forward_Query_Params_0 = runtime.ForwardResponseMessage +) diff --git a/go/node/market/v1beta5/resourcesoffer.pb.go b/go/node/market/v1beta5/resourcesoffer.pb.go new file mode 100644 index 00000000..08613a81 
--- /dev/null +++ b/go/node/market/v1beta5/resourcesoffer.pb.go @@ -0,0 +1,391 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1beta5/resourcesoffer.proto + +package v1beta5 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + v1beta4 "pkg.akt.dev/go/node/types/resources/v1beta4" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ResourceOffer describes resources that provider is offering +// for deployment +type ResourceOffer struct { + Resources v1beta4.Resources `protobuf:"bytes,1,opt,name=resources,proto3" json:"resources" yaml:"resources"` + Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count" yaml:"count"` +} + +func (m *ResourceOffer) Reset() { *m = ResourceOffer{} } +func (m *ResourceOffer) String() string { return proto.CompactTextString(m) } +func (*ResourceOffer) ProtoMessage() {} +func (*ResourceOffer) Descriptor() ([]byte, []int) { + return fileDescriptor_90a800b77898768f, []int{0} +} +func (m *ResourceOffer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceOffer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceOffer.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceOffer) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceOffer.Merge(m, src) +} +func (m *ResourceOffer) XXX_Size() int { + return m.Size() +} +func (m *ResourceOffer) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceOffer.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceOffer proto.InternalMessageInfo + +func (m *ResourceOffer) GetResources() v1beta4.Resources { + if m != nil { + return m.Resources + } + return v1beta4.Resources{} +} + +func (m *ResourceOffer) GetCount() uint32 { + if m != nil { + return m.Count + } + return 0 +} + +func init() { + proto.RegisterType((*ResourceOffer)(nil), "akash.market.v1beta5.ResourceOffer") +} + +func init() { + proto.RegisterFile("akash/market/v1beta5/resourcesoffer.proto", fileDescriptor_90a800b77898768f) +} + +var fileDescriptor_90a800b77898768f = []byte{ + // 271 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0xcf, 0x4d, 0x2c, 0xca, 0x4e, 0x2d, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, + 0xd5, 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x2d, 0xce, 0x4f, 0x4b, 0x4b, 0x2d, 0xd2, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x01, 0x2b, 0xd5, 0x83, 0x28, 0xd5, 0x83, 0x2a, 0x95, + 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd0, 0x07, 0xb1, 0x20, 0x6a, 0xa5, 0x74, 0x20, 0xc6, + 0x26, 0x25, 0x16, 0xa7, 0x22, 0x0c, 0x83, 0x1a, 0x6f, 0x82, 0x10, 0x81, 0xa8, 0x56, 0xda, 0xc2, + 0xc8, 0xc5, 0x1b, 0x04, 0x15, 0xf3, 0x07, 0xd9, 0x28, 0x94, 0xc3, 0xc5, 0x09, 0x57, 0x24, 0xc1, + 0xa8, 0xc0, 0xa8, 0xc1, 0x6d, 0xa4, 0xae, 0x07, 0xb1, 0x1f, 0x64, 0xa6, 0x1e, 0xc2, 0x04, 0xa8, + 
0x99, 0x7a, 0x30, 0xfd, 0xc5, 0x4e, 0xaa, 0x27, 0xee, 0xc9, 0x33, 0xbc, 0xba, 0x27, 0x8f, 0x30, + 0xe1, 0xd3, 0x3d, 0x79, 0x81, 0xca, 0xc4, 0xdc, 0x1c, 0x2b, 0x25, 0xb8, 0x90, 0x52, 0x10, 0x42, + 0x5a, 0x48, 0x9f, 0x8b, 0x35, 0x39, 0xbf, 0x34, 0xaf, 0x44, 0x82, 0x49, 0x81, 0x51, 0x83, 0xd7, + 0x49, 0xf2, 0xd5, 0x3d, 0x79, 0x88, 0xc0, 0xa7, 0x7b, 0xf2, 0x3c, 0x10, 0x8d, 0x60, 0xae, 0x52, + 0x10, 0x44, 0xd8, 0x8a, 0xe5, 0xc5, 0x02, 0x79, 0x46, 0x27, 0x9b, 0x13, 0x8f, 0xe4, 0x18, 0x2f, + 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, + 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x52, 0x2a, 0xc8, 0x4e, 0xd7, 0x4b, 0xcc, 0x2e, 0xd1, 0x4b, 0x49, + 0x2d, 0xd3, 0x4f, 0xcf, 0xd7, 0xcf, 0xcb, 0x4f, 0x49, 0x45, 0x0b, 0xe3, 0x24, 0x36, 0xb0, 0xdf, + 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xfe, 0x52, 0xb2, 0x5f, 0x82, 0x01, 0x00, 0x00, +} + +func (this *ResourceOffer) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceOffer) + if !ok { + that2, ok := that.(ResourceOffer) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Resources.Equal(&that1.Resources) { + return false + } + if this.Count != that1.Count { + return false + } + return true +} +func (m *ResourceOffer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceOffer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceOffer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Count != 0 { + i = encodeVarintResourcesoffer(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintResourcesoffer(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintResourcesoffer(dAtA []byte, offset int, v uint64) int { + offset -= sovResourcesoffer(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ResourceOffer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Resources.Size() + n += 1 + l + sovResourcesoffer(uint64(l)) + if m.Count != 0 { + n += 1 + sovResourcesoffer(uint64(m.Count)) + } + return n +} + +func sovResourcesoffer(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozResourcesoffer(x uint64) (n int) { + return sovResourcesoffer(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResourceOffer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResourcesoffer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceOffer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceOffer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + 
case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResourcesoffer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResourcesoffer + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthResourcesoffer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResourcesoffer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipResourcesoffer(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthResourcesoffer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipResourcesoffer(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResourcesoffer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResourcesoffer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResourcesoffer + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthResourcesoffer + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupResourcesoffer + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthResourcesoffer + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthResourcesoffer = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowResourcesoffer = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupResourcesoffer = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1beta5/service.pb.go b/go/node/market/v1beta5/service.pb.go new file mode 100644 index 00000000..def4b0dc --- /dev/null +++ b/go/node/market/v1beta5/service.pb.go @@ -0,0 +1,334 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: akash/market/v1beta5/service.proto + +package v1beta5 + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("akash/market/v1beta5/service.proto", fileDescriptor_f1203af46a0757a8) +} + +var fileDescriptor_f1203af46a0757a8 = []byte{ + // 336 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0xd2, 0xcd, 0x4a, 0xc3, 0x40, + 0x10, 0x07, 0xf0, 0x06, 0x3f, 0xd0, 0x55, 0x2f, 0x41, 0x10, 0xf6, 0x10, 0xe8, 0x87, 0x82, 0x55, + 0x77, 0xa9, 0xe2, 0xcd, 0x53, 0x7b, 0xb5, 0x20, 0x82, 0x28, 0x0a, 0xc2, 0xb6, 0x3b, 0x6c, 0x43, + 0x9a, 0x6e, 0xc8, 0x2c, 0xf1, 0xea, 0x23, 0xf8, 0x28, 0x3e, 0x86, 0xc7, 0x5e, 0x04, 0x8f, 0xd2, + 0x1e, 0x7c, 0x0d, 0x31, 0x5b, 0x53, 0x95, 0xd4, 0xe4, 0x9c, 0xdf, 0x7f, 0xfe, 0x99, 0x65, 0x48, + 0x4d, 0x04, 0x02, 0x07, 0x3c, 0x14, 0x71, 0x00, 0x86, 0x27, 0xad, 0x1e, 0x18, 0x71, 0xca, 0x11, + 0xe2, 0xc4, 0xef, 0x03, 0x8b, 0x62, 0x6d, 0xb4, 0xbb, 0x9d, 0x1a, 0x66, 0x0d, 0x9b, 0x19, 0x5a, + 0xcd, 0x4d, 0xf6, 0x7c, 0x19, 0xa2, 0xb2, 0x41, 0x5a, 0xcf, 0x25, 0x43, 0x10, 0x08, 0x73, 0xd4, + 0xc8, 0x45, 0x91, 0x88, 0x45, 0x88, 0x73, 0xb5, 0xd3, 0xd7, 0x18, 0x6a, 0xe4, 0x21, 0x2a, 0x9e, + 0xb4, 0x78, 0xf6, 0xe1, 0xf8, 0x75, 0x99, 0x2c, 0x75, 0x51, 0xb9, 0x77, 0x64, 0xbd, 0x13, 0x83, + 0x30, 0xd0, 0xf6, 0xa5, 0x5b, 0x63, 0x79, 0xbf, 0xcc, 0xba, 0xa8, 0x32, 0x43, 0x9b, 0xc5, 0xe6, + 0x12, 0x30, 0xd2, 0x23, 0x04, 0xf7, 0x86, 0xac, 0x75, 0x86, 0x1a, 0xd3, 0xd9, 0xd5, 0xc5, 0xb9, + 0x19, 0xa1, 0xfb, 0x85, 0x24, 0x9b, 0xac, 0xc8, 0xd6, 0xb5, 0x6f, 0x06, 0x32, 0x16, 0x0f, 0xe7, + 0x5f, 0xef, 0xe2, 0xee, 0x2d, 0xcc, 0xfe, 0x72, 0x94, 0x95, 0x73, 0x59, 0x91, 0x20, 0x1b, 0x76, + 0x2f, 0x5b, 0xd3, 0x28, 0xd8, 0xde, 0x96, 0x1c, 0x96, 0x51, 0x59, 0xc5, 0x3d, 0x21, 0xe9, 0x7e, + 0xb6, 0xa1, 0xfe, 0xff, 0x23, 0xd8, 0x82, 0x83, 0x12, 0x28, 0x9b, 0x2f, 0xc9, 0xe6, 0x55, 0x24, + 0x85, 0x81, 0x8b, 0xf4, 0x38, 0xdc, 0xdd, 0x85, 0xe1, 0x9f, 0x8c, 0x1e, 0x95, 0x62, 0xdf, 0x2d, + 0x74, 0xe5, 0xf1, 0xe3, 0xb9, 0xe9, 0xb4, 0xcf, 0x5e, 0x26, 0x9e, 0x33, 0x9e, 0x78, 0xce, 0xfb, + 0xc4, 0x73, 0x9e, 0xa6, 0x5e, 0x65, 0x3c, 0xf5, 0x2a, 0x6f, 0x53, 0xaf, 0x72, 0x5b, 0x8b, 0x02, + 0xc5, 0x44, 0x60, 0x98, 0x84, 0x84, 0x2b, 0xcd, 0x47, 0x5a, 0xc2, 0x9f, 0xf3, 0xed, 0xad, 0xa6, + 0xc7, 0x79, 0xf2, 0x19, 0x00, 0x00, 0xff, 0xff, 0xe8, 0xd5, 0x96, 0xec, 0x5f, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. 
+// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // CreateBid defines a method to create a bid given proper inputs. + CreateBid(ctx context.Context, in *MsgCreateBid, opts ...grpc.CallOption) (*MsgCreateBidResponse, error) + // CloseBid defines a method to close a bid given proper inputs. + CloseBid(ctx context.Context, in *MsgCloseBid, opts ...grpc.CallOption) (*MsgCloseBidResponse, error) + // WithdrawLease withdraws accrued funds from the lease payment + WithdrawLease(ctx context.Context, in *MsgWithdrawLease, opts ...grpc.CallOption) (*MsgWithdrawLeaseResponse, error) + // CreateLease creates a new lease + CreateLease(ctx context.Context, in *MsgCreateLease, opts ...grpc.CallOption) (*MsgCreateLeaseResponse, error) + // CloseLease defines a method to close an order given proper inputs. + CloseLease(ctx context.Context, in *MsgCloseLease, opts ...grpc.CallOption) (*MsgCloseLeaseResponse, error) + // UpdateParams defines a governance operation for updating the x/market module + // parameters. The authority is hard-coded to the x/gov module account. + // + // Since: akash v1.0.0 + UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) CreateBid(ctx context.Context, in *MsgCreateBid, opts ...grpc.CallOption) (*MsgCreateBidResponse, error) { + out := new(MsgCreateBidResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Msg/CreateBid", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) CloseBid(ctx context.Context, in *MsgCloseBid, opts ...grpc.CallOption) (*MsgCloseBidResponse, error) { + out := new(MsgCloseBidResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Msg/CloseBid", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) WithdrawLease(ctx context.Context, in *MsgWithdrawLease, opts ...grpc.CallOption) (*MsgWithdrawLeaseResponse, error) { + out := new(MsgWithdrawLeaseResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Msg/WithdrawLease", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) CreateLease(ctx context.Context, in *MsgCreateLease, opts ...grpc.CallOption) (*MsgCreateLeaseResponse, error) { + out := new(MsgCreateLeaseResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Msg/CreateLease", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) CloseLease(ctx context.Context, in *MsgCloseLease, opts ...grpc.CallOption) (*MsgCloseLeaseResponse, error) { + out := new(MsgCloseLeaseResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Msg/CloseLease", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) { + out := new(MsgUpdateParamsResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Msg/UpdateParams", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // CreateBid defines a method to create a bid given proper inputs. 
+ CreateBid(context.Context, *MsgCreateBid) (*MsgCreateBidResponse, error) + // CloseBid defines a method to close a bid given proper inputs. + CloseBid(context.Context, *MsgCloseBid) (*MsgCloseBidResponse, error) + // WithdrawLease withdraws accrued funds from the lease payment + WithdrawLease(context.Context, *MsgWithdrawLease) (*MsgWithdrawLeaseResponse, error) + // CreateLease creates a new lease + CreateLease(context.Context, *MsgCreateLease) (*MsgCreateLeaseResponse, error) + // CloseLease defines a method to close an order given proper inputs. + CloseLease(context.Context, *MsgCloseLease) (*MsgCloseLeaseResponse, error) + // UpdateParams defines a governance operation for updating the x/market module + // parameters. The authority is hard-coded to the x/gov module account. + // + // Since: akash v1.0.0 + UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) CreateBid(ctx context.Context, req *MsgCreateBid) (*MsgCreateBidResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateBid not implemented") +} +func (*UnimplementedMsgServer) CloseBid(ctx context.Context, req *MsgCloseBid) (*MsgCloseBidResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CloseBid not implemented") +} +func (*UnimplementedMsgServer) WithdrawLease(ctx context.Context, req *MsgWithdrawLease) (*MsgWithdrawLeaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WithdrawLease not implemented") +} +func (*UnimplementedMsgServer) CreateLease(ctx context.Context, req *MsgCreateLease) (*MsgCreateLeaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateLease not implemented") +} +func (*UnimplementedMsgServer) CloseLease(ctx context.Context, req *MsgCloseLease) (*MsgCloseLeaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CloseLease not implemented") +} +func (*UnimplementedMsgServer) UpdateParams(ctx context.Context, req *MsgUpdateParams) (*MsgUpdateParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_CreateBid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCreateBid) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CreateBid(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Msg/CreateBid", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CreateBid(ctx, req.(*MsgCreateBid)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_CloseBid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCloseBid) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CloseBid(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Msg/CloseBid", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CloseBid(ctx, 
req.(*MsgCloseBid)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_WithdrawLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgWithdrawLease) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).WithdrawLease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Msg/WithdrawLease", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).WithdrawLease(ctx, req.(*MsgWithdrawLease)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_CreateLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCreateLease) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CreateLease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Msg/CreateLease", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CreateLease(ctx, req.(*MsgCreateLease)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_CloseLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCloseLease) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CloseLease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Msg/CloseLease", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CloseLease(ctx, req.(*MsgCloseLease)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateParams) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateParams(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Msg/UpdateParams", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateParams(ctx, req.(*MsgUpdateParams)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.market.v1beta5.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateBid", + Handler: _Msg_CreateBid_Handler, + }, + { + MethodName: "CloseBid", + Handler: _Msg_CloseBid_Handler, + }, + { + MethodName: "WithdrawLease", + Handler: _Msg_WithdrawLease_Handler, + }, + { + MethodName: "CreateLease", + Handler: _Msg_CreateLease_Handler, + }, + { + MethodName: "CloseLease", + Handler: _Msg_CloseLease_Handler, + }, + { + MethodName: "UpdateParams", + Handler: _Msg_UpdateParams_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/market/v1beta5/service.proto", +} diff --git a/go/node/market/v1beta5/types.go b/go/node/market/v1beta5/types.go new file mode 100644 index 00000000..fe94978c --- /dev/null +++ b/go/node/market/v1beta5/types.go @@ -0,0 +1,19 @@ +package v1beta5 + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" +) + +const 
( + GatewayVersion = "v1beta5" +) + +func (m QueryLeasesResponse) TotalPriceAmount() sdk.Dec { + total := sdk.NewDec(0) + + for _, lease := range m.Leases { + total = total.Add(lease.Lease.Price.Amount) + } + + return total +} diff --git a/go/node/migrate/audit.go b/go/node/migrate/audit.go new file mode 100644 index 00000000..90f38779 --- /dev/null +++ b/go/node/migrate/audit.go @@ -0,0 +1,25 @@ +package migrate + +import ( + "github.com/akash-network/akash-api/go/node/audit/v1beta3" + "github.com/cosmos/cosmos-sdk/codec" + + v1 "pkg.akt.dev/go/node/audit/v1" +) + +func AuditedAttributesV1beta3Prefix() []byte { + return v1beta3.PrefixProviderID() +} + +func AuditedProviderFromV1beta3(cdc codec.BinaryCodec, fromBz []byte) v1.AuditedProvider { + var from v1beta3.AuditedAttributes + cdc.MustUnmarshal(fromBz, &from) + + to := v1.AuditedProvider{ + Owner: from.Owner, + Auditor: from.Auditor, + Attributes: AttributesFromV1Beta3(from.Attributes), + } + + return to +} diff --git a/go/node/migrate/cert.go b/go/node/migrate/cert.go new file mode 100644 index 00000000..424fb295 --- /dev/null +++ b/go/node/migrate/cert.go @@ -0,0 +1,28 @@ +package migrate + +import ( + "github.com/akash-network/akash-api/go/node/cert/v1beta3" + "github.com/cosmos/cosmos-sdk/codec" + + v1 "pkg.akt.dev/go/node/cert/v1" +) + +func CertV1beta3Prefix() []byte { + return v1beta3.PrefixCertificateID() +} + +func CertFromV1beta3(cdc codec.BinaryCodec, fromBz []byte) v1.Certificate { + var from v1beta3.Certificate + cdc.MustUnmarshal(fromBz, &from) + + to := v1.Certificate{ + State: v1.State(from.State), + Cert: make([]byte, len(from.Cert)), + Pubkey: make([]byte, len(from.Pubkey)), + } + + copy(to.Cert, from.Cert) + copy(to.Pubkey, from.Pubkey) + + return to +} diff --git a/go/node/migrate/deployment.go b/go/node/migrate/deployment.go new file mode 100644 index 00000000..7c5dbc74 --- /dev/null +++ b/go/node/migrate/deployment.go @@ -0,0 +1,108 @@ +package migrate + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/authz" + "github.com/cosmos/gogoproto/proto" + + "github.com/akash-network/akash-api/go/node/deployment/v1beta3" + v1 "pkg.akt.dev/go/node/deployment/v1" + "pkg.akt.dev/go/node/deployment/v1beta4" +) + +func init() { + proto.RegisterType((*v1beta3.MsgDepositDeployment)(nil), "akash.deployment.v1beta3.MsgDepositDeployment") + proto.RegisterType((*v1beta3.DepositDeploymentAuthorization)(nil), "akash.deployment.v1beta3.DepositDeploymentAuthorization") +} + +func RegisterDeploymentInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), + &v1beta3.MsgDepositDeployment{}, + ) + + registry.RegisterImplementations( + (*authz.Authorization)(nil), + &v1beta3.DepositDeploymentAuthorization{}, + ) +} + +func DeploymentV1beta3Prefix() []byte { + return v1beta3.DeploymentPrefix() +} + +func GroupV1beta3Prefix() []byte { + return v1beta3.GroupPrefix() +} + +func DeploymentIDFromV1Beta3(from v1beta3.DeploymentID) v1.DeploymentID { + return v1.DeploymentID{ + Owner: from.Owner, + DSeq: from.DSeq, + } +} + +func GroupIDFromV1Beta3(from v1beta3.GroupID) v1.GroupID { + return v1.GroupID{ + Owner: from.Owner, + DSeq: from.DSeq, + GSeq: from.GSeq, + } +} + +func DeploymentFromV1beta3(cdc codec.BinaryCodec, fromBz []byte) v1.Deployment { + var from v1beta3.Deployment + cdc.MustUnmarshal(fromBz, &from) + + return v1.Deployment{ + ID: 
DeploymentIDFromV1Beta3(from.DeploymentID), + State: v1.Deployment_State(from.State), + Hash: from.Version, + CreatedAt: from.CreatedAt, + } +} + +func DepositAuthorizationFromV1beta3(from v1beta3.DepositDeploymentAuthorization) v1.DepositAuthorization { + return v1.DepositAuthorization{ + SpendLimit: from.SpendLimit, + } +} + +func ResourceUnitFromV1Beta3(id uint32, from v1beta3.ResourceUnit) v1beta4.ResourceUnit { + return v1beta4.ResourceUnit{ + Resources: ResourcesFromV1Beta3(id, from.Resources), + Count: from.Count, + Price: from.Price, + } +} + +func ResourcesUnitsFromV1Beta3(from []v1beta3.ResourceUnit) v1beta4.ResourceUnits { + res := make(v1beta4.ResourceUnits, 0, len(from)) + + for i, oval := range from { + res = append(res, ResourceUnitFromV1Beta3(uint32(i+1), oval)) // nolint gosec + } + + return res +} + +func GroupSpecFromV1Beta3(from v1beta3.GroupSpec) v1beta4.GroupSpec { + return v1beta4.GroupSpec{ + Name: from.Name, + Requirements: PlacementRequirementsFromV1Beta3(from.Requirements), + Resources: ResourcesUnitsFromV1Beta3(from.Resources), + } +} + +func GroupFromV1Beta3(cdc codec.BinaryCodec, fromBz []byte) v1beta4.Group { + var from v1beta3.Group + cdc.MustUnmarshal(fromBz, &from) + + return v1beta4.Group{ + ID: GroupIDFromV1Beta3(from.GroupID), + State: v1beta4.Group_State(from.State), + GroupSpec: GroupSpecFromV1Beta3(from.GroupSpec), + CreatedAt: from.CreatedAt, + } +} diff --git a/go/node/migrate/escrow.go b/go/node/migrate/escrow.go new file mode 100644 index 00000000..f9fe436b --- /dev/null +++ b/go/node/migrate/escrow.go @@ -0,0 +1,58 @@ +package migrate + +import ( + "github.com/akash-network/akash-api/go/node/escrow/v1beta3" + "github.com/cosmos/cosmos-sdk/codec" + + v1 "pkg.akt.dev/go/node/escrow/v1" +) + +func AccountV1beta3Prefix() []byte { + return v1beta3.AccountKeyPrefix() +} + +func PaymentV1beta3Prefix() []byte { + return v1beta3.PaymentKeyPrefix() +} + +func AccountIDFromV1beta3(from v1beta3.AccountID) v1.AccountID { + return v1.AccountID{ + Scope: from.Scope, + XID: from.XID, + } +} + +func AccountFromV1beta3(cdc codec.BinaryCodec, fromBz []byte) v1.Account { + var from v1beta3.Account + cdc.MustUnmarshal(fromBz, &from) + + to := v1.Account{ + ID: AccountIDFromV1beta3(from.ID), + Owner: from.Owner, + State: v1.Account_State(from.State), + Balance: from.Balance, + Transferred: from.Transferred, + SettledAt: from.SettledAt, + Depositor: from.Depositor, + Funds: from.Funds, + } + + return to +} + +func FractionalPaymentFromV1beta3(cdc codec.BinaryCodec, fromBz []byte) v1.FractionalPayment { + var from v1beta3.FractionalPayment + cdc.MustUnmarshal(fromBz, &from) + + to := v1.FractionalPayment{ + AccountID: AccountIDFromV1beta3(from.AccountID), + PaymentID: from.PaymentID, + Owner: from.Owner, + State: v1.FractionalPayment_State(from.State), + Rate: from.Rate, + Balance: from.Balance, + Withdrawn: from.Withdrawn, + } + + return to +} diff --git a/go/node/migrate/go.mod b/go/node/migrate/go.mod new file mode 100644 index 00000000..6673ac81 --- /dev/null +++ b/go/node/migrate/go.mod @@ -0,0 +1,142 @@ +module pkg.akt.dev/go/node/migrate + +go 1.22.5 + +require ( + github.com/akash-network/akash-api v0.0.72 + github.com/cosmos/cosmos-sdk v0.47.16-ics-lsm + github.com/cosmos/gogoproto v1.4.12 + pkg.akt.dev/go v0.0.1-rc6 +) + +replace ( + github.com/cosmos/cosmos-sdk => github.com/cosmos/cosmos-sdk v0.47.16-ics-lsm + // pin gogoproto version to v1.4.10 + github.com/cosmos/gogoproto => github.com/cosmos/gogoproto v1.4.10 + // Use regen gogoproto fork + // To be 
removed in akash v2 release + github.com/gogo/protobuf => github.com/cosmos/gogoproto v1.3.3-alpha.regen.1 + github.com/tendermint/tendermint => github.com/akash-network/cometbft v0.34.27-akash + golang.org/x/exp => golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb +) + +require ( + cosmossdk.io/api v0.3.1 // indirect + cosmossdk.io/core v0.5.1 // indirect + cosmossdk.io/depinject v1.0.0-alpha.4 // indirect + cosmossdk.io/errors v1.0.1 // indirect + cosmossdk.io/math v1.3.0 // indirect + filippo.io/edwards25519 v1.1.0 // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.2 // indirect + github.com/ChainSafe/go-schnorrkel v1.0.0 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cockroachdb/errors v1.10.0 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cometbft/cometbft v0.37.6 // indirect + github.com/cometbft/cometbft-db v0.7.0 // indirect + github.com/confio/ics23/go v0.9.1 // indirect + github.com/cosmos/btcutil v1.0.5 // indirect + github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect + github.com/cosmos/go-bip39 v1.0.0 // indirect + github.com/cosmos/iavl v0.20.1 // indirect + github.com/cosmos/ledger-cosmos-go v0.12.4 // indirect + github.com/danieljoos/wincred v1.1.2 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/dgraph-io/badger/v2 v2.2007.4 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/getsentry/sentry-go v0.23.0 // indirect + github.com/go-kit/kit v0.12.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/gogo/protobuf v1.3.3 // indirect + github.com/golang/glog v1.2.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/gorilla/websocket v1.5.1 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/gtank/merlin v0.1.1 // indirect + github.com/gtank/ristretto255 v0.1.2 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-uuid v1.0.3 // indirect + github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect + github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c // indirect + github.com/hdevalence/ed25519consensus v0.1.0 // indirect + github.com/huandu/skiplist v1.2.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmhodges/levigo 
v1.0.1-0.20191019112844-b572e7f4cdac // indirect + github.com/klauspost/compress v1.17.0 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/onsi/gomega v1.31.0 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.16.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/regen-network/cosmos-proto v0.3.1 // indirect + github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sasha-s/go-deadlock v0.3.1 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/cobra v1.8.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.18.2 // indirect + github.com/stretchr/testify v1.9.0 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect + github.com/tendermint/go-amino v0.16.0 // indirect + github.com/tendermint/tendermint v0.37.0-rc2 // indirect + github.com/tidwall/btree v1.6.0 // indirect + github.com/zondax/hid v0.9.2 // indirect + github.com/zondax/ledger-go v0.14.3 // indirect + go.etcd.io/bbolt v1.3.7 // indirect + go.uber.org/multierr v1.11.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sync v0.7.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/term v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect + google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 // indirect + google.golang.org/grpc v1.64.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + pgregory.net/rapid v1.1.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/go/node/migrate/go.sum b/go/node/migrate/go.sum new file mode 100644 index 00000000..4b49b9d2 --- /dev/null +++ b/go/node/migrate/go.sum @@ -0,0 +1,654 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cosmossdk.io/api v0.3.1 h1:NNiOclKRR0AOlO4KIqeaG6PS6kswOMhHD0ir0SscNXE= +cosmossdk.io/api 
v0.3.1/go.mod h1:DfHfMkiNA2Uhy8fj0JJlOCYOBp4eWUUJ1te5zBGNyIw= +cosmossdk.io/core v0.5.1 h1:vQVtFrIYOQJDV3f7rw4pjjVqc1id4+mE0L9hHP66pyI= +cosmossdk.io/core v0.5.1/go.mod h1:KZtwHCLjcFuo0nmDc24Xy6CRNEL9Vl/MeimQ2aC7NLE= +cosmossdk.io/depinject v1.0.0-alpha.4 h1:PLNp8ZYAMPTUKyG9IK2hsbciDWqna2z1Wsl98okJopc= +cosmossdk.io/depinject v1.0.0-alpha.4/go.mod h1:HeDk7IkR5ckZ3lMGs/o91AVUc7E596vMaOmslGFM3yU= +cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= +cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= +cosmossdk.io/log v1.3.1 h1:UZx8nWIkfbbNEWusZqzAx3ZGvu54TZacWib3EzUYmGI= +cosmossdk.io/log v1.3.1/go.mod h1:2/dIomt8mKdk6vl3OWJcPk2be3pGOS8OQaLUM/3/tCM= +cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= +cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= +cosmossdk.io/tools/rosetta v0.2.1 h1:ddOMatOH+pbxWbrGJKRAawdBkPYLfKXutK9IETnjYxw= +cosmossdk.io/tools/rosetta v0.2.1/go.mod h1:Pqdc1FdvkNV3LcNIkYWt2RQY6IP1ge6YWZk8MhhO9Hw= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= +github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/ChainSafe/go-schnorrkel v1.0.0 h1:3aDA67lAykLaG1y3AOjs88dMxC88PgUuHRrLeDnvGIM= +github.com/ChainSafe/go-schnorrkel v1.0.0/go.mod h1:dpzHYVxLZcp8pjlV+O+UR8K0Hp/z7vcchBSbMBEhCw4= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/akash-network/akash-api v0.0.72 h1:DnVL97jlx5fZHsI2YFE6ZinuvVmllDA9pWmxT7QDSJE= +github.com/akash-network/akash-api v0.0.72/go.mod h1:PdOQGTCX3kLBoKHdbPF9pe5+vSLANaMJbgA04UE+OqY= +github.com/akash-network/cometbft v0.34.27-akash h1:V1dApDOr8Ee7BJzYyQ7Z9VBtrAul4+baMeA6C49dje0= +github.com/akash-network/cometbft v0.34.27-akash/go.mod h1:BcCbhKv7ieM0KEddnYXvQZR+pZykTKReJJYf7YC7qhw= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= 
+github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/boz/go-lifecycle v0.1.1 h1:tG/wff7Zxbkf19g4D4I0G8Y4sq83iT5QjD4rzEf/zrI= +github.com/boz/go-lifecycle v0.1.1/go.mod h1:zdagAUMcC2C0OmQkBlJZFV77uF4GCVaGphAexGi7oho= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ= +github.com/btcsuite/btcd/btcutil v1.1.2/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/errors v1.10.0 h1:lfxS8zZz1+OjtV4MtNWgboi/W5tyLEB6VQZBXN+0VUU= +github.com/cockroachdb/errors v1.10.0/go.mod h1:lknhIsEVQ9Ss/qKDBQS/UqFSvPQjOwNq2qyKAxtHRqE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags 
v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/coinbase/rosetta-sdk-go/types v1.0.0 h1:jpVIwLcPoOeCR6o1tU+Xv7r5bMONNbHU7MuEHboiFuA= +github.com/coinbase/rosetta-sdk-go/types v1.0.0/go.mod h1:eq7W2TMRH22GTW0N0beDnN931DW0/WOI1R2sdHNHG4c= +github.com/cometbft/cometbft v0.37.6 h1:2BSD0lGPbcIyRd99Pf1zH0Sa8o0pbfqVWEDbZ4Ec2Uc= +github.com/cometbft/cometbft v0.37.6/go.mod h1:5FRkFil9uagHZogIX9x8z51c3GIPpQmdIN8Mq46HfzY= +github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= +github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= +github.com/confio/ics23/go v0.9.1 h1:3MV46eeWwO3xCauKyAtuAdJYMyPnnchW4iLr2bTw6/U= +github.com/confio/ics23/go v0.9.1/go.mod h1:4LPZ2NYqnYIVRklaozjNR1FScgDJ2s5Xrp+e/mYVRak= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= +github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= +github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= +github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= +github.com/cosmos/cosmos-sdk v0.47.16-ics-lsm h1:+mlfnZ4Cs8HMw9xy7Epjv56avptYSTsX3TVlUDX3Qcs= +github.com/cosmos/cosmos-sdk v0.47.16-ics-lsm/go.mod h1:uzvMwHXmuRDSOaF8ec9HickjLHJcItWBREdkaDHcPiE= +github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= +github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= +github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= +github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= +github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ4GUkT+tbFI= +github.com/cosmos/gogoproto v1.3.3-alpha.regen.1 h1:Qmv/wAw4xHnjN5iZ9qHergfk1O7nnYl7ZsIY5lF+E9k= +github.com/cosmos/gogoproto v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= +github.com/cosmos/gogoproto v1.4.10 h1:QH/yT8X+c0F4ZDacDv3z+xE3WU1P1Z3wQoLMBRJoKuI= +github.com/cosmos/gogoproto v1.4.10/go.mod h1:3aAZzeRWpAwr+SS/LLkICX2/kDFyaYVzckBDzygIxek= +github.com/cosmos/iavl v0.20.1 h1:rM1kqeG3/HBT85vsZdoSNsehciqUQPWrR4BYmqE2+zg= +github.com/cosmos/iavl v0.20.1/go.mod h1:WO7FyvaZJoH65+HFOsDir7xU9FWk2w9cHXNW1XHcl7A= +github.com/cosmos/ledger-cosmos-go v0.12.4 h1:drvWt+GJP7Aiw550yeb3ON/zsrgW0jgh5saFCr7pDnw= +github.com/cosmos/ledger-cosmos-go v0.12.4/go.mod h1:fjfVWRf++Xkygt9wzCsjEBdjcf7wiiY35fv3ctT+k4M= +github.com/cosmos/rosetta-sdk-go v0.10.0 h1:E5RhTruuoA7KTIXUcMicL76cffyeoyvNybzUGSKFTcM= +github.com/cosmos/rosetta-sdk-go v0.10.0/go.mod h1:SImAZkb96YbwvoRkzSMQB6noNJXFgWl/ENIznEoYQI4= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= 
+github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= +github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= +github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= +github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/edwingeng/deque/v2 v2.1.1 h1:+xjC3TnaeMPLZMi7QQf9jN2K00MZmTwruApqplbL9IY= +github.com/edwingeng/deque/v2 v2.1.1/go.mod h1:HukI8CQe9KDmZCcURPZRYVYjH79Zy2tIjTF9sN3Bgb0= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= 
+github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/getsentry/sentry-go v0.23.0 h1:dn+QRCeJv4pPt9OjVXiMcGIBIefaTJPw/h0bZWO05nE= +github.com/getsentry/sentry-go v0.23.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= 
+github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= +github.com/google/orderedcode 
v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= +github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= +github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= +github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c h1:PdZEHcpa3117kJ1Wa5EYupzCzn9QlBby8Fx2YpZPYvo= +github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hdevalence/ed25519consensus v0.1.0 h1:jtBwzzcHuTmFrQN6xQZn6CQEO/V9f7HsjsjeEZ6auqU= +github.com/hdevalence/ed25519consensus v0.1.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/hpcloud/tail v1.0.0/go.mod 
h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= +github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= +github.com/huandu/skiplist v1.2.0 h1:gox56QD77HzSC0w+Ws3MH3iie755GBJU1OER3h5VsYw= +github.com/huandu/skiplist v1.2.0/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= +github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmhodges/levigo v1.0.1-0.20191019112844-b572e7f4cdac h1:GcJkaxD5Wy/Ucn+L0USlpbGJy9O6+7r0nBI7ftJ7Uu0= +github.com/jmhodges/levigo v1.0.1-0.20191019112844-b572e7f4cdac/go.mod h1:dM7ihgFM8Do6WGIfOXWPgpJ+4bKGR/4ZkYh8HKDdFy4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= +github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod 
h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 h1:QRUSJEgZn2Snx0EmT/QLXibWjSUDjKWvXIT19NBVp94= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega 
v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= +github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod 
h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= +github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/regen-network/cosmos-proto v0.3.1 h1:rV7iM4SSFAagvy8RiyhiACbWEGotmqzywPxOvwMdxcg= +github.com/regen-network/cosmos-proto v0.3.1/go.mod h1:jO0sVX6a1B36nmE8C9xBFXpNwWejXC7QqCOnH3O0+YM= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= +github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= +github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0/go.mod 
h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d h1:vfofYNRScrDdvS342BElfbETmL1Aiz3i2t0zfRj16Hs= +github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d/go.mod h1:RRCYJbIwD5jmqPI9XoAFR0OcDxqUctll6zUj/+B4S48= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= +github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= +github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= +github.com/tidwall/btree v1.6.0 
h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= +github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= +github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= +github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.step.sm/crypto v0.45.1 h1:Xb8XldsbqT6pDYsg46BVPP1euASNbeNAhzrlvUP3QWo= +go.step.sm/crypto v0.45.1/go.mod h1:XtJBuMuZb11YeJpG8uP3fyBl2MerXWJ/pWQX/Au+Kt8= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb h1:xIApU0ow1zwMa2uL1VDNeQlNVFTWMQxZUZCMDy0Q4Us= +golang.org/x/exp v0.0.0-20230711153332-06a737ee72cb/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.20.0 
h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= +google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 h1:W5Xj/70xIA4x60O/IFyXivR5MGqblAb8R3w26pnD6No= +google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8/go.mod h1:vPrPUTsDCYxXWjP7clS81mZ6/803D8K4iM9Ma27VKas= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 h1:mxSlqyb8ZAHsYDCfiXN1EDdNTdvjUJSLY+OnAUtYNYA= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= 
+gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= +nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +pgregory.net/rapid v1.1.0 h1:CMa0sjHSru3puNx+J0MIAuiiEV4N0qj8/cMWGBBCsjw= +pgregory.net/rapid v1.1.0/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= +pkg.akt.dev/go v0.0.1-rc6 h1:XALrpVAkloA0PfnjOmIfbJVjQhVz96jEMXaTdz4bUww= +pkg.akt.dev/go v0.0.1-rc6/go.mod h1:zT4oCfg4GtuicSiH65mmlt3Y7InU3f5KUvFsdLZ7IYs= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/go/node/migrate/market.go b/go/node/migrate/market.go new file mode 100644 index 00000000..5414731e --- /dev/null +++ b/go/node/migrate/market.go @@ -0,0 +1,109 @@ +package migrate + +import ( + "github.com/akash-network/akash-api/go/node/market/v1beta4" + "github.com/cosmos/cosmos-sdk/codec" + v1 "pkg.akt.dev/go/node/market/v1" + "pkg.akt.dev/go/node/market/v1beta5" +) + +func NewLeaseV1beta4() v1beta4.Lease { + return v1beta4.Lease{} +} + +func NewBidV1beta4() v1beta4.Bid { + return v1beta4.Bid{} +} + +func NewOrderV1beta4() v1beta4.Order { + return v1beta4.Order{} +} + +func LeaseIDFromV1beta4(from v1beta4.LeaseID) v1.LeaseID { + return v1.LeaseID{ + Owner: from.Owner, + DSeq: from.DSeq, + GSeq: from.GSeq, + OSeq: from.OSeq, + Provider: from.Provider, + } +} + +func BidIDFromV1beta4(from v1beta4.BidID) v1.BidID { + return v1.BidID{ + Owner: from.Owner, + DSeq: from.DSeq, + GSeq: from.GSeq, + OSeq: from.OSeq, + Provider: from.Provider, + } +} + +func OrderIDFromV1beta4(from v1beta4.OrderID) v1.OrderID { + return v1.OrderID{ + Owner: from.Owner, + DSeq: from.DSeq, + GSeq: from.GSeq, + OSeq: from.OSeq, + } +} + +func BidV1beta4Prefix() []byte { + return v1beta4.BidPrefix() +} + +func OrderV1beta4Prefix() []byte { + return v1beta4.OrderPrefix() +} + +func 
LeaseV1beta4Prefix() []byte { + return v1beta4.LeasePrefix() +} + +func LeaseFromV1beta4(cdc codec.BinaryCodec, fromBz []byte) v1.Lease { + var from v1beta4.Lease + cdc.MustUnmarshal(fromBz, &from) + + return v1.Lease{ + ID: LeaseIDFromV1beta4(from.LeaseID), + State: v1.Lease_State(from.State), + Price: from.Price, + CreatedAt: from.CreatedAt, + ClosedOn: from.ClosedOn, + } +} + +func ResourcesOfferFromV1beta4(from v1beta4.ResourcesOffer) v1beta5.ResourcesOffer { + res := make(v1beta5.ResourcesOffer, 0, len(from)) + + return res +} + +func BidStateFromV1beta4(from v1beta4.Bid_State) v1beta5.Bid_State { + return v1beta5.Bid_State(from) +} + +func BidFromV1beta4(cdc codec.BinaryCodec, fromBz []byte) v1beta5.Bid { + var from v1beta4.Bid + cdc.MustUnmarshal(fromBz, &from) + + return v1beta5.Bid{ + ID: BidIDFromV1beta4(from.BidID), + State: BidStateFromV1beta4(from.State), + Price: from.Price, + CreatedAt: from.CreatedAt, + ResourcesOffer: ResourcesOfferFromV1beta4(from.ResourcesOffer), + } +} + +func OrderFromV1beta4(cdc codec.BinaryCodec, fromBz []byte) v1beta5.Order { + var from v1beta4.Order + cdc.MustUnmarshal(fromBz, &from) + + return v1beta5.Order{ + ID: OrderIDFromV1beta4(from.OrderID), + State: v1beta5.Order_State(from.State), + Spec: GroupSpecFromV1Beta3(from.Spec), + CreatedAt: from.CreatedAt, + } +} diff --git a/go/node/migrate/provider.go b/go/node/migrate/provider.go new file mode 100644 index 00000000..f740044b --- /dev/null +++ b/go/node/migrate/provider.go @@ -0,0 +1,28 @@ +package migrate + +import ( + "github.com/cosmos/cosmos-sdk/codec" + + "github.com/akash-network/akash-api/go/node/provider/v1beta3" + + "pkg.akt.dev/go/node/provider/v1beta4" +) + +func ProviderInfoFromV1beta3(from v1beta3.ProviderInfo) v1beta4.Info { + return v1beta4.Info{ + EMail: from.EMail, + Website: from.Website, + } +} + +func ProviderFromV1beta3(cdc codec.BinaryCodec, fromBz []byte) v1beta4.Provider { + var from v1beta3.Provider + cdc.MustUnmarshal(fromBz, &from) + + return v1beta4.Provider{ + Owner: from.Owner, + HostURI: from.HostURI, + Attributes: AttributesFromV1Beta3(from.Attributes), + Info: ProviderInfoFromV1beta3(from.Info), + } +} diff --git a/go/node/migrate/types.go b/go/node/migrate/types.go new file mode 100644 index 00000000..cdfa8f6f --- /dev/null +++ b/go/node/migrate/types.go @@ -0,0 +1,112 @@ +package migrate + +import ( + "github.com/akash-network/akash-api/go/node/types/v1beta3" + + v1 "pkg.akt.dev/go/node/types/attributes/v1" + "pkg.akt.dev/go/node/types/resources/v1beta4" +) + +func AttributesFromV1Beta3(from v1beta3.Attributes) v1.Attributes { + res := make(v1.Attributes, 0, len(from)) + + for _, attr := range from { + res = append(res, v1.Attribute{ + Key: attr.Key, + Value: attr.Value, + }) + } + + return res +} + +func SignedByFromV1Beta3(from v1beta3.SignedBy) v1.SignedBy { + return v1.SignedBy{ + AllOf: from.AllOf, + AnyOf: from.AnyOf, + } +} + +func PlacementRequirementsFromV1Beta3(from v1beta3.PlacementRequirements) v1.PlacementRequirements { + res := v1.PlacementRequirements{ + SignedBy: SignedByFromV1Beta3(from.SignedBy), + Attributes: AttributesFromV1Beta3(from.Attributes), + } + + return res +} + +func ResourceValueFromV1Beta3(from v1beta3.ResourceValue) v1beta4.ResourceValue { + return v1beta4.NewResourceValue(from.Value()) +} + +func CPUFromV1Beta3(from *v1beta3.CPU) *v1beta4.CPU { + if from == nil { + return nil + } + + return &v1beta4.CPU{ + Units: ResourceValueFromV1Beta3(from.Units), + Attributes: AttributesFromV1Beta3(from.Attributes), + } +} + +func 
GPUFromV1Beta3(from *v1beta3.GPU) *v1beta4.GPU { + if from == nil { + return nil + } + + return &v1beta4.GPU{ + Units: ResourceValueFromV1Beta3(from.Units), + Attributes: AttributesFromV1Beta3(from.Attributes), + } +} + +func MemoryFromV1Beta3(from *v1beta3.Memory) *v1beta4.Memory { + if from == nil { + return nil + } + + return &v1beta4.Memory{ + Quantity: ResourceValueFromV1Beta3(from.Quantity), + Attributes: AttributesFromV1Beta3(from.Attributes), + } +} + +func VolumesFromV1Beta3(from v1beta3.Volumes) v1beta4.Volumes { + res := make(v1beta4.Volumes, 0, len(from)) + + for _, storage := range from { + res = append(res, v1beta4.Storage{ + Name: "default", + Quantity: ResourceValueFromV1Beta3(storage.Quantity), + Attributes: AttributesFromV1Beta3(storage.Attributes), + }) + } + + return res +} + +func EndpointsFromV1Beta3(from []v1beta3.Endpoint) []v1beta4.Endpoint { + res := make([]v1beta4.Endpoint, 0, len(from)) + + for _, endpoint := range from { + res = append(res, v1beta4.Endpoint{ + Kind: v1beta4.Endpoint_Kind(endpoint.Kind), + SequenceNumber: endpoint.SequenceNumber, + }) + } + + return res +} + +func ResourcesFromV1Beta3(id uint32, from v1beta3.Resources) v1beta4.Resources { + return v1beta4.Resources{ + ID: id, + CPU: CPUFromV1Beta3(from.CPU), + GPU: GPUFromV1Beta3(from.GPU), + Memory: MemoryFromV1Beta3(from.Memory), + Storage: VolumesFromV1Beta3(from.Storage), + Endpoints: EndpointsFromV1Beta3(from.Endpoints), + } +} diff --git a/go/node/provider/v1beta1/codec.go b/go/node/provider/v1beta1/codec.go deleted file mode 100644 index d1f72bde..00000000 --- a/go/node/provider/v1beta1/codec.go +++ /dev/null @@ -1,45 +0,0 @@ -package v1beta1 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/msgservice" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/provider module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/provider and - // defined at the application level. 
- ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterCodec register concrete types on codec -func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { - cdc.RegisterConcrete(&MsgCreateProvider{}, ModuleName+"/"+MsgTypeCreateProvider, nil) - cdc.RegisterConcrete(&MsgUpdateProvider{}, ModuleName+"/"+MsgTypeUpdateProvider, nil) - cdc.RegisterConcrete(&MsgDeleteProvider{}, ModuleName+"/"+MsgTypeDeleteProvider, nil) -} - -// RegisterInterfaces registers the x/provider interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil), - &MsgCreateProvider{}, - &MsgUpdateProvider{}, - &MsgDeleteProvider{}, - ) - - msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) -} diff --git a/go/node/provider/v1beta1/errors.go b/go/node/provider/v1beta1/errors.go deleted file mode 100644 index f7674bbb..00000000 --- a/go/node/provider/v1beta1/errors.go +++ /dev/null @@ -1,16 +0,0 @@ -package v1beta1 - -import ( - "errors" -) - -var ( - // ErrInvalidProviderURI register error code for invalid provider uri - ErrInvalidProviderURI = errors.New("invalid provider: invalid host uri") - - // ErrNotAbsProviderURI register error code for not absolute provider uri - ErrNotAbsProviderURI = errors.New("invalid provider: not absolute host uri") - - // ErrInvalidInfoWebsite register error code for invalid info website - ErrInvalidInfoWebsite = errors.New("invalid provider: invalid info website") -) diff --git a/go/node/provider/v1beta1/event.go b/go/node/provider/v1beta1/event.go deleted file mode 100644 index a4e7425e..00000000 --- a/go/node/provider/v1beta1/event.go +++ /dev/null @@ -1,142 +0,0 @@ -package v1beta1 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/akash-network/akash-api/go/sdkutil" -) - -const ( - evActionProviderCreated = "provider-created" - evActionProviderUpdated = "provider-updated" - evActionProviderDeleted = "provider-deleted" - evOwnerKey = "owner" -) - -// EventProviderCreated struct -type EventProviderCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - Owner sdk.AccAddress `json:"owner"` -} - -func NewEventProviderCreated(owner sdk.AccAddress) EventProviderCreated { - return EventProviderCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionProviderCreated, - }, - Owner: owner, - } -} - -// ToSDKEvent method creates new sdk event for EventProviderCreated struct -func (ev EventProviderCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionProviderCreated), - }, ProviderEVAttributes(ev.Owner)...)..., - ) -} - -// EventProviderUpdated struct -type EventProviderUpdated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - Owner sdk.AccAddress `json:"owner"` -} - -func NewEventProviderUpdated(owner sdk.AccAddress) EventProviderUpdated { - return EventProviderUpdated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionProviderUpdated, - }, - Owner: owner, - } -} - -// ToSDKEvent method creates new sdk event for EventProviderUpdated struct -func (ev EventProviderUpdated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - 
sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionProviderUpdated), - }, ProviderEVAttributes(ev.Owner)...)..., - ) -} - -// EventProviderDeleted struct -type EventProviderDeleted struct { - Context sdkutil.BaseModuleEvent `json:"context"` - Owner sdk.AccAddress `json:"owner"` -} - -func NewEventProviderDeleted(owner sdk.AccAddress) EventProviderDeleted { - return EventProviderDeleted{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: evActionProviderDeleted, - }, - Owner: owner, - } -} - -// ToSDKEvent method creates new sdk event for EventProviderDeleted struct -func (ev EventProviderDeleted) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, evActionProviderDeleted), - }, ProviderEVAttributes(ev.Owner)...)..., - ) -} - -// ProviderEVAttributes returns event attribues for given Provider -func ProviderEVAttributes(owner sdk.AccAddress) []sdk.Attribute { - return []sdk.Attribute{ - sdk.NewAttribute(evOwnerKey, owner.String()), - } -} - -// ParseEVProvider returns provider details for given event attributes -func ParseEVProvider(attrs []sdk.Attribute) (sdk.AccAddress, error) { - owner, err := sdkutil.GetAccAddress(attrs, evOwnerKey) - if err != nil { - return sdk.AccAddress{}, err - } - - return owner, nil -} - -// ParseEvent parses event and returns details of event and error if occurred -// TODO: Enable returning actual events. -func ParseEvent(ev sdkutil.Event) (sdkutil.ModuleEvent, error) { - if ev.Type != sdkutil.EventTypeMessage { - return nil, sdkutil.ErrUnknownType - } - if ev.Module != ModuleName { - return nil, sdkutil.ErrUnknownModule - } - switch ev.Action { - case evActionProviderCreated: - owner, err := ParseEVProvider(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventProviderCreated(owner), nil - case evActionProviderUpdated: - owner, err := ParseEVProvider(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventProviderUpdated(owner), nil - case evActionProviderDeleted: - owner, err := ParseEVProvider(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventProviderDeleted(owner), nil - default: - return nil, sdkutil.ErrUnknownAction - } -} diff --git a/go/node/provider/v1beta1/events_test.go b/go/node/provider/v1beta1/events_test.go deleted file mode 100644 index 469b5400..00000000 --- a/go/node/provider/v1beta1/events_test.go +++ /dev/null @@ -1,193 +0,0 @@ -package v1beta1 - -import ( - "fmt" - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - - "github.com/akash-network/akash-api/go/sdkutil" -) - -var ( - errWildcard = errors.New("wildcard string error can't be matched") -) - -type testEventParsing struct { - msg sdkutil.Event - expErr error -} - -func (tep testEventParsing) testMessageType() func(t *testing.T) { - _, err := ParseEvent(tep.msg) - return func(t *testing.T) { - // if the error expected is errWildcard to catch untyped errors, don't fail the test, the error was expected. 
- if errors.Is(tep.expErr, errWildcard) { - require.Error(t, err) - } else { - require.Equal(t, tep.expErr, err) - } - } -} - -var TEPS = []testEventParsing{ - { - msg: sdkutil.Event{ - Type: "nil", - }, - expErr: sdkutil.ErrUnknownType, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - }, - expErr: sdkutil.ErrUnknownModule, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - }, - expErr: sdkutil.ErrUnknownAction, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: "nil", - }, - expErr: sdkutil.ErrUnknownModule, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: "nil", - }, - expErr: sdkutil.ErrUnknownAction, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionProviderCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionProviderCreated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "hello", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionProviderCreated, - Attributes: []sdk.Attribute{}, - }, - expErr: errWildcard, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionProviderUpdated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionProviderUpdated, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "hello", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionProviderUpdated, - Attributes: []sdk.Attribute{}, - }, - expErr: errWildcard, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionProviderDeleted, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionProviderDeleted, - Attributes: []sdk.Attribute{ - { - Key: evOwnerKey, - Value: "hello", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: ModuleName, - Action: evActionProviderDeleted, - Attributes: []sdk.Attribute{}, - }, - expErr: errWildcard, - }, -} - -func TestEventParsing(t *testing.T) { - for i, test := range TEPS { - t.Run(fmt.Sprintf("%d", i), - test.testMessageType()) - } -} diff --git a/go/node/provider/v1beta1/key.go b/go/node/provider/v1beta1/key.go deleted file mode 100644 index c9229179..00000000 --- a/go/node/provider/v1beta1/key.go +++ /dev/null @@ -1,12 +0,0 @@ -package v1beta1 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "provider" - - // StoreKey is the store key string for provider - StoreKey = ModuleName - - // RouterKey is the message route for provider - RouterKey = ModuleName -) diff --git a/go/node/provider/v1beta1/msgs.go b/go/node/provider/v1beta1/msgs.go deleted file mode 100644 index 91eaed39..00000000 --- 
a/go/node/provider/v1beta1/msgs.go +++ /dev/null @@ -1,190 +0,0 @@ -package v1beta1 - -import ( - "net/url" - - types "github.com/akash-network/akash-api/go/node/types/v1beta1" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - "github.com/pkg/errors" -) - -const ( - MsgTypeCreateProvider = "create-provider" - MsgTypeUpdateProvider = "update-provider" - MsgTypeDeleteProvider = "delete-provider" -) - -var ( - _, _, _ sdk.Msg = &MsgCreateProvider{}, &MsgUpdateProvider{}, &MsgDeleteProvider{} -) - -var ( - ErrInvalidStorageClass = errors.New("provider: invalid storage class") - ErrUnsupportedAttribute = errors.New("provider: unsupported attribute") -) - -// NewMsgCreateProvider creates a new MsgCreateProvider instance -func NewMsgCreateProvider(owner sdk.AccAddress, hostURI string, attributes types.Attributes) *MsgCreateProvider { - return &MsgCreateProvider{ - Owner: owner.String(), - HostURI: hostURI, - Attributes: attributes, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCreateProvider) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCreateProvider) Type() string { return MsgTypeCreateProvider } - -// ValidateBasic does basic validation of a HostURI -func (msg MsgCreateProvider) ValidateBasic() error { - if err := validateProviderURI(msg.HostURI); err != nil { - return err - } - if _, err := sdk.AccAddressFromBech32(msg.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Provider Address") - } - if err := msg.Attributes.Validate(); err != nil { - return err - } - if err := validateProviderAttributes(msg.Attributes); err != nil { - return err - } - if err := msg.Info.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgCreateProvider) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCreateProvider) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgUpdateProvider creates a new MsgUpdateProvider instance -func NewMsgUpdateProvider(owner sdk.AccAddress, hostURI string, attributes types.Attributes) *MsgUpdateProvider { - return &MsgUpdateProvider{ - Owner: owner.String(), - HostURI: hostURI, - Attributes: attributes, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgUpdateProvider) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgUpdateProvider) Type() string { return MsgTypeUpdateProvider } - -// ValidateBasic does basic validation of a ProviderURI -func (msg MsgUpdateProvider) ValidateBasic() error { - if err := validateProviderURI(msg.HostURI); err != nil { - return err - } - if _, err := sdk.AccAddressFromBech32(msg.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgUpdate: Invalid Provider Address") - } - if err := msg.Attributes.Validate(); err != nil { - return err - } - if err := validateProviderAttributes(msg.Attributes); err != nil { - return err - } - if err := msg.Info.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgUpdateProvider) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg 
MsgUpdateProvider) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgDeleteProvider creates a new MsgDeleteProvider instance -func NewMsgDeleteProvider(owner sdk.AccAddress) *MsgDeleteProvider { - return &MsgDeleteProvider{ - Owner: owner.String(), - } -} - -// Route implements the sdk.Msg interface -func (msg MsgDeleteProvider) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgDeleteProvider) Type() string { return MsgTypeDeleteProvider } - -// ValidateBasic does basic validation -func (msg MsgDeleteProvider) ValidateBasic() error { - if _, err := sdk.AccAddressFromBech32(msg.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgDelete: Invalid Provider Address") - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgDeleteProvider) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgDeleteProvider) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -func validateProviderURI(val string) error { - u, err := url.Parse(val) - if err != nil { - return ErrInvalidProviderURI - } - if !u.IsAbs() { - return errors.Wrapf(ErrNotAbsProviderURI, "validating %q for absolute URI", val) - } - - if u.Scheme != "https" { - return errors.Wrapf(ErrInvalidProviderURI, "scheme in %q should be https", val) - } - - if u.Host == "" { - return errors.Wrapf(ErrInvalidProviderURI, "validating %q for valid host", val) - } - - if u.Path != "" { - return errors.Wrapf(ErrInvalidProviderURI, "path in %q should be empty", val) - } - - return nil -} - -func validateProviderAttributes(_ types.Attributes) error { - return nil -} diff --git a/go/node/provider/v1beta1/msgs_test.go b/go/node/provider/v1beta1/msgs_test.go deleted file mode 100644 index 4fc08c25..00000000 --- a/go/node/provider/v1beta1/msgs_test.go +++ /dev/null @@ -1,241 +0,0 @@ -package v1beta1 - -import ( - "fmt" - "net/url" - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - "github.com/pkg/errors" - - types "github.com/akash-network/akash-api/go/node/types/v1beta1" -) - -func TestConfigPath(t *testing.T) { - type testConfigPath struct { - path string - expErr error - } - tests := []testConfigPath{ - { - path: "foo.yaml", - expErr: ErrNotAbsProviderURI, - }, - { - path: "localhost", - expErr: ErrNotAbsProviderURI, - }, - { - path: "localhost/foo", - expErr: ErrNotAbsProviderURI, - }, - { - path: "localhost:80", - expErr: ErrInvalidProviderURI, - }, - { - path: "localhost:80/foo", - expErr: ErrInvalidProviderURI, - }, - { - path: "127.0.0.1", - expErr: ErrNotAbsProviderURI, - }, - { - path: "127.0.0.1/foo", - expErr: ErrNotAbsProviderURI, - }, - { - path: "127.0.0.1:80", - expErr: ErrInvalidProviderURI, - }, - { - path: "127.0.0.1:80/foo", - expErr: ErrInvalidProviderURI, - }, - { - path: "file:///foo.yaml", - expErr: ErrInvalidProviderURI, - }, - { - path: "https://localhost", - expErr: nil, - }, - { - path: "http://localhost/foo", - expErr: ErrInvalidProviderURI, - }, - { - path: "https://localhost:80", - expErr: nil, - }, - { - path: "http://localhost:80/foo", - expErr: ErrInvalidProviderURI, - }, - { - path: "http://localhost:3001/", - expErr: ErrInvalidProviderURI, - }, - { 
- path: "https://localhost:80", - expErr: nil, - }, - { - path: "https://localhost:80/foo", - expErr: ErrInvalidProviderURI, - }, - } - - for i, testUnit := range tests { - closure := func(test testConfigPath) func(t *testing.T) { - testFunc := func(t *testing.T) { - err := validateProviderURI(test.path) - if test.expErr != nil && !errors.Is(err, test.expErr) || - err != nil && test.expErr == nil { - t.Errorf("unexpected error occurred: %v", err) - - _, err := url.Parse(test.path) - if err != nil { - t.Errorf("url.Parse() of %q err: %v", test.path, err) - } - } - } - return testFunc - } - tf := closure(testUnit) - t.Run(fmt.Sprintf("%d->%q", i, testUnit.path), tf) - } -} - -type providerTestParams struct { - msg Provider - expErr error - delErr error -} - -func (test providerTestParams) testCreate() func(t *testing.T) { - msg := MsgCreateProvider{ - Owner: test.msg.Owner, - HostURI: test.msg.HostURI, - Attributes: test.msg.Attributes, - } - vErr := msg.ValidateBasic() - return func(t *testing.T) { - if test.expErr != nil && !errors.Is(vErr, test.expErr) { - t.Errorf("error expected: '%v' VS: %v", test.expErr, vErr) - return - } - sb := msg.GetSignBytes() - if len(sb) == 0 { - t.Error("no signed bytes returned") - } - } -} - -func (test providerTestParams) testUpdate() func(t *testing.T) { - msg := MsgUpdateProvider{ - Owner: test.msg.Owner, - HostURI: test.msg.HostURI, - Attributes: test.msg.Attributes, - } - vErr := msg.ValidateBasic() - return func(t *testing.T) { - if test.expErr != nil && !errors.Is(vErr, test.expErr) { - t.Errorf("error expected: '%v' VS: %v", test.expErr, vErr) - return - } - sb := msg.GetSignBytes() - if len(sb) == 0 { - t.Error("no signed bytes returned") - } - } -} - -func (test providerTestParams) testDelete() func(t *testing.T) { - msg := MsgDeleteProvider{ - Owner: test.msg.Owner, - } - vErr := msg.ValidateBasic() - return func(t *testing.T) { - if test.delErr != nil && !errors.Is(vErr, test.delErr) { - t.Errorf("error expected: '%v' VS: %v", test.expErr, vErr) - return - } - sb := msg.GetSignBytes() - if len(sb) == 0 { - t.Error("no signed bytes returned") - } - } -} - -var msgCreateTests = []providerTestParams{ - { - msg: Provider{ - Owner: sdk.AccAddress("hihi").String(), - HostURI: "https://localhost:3001", - Attributes: []types.Attribute{ - { - Key: "hihi", - Value: "neh", - }, - }, - }, - expErr: nil, - }, - { - msg: Provider{ - Owner: sdk.AccAddress("").String(), - HostURI: "https://localhost:3001", - Attributes: []types.Attribute{ - { - Key: "hihi", - Value: "neh", - }, - }, - }, - expErr: sdkerrors.ErrInvalidAddress, - delErr: sdkerrors.ErrInvalidAddress, - }, - { - msg: Provider{ - Owner: sdk.AccAddress("hihi").String(), - HostURI: "ht tp://foo.com", - Attributes: []types.Attribute{ - { - Key: "hihi", - Value: "neh", - }, - }, - }, - expErr: ErrInvalidProviderURI, - }, - { - msg: Provider{ - Owner: sdk.AccAddress("hihi").String(), - HostURI: "", - Attributes: []types.Attribute{ - { - Key: "hihi", - Value: "neh", - }, - }, - }, - expErr: ErrNotAbsProviderURI, - }, -} - -func TestMsgStarValidation(t *testing.T) { - for i, test := range msgCreateTests { - main := func(test providerTestParams) func(t *testing.T) { - return func(t *testing.T) { - t.Run("msg-create", test.testCreate()) - t.Run("msg-update", test.testUpdate()) - t.Run("msg-delete", test.testDelete()) - } - } - f := main(test) - t.Run(fmt.Sprintf("%d", i), f) - } -} diff --git a/go/node/provider/v1beta1/provider.pb.go b/go/node/provider/v1beta1/provider.pb.go deleted file mode 100644 index 
22daa571..00000000 --- a/go/node/provider/v1beta1/provider.pb.go +++ /dev/null @@ -1,2102 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/provider/v1beta1/provider.proto - -package v1beta1 - -import ( - context "context" - fmt "fmt" - github_com_akash_network_akash_api_go_node_types_v1beta1 "github.com/akash-network/akash-api/go/node/types/v1beta1" - v1beta1 "github.com/akash-network/akash-api/go/node/types/v1beta1" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// ProviderInfo -type ProviderInfo struct { - EMail string `protobuf:"bytes,1,opt,name=email,proto3" json:"email" yaml:"email"` - Website string `protobuf:"bytes,2,opt,name=website,proto3" json:"website" yaml:"website"` -} - -func (m *ProviderInfo) Reset() { *m = ProviderInfo{} } -func (m *ProviderInfo) String() string { return proto.CompactTextString(m) } -func (*ProviderInfo) ProtoMessage() {} -func (*ProviderInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_12a8109840398be6, []int{0} -} -func (m *ProviderInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProviderInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProviderInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProviderInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProviderInfo.Merge(m, src) -} -func (m *ProviderInfo) XXX_Size() int { - return m.Size() -} -func (m *ProviderInfo) XXX_DiscardUnknown() { - xxx_messageInfo_ProviderInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_ProviderInfo proto.InternalMessageInfo - -func (m *ProviderInfo) GetEMail() string { - if m != nil { - return m.EMail - } - return "" -} - -func (m *ProviderInfo) GetWebsite() string { - if m != nil { - return m.Website - } - return "" -} - -// MsgCreateProvider defines an SDK message for creating a provider -type MsgCreateProvider struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - HostURI string `protobuf:"bytes,2,opt,name=host_uri,json=hostUri,proto3" json:"host_uri" yaml:"host_uri"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta1.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta1.Attributes" json:"attributes" yaml:"attributes"` - Info ProviderInfo `protobuf:"bytes,4,opt,name=info,proto3" json:"info" yaml:"info"` -} - -func (m *MsgCreateProvider) Reset() { *m = MsgCreateProvider{} } -func (m *MsgCreateProvider) String() string { return proto.CompactTextString(m) } -func (*MsgCreateProvider) ProtoMessage() {} -func (*MsgCreateProvider) Descriptor() ([]byte, []int) { - return fileDescriptor_12a8109840398be6, 
[]int{1} -} -func (m *MsgCreateProvider) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateProvider.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateProvider) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateProvider.Merge(m, src) -} -func (m *MsgCreateProvider) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateProvider) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateProvider.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateProvider proto.InternalMessageInfo - -func (m *MsgCreateProvider) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *MsgCreateProvider) GetHostURI() string { - if m != nil { - return m.HostURI - } - return "" -} - -func (m *MsgCreateProvider) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta1.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *MsgCreateProvider) GetInfo() ProviderInfo { - if m != nil { - return m.Info - } - return ProviderInfo{} -} - -// MsgCreateProviderResponse defines the Msg/CreateProvider response type. -type MsgCreateProviderResponse struct { -} - -func (m *MsgCreateProviderResponse) Reset() { *m = MsgCreateProviderResponse{} } -func (m *MsgCreateProviderResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateProviderResponse) ProtoMessage() {} -func (*MsgCreateProviderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_12a8109840398be6, []int{2} -} -func (m *MsgCreateProviderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateProviderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateProviderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateProviderResponse.Merge(m, src) -} -func (m *MsgCreateProviderResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateProviderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateProviderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateProviderResponse proto.InternalMessageInfo - -// MsgUpdateProvider defines an SDK message for updating a provider -type MsgUpdateProvider struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - HostURI string `protobuf:"bytes,2,opt,name=host_uri,json=hostUri,proto3" json:"host_uri" yaml:"host_uri"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta1.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta1.Attributes" json:"attributes" yaml:"attributes"` - Info ProviderInfo `protobuf:"bytes,4,opt,name=info,proto3" json:"info" yaml:"info"` -} - -func (m *MsgUpdateProvider) Reset() { *m = MsgUpdateProvider{} } -func (m *MsgUpdateProvider) String() string { return proto.CompactTextString(m) } -func (*MsgUpdateProvider) ProtoMessage() {} -func (*MsgUpdateProvider) Descriptor() ([]byte, []int) { - return fileDescriptor_12a8109840398be6, []int{3} -} -func (m *MsgUpdateProvider) 
XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgUpdateProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgUpdateProvider.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgUpdateProvider) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgUpdateProvider.Merge(m, src) -} -func (m *MsgUpdateProvider) XXX_Size() int { - return m.Size() -} -func (m *MsgUpdateProvider) XXX_DiscardUnknown() { - xxx_messageInfo_MsgUpdateProvider.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgUpdateProvider proto.InternalMessageInfo - -func (m *MsgUpdateProvider) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *MsgUpdateProvider) GetHostURI() string { - if m != nil { - return m.HostURI - } - return "" -} - -func (m *MsgUpdateProvider) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta1.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *MsgUpdateProvider) GetInfo() ProviderInfo { - if m != nil { - return m.Info - } - return ProviderInfo{} -} - -// MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. -type MsgUpdateProviderResponse struct { -} - -func (m *MsgUpdateProviderResponse) Reset() { *m = MsgUpdateProviderResponse{} } -func (m *MsgUpdateProviderResponse) String() string { return proto.CompactTextString(m) } -func (*MsgUpdateProviderResponse) ProtoMessage() {} -func (*MsgUpdateProviderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_12a8109840398be6, []int{4} -} -func (m *MsgUpdateProviderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgUpdateProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgUpdateProviderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgUpdateProviderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgUpdateProviderResponse.Merge(m, src) -} -func (m *MsgUpdateProviderResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgUpdateProviderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgUpdateProviderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgUpdateProviderResponse proto.InternalMessageInfo - -// MsgDeleteProvider defines an SDK message for deleting a provider -type MsgDeleteProvider struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` -} - -func (m *MsgDeleteProvider) Reset() { *m = MsgDeleteProvider{} } -func (m *MsgDeleteProvider) String() string { return proto.CompactTextString(m) } -func (*MsgDeleteProvider) ProtoMessage() {} -func (*MsgDeleteProvider) Descriptor() ([]byte, []int) { - return fileDescriptor_12a8109840398be6, []int{5} -} -func (m *MsgDeleteProvider) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDeleteProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDeleteProvider.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDeleteProvider) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDeleteProvider.Merge(m, src) -} 
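// --- Illustrative note, not part of the removed provider.pb.go ---------------
// The v1beta1 msgs.go removed earlier in this diff validated a provider's
// HostURI via validateProviderURI: the value had to parse as an absolute
// https URL with a non-empty host and an empty path. Below is a minimal,
// standalone restatement of those checks using only the standard library;
// the function name checkProviderURI is illustrative and was not part of
// the removed API.
package main

import (
	"errors"
	"fmt"
	"net/url"
)

// checkProviderURI mirrors the checks performed by the removed
// validateProviderURI: parseable, absolute, https, host set, empty path.
func checkProviderURI(val string) error {
	u, err := url.Parse(val)
	if err != nil {
		return fmt.Errorf("invalid provider URI %q: %w", val, err)
	}
	if !u.IsAbs() {
		return errors.New("provider URI must be absolute")
	}
	if u.Scheme != "https" {
		return errors.New("provider URI scheme must be https")
	}
	if u.Host == "" {
		return errors.New("provider URI must include a host")
	}
	if u.Path != "" {
		return errors.New("provider URI path must be empty")
	}
	return nil
}

func main() {
	// "https://localhost:3001" passes; "http://localhost/foo" fails on the
	// scheme check, mirroring cases exercised by the removed msgs_test.go.
	for _, uri := range []string{"https://localhost:3001", "http://localhost/foo"} {
		fmt.Println(uri, "->", checkProviderURI(uri))
	}
}
// ------------------------------------------------------------------------------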
-func (m *MsgDeleteProvider) XXX_Size() int { - return m.Size() -} -func (m *MsgDeleteProvider) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDeleteProvider.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDeleteProvider proto.InternalMessageInfo - -func (m *MsgDeleteProvider) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -// MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. -type MsgDeleteProviderResponse struct { -} - -func (m *MsgDeleteProviderResponse) Reset() { *m = MsgDeleteProviderResponse{} } -func (m *MsgDeleteProviderResponse) String() string { return proto.CompactTextString(m) } -func (*MsgDeleteProviderResponse) ProtoMessage() {} -func (*MsgDeleteProviderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_12a8109840398be6, []int{6} -} -func (m *MsgDeleteProviderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDeleteProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDeleteProviderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDeleteProviderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDeleteProviderResponse.Merge(m, src) -} -func (m *MsgDeleteProviderResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgDeleteProviderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDeleteProviderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDeleteProviderResponse proto.InternalMessageInfo - -// Provider stores owner and host details -type Provider struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - HostURI string `protobuf:"bytes,2,opt,name=host_uri,json=hostUri,proto3" json:"host_uri" yaml:"host_uri"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta1.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta1.Attributes" json:"attributes" yaml:"attributes"` - Info ProviderInfo `protobuf:"bytes,4,opt,name=info,proto3" json:"info" yaml:"info"` -} - -func (m *Provider) Reset() { *m = Provider{} } -func (*Provider) ProtoMessage() {} -func (*Provider) Descriptor() ([]byte, []int) { - return fileDescriptor_12a8109840398be6, []int{7} -} -func (m *Provider) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Provider.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Provider) XXX_Merge(src proto.Message) { - xxx_messageInfo_Provider.Merge(m, src) -} -func (m *Provider) XXX_Size() int { - return m.Size() -} -func (m *Provider) XXX_DiscardUnknown() { - xxx_messageInfo_Provider.DiscardUnknown(m) -} - -var xxx_messageInfo_Provider proto.InternalMessageInfo - -func (m *Provider) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *Provider) GetHostURI() string { - if m != nil { - return m.HostURI - } - return "" -} - -func (m *Provider) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta1.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Provider) GetInfo() ProviderInfo { - if m != nil { - return m.Info - } - 
return ProviderInfo{} -} - -func init() { - proto.RegisterType((*ProviderInfo)(nil), "akash.provider.v1beta1.ProviderInfo") - proto.RegisterType((*MsgCreateProvider)(nil), "akash.provider.v1beta1.MsgCreateProvider") - proto.RegisterType((*MsgCreateProviderResponse)(nil), "akash.provider.v1beta1.MsgCreateProviderResponse") - proto.RegisterType((*MsgUpdateProvider)(nil), "akash.provider.v1beta1.MsgUpdateProvider") - proto.RegisterType((*MsgUpdateProviderResponse)(nil), "akash.provider.v1beta1.MsgUpdateProviderResponse") - proto.RegisterType((*MsgDeleteProvider)(nil), "akash.provider.v1beta1.MsgDeleteProvider") - proto.RegisterType((*MsgDeleteProviderResponse)(nil), "akash.provider.v1beta1.MsgDeleteProviderResponse") - proto.RegisterType((*Provider)(nil), "akash.provider.v1beta1.Provider") -} - -func init() { - proto.RegisterFile("akash/provider/v1beta1/provider.proto", fileDescriptor_12a8109840398be6) -} - -var fileDescriptor_12a8109840398be6 = []byte{ - // 577 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xb1, 0x6b, 0xdb, 0x4e, - 0x18, 0x95, 0x62, 0xfb, 0x67, 0xff, 0xce, 0x21, 0x25, 0xa2, 0x14, 0xc7, 0x21, 0x3a, 0x73, 0xb4, - 0xe0, 0x16, 0x2a, 0x61, 0x77, 0x68, 0x49, 0xa7, 0x3a, 0x0d, 0x34, 0x05, 0x43, 0x11, 0x78, 0xe9, - 0x52, 0xa4, 0xfa, 0x22, 0x8b, 0xd8, 0x3a, 0xa1, 0x3b, 0xc7, 0x64, 0xec, 0x7f, 0x50, 0xe8, 0x92, - 0x31, 0x73, 0xa1, 0xff, 0x47, 0xc6, 0x2c, 0x85, 0x4e, 0xd7, 0x62, 0x2f, 0xc5, 0xa3, 0xff, 0x82, - 0xa2, 0x3b, 0x49, 0xb1, 0x6c, 0xd7, 0xb4, 0x74, 0xc9, 0x90, 0x4d, 0xf7, 0xee, 0x7d, 0xdf, 0xfb, - 0xf8, 0xde, 0xe3, 0x6c, 0xf0, 0xc0, 0x3e, 0xb1, 0x69, 0xcf, 0x0c, 0x42, 0x72, 0xea, 0x75, 0x71, - 0x68, 0x9e, 0x36, 0x1c, 0xcc, 0xec, 0x46, 0x0a, 0x18, 0x41, 0x48, 0x18, 0xd1, 0xee, 0x09, 0x9a, - 0x91, 0xa2, 0x31, 0xad, 0x7a, 0xd7, 0x25, 0x2e, 0x11, 0x14, 0x33, 0xfa, 0x92, 0xec, 0x2a, 0x92, - 0x4d, 0x1d, 0x9b, 0xe2, 0xb4, 0xa1, 0xcd, 0x58, 0xe8, 0x39, 0x43, 0x86, 0x25, 0x07, 0x7d, 0x50, - 0xc1, 0xe6, 0x9b, 0xb8, 0xdd, 0x91, 0x7f, 0x4c, 0xb4, 0x67, 0xa0, 0x80, 0x07, 0xb6, 0xd7, 0xaf, - 0xa8, 0x35, 0xb5, 0xfe, 0x7f, 0x0b, 0x8d, 0x39, 0x2c, 0x1c, 0xb6, 0x6d, 0xaf, 0x3f, 0xe5, 0x50, - 0xde, 0xcc, 0x38, 0xdc, 0x3c, 0xb3, 0x07, 0xfd, 0x7d, 0x24, 0x8e, 0xc8, 0x92, 0xb0, 0xf6, 0x14, - 0x14, 0x47, 0xd8, 0xa1, 0x1e, 0xc3, 0x95, 0x0d, 0x51, 0xbb, 0x37, 0xe5, 0x30, 0x81, 0x66, 0x1c, - 0x6e, 0xc9, 0xa2, 0x18, 0x40, 0x56, 0x72, 0x85, 0xce, 0x73, 0x60, 0xbb, 0x4d, 0xdd, 0x83, 0x10, - 0xdb, 0x0c, 0x27, 0xc3, 0x68, 0x26, 0x28, 0x90, 0x91, 0x8f, 0xc3, 0x78, 0x90, 0x9d, 0x48, 0x5f, - 0x00, 0xd7, 0xfa, 0xe2, 0x88, 0x2c, 0x09, 0x6b, 0x87, 0xa0, 0xd4, 0x23, 0x94, 0xbd, 0x1b, 0x86, - 0x5e, 0x3c, 0xc0, 0xa3, 0x31, 0x87, 0xc5, 0x57, 0x84, 0xb2, 0x8e, 0x75, 0x34, 0xe5, 0x30, 0xbd, - 0x9e, 0x71, 0x78, 0x47, 0x76, 0x48, 0x10, 0x64, 0x15, 0xa3, 0xcf, 0x4e, 0xe8, 0x69, 0x5f, 0x54, - 0x00, 0xd2, 0x2d, 0xd1, 0x4a, 0xae, 0x96, 0xab, 0x97, 0x9b, 0x7b, 0x86, 0xdc, 0x7c, 0xb4, 0xcb, - 0x64, 0xeb, 0xc6, 0x8b, 0x84, 0xd5, 0xf2, 0x2f, 0x39, 0x54, 0xa6, 0x1c, 0xce, 0x15, 0xce, 0x38, - 0xdc, 0x96, 0x1a, 0xd7, 0x18, 0xfa, 0xfc, 0x1d, 0x1e, 0xb8, 0x1e, 0xeb, 0x0d, 0x1d, 0xe3, 0x3d, - 0x19, 0x98, 0xa2, 0xe7, 0x63, 0x1f, 0xb3, 0x11, 0x09, 0x4f, 0xe2, 0x93, 0x1d, 0x78, 0xa6, 0x4b, - 0x4c, 0x9f, 0x74, 0xb1, 0xc9, 0xce, 0x02, 0x4c, 0xcd, 0x25, 0x39, 0x6a, 0xcd, 0xe9, 0x68, 0x1d, - 0x90, 0xf7, 0xfc, 0x63, 0x52, 0xc9, 0xd7, 0xd4, 0x7a, 0xb9, 0x79, 0xdf, 0x58, 0x1d, 0x11, 0x63, - 0xde, 0xe4, 0xd6, 0x6e, 0x3c, 0xaf, 0xa8, 0x9c, 0x71, 0x58, 0x96, 0x93, 0x46, 0x27, 0x64, 
0x09, - 0x70, 0x3f, 0xff, 0xf3, 0x02, 0x2a, 0x68, 0x17, 0xec, 0x2c, 0x39, 0x63, 0x61, 0x1a, 0x10, 0x9f, - 0xa6, 0xbe, 0x75, 0x82, 0xee, 0xad, 0x6f, 0x37, 0xd2, 0xb7, 0xac, 0x33, 0xa9, 0x6f, 0xaf, 0x85, - 0x6d, 0x2f, 0x71, 0x1f, 0xff, 0x83, 0x6d, 0x19, 0xa1, 0x6c, 0xaf, 0x54, 0xe8, 0x53, 0x0e, 0x94, - 0x6e, 0x73, 0x71, 0x33, 0x72, 0x51, 0x3a, 0xbf, 0x80, 0x4a, 0x64, 0x59, 0xf3, 0xeb, 0x06, 0xc8, - 0xb5, 0xa9, 0xab, 0xf9, 0x60, 0x6b, 0xe1, 0xc9, 0x7d, 0xf8, 0x3b, 0xb1, 0xa5, 0x37, 0xa0, 0xda, - 0xf8, 0x63, 0x6a, 0x92, 0x86, 0x48, 0x6f, 0xe1, 0xa9, 0x58, 0xa7, 0x97, 0xa5, 0xae, 0xd5, 0x5b, - 0x1d, 0xf3, 0x48, 0x6f, 0x21, 0xe3, 0xeb, 0xf4, 0xb2, 0xd4, 0xb5, 0x7a, 0xab, 0xd3, 0xde, 0xea, - 0x5c, 0x8e, 0x75, 0xf5, 0x6a, 0xac, 0xab, 0x3f, 0xc6, 0xba, 0xfa, 0x71, 0xa2, 0x2b, 0x57, 0x13, - 0x5d, 0xf9, 0x36, 0xd1, 0x95, 0xb7, 0xcf, 0xff, 0x22, 0x23, 0x8b, 0x7f, 0x01, 0x9c, 0xff, 0xc4, - 0x0f, 0xf5, 0x93, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x2c, 0xf6, 0x77, 0x26, 0x23, 0x08, 0x00, - 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MsgClient interface { - // CreateProvider defines a method that creates a provider given the proper inputs - CreateProvider(ctx context.Context, in *MsgCreateProvider, opts ...grpc.CallOption) (*MsgCreateProviderResponse, error) - // UpdateProvider defines a method that updates a provider given the proper inputs - UpdateProvider(ctx context.Context, in *MsgUpdateProvider, opts ...grpc.CallOption) (*MsgUpdateProviderResponse, error) - // DeleteProvider defines a method that deletes a provider given the proper inputs - DeleteProvider(ctx context.Context, in *MsgDeleteProvider, opts ...grpc.CallOption) (*MsgDeleteProviderResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) CreateProvider(ctx context.Context, in *MsgCreateProvider, opts ...grpc.CallOption) (*MsgCreateProviderResponse, error) { - out := new(MsgCreateProviderResponse) - err := c.cc.Invoke(ctx, "/akash.provider.v1beta1.Msg/CreateProvider", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) UpdateProvider(ctx context.Context, in *MsgUpdateProvider, opts ...grpc.CallOption) (*MsgUpdateProviderResponse, error) { - out := new(MsgUpdateProviderResponse) - err := c.cc.Invoke(ctx, "/akash.provider.v1beta1.Msg/UpdateProvider", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) DeleteProvider(ctx context.Context, in *MsgDeleteProvider, opts ...grpc.CallOption) (*MsgDeleteProviderResponse, error) { - out := new(MsgDeleteProviderResponse) - err := c.cc.Invoke(ctx, "/akash.provider.v1beta1.Msg/DeleteProvider", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. 
-type MsgServer interface { - // CreateProvider defines a method that creates a provider given the proper inputs - CreateProvider(context.Context, *MsgCreateProvider) (*MsgCreateProviderResponse, error) - // UpdateProvider defines a method that updates a provider given the proper inputs - UpdateProvider(context.Context, *MsgUpdateProvider) (*MsgUpdateProviderResponse, error) - // DeleteProvider defines a method that deletes a provider given the proper inputs - DeleteProvider(context.Context, *MsgDeleteProvider) (*MsgDeleteProviderResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. -type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) CreateProvider(ctx context.Context, req *MsgCreateProvider) (*MsgCreateProviderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateProvider not implemented") -} -func (*UnimplementedMsgServer) UpdateProvider(ctx context.Context, req *MsgUpdateProvider) (*MsgUpdateProviderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateProvider not implemented") -} -func (*UnimplementedMsgServer) DeleteProvider(ctx context.Context, req *MsgDeleteProvider) (*MsgDeleteProviderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteProvider not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_CreateProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateProvider) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateProvider(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.provider.v1beta1.Msg/CreateProvider", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CreateProvider(ctx, req.(*MsgCreateProvider)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_UpdateProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgUpdateProvider) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).UpdateProvider(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.provider.v1beta1.Msg/UpdateProvider", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).UpdateProvider(ctx, req.(*MsgUpdateProvider)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_DeleteProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgDeleteProvider) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).DeleteProvider(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.provider.v1beta1.Msg/DeleteProvider", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).DeleteProvider(ctx, req.(*MsgDeleteProvider)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.provider.v1beta1.Msg", - HandlerType: (*MsgServer)(nil), - Methods: 
[]grpc.MethodDesc{ - { - MethodName: "CreateProvider", - Handler: _Msg_CreateProvider_Handler, - }, - { - MethodName: "UpdateProvider", - Handler: _Msg_UpdateProvider_Handler, - }, - { - MethodName: "DeleteProvider", - Handler: _Msg_DeleteProvider_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/provider/v1beta1/provider.proto", -} - -func (m *ProviderInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProviderInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProviderInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Website) > 0 { - i -= len(m.Website) - copy(dAtA[i:], m.Website) - i = encodeVarintProvider(dAtA, i, uint64(len(m.Website))) - i-- - dAtA[i] = 0x12 - } - if len(m.EMail) > 0 { - i -= len(m.EMail) - copy(dAtA[i:], m.EMail) - i = encodeVarintProvider(dAtA, i, uint64(len(m.EMail))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateProvider) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateProvider) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.HostURI) > 0 { - i -= len(m.HostURI) - copy(dAtA[i:], m.HostURI) - i = encodeVarintProvider(dAtA, i, uint64(len(m.HostURI))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateProviderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateProviderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgUpdateProvider) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgUpdateProvider) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgUpdateProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := 
m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.HostURI) > 0 { - i -= len(m.HostURI) - copy(dAtA[i:], m.HostURI) - i = encodeVarintProvider(dAtA, i, uint64(len(m.HostURI))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgUpdateProviderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgUpdateProviderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgUpdateProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgDeleteProvider) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDeleteProvider) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDeleteProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgDeleteProviderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDeleteProviderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDeleteProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *Provider) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Provider) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Provider) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.HostURI) > 0 { - i -= 
len(m.HostURI) - copy(dAtA[i:], m.HostURI) - i = encodeVarintProvider(dAtA, i, uint64(len(m.HostURI))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintProvider(dAtA []byte, offset int, v uint64) int { - offset -= sovProvider(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ProviderInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.EMail) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - l = len(m.Website) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - return n -} - -func (m *MsgCreateProvider) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - l = len(m.HostURI) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovProvider(uint64(l)) - } - } - l = m.Info.Size() - n += 1 + l + sovProvider(uint64(l)) - return n -} - -func (m *MsgCreateProviderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgUpdateProvider) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - l = len(m.HostURI) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovProvider(uint64(l)) - } - } - l = m.Info.Size() - n += 1 + l + sovProvider(uint64(l)) - return n -} - -func (m *MsgUpdateProviderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgDeleteProvider) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - return n -} - -func (m *MsgDeleteProviderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *Provider) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - l = len(m.HostURI) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovProvider(uint64(l)) - } - } - l = m.Info.Size() - n += 1 + l + sovProvider(uint64(l)) - return n -} - -func sovProvider(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozProvider(x uint64) (n int) { - return sovProvider(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ProviderInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProviderInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProviderInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum 
{ - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EMail", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EMail = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Website", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Website = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateProvider) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateProvider: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateProvider: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostURI", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostURI = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta1.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateProviderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateProviderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgUpdateProvider) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: MsgUpdateProvider: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgUpdateProvider: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostURI", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostURI = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta1.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgUpdateProviderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 
{ - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgUpdateProviderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgUpdateProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDeleteProvider) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDeleteProvider: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDeleteProvider: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDeleteProviderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDeleteProviderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDeleteProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Provider) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Provider: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Provider: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostURI", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostURI = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta1.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipProvider(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProvider - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProvider - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProvider - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthProvider - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupProvider - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthProvider - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthProvider = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowProvider = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupProvider = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/provider/v1beta1/types.go b/go/node/provider/v1beta1/types.go deleted file mode 100644 index feaa0459..00000000 --- a/go/node/provider/v1beta1/types.go +++ /dev/null @@ -1,67 +0,0 @@ -package v1beta1 - -import ( - "bytes" - "fmt" - "net/url" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// String implements the Stringer interface for a Provider object. -func (p Provider) String() string { - res := fmt.Sprintf(`Deployment - Owner: %s - HostURI: %s - Attributes: %v - `, p.Owner, p.HostURI, p.Attributes) - - if !p.Info.IsEmpty() { - res += fmt.Sprintf("Info: %v\n", p.Info) - } - return res -} - -// Providers is the collection of Provider -type Providers []Provider - -// String implements the Stringer interface for a Providers object. 
-func (obj Providers) String() string {
-	var buf bytes.Buffer
-
-	const sep = "\n\n"
-
-	for _, p := range obj {
-		buf.WriteString(p.String())
-		buf.WriteString(sep)
-	}
-
-	if len(obj) > 0 {
-		buf.Truncate(buf.Len() - len(sep))
-	}
-
-	return buf.String()
-}
-
-// Address implements provider and returns owner of provider
-func (p *Provider) Address() sdk.AccAddress {
-	owner, err := sdk.AccAddressFromBech32(p.Owner)
-	if err != nil {
-		panic(err)
-	}
-
-	return owner
-}
-
-func (m ProviderInfo) IsEmpty() bool {
-	return m.EMail == "" && m.Website == ""
-}
-
-func (m ProviderInfo) Validate() error {
-	if m.Website != "" {
-		if _, err := url.Parse(m.Website); err != nil {
-			return ErrInvalidInfoWebsite
-		}
-	}
-	return nil
-}
diff --git a/go/node/provider/v1beta2/codec.go b/go/node/provider/v1beta2/codec.go
deleted file mode 100644
index b1ea5f2a..00000000
--- a/go/node/provider/v1beta2/codec.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package v1beta2
-
-import (
-	"github.com/cosmos/cosmos-sdk/codec"
-	cdctypes "github.com/cosmos/cosmos-sdk/codec/types"
-	cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec"
-	sdk "github.com/cosmos/cosmos-sdk/types"
-	"github.com/cosmos/cosmos-sdk/types/msgservice"
-)
-
-var (
-	amino = codec.NewLegacyAmino()
-
-	// ModuleCdc references the global x/provider module codec. Note, the codec should
-	// ONLY be used in certain instances of tests and for JSON encoding as Amino is
-	// still used for that purpose.
-	//
-	// The actual codec used for serialization should be provided to x/provider and
-	// defined at the application level.
-	ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry())
-)
-
-func init() {
-	RegisterLegacyAminoCodec(amino)
-	cryptocodec.RegisterCrypto(amino)
-	amino.Seal()
-}
-
-// RegisterCodec register concrete types on codec
-func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
-	cdc.RegisterConcrete(&MsgCreateProvider{}, ModuleName+"/"+MsgTypeCreateProvider, nil)
-	cdc.RegisterConcrete(&MsgUpdateProvider{}, ModuleName+"/"+MsgTypeUpdateProvider, nil)
-	cdc.RegisterConcrete(&MsgDeleteProvider{}, ModuleName+"/"+MsgTypeDeleteProvider, nil)
-}
-
-// RegisterInterfaces registers the x/provider interfaces types with the interface registry
-func RegisterInterfaces(registry cdctypes.InterfaceRegistry) {
-	registry.RegisterImplementations((*sdk.Msg)(nil),
-		&MsgCreateProvider{},
-		&MsgUpdateProvider{},
-		&MsgDeleteProvider{},
-	)
-
-	msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc)
-}
diff --git a/go/node/provider/v1beta2/errors.go b/go/node/provider/v1beta2/errors.go
deleted file mode 100644
index e6c5c66b..00000000
--- a/go/node/provider/v1beta2/errors.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package v1beta2
-
-import (
-	"errors"
-)
-
-var (
-	// ErrInvalidProviderURI register error code for invalid provider uri
-	ErrInvalidProviderURI = errors.New("invalid provider: invalid host uri")
-
-	// ErrNotAbsProviderURI register error code for not absolute provider uri
-	ErrNotAbsProviderURI = errors.New("invalid provider: not absolute host uri")
-
-	// ErrProviderNotFound provider not found
-	ErrProviderNotFound = errors.New("invalid provider: address not found")
-
-	// ErrProviderExists provider already exists
-	ErrProviderExists = errors.New("invalid provider: already exists")
-
-	// ErrInvalidAddress invalid provider address
-	ErrInvalidAddress = errors.New("invalid address")
-
-	// ErrAttributes error code for provider attribute problems
-	ErrAttributes = errors.New("attribute specification error")
-
-	// ErrIncompatibleAttributes error code for attributes update
-	ErrIncompatibleAttributes = errors.New("attributes cannot be changed")
-
-	// ErrInvalidInfoWebsite register error code for invalid info website
-	ErrInvalidInfoWebsite = errors.New("invalid provider: invalid info website")
-)
diff --git a/go/node/provider/v1beta2/event.go b/go/node/provider/v1beta2/event.go
deleted file mode 100644
index f10635a7..00000000
--- a/go/node/provider/v1beta2/event.go
+++ /dev/null
@@ -1,142 +0,0 @@
-package v1beta2
-
-import (
-	sdk "github.com/cosmos/cosmos-sdk/types"
-
-	"github.com/akash-network/akash-api/go/sdkutil"
-)
-
-const (
-	EvActionProviderCreated = "provider-created"
-	EvActionProviderUpdated = "provider-updated"
-	EvActionProviderDeleted = "provider-deleted"
-	EvOwnerKey              = "owner"
-)
-
-// EventProviderCreated struct
-type EventProviderCreated struct {
-	Context sdkutil.BaseModuleEvent `json:"context"`
-	Owner   sdk.AccAddress          `json:"owner"`
-}
-
-func NewEventProviderCreated(owner sdk.AccAddress) EventProviderCreated {
-	return EventProviderCreated{
-		Context: sdkutil.BaseModuleEvent{
-			Module: ModuleName,
-			Action: EvActionProviderCreated,
-		},
-		Owner: owner,
-	}
-}
-
-// ToSDKEvent method creates new sdk event for EventProviderCreated struct
-func (ev EventProviderCreated) ToSDKEvent() sdk.Event {
-	return sdk.NewEvent(sdkutil.EventTypeMessage,
-		append([]sdk.Attribute{
-			sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName),
-			sdk.NewAttribute(sdk.AttributeKeyAction, EvActionProviderCreated),
-		}, ProviderEVAttributes(ev.Owner)...)...,
-	)
-}
-
-// EventProviderUpdated struct
-type EventProviderUpdated struct {
-	Context sdkutil.BaseModuleEvent `json:"context"`
-	Owner   sdk.AccAddress          `json:"owner"`
-}
-
-func NewEventProviderUpdated(owner sdk.AccAddress) EventProviderUpdated {
-	return EventProviderUpdated{
-		Context: sdkutil.BaseModuleEvent{
-			Module: ModuleName,
-			Action: EvActionProviderUpdated,
-		},
-		Owner: owner,
-	}
-}
-
-// ToSDKEvent method creates new sdk event for EventProviderUpdated struct
-func (ev EventProviderUpdated) ToSDKEvent() sdk.Event {
-	return sdk.NewEvent(sdkutil.EventTypeMessage,
-		append([]sdk.Attribute{
-			sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName),
-			sdk.NewAttribute(sdk.AttributeKeyAction, EvActionProviderUpdated),
-		}, ProviderEVAttributes(ev.Owner)...)...,
-	)
-}
-
-// EventProviderDeleted struct
-type EventProviderDeleted struct {
-	Context sdkutil.BaseModuleEvent `json:"context"`
-	Owner   sdk.AccAddress          `json:"owner"`
-}
-
-func NewEventProviderDeleted(owner sdk.AccAddress) EventProviderDeleted {
-	return EventProviderDeleted{
-		Context: sdkutil.BaseModuleEvent{
-			Module: ModuleName,
-			Action: EvActionProviderDeleted,
-		},
-		Owner: owner,
-	}
-}
-
-// ToSDKEvent method creates new sdk event for EventProviderDeleted struct
-func (ev EventProviderDeleted) ToSDKEvent() sdk.Event {
-	return sdk.NewEvent(sdkutil.EventTypeMessage,
-		append([]sdk.Attribute{
-			sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName),
-			sdk.NewAttribute(sdk.AttributeKeyAction, EvActionProviderDeleted),
-		}, ProviderEVAttributes(ev.Owner)...)...,
-	)
-}
-
-// ProviderEVAttributes returns event attribues for given Provider
-func ProviderEVAttributes(owner sdk.AccAddress) []sdk.Attribute {
-	return []sdk.Attribute{
-		sdk.NewAttribute(EvOwnerKey, owner.String()),
-	}
-}
-
-// ParseEVProvider returns provider details for given event attributes
-func ParseEVProvider(attrs []sdk.Attribute) (sdk.AccAddress, error) {
-	owner, err := sdkutil.GetAccAddress(attrs, EvOwnerKey)
-	if err
!= nil { - return sdk.AccAddress{}, err - } - - return owner, nil -} - -// ParseEvent parses event and returns details of event and error if occurred -// TODO: Enable returning actual events. -func ParseEvent(ev sdkutil.Event) (sdkutil.ModuleEvent, error) { - if ev.Type != sdkutil.EventTypeMessage { - return nil, sdkutil.ErrUnknownType - } - if ev.Module != ModuleName { - return nil, sdkutil.ErrUnknownModule - } - switch ev.Action { - case EvActionProviderCreated: - owner, err := ParseEVProvider(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventProviderCreated(owner), nil - case EvActionProviderUpdated: - owner, err := ParseEVProvider(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventProviderUpdated(owner), nil - case EvActionProviderDeleted: - owner, err := ParseEVProvider(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventProviderDeleted(owner), nil - default: - return nil, sdkutil.ErrUnknownAction - } -} diff --git a/go/node/provider/v1beta2/genesis.pb.go b/go/node/provider/v1beta2/genesis.pb.go deleted file mode 100644 index 6284492b..00000000 --- a/go/node/provider/v1beta2/genesis.pb.go +++ /dev/null @@ -1,334 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/provider/v1beta2/genesis.proto - -package v1beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisState defines the basic genesis state used by provider module -type GenesisState struct { - Providers []Provider `protobuf:"bytes,1,rep,name=providers,proto3" json:"providers" yaml:"providers"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_d89e94590927be88, []int{0} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetProviders() []Provider { - if m != nil { - return m.Providers - } - return nil -} - -func init() { - proto.RegisterType((*GenesisState)(nil), "akash.provider.v1beta2.GenesisState") -} - -func init() { - proto.RegisterFile("akash/provider/v1beta2/genesis.proto", fileDescriptor_d89e94590927be88) -} - -var fileDescriptor_d89e94590927be88 = []byte{ - // 226 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x2f, 0x28, 0xca, 0x2f, 0xcb, 0x4c, 0x49, 0x2d, 0xd2, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, - 0x49, 0x34, 0xd2, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, - 0x17, 0x12, 0x03, 0xab, 0xd2, 0x83, 0xa9, 0xd2, 0x83, 0xaa, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, - 0x07, 0x2b, 0xd1, 0x07, 0xb1, 0x20, 0xaa, 0xa5, 0x54, 0x71, 0x98, 0x09, 0xd7, 0x0e, 0x56, 0xa6, - 0x54, 0xca, 0xc5, 0xe3, 0x0e, 0xb1, 0x25, 0xb8, 0x24, 0xb1, 0x24, 0x55, 0x28, 0x95, 0x8b, 0x13, - 0xa6, 0xa2, 0x58, 0x82, 0x51, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x41, 0x0f, 0xbb, 0xc5, 0x7a, 0x01, - 0x50, 0x01, 0x27, 0xd5, 0x13, 0xf7, 0xe4, 0x19, 0x5e, 0xdd, 0x93, 0x47, 0x68, 0xfd, 0x74, 0x4f, - 0x5e, 0xa0, 0x32, 0x31, 0x37, 0xc7, 0x4a, 0x09, 0x2e, 0xa4, 0x14, 0x84, 0x90, 0x76, 0x0a, 0x3d, - 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, - 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xeb, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, - 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0xbd, 0xba, 0x79, 0xa9, 0x25, 0xe5, 0xf9, 0x45, 0xd9, - 0x50, 0x5e, 0x62, 0x41, 0xa6, 0x7e, 0x7a, 0xbe, 0x7e, 0x5e, 0x7e, 0x4a, 0x2a, 0x86, 0xe7, 0x92, - 0xd8, 0xc0, 0x9e, 0x32, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x1b, 0x1d, 0x93, 0x13, 0x51, 0x01, - 0x00, 0x00, -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i 
- var l int - _ = l - if len(m.Providers) > 0 { - for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Providers) > 0 { - for _, e := range m.Providers { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Providers = append(m.Providers, Provider{}) - if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx 
>= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/provider/v1beta2/key.go b/go/node/provider/v1beta2/key.go deleted file mode 100644 index 93b959b0..00000000 --- a/go/node/provider/v1beta2/key.go +++ /dev/null @@ -1,12 +0,0 @@ -package v1beta2 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "provider" - - // StoreKey is the store key string for provider - StoreKey = ModuleName - - // RouterKey is the message route for provider - RouterKey = ModuleName -) diff --git a/go/node/provider/v1beta2/msgs.go b/go/node/provider/v1beta2/msgs.go deleted file mode 100644 index 5a6d4150..00000000 --- a/go/node/provider/v1beta2/msgs.go +++ /dev/null @@ -1,175 +0,0 @@ -package v1beta2 - -import ( - "net/url" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - "github.com/pkg/errors" - - types "github.com/akash-network/akash-api/go/node/types/v1beta2" -) - -const ( - MsgTypeCreateProvider = "create-provider" - MsgTypeUpdateProvider = "update-provider" - MsgTypeDeleteProvider = "delete-provider" -) - -var ( - _, _, _ sdk.Msg = &MsgCreateProvider{}, &MsgUpdateProvider{}, &MsgDeleteProvider{} -) - -// NewMsgCreateProvider creates a new MsgCreateProvider instance -func NewMsgCreateProvider(owner sdk.AccAddress, hostURI string, attributes types.Attributes) *MsgCreateProvider { - return &MsgCreateProvider{ - Owner: owner.String(), - HostURI: hostURI, - Attributes: attributes, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCreateProvider) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCreateProvider) Type() string { return MsgTypeCreateProvider } - -// ValidateBasic does basic validation of a HostURI -func (msg MsgCreateProvider) ValidateBasic() error { - if err := validateProviderURI(msg.HostURI); err != nil { - return err - } - if _, err := sdk.AccAddressFromBech32(msg.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Provider Address") - } - if err := msg.Attributes.Validate(); err != nil { - return err - } - if err := msg.Info.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgCreateProvider) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCreateProvider) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgUpdateProvider creates a new MsgUpdateProvider instance -func NewMsgUpdateProvider(owner sdk.AccAddress, hostURI string, attributes types.Attributes) 
*MsgUpdateProvider { - return &MsgUpdateProvider{ - Owner: owner.String(), - HostURI: hostURI, - Attributes: attributes, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgUpdateProvider) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgUpdateProvider) Type() string { return MsgTypeUpdateProvider } - -// ValidateBasic does basic validation of a ProviderURI -func (msg MsgUpdateProvider) ValidateBasic() error { - if err := validateProviderURI(msg.HostURI); err != nil { - return err - } - if _, err := sdk.AccAddressFromBech32(msg.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgUpdate: Invalid Provider Address") - } - if err := msg.Attributes.Validate(); err != nil { - return err - } - if err := msg.Info.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgUpdateProvider) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgUpdateProvider) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgDeleteProvider creates a new MsgDeleteProvider instance -func NewMsgDeleteProvider(owner sdk.AccAddress) *MsgDeleteProvider { - return &MsgDeleteProvider{ - Owner: owner.String(), - } -} - -// Route implements the sdk.Msg interface -func (msg MsgDeleteProvider) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgDeleteProvider) Type() string { return MsgTypeDeleteProvider } - -// ValidateBasic does basic validation -func (msg MsgDeleteProvider) ValidateBasic() error { - if _, err := sdk.AccAddressFromBech32(msg.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgDelete: Invalid Provider Address") - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgDeleteProvider) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgDeleteProvider) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -func validateProviderURI(val string) error { - u, err := url.Parse(val) - if err != nil { - return ErrInvalidProviderURI - } - if !u.IsAbs() { - return errors.Wrapf(ErrNotAbsProviderURI, "validating %q for absolute URI", val) - } - - if u.Scheme != "https" { - return errors.Wrapf(ErrInvalidProviderURI, "scheme in %q should be https", val) - } - - if u.Host == "" { - return errors.Wrapf(ErrInvalidProviderURI, "validating %q for valid host", val) - } - - if u.Path != "" { - return errors.Wrapf(ErrInvalidProviderURI, "path in %q should be empty", val) - } - - return nil -} diff --git a/go/node/provider/v1beta2/provider.pb.go b/go/node/provider/v1beta2/provider.pb.go deleted file mode 100644 index 8e28d016..00000000 --- a/go/node/provider/v1beta2/provider.pb.go +++ /dev/null @@ -1,2101 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: akash/provider/v1beta2/provider.proto - -package v1beta2 - -import ( - context "context" - fmt "fmt" - github_com_akash_network_akash_api_go_node_types_v1beta2 "github.com/akash-network/akash-api/go/node/types/v1beta2" - v1beta2 "github.com/akash-network/akash-api/go/node/types/v1beta2" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// ProviderInfo -type ProviderInfo struct { - EMail string `protobuf:"bytes,1,opt,name=email,proto3" json:"email" yaml:"email"` - Website string `protobuf:"bytes,2,opt,name=website,proto3" json:"website" yaml:"website"` -} - -func (m *ProviderInfo) Reset() { *m = ProviderInfo{} } -func (m *ProviderInfo) String() string { return proto.CompactTextString(m) } -func (*ProviderInfo) ProtoMessage() {} -func (*ProviderInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_3b2702cb5ab07405, []int{0} -} -func (m *ProviderInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProviderInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProviderInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProviderInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProviderInfo.Merge(m, src) -} -func (m *ProviderInfo) XXX_Size() int { - return m.Size() -} -func (m *ProviderInfo) XXX_DiscardUnknown() { - xxx_messageInfo_ProviderInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_ProviderInfo proto.InternalMessageInfo - -func (m *ProviderInfo) GetEMail() string { - if m != nil { - return m.EMail - } - return "" -} - -func (m *ProviderInfo) GetWebsite() string { - if m != nil { - return m.Website - } - return "" -} - -// MsgCreateProvider defines an SDK message for creating a provider -type MsgCreateProvider struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - HostURI string `protobuf:"bytes,2,opt,name=host_uri,json=hostUri,proto3" json:"host_uri" yaml:"host_uri"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta2.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta2.Attributes" json:"attributes" yaml:"attributes"` - Info ProviderInfo `protobuf:"bytes,4,opt,name=info,proto3" json:"info" yaml:"info"` -} - -func (m *MsgCreateProvider) Reset() { *m = MsgCreateProvider{} } -func (m *MsgCreateProvider) String() string { return proto.CompactTextString(m) } -func (*MsgCreateProvider) ProtoMessage() {} -func (*MsgCreateProvider) Descriptor() ([]byte, []int) { - return fileDescriptor_3b2702cb5ab07405, []int{1} -} -func (m *MsgCreateProvider) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateProvider) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateProvider.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateProvider) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateProvider.Merge(m, src) -} -func (m *MsgCreateProvider) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateProvider) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateProvider.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateProvider proto.InternalMessageInfo - -func (m *MsgCreateProvider) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *MsgCreateProvider) GetHostURI() string { - if m != nil { - return m.HostURI - } - return "" -} - -func (m *MsgCreateProvider) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta2.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *MsgCreateProvider) GetInfo() ProviderInfo { - if m != nil { - return m.Info - } - return ProviderInfo{} -} - -// MsgCreateProviderResponse defines the Msg/CreateProvider response type. -type MsgCreateProviderResponse struct { -} - -func (m *MsgCreateProviderResponse) Reset() { *m = MsgCreateProviderResponse{} } -func (m *MsgCreateProviderResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateProviderResponse) ProtoMessage() {} -func (*MsgCreateProviderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_3b2702cb5ab07405, []int{2} -} -func (m *MsgCreateProviderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateProviderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateProviderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateProviderResponse.Merge(m, src) -} -func (m *MsgCreateProviderResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateProviderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateProviderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateProviderResponse proto.InternalMessageInfo - -// MsgUpdateProvider defines an SDK message for updating a provider -type MsgUpdateProvider struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - HostURI string `protobuf:"bytes,2,opt,name=host_uri,json=hostUri,proto3" json:"host_uri" yaml:"host_uri"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta2.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta2.Attributes" json:"attributes" yaml:"attributes"` - Info ProviderInfo `protobuf:"bytes,4,opt,name=info,proto3" json:"info" yaml:"info"` -} - -func (m *MsgUpdateProvider) Reset() { *m = MsgUpdateProvider{} } -func (m *MsgUpdateProvider) String() string { return proto.CompactTextString(m) } -func (*MsgUpdateProvider) ProtoMessage() {} -func (*MsgUpdateProvider) Descriptor() ([]byte, []int) { - return fileDescriptor_3b2702cb5ab07405, []int{3} -} -func (m *MsgUpdateProvider) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgUpdateProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if 
deterministic { - return xxx_messageInfo_MsgUpdateProvider.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgUpdateProvider) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgUpdateProvider.Merge(m, src) -} -func (m *MsgUpdateProvider) XXX_Size() int { - return m.Size() -} -func (m *MsgUpdateProvider) XXX_DiscardUnknown() { - xxx_messageInfo_MsgUpdateProvider.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgUpdateProvider proto.InternalMessageInfo - -func (m *MsgUpdateProvider) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *MsgUpdateProvider) GetHostURI() string { - if m != nil { - return m.HostURI - } - return "" -} - -func (m *MsgUpdateProvider) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta2.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *MsgUpdateProvider) GetInfo() ProviderInfo { - if m != nil { - return m.Info - } - return ProviderInfo{} -} - -// MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. -type MsgUpdateProviderResponse struct { -} - -func (m *MsgUpdateProviderResponse) Reset() { *m = MsgUpdateProviderResponse{} } -func (m *MsgUpdateProviderResponse) String() string { return proto.CompactTextString(m) } -func (*MsgUpdateProviderResponse) ProtoMessage() {} -func (*MsgUpdateProviderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_3b2702cb5ab07405, []int{4} -} -func (m *MsgUpdateProviderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgUpdateProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgUpdateProviderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgUpdateProviderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgUpdateProviderResponse.Merge(m, src) -} -func (m *MsgUpdateProviderResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgUpdateProviderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgUpdateProviderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgUpdateProviderResponse proto.InternalMessageInfo - -// MsgDeleteProvider defines an SDK message for deleting a provider -type MsgDeleteProvider struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` -} - -func (m *MsgDeleteProvider) Reset() { *m = MsgDeleteProvider{} } -func (m *MsgDeleteProvider) String() string { return proto.CompactTextString(m) } -func (*MsgDeleteProvider) ProtoMessage() {} -func (*MsgDeleteProvider) Descriptor() ([]byte, []int) { - return fileDescriptor_3b2702cb5ab07405, []int{5} -} -func (m *MsgDeleteProvider) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDeleteProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDeleteProvider.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDeleteProvider) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDeleteProvider.Merge(m, src) -} -func (m *MsgDeleteProvider) XXX_Size() int { - return m.Size() -} -func (m *MsgDeleteProvider) XXX_DiscardUnknown() { - 
xxx_messageInfo_MsgDeleteProvider.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDeleteProvider proto.InternalMessageInfo - -func (m *MsgDeleteProvider) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -// MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. -type MsgDeleteProviderResponse struct { -} - -func (m *MsgDeleteProviderResponse) Reset() { *m = MsgDeleteProviderResponse{} } -func (m *MsgDeleteProviderResponse) String() string { return proto.CompactTextString(m) } -func (*MsgDeleteProviderResponse) ProtoMessage() {} -func (*MsgDeleteProviderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_3b2702cb5ab07405, []int{6} -} -func (m *MsgDeleteProviderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDeleteProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDeleteProviderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDeleteProviderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDeleteProviderResponse.Merge(m, src) -} -func (m *MsgDeleteProviderResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgDeleteProviderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDeleteProviderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDeleteProviderResponse proto.InternalMessageInfo - -// Provider stores owner and host details -type Provider struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - HostURI string `protobuf:"bytes,2,opt,name=host_uri,json=hostUri,proto3" json:"host_uri" yaml:"host_uri"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta2.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta2.Attributes" json:"attributes" yaml:"attributes"` - Info ProviderInfo `protobuf:"bytes,4,opt,name=info,proto3" json:"info" yaml:"info"` -} - -func (m *Provider) Reset() { *m = Provider{} } -func (*Provider) ProtoMessage() {} -func (*Provider) Descriptor() ([]byte, []int) { - return fileDescriptor_3b2702cb5ab07405, []int{7} -} -func (m *Provider) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Provider.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Provider) XXX_Merge(src proto.Message) { - xxx_messageInfo_Provider.Merge(m, src) -} -func (m *Provider) XXX_Size() int { - return m.Size() -} -func (m *Provider) XXX_DiscardUnknown() { - xxx_messageInfo_Provider.DiscardUnknown(m) -} - -var xxx_messageInfo_Provider proto.InternalMessageInfo - -func (m *Provider) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *Provider) GetHostURI() string { - if m != nil { - return m.HostURI - } - return "" -} - -func (m *Provider) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta2.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Provider) GetInfo() ProviderInfo { - if m != nil { - return m.Info - } - return ProviderInfo{} -} - -func init() { - proto.RegisterType((*ProviderInfo)(nil), 
"akash.provider.v1beta2.ProviderInfo") - proto.RegisterType((*MsgCreateProvider)(nil), "akash.provider.v1beta2.MsgCreateProvider") - proto.RegisterType((*MsgCreateProviderResponse)(nil), "akash.provider.v1beta2.MsgCreateProviderResponse") - proto.RegisterType((*MsgUpdateProvider)(nil), "akash.provider.v1beta2.MsgUpdateProvider") - proto.RegisterType((*MsgUpdateProviderResponse)(nil), "akash.provider.v1beta2.MsgUpdateProviderResponse") - proto.RegisterType((*MsgDeleteProvider)(nil), "akash.provider.v1beta2.MsgDeleteProvider") - proto.RegisterType((*MsgDeleteProviderResponse)(nil), "akash.provider.v1beta2.MsgDeleteProviderResponse") - proto.RegisterType((*Provider)(nil), "akash.provider.v1beta2.Provider") -} - -func init() { - proto.RegisterFile("akash/provider/v1beta2/provider.proto", fileDescriptor_3b2702cb5ab07405) -} - -var fileDescriptor_3b2702cb5ab07405 = []byte{ - // 575 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x41, 0x6b, 0x13, 0x41, - 0x18, 0xdd, 0x6d, 0x12, 0x13, 0x27, 0xa5, 0xd2, 0x45, 0x24, 0x4d, 0xe9, 0x4e, 0x18, 0x14, 0xa2, - 0xe0, 0x2e, 0x8d, 0x07, 0xa5, 0x9e, 0x4c, 0x2d, 0x58, 0x21, 0x20, 0x0b, 0xb9, 0x78, 0x91, 0x5d, - 0x33, 0xdd, 0x2c, 0x4d, 0x76, 0x96, 0x9d, 0x49, 0x43, 0x8f, 0xfe, 0x03, 0xc1, 0x4b, 0x8f, 0x3d, - 0x0b, 0xfe, 0x8f, 0x1e, 0x7b, 0x11, 0x3c, 0x8d, 0x92, 0x5c, 0x24, 0xc7, 0xfc, 0x02, 0xd9, 0x99, - 0xdd, 0x6d, 0x36, 0x89, 0x41, 0xe9, 0xa5, 0x87, 0xde, 0x76, 0xde, 0xbc, 0xef, 0x7b, 0x1f, 0xdf, - 0x7b, 0x4c, 0x02, 0x1e, 0xd9, 0xc7, 0x36, 0xed, 0x9a, 0x41, 0x48, 0x4e, 0xbc, 0x0e, 0x0e, 0xcd, - 0x93, 0x5d, 0x07, 0x33, 0xbb, 0x91, 0x02, 0x46, 0x10, 0x12, 0x46, 0xb4, 0x07, 0x82, 0x66, 0xa4, - 0x68, 0x4c, 0xab, 0xde, 0x77, 0x89, 0x4b, 0x04, 0xc5, 0x8c, 0xbe, 0x24, 0xbb, 0x8a, 0x64, 0x53, - 0xc7, 0xa6, 0x38, 0x6d, 0x68, 0x33, 0x16, 0x7a, 0xce, 0x80, 0x61, 0xc9, 0x41, 0x9f, 0x54, 0xb0, - 0xfe, 0x2e, 0x6e, 0x77, 0xe8, 0x1f, 0x11, 0xed, 0x05, 0x28, 0xe0, 0xbe, 0xed, 0xf5, 0x2a, 0x6a, - 0x4d, 0xad, 0xdf, 0x6d, 0xa2, 0x11, 0x87, 0x85, 0x83, 0x96, 0xed, 0xf5, 0x26, 0x1c, 0xca, 0x9b, - 0x29, 0x87, 0xeb, 0xa7, 0x76, 0xbf, 0xb7, 0x87, 0xc4, 0x11, 0x59, 0x12, 0xd6, 0x9e, 0x83, 0xe2, - 0x10, 0x3b, 0xd4, 0x63, 0xb8, 0xb2, 0x26, 0x6a, 0x77, 0x26, 0x1c, 0x26, 0xd0, 0x94, 0xc3, 0x0d, - 0x59, 0x14, 0x03, 0xc8, 0x4a, 0xae, 0xd0, 0x59, 0x0e, 0x6c, 0xb6, 0xa8, 0xbb, 0x1f, 0x62, 0x9b, - 0xe1, 0x64, 0x18, 0xcd, 0x04, 0x05, 0x32, 0xf4, 0x71, 0x18, 0x0f, 0xb2, 0x15, 0xe9, 0x0b, 0xe0, - 0x4a, 0x5f, 0x1c, 0x91, 0x25, 0x61, 0xed, 0x00, 0x94, 0xba, 0x84, 0xb2, 0x0f, 0x83, 0xd0, 0x8b, - 0x07, 0x78, 0x32, 0xe2, 0xb0, 0xf8, 0x86, 0x50, 0xd6, 0xb6, 0x0e, 0x27, 0x1c, 0xa6, 0xd7, 0x53, - 0x0e, 0xef, 0xc9, 0x0e, 0x09, 0x82, 0xac, 0x62, 0xf4, 0xd9, 0x0e, 0x3d, 0xed, 0x9b, 0x0a, 0x40, - 0xba, 0x25, 0x5a, 0xc9, 0xd5, 0x72, 0xf5, 0x72, 0x63, 0xc7, 0x90, 0x9b, 0x8f, 0x76, 0x99, 0x6c, - 0xdd, 0x78, 0x95, 0xb0, 0x9a, 0xfe, 0x05, 0x87, 0xca, 0x84, 0xc3, 0x99, 0xc2, 0x29, 0x87, 0x9b, - 0x52, 0xe3, 0x0a, 0x43, 0x5f, 0x7f, 0xc2, 0x7d, 0xd7, 0x63, 0xdd, 0x81, 0x63, 0x7c, 0x24, 0x7d, - 0x53, 0xf4, 0x7c, 0xea, 0x63, 0x36, 0x24, 0xe1, 0x71, 0x7c, 0xb2, 0x03, 0xcf, 0x74, 0x89, 0xe9, - 0x93, 0x0e, 0x36, 0xd9, 0x69, 0x80, 0xa9, 0xb9, 0x20, 0x47, 0xad, 0x19, 0x1d, 0xad, 0x0d, 0xf2, - 0x9e, 0x7f, 0x44, 0x2a, 0xf9, 0x9a, 0x5a, 0x2f, 0x37, 0x1e, 0x1a, 0xcb, 0x23, 0x62, 0xcc, 0x9a, - 0xdc, 0xdc, 0x8e, 0xe7, 0x15, 0x95, 0x53, 0x0e, 0xcb, 0x72, 0xd2, 0xe8, 0x84, 0x2c, 0x01, 0xee, - 0xe5, 0x7f, 0x9f, 0x43, 0x05, 0x6d, 0x83, 0xad, 0x05, 0x67, 0x2c, 0x4c, 
0x03, 0xe2, 0xd3, 0xd4, - 0xb7, 0x76, 0xd0, 0xb9, 0xf5, 0xed, 0x46, 0xfa, 0x96, 0x75, 0x26, 0xf5, 0xed, 0xad, 0xb0, 0xed, - 0x35, 0xee, 0xe1, 0x6b, 0xd8, 0x96, 0x11, 0xca, 0xf6, 0x4a, 0x85, 0xbe, 0xe4, 0x40, 0xe9, 0x36, - 0x17, 0x37, 0x23, 0x17, 0xa5, 0xb3, 0x73, 0xa8, 0x44, 0x96, 0x35, 0xbe, 0xaf, 0x81, 0x5c, 0x8b, - 0xba, 0x9a, 0x0f, 0x36, 0xe6, 0x9e, 0xdc, 0xc7, 0x7f, 0x13, 0x5b, 0x78, 0x03, 0xaa, 0xbb, 0xff, - 0x4c, 0x4d, 0xd2, 0x10, 0xe9, 0xcd, 0x3d, 0x15, 0xab, 0xf4, 0xb2, 0xd4, 0x95, 0x7a, 0xcb, 0x63, - 0x1e, 0xe9, 0xcd, 0x65, 0x7c, 0x95, 0x5e, 0x96, 0xba, 0x52, 0x6f, 0x79, 0xda, 0x9b, 0xed, 0x8b, - 0x91, 0xae, 0x5e, 0x8e, 0x74, 0xf5, 0xd7, 0x48, 0x57, 0x3f, 0x8f, 0x75, 0xe5, 0x72, 0xac, 0x2b, - 0x3f, 0xc6, 0xba, 0xf2, 0xfe, 0xe5, 0x7f, 0x64, 0x64, 0xfe, 0x2f, 0x80, 0x73, 0x47, 0xfc, 0x50, - 0x3f, 0xfb, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x57, 0xe5, 0x36, 0x23, 0x08, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MsgClient interface { - // CreateProvider defines a method that creates a provider given the proper inputs - CreateProvider(ctx context.Context, in *MsgCreateProvider, opts ...grpc.CallOption) (*MsgCreateProviderResponse, error) - // UpdateProvider defines a method that updates a provider given the proper inputs - UpdateProvider(ctx context.Context, in *MsgUpdateProvider, opts ...grpc.CallOption) (*MsgUpdateProviderResponse, error) - // DeleteProvider defines a method that deletes a provider given the proper inputs - DeleteProvider(ctx context.Context, in *MsgDeleteProvider, opts ...grpc.CallOption) (*MsgDeleteProviderResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) CreateProvider(ctx context.Context, in *MsgCreateProvider, opts ...grpc.CallOption) (*MsgCreateProviderResponse, error) { - out := new(MsgCreateProviderResponse) - err := c.cc.Invoke(ctx, "/akash.provider.v1beta2.Msg/CreateProvider", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) UpdateProvider(ctx context.Context, in *MsgUpdateProvider, opts ...grpc.CallOption) (*MsgUpdateProviderResponse, error) { - out := new(MsgUpdateProviderResponse) - err := c.cc.Invoke(ctx, "/akash.provider.v1beta2.Msg/UpdateProvider", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) DeleteProvider(ctx context.Context, in *MsgDeleteProvider, opts ...grpc.CallOption) (*MsgDeleteProviderResponse, error) { - out := new(MsgDeleteProviderResponse) - err := c.cc.Invoke(ctx, "/akash.provider.v1beta2.Msg/DeleteProvider", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. 
-type MsgServer interface { - // CreateProvider defines a method that creates a provider given the proper inputs - CreateProvider(context.Context, *MsgCreateProvider) (*MsgCreateProviderResponse, error) - // UpdateProvider defines a method that updates a provider given the proper inputs - UpdateProvider(context.Context, *MsgUpdateProvider) (*MsgUpdateProviderResponse, error) - // DeleteProvider defines a method that deletes a provider given the proper inputs - DeleteProvider(context.Context, *MsgDeleteProvider) (*MsgDeleteProviderResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. -type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) CreateProvider(ctx context.Context, req *MsgCreateProvider) (*MsgCreateProviderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateProvider not implemented") -} -func (*UnimplementedMsgServer) UpdateProvider(ctx context.Context, req *MsgUpdateProvider) (*MsgUpdateProviderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateProvider not implemented") -} -func (*UnimplementedMsgServer) DeleteProvider(ctx context.Context, req *MsgDeleteProvider) (*MsgDeleteProviderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteProvider not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_CreateProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateProvider) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateProvider(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.provider.v1beta2.Msg/CreateProvider", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CreateProvider(ctx, req.(*MsgCreateProvider)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_UpdateProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgUpdateProvider) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).UpdateProvider(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.provider.v1beta2.Msg/UpdateProvider", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).UpdateProvider(ctx, req.(*MsgUpdateProvider)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_DeleteProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgDeleteProvider) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).DeleteProvider(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.provider.v1beta2.Msg/DeleteProvider", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).DeleteProvider(ctx, req.(*MsgDeleteProvider)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.provider.v1beta2.Msg", - HandlerType: (*MsgServer)(nil), - Methods: 
[]grpc.MethodDesc{ - { - MethodName: "CreateProvider", - Handler: _Msg_CreateProvider_Handler, - }, - { - MethodName: "UpdateProvider", - Handler: _Msg_UpdateProvider_Handler, - }, - { - MethodName: "DeleteProvider", - Handler: _Msg_DeleteProvider_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/provider/v1beta2/provider.proto", -} - -func (m *ProviderInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProviderInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProviderInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Website) > 0 { - i -= len(m.Website) - copy(dAtA[i:], m.Website) - i = encodeVarintProvider(dAtA, i, uint64(len(m.Website))) - i-- - dAtA[i] = 0x12 - } - if len(m.EMail) > 0 { - i -= len(m.EMail) - copy(dAtA[i:], m.EMail) - i = encodeVarintProvider(dAtA, i, uint64(len(m.EMail))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateProvider) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateProvider) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.HostURI) > 0 { - i -= len(m.HostURI) - copy(dAtA[i:], m.HostURI) - i = encodeVarintProvider(dAtA, i, uint64(len(m.HostURI))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateProviderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateProviderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgUpdateProvider) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgUpdateProvider) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgUpdateProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := 
m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.HostURI) > 0 { - i -= len(m.HostURI) - copy(dAtA[i:], m.HostURI) - i = encodeVarintProvider(dAtA, i, uint64(len(m.HostURI))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgUpdateProviderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgUpdateProviderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgUpdateProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgDeleteProvider) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDeleteProvider) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDeleteProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgDeleteProviderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDeleteProviderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDeleteProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *Provider) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Provider) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Provider) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.HostURI) > 0 { - i -= 
len(m.HostURI) - copy(dAtA[i:], m.HostURI) - i = encodeVarintProvider(dAtA, i, uint64(len(m.HostURI))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintProvider(dAtA []byte, offset int, v uint64) int { - offset -= sovProvider(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ProviderInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.EMail) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - l = len(m.Website) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - return n -} - -func (m *MsgCreateProvider) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - l = len(m.HostURI) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovProvider(uint64(l)) - } - } - l = m.Info.Size() - n += 1 + l + sovProvider(uint64(l)) - return n -} - -func (m *MsgCreateProviderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgUpdateProvider) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - l = len(m.HostURI) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovProvider(uint64(l)) - } - } - l = m.Info.Size() - n += 1 + l + sovProvider(uint64(l)) - return n -} - -func (m *MsgUpdateProviderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgDeleteProvider) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - return n -} - -func (m *MsgDeleteProviderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *Provider) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - l = len(m.HostURI) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovProvider(uint64(l)) - } - } - l = m.Info.Size() - n += 1 + l + sovProvider(uint64(l)) - return n -} - -func sovProvider(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozProvider(x uint64) (n int) { - return sovProvider(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ProviderInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProviderInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProviderInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum 
{ - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EMail", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EMail = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Website", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Website = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateProvider) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateProvider: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateProvider: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostURI", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostURI = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta2.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateProviderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateProviderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgUpdateProvider) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: MsgUpdateProvider: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgUpdateProvider: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostURI", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostURI = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta2.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgUpdateProviderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 
{ - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgUpdateProviderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgUpdateProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDeleteProvider) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDeleteProvider: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDeleteProvider: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDeleteProviderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDeleteProviderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDeleteProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Provider) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Provider: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Provider: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostURI", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostURI = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta2.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipProvider(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProvider - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProvider - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProvider - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthProvider - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupProvider - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthProvider - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthProvider = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowProvider = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupProvider = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/provider/v1beta2/query.pb.go b/go/node/provider/v1beta2/query.pb.go deleted file mode 100644 index 40c6e8eb..00000000 --- a/go/node/provider/v1beta2/query.pb.go +++ /dev/null @@ -1,1059 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/provider/v1beta2/query.proto - -package v1beta2 - -import ( - context "context" - fmt "fmt" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryProvidersRequest is request type for the Query/Providers RPC method -type QueryProvidersRequest struct { - Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryProvidersRequest) Reset() { *m = QueryProvidersRequest{} } -func (m *QueryProvidersRequest) String() string { return proto.CompactTextString(m) } -func (*QueryProvidersRequest) ProtoMessage() {} -func (*QueryProvidersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_42d9228a7f66d48a, []int{0} -} -func (m *QueryProvidersRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProvidersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProvidersRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProvidersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProvidersRequest.Merge(m, src) -} -func (m *QueryProvidersRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryProvidersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProvidersRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProvidersRequest proto.InternalMessageInfo - -func (m *QueryProvidersRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -type QueryProvidersResponse struct { - Providers Providers `protobuf:"bytes,1,rep,name=providers,proto3,castrepeated=Providers" json:"providers"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryProvidersResponse) Reset() { *m = QueryProvidersResponse{} } -func (m *QueryProvidersResponse) String() string { return proto.CompactTextString(m) } -func (*QueryProvidersResponse) ProtoMessage() {} -func (*QueryProvidersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_42d9228a7f66d48a, []int{1} -} -func (m *QueryProvidersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProvidersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProvidersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProvidersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProvidersResponse.Merge(m, src) -} -func (m *QueryProvidersResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryProvidersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProvidersResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProvidersResponse proto.InternalMessageInfo - -func (m *QueryProvidersResponse) GetProviders() Providers { - if m != nil { - return m.Providers - } - return nil -} - -func (m *QueryProvidersResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProviderRequest is request type for the Query/Provider RPC method -type QueryProviderRequest struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner,omitempty"` -} - -func (m *QueryProviderRequest) Reset() { *m = 
QueryProviderRequest{} } -func (m *QueryProviderRequest) String() string { return proto.CompactTextString(m) } -func (*QueryProviderRequest) ProtoMessage() {} -func (*QueryProviderRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_42d9228a7f66d48a, []int{2} -} -func (m *QueryProviderRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProviderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProviderRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProviderRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProviderRequest.Merge(m, src) -} -func (m *QueryProviderRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryProviderRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProviderRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProviderRequest proto.InternalMessageInfo - -func (m *QueryProviderRequest) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -// QueryProviderResponse is response type for the Query/Provider RPC method -type QueryProviderResponse struct { - Provider Provider `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider"` -} - -func (m *QueryProviderResponse) Reset() { *m = QueryProviderResponse{} } -func (m *QueryProviderResponse) String() string { return proto.CompactTextString(m) } -func (*QueryProviderResponse) ProtoMessage() {} -func (*QueryProviderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_42d9228a7f66d48a, []int{3} -} -func (m *QueryProviderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProviderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProviderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProviderResponse.Merge(m, src) -} -func (m *QueryProviderResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryProviderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProviderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProviderResponse proto.InternalMessageInfo - -func (m *QueryProviderResponse) GetProvider() Provider { - if m != nil { - return m.Provider - } - return Provider{} -} - -func init() { - proto.RegisterType((*QueryProvidersRequest)(nil), "akash.provider.v1beta2.QueryProvidersRequest") - proto.RegisterType((*QueryProvidersResponse)(nil), "akash.provider.v1beta2.QueryProvidersResponse") - proto.RegisterType((*QueryProviderRequest)(nil), "akash.provider.v1beta2.QueryProviderRequest") - proto.RegisterType((*QueryProviderResponse)(nil), "akash.provider.v1beta2.QueryProviderResponse") -} - -func init() { - proto.RegisterFile("akash/provider/v1beta2/query.proto", fileDescriptor_42d9228a7f66d48a) -} - -var fileDescriptor_42d9228a7f66d48a = []byte{ - // 449 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x31, 0x6f, 0xd3, 0x40, - 0x14, 0xc7, 0x7d, 0x81, 0xa2, 0xe6, 0x3a, 0x71, 0x0a, 0x55, 0x15, 0x21, 0x37, 0x18, 0x01, 0x2d, - 0x34, 0x77, 0x8a, 0x19, 0xd9, 0x32, 0xc0, 0xda, 0x5a, 0x62, 0x81, 0x01, 0x9d, 0xdb, 0xd3, 0xd5, - 0x2a, 0xbd, 
0xe7, 0xfa, 0x9c, 0x44, 0x08, 0xb1, 0xf0, 0x09, 0x90, 0x10, 0x0b, 0x1f, 0x81, 0x99, - 0x91, 0x0f, 0x90, 0x31, 0x12, 0x0b, 0x13, 0xa0, 0x84, 0x0f, 0x82, 0x7c, 0x77, 0x8e, 0x49, 0x48, - 0xe4, 0x6c, 0x3e, 0xfb, 0xff, 0xfe, 0xef, 0xf7, 0xfe, 0xe7, 0x87, 0x03, 0x7e, 0xc1, 0xf5, 0x39, - 0x4b, 0x33, 0x18, 0x26, 0x67, 0x22, 0x63, 0xc3, 0x5e, 0x2c, 0x72, 0x1e, 0xb2, 0xab, 0x81, 0xc8, - 0xde, 0xd0, 0x34, 0x83, 0x1c, 0xc8, 0xae, 0xd1, 0xd0, 0x52, 0x43, 0x9d, 0xa6, 0xdd, 0x92, 0x20, - 0xc1, 0x48, 0x58, 0xf1, 0x64, 0xd5, 0xed, 0xdb, 0x12, 0x40, 0xbe, 0x16, 0x8c, 0xa7, 0x09, 0xe3, - 0x4a, 0x41, 0xce, 0xf3, 0x04, 0x94, 0x76, 0x5f, 0x1f, 0x9e, 0x82, 0xbe, 0x04, 0xcd, 0x62, 0xae, - 0x85, 0x6d, 0xe2, 0x5a, 0xf6, 0x58, 0xca, 0x65, 0xa2, 0x8c, 0xd8, 0x69, 0xef, 0xad, 0x61, 0x9b, - 0x83, 0x18, 0x59, 0xf0, 0x0a, 0xdf, 0x3a, 0x29, 0x8c, 0x8e, 0xdd, 0x6b, 0x1d, 0x89, 0xab, 0x81, - 0xd0, 0x39, 0x79, 0x8a, 0x71, 0xe5, 0xb9, 0x87, 0x3a, 0xe8, 0x60, 0x27, 0xbc, 0x4f, 0x2d, 0x00, - 0x2d, 0x00, 0xa8, 0x9d, 0xd2, 0x01, 0xd0, 0x63, 0x2e, 0x85, 0xab, 0x8d, 0xfe, 0xa9, 0x0c, 0xbe, - 0x22, 0xbc, 0xbb, 0xdc, 0x41, 0xa7, 0xa0, 0xb4, 0x20, 0x27, 0xb8, 0x59, 0xd2, 0xe8, 0x3d, 0xd4, - 0xb9, 0x76, 0xb0, 0x13, 0x76, 0xe8, 0xea, 0xb8, 0x68, 0x59, 0xdd, 0xbf, 0x39, 0xfe, 0xb9, 0xef, - 0x7d, 0xf9, 0xb5, 0xdf, 0xac, 0xfc, 0x2a, 0x17, 0xf2, 0x6c, 0x81, 0xba, 0x61, 0xa8, 0x1f, 0xd4, - 0x52, 0x5b, 0x9e, 0x05, 0xec, 0x23, 0xdc, 0x5a, 0xa0, 0x2e, 0x63, 0x69, 0xe1, 0x2d, 0x18, 0x29, - 0x91, 0x99, 0x44, 0x9a, 0x91, 0x3d, 0x04, 0x2f, 0x97, 0x52, 0x9c, 0x8f, 0xd8, 0xc7, 0xdb, 0x25, - 0x9c, 0xcb, 0xb0, 0x7e, 0xc2, 0xeb, 0xc5, 0x84, 0xd1, 0xbc, 0x2e, 0xfc, 0xd6, 0xc0, 0x5b, 0xc6, - 0x9d, 0x7c, 0x42, 0xb8, 0x1a, 0x9b, 0x74, 0xd7, 0x39, 0xad, 0xbc, 0xd0, 0x36, 0xdd, 0x54, 0x6e, - 0xd1, 0x83, 0xc3, 0xf7, 0xdf, 0xff, 0x7c, 0x6c, 0xdc, 0x25, 0x77, 0x58, 0xcd, 0x9f, 0xa4, 0xc9, - 0x67, 0x84, 0xb7, 0x4b, 0x03, 0x72, 0xb4, 0x51, 0x9f, 0x92, 0xaa, 0xbb, 0xa1, 0xda, 0x41, 0xf5, - 0x0c, 0xd4, 0x23, 0x72, 0x58, 0x0b, 0xc5, 0xde, 0x9a, 0xab, 0x79, 0xd7, 0x7f, 0x3e, 0x9e, 0xfa, - 0x68, 0x32, 0xf5, 0xd1, 0xef, 0xa9, 0x8f, 0x3e, 0xcc, 0x7c, 0x6f, 0x32, 0xf3, 0xbd, 0x1f, 0x33, - 0xdf, 0x7b, 0xf1, 0x44, 0x26, 0xf9, 0xf9, 0x20, 0xa6, 0xa7, 0x70, 0x69, 0xed, 0xba, 0x4a, 0xe4, - 0x23, 0xc8, 0x2e, 0xdc, 0xa9, 0x58, 0x42, 0x09, 0x4c, 0xc1, 0x99, 0xf8, 0xaf, 0x51, 0x7c, 0xc3, - 0xec, 0xcf, 0xe3, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd8, 0x71, 0x5f, 0x48, 0x04, 0x04, 0x00, - 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type QueryClient interface { - // Providers queries providers - Providers(ctx context.Context, in *QueryProvidersRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) - // Provider queries provider details - Provider(ctx context.Context, in *QueryProviderRequest, opts ...grpc.CallOption) (*QueryProviderResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Providers(ctx context.Context, in *QueryProvidersRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { - out := new(QueryProvidersResponse) - err := c.cc.Invoke(ctx, "/akash.provider.v1beta2.Query/Providers", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Provider(ctx context.Context, in *QueryProviderRequest, opts ...grpc.CallOption) (*QueryProviderResponse, error) { - out := new(QueryProviderResponse) - err := c.cc.Invoke(ctx, "/akash.provider.v1beta2.Query/Provider", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // Providers queries providers - Providers(context.Context, *QueryProvidersRequest) (*QueryProvidersResponse, error) - // Provider queries provider details - Provider(context.Context, *QueryProviderRequest) (*QueryProviderResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. -type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Providers(ctx context.Context, req *QueryProvidersRequest) (*QueryProvidersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Providers not implemented") -} -func (*UnimplementedQueryServer) Provider(ctx context.Context, req *QueryProviderRequest) (*QueryProviderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Provider not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Providers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryProvidersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Providers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.provider.v1beta2.Query/Providers", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Providers(ctx, req.(*QueryProvidersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Provider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryProviderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Provider(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.provider.v1beta2.Query/Provider", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Provider(ctx, req.(*QueryProviderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.provider.v1beta2.Query", - HandlerType: (*QueryServer)(nil), - Methods: 
[]grpc.MethodDesc{ - { - MethodName: "Providers", - Handler: _Query_Providers_Handler, - }, - { - MethodName: "Provider", - Handler: _Query_Provider_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/provider/v1beta2/query.proto", -} - -func (m *QueryProvidersRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProvidersRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProvidersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryProvidersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProvidersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProvidersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Providers) > 0 { - for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryProviderRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProviderRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProviderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryProviderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProviderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Provider.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - 
for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryProvidersRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryProvidersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Providers) > 0 { - for _, e := range m.Providers { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryProviderRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryProviderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Provider.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryProvidersRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProvidersRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProvidersRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryProvidersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProvidersResponse: wiretype end group for non-group") - } - if 
fieldNum <= 0 { - return fmt.Errorf("proto: QueryProvidersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Providers = append(m.Providers, Provider{}) - if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryProviderRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProviderRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProviderRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryProviderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProviderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/provider/v1beta2/query.pb.gw.go b/go/node/provider/v1beta2/query.pb.gw.go deleted file mode 100644 index 
ded3e913..00000000 --- a/go/node/provider/v1beta2/query.pb.gw.go +++ /dev/null @@ -1,272 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/provider/v1beta2/query.proto - -/* -Package v1beta2 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package v1beta2 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_Providers_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Providers_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProvidersRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Providers_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Providers(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Providers_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProvidersRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Providers_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Providers(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Query_Provider_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProviderRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["owner"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") - } - - protoReq.Owner, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) - } - - msg, err := client.Provider(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Provider_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProviderRequest - var metadata runtime.ServerMetadata - - var ( - 
val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["owner"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") - } - - protoReq.Owner, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) - } - - msg, err := server.Provider(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. -func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_Providers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Providers_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Providers_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Provider_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Provider_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Provider_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_Providers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Providers_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Providers_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Provider_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Provider_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Provider_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Query_Providers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"akash", "provider", "v1beta2", "providers"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Provider_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"akash", "provider", "v1beta2", "providers", "owner"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_Providers_0 = runtime.ForwardResponseMessage - - forward_Query_Provider_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/provider/v1beta2/types.go b/go/node/provider/v1beta2/types.go deleted file mode 100644 index fd76cee0..00000000 --- a/go/node/provider/v1beta2/types.go +++ /dev/null @@ -1,67 +0,0 @@ -package v1beta2 - -import ( - "bytes" - "fmt" - "net/url" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// String implements the Stringer interface for a Provider object. -func (p Provider) String() string { - res := fmt.Sprintf(`Deployment - Owner: %s - HostURI: %s - Attributes: %v - `, p.Owner, p.HostURI, p.Attributes) - - if !p.Info.IsEmpty() { - res += fmt.Sprintf("Info: %v\n", p.Info) - } - return res -} - -// Providers is the collection of Provider -type Providers []Provider - -// String implements the Stringer interface for a Providers object. -func (obj Providers) String() string { - var buf bytes.Buffer - - const sep = "\n\n" - - for _, p := range obj { - buf.WriteString(p.String()) - buf.WriteString(sep) - } - - if len(obj) > 0 { - buf.Truncate(buf.Len() - len(sep)) - } - - return buf.String() -} - -// Address implements provider and returns owner of provider -func (p *Provider) Address() sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(p.Owner) - if err != nil { - panic(err) - } - - return owner -} - -func (m ProviderInfo) IsEmpty() bool { - return m.EMail == "" && m.Website == "" -} - -func (m ProviderInfo) Validate() error { - if m.Website != "" { - if _, err := url.Parse(m.Website); err != nil { - return ErrInvalidInfoWebsite - } - } - return nil -} diff --git a/go/node/provider/v1beta3/codec.go b/go/node/provider/v1beta3/codec.go deleted file mode 100644 index ff85fe96..00000000 --- a/go/node/provider/v1beta3/codec.go +++ /dev/null @@ -1,45 +0,0 @@ -package v1beta3 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/msgservice" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/provider module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/provider and - // defined at the application level. 
- ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -// RegisterCodec register concrete types on codec -func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { - cdc.RegisterConcrete(&MsgCreateProvider{}, ModuleName+"/"+MsgTypeCreateProvider, nil) - cdc.RegisterConcrete(&MsgUpdateProvider{}, ModuleName+"/"+MsgTypeUpdateProvider, nil) - cdc.RegisterConcrete(&MsgDeleteProvider{}, ModuleName+"/"+MsgTypeDeleteProvider, nil) -} - -// RegisterInterfaces registers the x/provider interfaces types with the interface registry -func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { - registry.RegisterImplementations((*sdk.Msg)(nil), - &MsgCreateProvider{}, - &MsgUpdateProvider{}, - &MsgDeleteProvider{}, - ) - - msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) -} diff --git a/go/node/provider/v1beta3/errors.go b/go/node/provider/v1beta3/errors.go deleted file mode 100644 index b5b7a5c1..00000000 --- a/go/node/provider/v1beta3/errors.go +++ /dev/null @@ -1,42 +0,0 @@ -package v1beta3 - -import ( - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - errInvalidProviderURI uint32 = iota + 1 - errNotAbsProviderURI - errProviderNotFound - errProviderExists - errInvalidAddress - errAttributes - errIncompatibleAttributes - errInvalidInfoWebsite -) - -var ( - // ErrInvalidProviderURI register error code for invalid provider uri - ErrInvalidProviderURI = sdkerrors.Register(ModuleName, errInvalidProviderURI, "invalid provider: invalid host uri") - - // ErrNotAbsProviderURI register error code for not absolute provider uri - ErrNotAbsProviderURI = sdkerrors.Register(ModuleName, errNotAbsProviderURI, "invalid provider: not absolute host uri") - - // ErrProviderNotFound provider not found - ErrProviderNotFound = sdkerrors.Register(ModuleName, errProviderNotFound, "invalid provider: address not found") - - // ErrProviderExists provider already exists - ErrProviderExists = sdkerrors.Register(ModuleName, errProviderExists, "invalid provider: already exists") - - // ErrInvalidAddress invalid provider address - ErrInvalidAddress = sdkerrors.Register(ModuleName, errInvalidAddress, "invalid address") - - // ErrAttributes error code for provider attribute problems - ErrAttributes = sdkerrors.Register(ModuleName, errAttributes, "attribute specification error") - - // ErrIncompatibleAttributes error code for attributes update - ErrIncompatibleAttributes = sdkerrors.Register(ModuleName, errIncompatibleAttributes, "attributes cannot be changed") - - // ErrInvalidInfoWebsite register error code for invalid info website - ErrInvalidInfoWebsite = sdkerrors.Register(ModuleName, errInvalidInfoWebsite, "invalid provider: invalid info website") -) diff --git a/go/node/provider/v1beta3/event.go b/go/node/provider/v1beta3/event.go deleted file mode 100644 index f41e6ca2..00000000 --- a/go/node/provider/v1beta3/event.go +++ /dev/null @@ -1,142 +0,0 @@ -package v1beta3 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/akash-network/akash-api/go/sdkutil" -) - -const ( - EvActionProviderCreated = "provider-created" - EvActionProviderUpdated = "provider-updated" - EvActionProviderDeleted = "provider-deleted" - EvOwnerKey = "owner" -) - -// EventProviderCreated struct -type EventProviderCreated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - Owner sdk.AccAddress `json:"owner"` -} - -func NewEventProviderCreated(owner sdk.AccAddress) 
EventProviderCreated { - return EventProviderCreated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: EvActionProviderCreated, - }, - Owner: owner, - } -} - -// ToSDKEvent method creates new sdk event for EventProviderCreated struct -func (ev EventProviderCreated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, EvActionProviderCreated), - }, ProviderEVAttributes(ev.Owner)...)..., - ) -} - -// EventProviderUpdated struct -type EventProviderUpdated struct { - Context sdkutil.BaseModuleEvent `json:"context"` - Owner sdk.AccAddress `json:"owner"` -} - -func NewEventProviderUpdated(owner sdk.AccAddress) EventProviderUpdated { - return EventProviderUpdated{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: EvActionProviderUpdated, - }, - Owner: owner, - } -} - -// ToSDKEvent method creates new sdk event for EventProviderUpdated struct -func (ev EventProviderUpdated) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, EvActionProviderUpdated), - }, ProviderEVAttributes(ev.Owner)...)..., - ) -} - -// EventProviderDeleted struct -type EventProviderDeleted struct { - Context sdkutil.BaseModuleEvent `json:"context"` - Owner sdk.AccAddress `json:"owner"` -} - -func NewEventProviderDeleted(owner sdk.AccAddress) EventProviderDeleted { - return EventProviderDeleted{ - Context: sdkutil.BaseModuleEvent{ - Module: ModuleName, - Action: EvActionProviderDeleted, - }, - Owner: owner, - } -} - -// ToSDKEvent method creates new sdk event for EventProviderDeleted struct -func (ev EventProviderDeleted) ToSDKEvent() sdk.Event { - return sdk.NewEvent(sdkutil.EventTypeMessage, - append([]sdk.Attribute{ - sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), - sdk.NewAttribute(sdk.AttributeKeyAction, EvActionProviderDeleted), - }, ProviderEVAttributes(ev.Owner)...)..., - ) -} - -// ProviderEVAttributes returns event attribues for given Provider -func ProviderEVAttributes(owner sdk.AccAddress) []sdk.Attribute { - return []sdk.Attribute{ - sdk.NewAttribute(EvOwnerKey, owner.String()), - } -} - -// ParseEVProvider returns provider details for given event attributes -func ParseEVProvider(attrs []sdk.Attribute) (sdk.AccAddress, error) { - owner, err := sdkutil.GetAccAddress(attrs, EvOwnerKey) - if err != nil { - return sdk.AccAddress{}, err - } - - return owner, nil -} - -// ParseEvent parses event and returns details of event and error if occurred -// TODO: Enable returning actual events. 
-func ParseEvent(ev sdkutil.Event) (sdkutil.ModuleEvent, error) { - if ev.Type != sdkutil.EventTypeMessage { - return nil, sdkutil.ErrUnknownType - } - if ev.Module != ModuleName { - return nil, sdkutil.ErrUnknownModule - } - switch ev.Action { - case EvActionProviderCreated: - owner, err := ParseEVProvider(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventProviderCreated(owner), nil - case EvActionProviderUpdated: - owner, err := ParseEVProvider(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventProviderUpdated(owner), nil - case EvActionProviderDeleted: - owner, err := ParseEVProvider(ev.Attributes) - if err != nil { - return nil, err - } - return NewEventProviderDeleted(owner), nil - default: - return nil, sdkutil.ErrUnknownAction - } -} diff --git a/go/node/provider/v1beta3/events_test.go b/go/node/provider/v1beta3/events_test.go deleted file mode 100644 index 6adf7fa7..00000000 --- a/go/node/provider/v1beta3/events_test.go +++ /dev/null @@ -1,195 +0,0 @@ -package v1beta3_test - -import ( - "fmt" - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" - "github.com/stretchr/testify/require" - - types "github.com/akash-network/akash-api/go/node/provider/v1beta3" - "github.com/akash-network/akash-api/go/sdkutil" - _ "github.com/akash-network/akash-api/go/testutil" -) - -var ( - errWildcard = errors.New("wildcard string error can't be matched") -) - -type testEventParsing struct { - msg sdkutil.Event - expErr error -} - -func (tep testEventParsing) testMessageType() func(t *testing.T) { - _, err := types.ParseEvent(tep.msg) - return func(t *testing.T) { - // if the error expected is errWildcard to catch untyped errors, don't fail the test, the error was expected. - if errors.Is(tep.expErr, errWildcard) { - require.Error(t, err) - } else { - require.Equal(t, tep.expErr, err) - } - } -} - -var TEPS = []testEventParsing{ - { - msg: sdkutil.Event{ - Type: "nil", - }, - expErr: sdkutil.ErrUnknownType, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - }, - expErr: sdkutil.ErrUnknownModule, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: types.ModuleName, - }, - expErr: sdkutil.ErrUnknownAction, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: "nil", - }, - expErr: sdkutil.ErrUnknownModule, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: types.ModuleName, - Action: "nil", - }, - expErr: sdkutil.ErrUnknownAction, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: types.ModuleName, - Action: types.EvActionProviderCreated, - Attributes: []sdk.Attribute{ - { - Key: types.EvOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: types.ModuleName, - Action: types.EvActionProviderCreated, - Attributes: []sdk.Attribute{ - { - Key: types.EvOwnerKey, - Value: "hello", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: types.ModuleName, - Action: types.EvActionProviderCreated, - Attributes: []sdk.Attribute{}, - }, - expErr: errWildcard, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: types.ModuleName, - Action: types.EvActionProviderUpdated, - Attributes: []sdk.Attribute{ - { - Key: types.EvOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - }, - }, - expErr: nil, - }, - { - msg: 
sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: types.ModuleName, - Action: types.EvActionProviderUpdated, - Attributes: []sdk.Attribute{ - { - Key: types.EvOwnerKey, - Value: "hello", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: types.ModuleName, - Action: types.EvActionProviderUpdated, - Attributes: []sdk.Attribute{}, - }, - expErr: errWildcard, - }, - - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: types.ModuleName, - Action: types.EvActionProviderDeleted, - Attributes: []sdk.Attribute{ - { - Key: types.EvOwnerKey, - Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", - }, - }, - }, - expErr: nil, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: types.ModuleName, - Action: types.EvActionProviderDeleted, - Attributes: []sdk.Attribute{ - { - Key: types.EvOwnerKey, - Value: "hello", - }, - }, - }, - expErr: errWildcard, - }, - { - msg: sdkutil.Event{ - Type: sdkutil.EventTypeMessage, - Module: types.ModuleName, - Action: types.EvActionProviderDeleted, - Attributes: []sdk.Attribute{}, - }, - expErr: errWildcard, - }, -} - -func TestEventParsing(t *testing.T) { - for i, test := range TEPS { - t.Run(fmt.Sprintf("%d", i), - test.testMessageType()) - } -} diff --git a/go/node/provider/v1beta3/genesis.pb.go b/go/node/provider/v1beta3/genesis.pb.go deleted file mode 100644 index bcc68bdc..00000000 --- a/go/node/provider/v1beta3/genesis.pb.go +++ /dev/null @@ -1,334 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/provider/v1beta3/genesis.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisState defines the basic genesis state used by provider module -type GenesisState struct { - Providers []Provider `protobuf:"bytes,1,rep,name=providers,proto3" json:"providers" yaml:"providers"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_4a3393cd40e1bd09, []int{0} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetProviders() []Provider { - if m != nil { - return m.Providers - } - return nil -} - -func init() { - proto.RegisterType((*GenesisState)(nil), "akash.provider.v1beta3.GenesisState") -} - -func init() { - proto.RegisterFile("akash/provider/v1beta3/genesis.proto", fileDescriptor_4a3393cd40e1bd09) -} - -var fileDescriptor_4a3393cd40e1bd09 = []byte{ - // 226 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x2f, 0x28, 0xca, 0x2f, 0xcb, 0x4c, 0x49, 0x2d, 0xd2, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, - 0x49, 0x34, 0xd6, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, - 0x17, 0x12, 0x03, 0xab, 0xd2, 0x83, 0xa9, 0xd2, 0x83, 0xaa, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, - 0x07, 0x2b, 0xd1, 0x07, 0xb1, 0x20, 0xaa, 0xa5, 0x54, 0x71, 0x98, 0x09, 0xd7, 0x0e, 0x56, 0xa6, - 0x54, 0xca, 0xc5, 0xe3, 0x0e, 0xb1, 0x25, 0xb8, 0x24, 0xb1, 0x24, 0x55, 0x28, 0x95, 0x8b, 0x13, - 0xa6, 0xa2, 0x58, 0x82, 0x51, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x41, 0x0f, 0xbb, 0xc5, 0x7a, 0x01, - 0x50, 0x01, 0x27, 0xd5, 0x13, 0xf7, 0xe4, 0x19, 0x5e, 0xdd, 0x93, 0x47, 0x68, 0xfd, 0x74, 0x4f, - 0x5e, 0xa0, 0x32, 0x31, 0x37, 0xc7, 0x4a, 0x09, 0x2e, 0xa4, 0x14, 0x84, 0x90, 0x76, 0x0a, 0x3d, - 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, - 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xeb, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, - 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0xbd, 0xba, 0x79, 0xa9, 0x25, 0xe5, 0xf9, 0x45, 0xd9, - 0x50, 0x5e, 0x62, 0x41, 0xa6, 0x7e, 0x7a, 0xbe, 0x7e, 0x5e, 0x7e, 0x4a, 0x2a, 0x86, 0xe7, 0x92, - 0xd8, 0xc0, 0x9e, 0x32, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xd8, 0x17, 0x39, 0x51, 0x01, - 0x00, 0x00, -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i 
- var l int - _ = l - if len(m.Providers) > 0 { - for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Providers) > 0 { - for _, e := range m.Providers { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) - } - } - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Providers = append(m.Providers, Provider{}) - if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx 
>= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/provider/v1beta3/key.go b/go/node/provider/v1beta3/key.go deleted file mode 100644 index 0a39047b..00000000 --- a/go/node/provider/v1beta3/key.go +++ /dev/null @@ -1,12 +0,0 @@ -package v1beta3 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "provider" - - // StoreKey is the store key string for provider - StoreKey = ModuleName - - // RouterKey is the message route for provider - RouterKey = ModuleName -) diff --git a/go/node/provider/v1beta3/msgs.go b/go/node/provider/v1beta3/msgs.go deleted file mode 100644 index e4b638d5..00000000 --- a/go/node/provider/v1beta3/msgs.go +++ /dev/null @@ -1,177 +0,0 @@ -package v1beta3 - -import ( - "net/url" - "regexp" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - "github.com/pkg/errors" - - types "github.com/akash-network/akash-api/go/node/types/v1beta3" -) - -const ( - MsgTypeCreateProvider = "create-provider" - MsgTypeUpdateProvider = "update-provider" - MsgTypeDeleteProvider = "delete-provider" -) - -var ( - _, _, _ sdk.Msg = &MsgCreateProvider{}, &MsgUpdateProvider{}, &MsgDeleteProvider{} - attributeNameRegexp = regexp.MustCompile(types.AttributeNameRegexpString) -) - -// NewMsgCreateProvider creates a new MsgCreateProvider instance -func NewMsgCreateProvider(owner sdk.AccAddress, hostURI string, attributes types.Attributes) *MsgCreateProvider { - return &MsgCreateProvider{ - Owner: owner.String(), - HostURI: hostURI, - Attributes: attributes, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgCreateProvider) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgCreateProvider) Type() string { return MsgTypeCreateProvider } - -// ValidateBasic does basic validation of a HostURI -func (msg MsgCreateProvider) ValidateBasic() error { - if err := validateProviderURI(msg.HostURI); err != nil { - return err - } - if _, err := sdk.AccAddressFromBech32(msg.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Provider Address") - } - if err := msg.Attributes.ValidateWithRegex(attributeNameRegexp); err != nil { - return err - } - if err := msg.Info.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgCreateProvider) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgCreateProvider) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgUpdateProvider creates a new 
MsgUpdateProvider instance -func NewMsgUpdateProvider(owner sdk.AccAddress, hostURI string, attributes types.Attributes) *MsgUpdateProvider { - return &MsgUpdateProvider{ - Owner: owner.String(), - HostURI: hostURI, - Attributes: attributes, - } -} - -// Route implements the sdk.Msg interface -func (msg MsgUpdateProvider) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgUpdateProvider) Type() string { return MsgTypeUpdateProvider } - -// ValidateBasic does basic validation of a ProviderURI -func (msg MsgUpdateProvider) ValidateBasic() error { - if err := validateProviderURI(msg.HostURI); err != nil { - return err - } - if _, err := sdk.AccAddressFromBech32(msg.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgUpdate: Invalid Provider Address") - } - if err := msg.Attributes.ValidateWithRegex(attributeNameRegexp); err != nil { - return err - } - if err := msg.Info.Validate(); err != nil { - return err - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgUpdateProvider) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgUpdateProvider) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -// NewMsgDeleteProvider creates a new MsgDeleteProvider instance -func NewMsgDeleteProvider(owner sdk.AccAddress) *MsgDeleteProvider { - return &MsgDeleteProvider{ - Owner: owner.String(), - } -} - -// Route implements the sdk.Msg interface -func (msg MsgDeleteProvider) Route() string { return RouterKey } - -// Type implements the sdk.Msg interface -func (msg MsgDeleteProvider) Type() string { return MsgTypeDeleteProvider } - -// ValidateBasic does basic validation -func (msg MsgDeleteProvider) ValidateBasic() error { - if _, err := sdk.AccAddressFromBech32(msg.Owner); err != nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgDelete: Invalid Provider Address") - } - return nil -} - -// GetSignBytes encodes the message for signing -func (msg MsgDeleteProvider) GetSignBytes() []byte { - return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) -} - -// GetSigners defines whose signature is required -func (msg MsgDeleteProvider) GetSigners() []sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(msg.Owner) - if err != nil { - panic(err) - } - - return []sdk.AccAddress{owner} -} - -func validateProviderURI(val string) error { - u, err := url.Parse(val) - if err != nil { - return ErrInvalidProviderURI - } - if !u.IsAbs() { - return errors.Wrapf(ErrNotAbsProviderURI, "validating %q for absolute URI", val) - } - - if u.Scheme != "https" { - return errors.Wrapf(ErrInvalidProviderURI, "scheme in %q should be https", val) - } - - if u.Host == "" { - return errors.Wrapf(ErrInvalidProviderURI, "validating %q for valid host", val) - } - - if u.Path != "" { - return errors.Wrapf(ErrInvalidProviderURI, "path in %q should be empty", val) - } - - return nil -} diff --git a/go/node/provider/v1beta3/msgs_test.go b/go/node/provider/v1beta3/msgs_test.go deleted file mode 100644 index f20abea6..00000000 --- a/go/node/provider/v1beta3/msgs_test.go +++ /dev/null @@ -1,255 +0,0 @@ -package v1beta3 - -import ( - "fmt" - "net/url" - "testing" - - "github.com/pkg/errors" - - sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - - types 
"github.com/akash-network/akash-api/go/node/types/v1beta3" -) - -func TestConfigPath(t *testing.T) { - type testConfigPath struct { - path string - expErr error - } - tests := []testConfigPath{ - { - path: "foo.yaml", - expErr: ErrNotAbsProviderURI, - }, - { - path: "localhost", - expErr: ErrNotAbsProviderURI, - }, - { - path: "localhost/foo", - expErr: ErrNotAbsProviderURI, - }, - { - path: "localhost:80", - expErr: ErrInvalidProviderURI, - }, - { - path: "localhost:80/foo", - expErr: ErrInvalidProviderURI, - }, - { - path: "127.0.0.1", - expErr: ErrNotAbsProviderURI, - }, - { - path: "127.0.0.1/foo", - expErr: ErrNotAbsProviderURI, - }, - { - path: "127.0.0.1:80", - expErr: ErrInvalidProviderURI, - }, - { - path: "127.0.0.1:80/foo", - expErr: ErrInvalidProviderURI, - }, - { - path: "file:///foo.yaml", - expErr: ErrInvalidProviderURI, - }, - { - path: "https://localhost", - expErr: nil, - }, - { - path: "http://localhost/foo", - expErr: ErrInvalidProviderURI, - }, - { - path: "https://localhost:80", - expErr: nil, - }, - { - path: "http://localhost:80/foo", - expErr: ErrInvalidProviderURI, - }, - { - path: "http://localhost:3001/", - expErr: ErrInvalidProviderURI, - }, - { - path: "https://localhost:80", - expErr: nil, - }, - { - path: "https://localhost:80/foo", - expErr: ErrInvalidProviderURI, - }, - } - - for i, testUnit := range tests { - closure := func(test testConfigPath) func(t *testing.T) { - testFunc := func(t *testing.T) { - err := validateProviderURI(test.path) - if test.expErr != nil && !errors.Is(err, test.expErr) || - err != nil && test.expErr == nil { - t.Errorf("unexpected error occurred: %v", err) - - _, err := url.Parse(test.path) - if err != nil { - t.Errorf("url.Parse() of %q err: %v", test.path, err) - } - } - } - return testFunc - } - tf := closure(testUnit) - t.Run(fmt.Sprintf("%d->%q", i, testUnit.path), tf) - } -} - -type providerTestParams struct { - msg Provider - expErr error - delErr error -} - -func (test providerTestParams) testCreate() func(t *testing.T) { - msg := MsgCreateProvider{ - Owner: test.msg.Owner, - HostURI: test.msg.HostURI, - Attributes: test.msg.Attributes, - } - vErr := msg.ValidateBasic() - return func(t *testing.T) { - if test.expErr != nil && !errors.Is(vErr, test.expErr) { - t.Errorf("error expected: '%v' VS: %v", test.expErr, vErr) - return - } - sb := msg.GetSignBytes() - if len(sb) == 0 { - t.Error("no signed bytes returned") - } - } -} - -func (test providerTestParams) testUpdate() func(t *testing.T) { - msg := MsgUpdateProvider{ - Owner: test.msg.Owner, - HostURI: test.msg.HostURI, - Attributes: test.msg.Attributes, - } - vErr := msg.ValidateBasic() - return func(t *testing.T) { - if test.expErr != nil && !errors.Is(vErr, test.expErr) { - t.Errorf("error expected: '%v' VS: %v", test.expErr, vErr) - return - } - sb := msg.GetSignBytes() - if len(sb) == 0 { - t.Error("no signed bytes returned") - } - } -} - -func (test providerTestParams) testDelete() func(t *testing.T) { - msg := MsgDeleteProvider{ - Owner: test.msg.Owner, - } - vErr := msg.ValidateBasic() - return func(t *testing.T) { - if test.delErr != nil && !errors.Is(vErr, test.delErr) { - t.Errorf("error expected: '%v' VS: %v", test.expErr, vErr) - return - } - sb := msg.GetSignBytes() - if len(sb) == 0 { - t.Error("no signed bytes returned") - } - } -} - -var msgCreateTests = []providerTestParams{ - { - msg: Provider{ - Owner: sdk.AccAddress("hihi").String(), - HostURI: "https://localhost:3001", - Attributes: []types.Attribute{ - { - Key: "hihi", - Value: "neh", - }, - }, - 
}, - expErr: nil, - }, - { - msg: Provider{ - Owner: sdk.AccAddress("hihi").String(), - HostURI: "https://localhost:3001", - Attributes: []types.Attribute{ - { - Key: "hihi*", - Value: "neh", - }, - }, - }, - expErr: types.ErrInvalidAttributeKey, - }, - { - msg: Provider{ - Owner: sdk.AccAddress("").String(), - HostURI: "https://localhost:3001", - Attributes: []types.Attribute{ - { - Key: "hihi", - Value: "neh", - }, - }, - }, - expErr: sdkerrors.ErrInvalidAddress, - delErr: sdkerrors.ErrInvalidAddress, - }, - { - msg: Provider{ - Owner: sdk.AccAddress("hihi").String(), - HostURI: "ht tp://foo.com", - Attributes: []types.Attribute{ - { - Key: "hihi", - Value: "neh", - }, - }, - }, - expErr: ErrInvalidProviderURI, - }, - { - msg: Provider{ - Owner: sdk.AccAddress("hihi").String(), - HostURI: "", - Attributes: []types.Attribute{ - { - Key: "hihi", - Value: "neh", - }, - }, - }, - expErr: ErrNotAbsProviderURI, - }, -} - -func TestMsgStarValidation(t *testing.T) { - for i, test := range msgCreateTests { - main := func(test providerTestParams) func(t *testing.T) { - return func(t *testing.T) { - t.Run("msg-create", test.testCreate()) - t.Run("msg-update", test.testUpdate()) - t.Run("msg-delete", test.testDelete()) - } - } - f := main(test) - t.Run(fmt.Sprintf("%d", i), f) - } -} diff --git a/go/node/provider/v1beta3/provider.pb.go b/go/node/provider/v1beta3/provider.pb.go deleted file mode 100644 index 1f9c018a..00000000 --- a/go/node/provider/v1beta3/provider.pb.go +++ /dev/null @@ -1,2101 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/provider/v1beta3/provider.proto - -package v1beta3 - -import ( - context "context" - fmt "fmt" - github_com_akash_network_akash_api_go_node_types_v1beta3 "github.com/akash-network/akash-api/go/node/types/v1beta3" - v1beta3 "github.com/akash-network/akash-api/go/node/types/v1beta3" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// ProviderInfo -type ProviderInfo struct { - EMail string `protobuf:"bytes,1,opt,name=email,proto3" json:"email" yaml:"email"` - Website string `protobuf:"bytes,2,opt,name=website,proto3" json:"website" yaml:"website"` -} - -func (m *ProviderInfo) Reset() { *m = ProviderInfo{} } -func (m *ProviderInfo) String() string { return proto.CompactTextString(m) } -func (*ProviderInfo) ProtoMessage() {} -func (*ProviderInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_f65279e900573cf7, []int{0} -} -func (m *ProviderInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ProviderInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ProviderInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ProviderInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ProviderInfo.Merge(m, src) -} -func (m *ProviderInfo) XXX_Size() int { - return m.Size() -} -func (m *ProviderInfo) XXX_DiscardUnknown() { - xxx_messageInfo_ProviderInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_ProviderInfo proto.InternalMessageInfo - -func (m *ProviderInfo) GetEMail() string { - if m != nil { - return m.EMail - } - return "" -} - -func (m *ProviderInfo) GetWebsite() string { - if m != nil { - return m.Website - } - return "" -} - -// MsgCreateProvider defines an SDK message for creating a provider -type MsgCreateProvider struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - HostURI string `protobuf:"bytes,2,opt,name=host_uri,json=hostUri,proto3" json:"host_uri" yaml:"host_uri"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta3.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta3.Attributes" json:"attributes" yaml:"attributes"` - Info ProviderInfo `protobuf:"bytes,4,opt,name=info,proto3" json:"info" yaml:"info"` -} - -func (m *MsgCreateProvider) Reset() { *m = MsgCreateProvider{} } -func (m *MsgCreateProvider) String() string { return proto.CompactTextString(m) } -func (*MsgCreateProvider) ProtoMessage() {} -func (*MsgCreateProvider) Descriptor() ([]byte, []int) { - return fileDescriptor_f65279e900573cf7, []int{1} -} -func (m *MsgCreateProvider) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateProvider.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateProvider) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateProvider.Merge(m, src) -} -func (m *MsgCreateProvider) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateProvider) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateProvider.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateProvider proto.InternalMessageInfo - -func (m *MsgCreateProvider) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *MsgCreateProvider) GetHostURI() string { - if m != nil { - return m.HostURI - } - return "" -} - -func (m *MsgCreateProvider) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta3.Attributes { - if m != nil { - return 
m.Attributes - } - return nil -} - -func (m *MsgCreateProvider) GetInfo() ProviderInfo { - if m != nil { - return m.Info - } - return ProviderInfo{} -} - -// MsgCreateProviderResponse defines the Msg/CreateProvider response type. -type MsgCreateProviderResponse struct { -} - -func (m *MsgCreateProviderResponse) Reset() { *m = MsgCreateProviderResponse{} } -func (m *MsgCreateProviderResponse) String() string { return proto.CompactTextString(m) } -func (*MsgCreateProviderResponse) ProtoMessage() {} -func (*MsgCreateProviderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f65279e900573cf7, []int{2} -} -func (m *MsgCreateProviderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgCreateProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgCreateProviderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgCreateProviderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgCreateProviderResponse.Merge(m, src) -} -func (m *MsgCreateProviderResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgCreateProviderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgCreateProviderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgCreateProviderResponse proto.InternalMessageInfo - -// MsgUpdateProvider defines an SDK message for updating a provider -type MsgUpdateProvider struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - HostURI string `protobuf:"bytes,2,opt,name=host_uri,json=hostUri,proto3" json:"host_uri" yaml:"host_uri"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta3.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta3.Attributes" json:"attributes" yaml:"attributes"` - Info ProviderInfo `protobuf:"bytes,4,opt,name=info,proto3" json:"info" yaml:"info"` -} - -func (m *MsgUpdateProvider) Reset() { *m = MsgUpdateProvider{} } -func (m *MsgUpdateProvider) String() string { return proto.CompactTextString(m) } -func (*MsgUpdateProvider) ProtoMessage() {} -func (*MsgUpdateProvider) Descriptor() ([]byte, []int) { - return fileDescriptor_f65279e900573cf7, []int{3} -} -func (m *MsgUpdateProvider) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgUpdateProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgUpdateProvider.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgUpdateProvider) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgUpdateProvider.Merge(m, src) -} -func (m *MsgUpdateProvider) XXX_Size() int { - return m.Size() -} -func (m *MsgUpdateProvider) XXX_DiscardUnknown() { - xxx_messageInfo_MsgUpdateProvider.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgUpdateProvider proto.InternalMessageInfo - -func (m *MsgUpdateProvider) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *MsgUpdateProvider) GetHostURI() string { - if m != nil { - return m.HostURI - } - return "" -} - -func (m *MsgUpdateProvider) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta3.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -func (m 
*MsgUpdateProvider) GetInfo() ProviderInfo { - if m != nil { - return m.Info - } - return ProviderInfo{} -} - -// MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. -type MsgUpdateProviderResponse struct { -} - -func (m *MsgUpdateProviderResponse) Reset() { *m = MsgUpdateProviderResponse{} } -func (m *MsgUpdateProviderResponse) String() string { return proto.CompactTextString(m) } -func (*MsgUpdateProviderResponse) ProtoMessage() {} -func (*MsgUpdateProviderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f65279e900573cf7, []int{4} -} -func (m *MsgUpdateProviderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgUpdateProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgUpdateProviderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgUpdateProviderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgUpdateProviderResponse.Merge(m, src) -} -func (m *MsgUpdateProviderResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgUpdateProviderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgUpdateProviderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgUpdateProviderResponse proto.InternalMessageInfo - -// MsgDeleteProvider defines an SDK message for deleting a provider -type MsgDeleteProvider struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` -} - -func (m *MsgDeleteProvider) Reset() { *m = MsgDeleteProvider{} } -func (m *MsgDeleteProvider) String() string { return proto.CompactTextString(m) } -func (*MsgDeleteProvider) ProtoMessage() {} -func (*MsgDeleteProvider) Descriptor() ([]byte, []int) { - return fileDescriptor_f65279e900573cf7, []int{5} -} -func (m *MsgDeleteProvider) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDeleteProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDeleteProvider.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDeleteProvider) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDeleteProvider.Merge(m, src) -} -func (m *MsgDeleteProvider) XXX_Size() int { - return m.Size() -} -func (m *MsgDeleteProvider) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDeleteProvider.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDeleteProvider proto.InternalMessageInfo - -func (m *MsgDeleteProvider) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -// MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. 
-type MsgDeleteProviderResponse struct { -} - -func (m *MsgDeleteProviderResponse) Reset() { *m = MsgDeleteProviderResponse{} } -func (m *MsgDeleteProviderResponse) String() string { return proto.CompactTextString(m) } -func (*MsgDeleteProviderResponse) ProtoMessage() {} -func (*MsgDeleteProviderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f65279e900573cf7, []int{6} -} -func (m *MsgDeleteProviderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MsgDeleteProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_MsgDeleteProviderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *MsgDeleteProviderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_MsgDeleteProviderResponse.Merge(m, src) -} -func (m *MsgDeleteProviderResponse) XXX_Size() int { - return m.Size() -} -func (m *MsgDeleteProviderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_MsgDeleteProviderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_MsgDeleteProviderResponse proto.InternalMessageInfo - -// Provider stores owner and host details -type Provider struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` - HostURI string `protobuf:"bytes,2,opt,name=host_uri,json=hostUri,proto3" json:"host_uri" yaml:"host_uri"` - Attributes github_com_akash_network_akash_api_go_node_types_v1beta3.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/v1beta3.Attributes" json:"attributes" yaml:"attributes"` - Info ProviderInfo `protobuf:"bytes,4,opt,name=info,proto3" json:"info" yaml:"info"` -} - -func (m *Provider) Reset() { *m = Provider{} } -func (*Provider) ProtoMessage() {} -func (*Provider) Descriptor() ([]byte, []int) { - return fileDescriptor_f65279e900573cf7, []int{7} -} -func (m *Provider) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Provider.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Provider) XXX_Merge(src proto.Message) { - xxx_messageInfo_Provider.Merge(m, src) -} -func (m *Provider) XXX_Size() int { - return m.Size() -} -func (m *Provider) XXX_DiscardUnknown() { - xxx_messageInfo_Provider.DiscardUnknown(m) -} - -var xxx_messageInfo_Provider proto.InternalMessageInfo - -func (m *Provider) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -func (m *Provider) GetHostURI() string { - if m != nil { - return m.HostURI - } - return "" -} - -func (m *Provider) GetAttributes() github_com_akash_network_akash_api_go_node_types_v1beta3.Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -func (m *Provider) GetInfo() ProviderInfo { - if m != nil { - return m.Info - } - return ProviderInfo{} -} - -func init() { - proto.RegisterType((*ProviderInfo)(nil), "akash.provider.v1beta3.ProviderInfo") - proto.RegisterType((*MsgCreateProvider)(nil), "akash.provider.v1beta3.MsgCreateProvider") - proto.RegisterType((*MsgCreateProviderResponse)(nil), "akash.provider.v1beta3.MsgCreateProviderResponse") - proto.RegisterType((*MsgUpdateProvider)(nil), "akash.provider.v1beta3.MsgUpdateProvider") - 
proto.RegisterType((*MsgUpdateProviderResponse)(nil), "akash.provider.v1beta3.MsgUpdateProviderResponse") - proto.RegisterType((*MsgDeleteProvider)(nil), "akash.provider.v1beta3.MsgDeleteProvider") - proto.RegisterType((*MsgDeleteProviderResponse)(nil), "akash.provider.v1beta3.MsgDeleteProviderResponse") - proto.RegisterType((*Provider)(nil), "akash.provider.v1beta3.Provider") -} - -func init() { - proto.RegisterFile("akash/provider/v1beta3/provider.proto", fileDescriptor_f65279e900573cf7) -} - -var fileDescriptor_f65279e900573cf7 = []byte{ - // 575 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x41, 0x6b, 0x13, 0x41, - 0x18, 0xdd, 0x6d, 0x12, 0x13, 0x27, 0xa5, 0xd2, 0x45, 0x24, 0x4d, 0xe9, 0x4e, 0x18, 0x14, 0xa2, - 0xe0, 0x2e, 0x4d, 0x0f, 0x4a, 0x3d, 0x99, 0x5a, 0xb0, 0x42, 0x40, 0x16, 0x72, 0xf1, 0x22, 0xbb, - 0x66, 0xba, 0x59, 0x9a, 0xec, 0x2c, 0x3b, 0x93, 0x86, 0x1e, 0xfd, 0x07, 0x82, 0x97, 0x1e, 0x7b, - 0x16, 0xfc, 0x1f, 0x3d, 0xf6, 0x22, 0x78, 0x1a, 0x25, 0xb9, 0x48, 0x8e, 0xf9, 0x05, 0xb2, 0x33, - 0xbb, 0xdb, 0x6c, 0x12, 0x83, 0xe2, 0xa5, 0x87, 0xde, 0x76, 0xde, 0xbc, 0xef, 0x7b, 0x1f, 0xdf, - 0x7b, 0x4c, 0x02, 0x1e, 0xd9, 0x27, 0x36, 0xed, 0x9a, 0x41, 0x48, 0x4e, 0xbd, 0x0e, 0x0e, 0xcd, - 0xd3, 0x5d, 0x07, 0x33, 0x7b, 0x2f, 0x05, 0x8c, 0x20, 0x24, 0x8c, 0x68, 0x0f, 0x04, 0xcd, 0x48, - 0xd1, 0x98, 0x56, 0xbd, 0xef, 0x12, 0x97, 0x08, 0x8a, 0x19, 0x7d, 0x49, 0x76, 0x15, 0xc9, 0xa6, - 0x8e, 0x4d, 0x71, 0xda, 0xd0, 0x66, 0x2c, 0xf4, 0x9c, 0x01, 0xc3, 0x92, 0x83, 0x3e, 0xaa, 0x60, - 0xfd, 0x6d, 0xdc, 0xee, 0xc8, 0x3f, 0x26, 0xda, 0x73, 0x50, 0xc0, 0x7d, 0xdb, 0xeb, 0x55, 0xd4, - 0x9a, 0x5a, 0xbf, 0xdb, 0x44, 0x23, 0x0e, 0x0b, 0x87, 0x2d, 0xdb, 0xeb, 0x4d, 0x38, 0x94, 0x37, - 0x53, 0x0e, 0xd7, 0xcf, 0xec, 0x7e, 0x6f, 0x1f, 0x89, 0x23, 0xb2, 0x24, 0xac, 0x3d, 0x03, 0xc5, - 0x21, 0x76, 0xa8, 0xc7, 0x70, 0x65, 0x4d, 0xd4, 0xee, 0x4c, 0x38, 0x4c, 0xa0, 0x29, 0x87, 0x1b, - 0xb2, 0x28, 0x06, 0x90, 0x95, 0x5c, 0xa1, 0xf3, 0x1c, 0xd8, 0x6c, 0x51, 0xf7, 0x20, 0xc4, 0x36, - 0xc3, 0xc9, 0x30, 0x9a, 0x09, 0x0a, 0x64, 0xe8, 0xe3, 0x30, 0x1e, 0x64, 0x2b, 0xd2, 0x17, 0xc0, - 0xb5, 0xbe, 0x38, 0x22, 0x4b, 0xc2, 0xda, 0x21, 0x28, 0x75, 0x09, 0x65, 0xef, 0x07, 0xa1, 0x17, - 0x0f, 0xf0, 0x64, 0xc4, 0x61, 0xf1, 0x35, 0xa1, 0xac, 0x6d, 0x1d, 0x4d, 0x38, 0x4c, 0xaf, 0xa7, - 0x1c, 0xde, 0x93, 0x1d, 0x12, 0x04, 0x59, 0xc5, 0xe8, 0xb3, 0x1d, 0x7a, 0xda, 0x57, 0x15, 0x80, - 0x74, 0x4b, 0xb4, 0x92, 0xab, 0xe5, 0xea, 0xe5, 0xc6, 0x8e, 0x21, 0x37, 0x1f, 0xed, 0x32, 0xd9, - 0xba, 0xf1, 0x32, 0x61, 0x35, 0xfd, 0x4b, 0x0e, 0x95, 0x09, 0x87, 0x33, 0x85, 0x53, 0x0e, 0x37, - 0xa5, 0xc6, 0x35, 0x86, 0xbe, 0xfc, 0x80, 0x07, 0xae, 0xc7, 0xba, 0x03, 0xc7, 0xf8, 0x40, 0xfa, - 0xa6, 0xe8, 0xf9, 0xd4, 0xc7, 0x6c, 0x48, 0xc2, 0x93, 0xf8, 0x64, 0x07, 0x9e, 0xe9, 0x12, 0xd3, - 0x27, 0x1d, 0x6c, 0xb2, 0xb3, 0x00, 0x53, 0x73, 0x41, 0x8e, 0x5a, 0x33, 0x3a, 0x5a, 0x1b, 0xe4, - 0x3d, 0xff, 0x98, 0x54, 0xf2, 0x35, 0xb5, 0x5e, 0x6e, 0x3c, 0x34, 0x96, 0x47, 0xc4, 0x98, 0x35, - 0xb9, 0xb9, 0x1d, 0xcf, 0x2b, 0x2a, 0xa7, 0x1c, 0x96, 0xe5, 0xa4, 0xd1, 0x09, 0x59, 0x02, 0xdc, - 0xcf, 0xff, 0xba, 0x80, 0x0a, 0xda, 0x06, 0x5b, 0x0b, 0xce, 0x58, 0x98, 0x06, 0xc4, 0xa7, 0xa9, - 0x6f, 0xed, 0xa0, 0x73, 0xeb, 0xdb, 0x8d, 0xf4, 0x2d, 0xeb, 0x4c, 0xea, 0xdb, 0x1b, 0x61, 0xdb, - 0x2b, 0xdc, 0xc3, 0xff, 0x61, 0x5b, 0x46, 0x28, 0xdb, 0x2b, 0x15, 0xfa, 0x9c, 0x03, 0xa5, 0xdb, - 0x5c, 0xdc, 0x8c, 0x5c, 0x94, 0xce, 0x2f, 0xa0, 0x12, 0x59, 0xd6, 0xf8, 0xb6, 0x06, 0x72, 0x2d, - 0xea, 0x6a, 0x3e, 
0xd8, 0x98, 0x7b, 0x72, 0x1f, 0xff, 0x49, 0x6c, 0xe1, 0x0d, 0xa8, 0xee, 0xfe, - 0x35, 0x35, 0x49, 0x43, 0xa4, 0x37, 0xf7, 0x54, 0xac, 0xd2, 0xcb, 0x52, 0x57, 0xea, 0x2d, 0x8f, - 0x79, 0xa4, 0x37, 0x97, 0xf1, 0x55, 0x7a, 0x59, 0xea, 0x4a, 0xbd, 0xe5, 0x69, 0x6f, 0xb6, 0x2f, - 0x47, 0xba, 0x7a, 0x35, 0xd2, 0xd5, 0x9f, 0x23, 0x5d, 0xfd, 0x34, 0xd6, 0x95, 0xab, 0xb1, 0xae, - 0x7c, 0x1f, 0xeb, 0xca, 0xbb, 0x17, 0xff, 0x90, 0x91, 0xf9, 0xbf, 0x00, 0xce, 0x1d, 0xf1, 0x43, - 0xbd, 0xf7, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xfb, 0xca, 0xbb, 0x8f, 0x23, 0x08, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// MsgClient is the client API for Msg service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type MsgClient interface { - // CreateProvider defines a method that creates a provider given the proper inputs - CreateProvider(ctx context.Context, in *MsgCreateProvider, opts ...grpc.CallOption) (*MsgCreateProviderResponse, error) - // UpdateProvider defines a method that updates a provider given the proper inputs - UpdateProvider(ctx context.Context, in *MsgUpdateProvider, opts ...grpc.CallOption) (*MsgUpdateProviderResponse, error) - // DeleteProvider defines a method that deletes a provider given the proper inputs - DeleteProvider(ctx context.Context, in *MsgDeleteProvider, opts ...grpc.CallOption) (*MsgDeleteProviderResponse, error) -} - -type msgClient struct { - cc grpc1.ClientConn -} - -func NewMsgClient(cc grpc1.ClientConn) MsgClient { - return &msgClient{cc} -} - -func (c *msgClient) CreateProvider(ctx context.Context, in *MsgCreateProvider, opts ...grpc.CallOption) (*MsgCreateProviderResponse, error) { - out := new(MsgCreateProviderResponse) - err := c.cc.Invoke(ctx, "/akash.provider.v1beta3.Msg/CreateProvider", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) UpdateProvider(ctx context.Context, in *MsgUpdateProvider, opts ...grpc.CallOption) (*MsgUpdateProviderResponse, error) { - out := new(MsgUpdateProviderResponse) - err := c.cc.Invoke(ctx, "/akash.provider.v1beta3.Msg/UpdateProvider", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *msgClient) DeleteProvider(ctx context.Context, in *MsgDeleteProvider, opts ...grpc.CallOption) (*MsgDeleteProviderResponse, error) { - out := new(MsgDeleteProviderResponse) - err := c.cc.Invoke(ctx, "/akash.provider.v1beta3.Msg/DeleteProvider", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// MsgServer is the server API for Msg service. 
-type MsgServer interface { - // CreateProvider defines a method that creates a provider given the proper inputs - CreateProvider(context.Context, *MsgCreateProvider) (*MsgCreateProviderResponse, error) - // UpdateProvider defines a method that updates a provider given the proper inputs - UpdateProvider(context.Context, *MsgUpdateProvider) (*MsgUpdateProviderResponse, error) - // DeleteProvider defines a method that deletes a provider given the proper inputs - DeleteProvider(context.Context, *MsgDeleteProvider) (*MsgDeleteProviderResponse, error) -} - -// UnimplementedMsgServer can be embedded to have forward compatible implementations. -type UnimplementedMsgServer struct { -} - -func (*UnimplementedMsgServer) CreateProvider(ctx context.Context, req *MsgCreateProvider) (*MsgCreateProviderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method CreateProvider not implemented") -} -func (*UnimplementedMsgServer) UpdateProvider(ctx context.Context, req *MsgUpdateProvider) (*MsgUpdateProviderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method UpdateProvider not implemented") -} -func (*UnimplementedMsgServer) DeleteProvider(ctx context.Context, req *MsgDeleteProvider) (*MsgDeleteProviderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteProvider not implemented") -} - -func RegisterMsgServer(s grpc1.Server, srv MsgServer) { - s.RegisterService(&_Msg_serviceDesc, srv) -} - -func _Msg_CreateProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgCreateProvider) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).CreateProvider(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.provider.v1beta3.Msg/CreateProvider", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).CreateProvider(ctx, req.(*MsgCreateProvider)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_UpdateProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgUpdateProvider) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).UpdateProvider(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.provider.v1beta3.Msg/UpdateProvider", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).UpdateProvider(ctx, req.(*MsgUpdateProvider)) - } - return interceptor(ctx, in, info, handler) -} - -func _Msg_DeleteProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(MsgDeleteProvider) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(MsgServer).DeleteProvider(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.provider.v1beta3.Msg/DeleteProvider", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(MsgServer).DeleteProvider(ctx, req.(*MsgDeleteProvider)) - } - return interceptor(ctx, in, info, handler) -} - -var _Msg_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.provider.v1beta3.Msg", - HandlerType: (*MsgServer)(nil), - Methods: 
[]grpc.MethodDesc{ - { - MethodName: "CreateProvider", - Handler: _Msg_CreateProvider_Handler, - }, - { - MethodName: "UpdateProvider", - Handler: _Msg_UpdateProvider_Handler, - }, - { - MethodName: "DeleteProvider", - Handler: _Msg_DeleteProvider_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/provider/v1beta3/provider.proto", -} - -func (m *ProviderInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ProviderInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ProviderInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Website) > 0 { - i -= len(m.Website) - copy(dAtA[i:], m.Website) - i = encodeVarintProvider(dAtA, i, uint64(len(m.Website))) - i-- - dAtA[i] = 0x12 - } - if len(m.EMail) > 0 { - i -= len(m.EMail) - copy(dAtA[i:], m.EMail) - i = encodeVarintProvider(dAtA, i, uint64(len(m.EMail))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateProvider) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateProvider) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.HostURI) > 0 { - i -= len(m.HostURI) - copy(dAtA[i:], m.HostURI) - i = encodeVarintProvider(dAtA, i, uint64(len(m.HostURI))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgCreateProviderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgCreateProviderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgCreateProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgUpdateProvider) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgUpdateProvider) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgUpdateProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := 
m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.HostURI) > 0 { - i -= len(m.HostURI) - copy(dAtA[i:], m.HostURI) - i = encodeVarintProvider(dAtA, i, uint64(len(m.HostURI))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgUpdateProviderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgUpdateProviderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgUpdateProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *MsgDeleteProvider) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDeleteProvider) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDeleteProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *MsgDeleteProviderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MsgDeleteProviderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MsgDeleteProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *Provider) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Provider) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Provider) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintProvider(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.HostURI) > 0 { - i -= 
len(m.HostURI) - copy(dAtA[i:], m.HostURI) - i = encodeVarintProvider(dAtA, i, uint64(len(m.HostURI))) - i-- - dAtA[i] = 0x12 - } - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintProvider(dAtA []byte, offset int, v uint64) int { - offset -= sovProvider(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ProviderInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.EMail) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - l = len(m.Website) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - return n -} - -func (m *MsgCreateProvider) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - l = len(m.HostURI) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovProvider(uint64(l)) - } - } - l = m.Info.Size() - n += 1 + l + sovProvider(uint64(l)) - return n -} - -func (m *MsgCreateProviderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgUpdateProvider) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - l = len(m.HostURI) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovProvider(uint64(l)) - } - } - l = m.Info.Size() - n += 1 + l + sovProvider(uint64(l)) - return n -} - -func (m *MsgUpdateProviderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *MsgDeleteProvider) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - return n -} - -func (m *MsgDeleteProviderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *Provider) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - l = len(m.HostURI) - if l > 0 { - n += 1 + l + sovProvider(uint64(l)) - } - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovProvider(uint64(l)) - } - } - l = m.Info.Size() - n += 1 + l + sovProvider(uint64(l)) - return n -} - -func sovProvider(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozProvider(x uint64) (n int) { - return sovProvider(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ProviderInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProviderInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProviderInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum 
{ - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EMail", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EMail = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Website", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Website = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateProvider) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateProvider: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateProvider: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostURI", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostURI = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta3.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgCreateProviderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgCreateProviderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgCreateProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgUpdateProvider) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: MsgUpdateProvider: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgUpdateProvider: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostURI", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostURI = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta3.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgUpdateProviderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 
{ - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgUpdateProviderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgUpdateProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDeleteProvider) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDeleteProvider: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDeleteProvider: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MsgDeleteProviderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MsgDeleteProviderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MsgDeleteProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Provider) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Provider: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Provider: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostURI", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostURI = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, v1beta3.Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowProvider - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthProvider - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthProvider - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipProvider(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthProvider - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipProvider(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProvider - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProvider - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowProvider - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthProvider - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupProvider - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthProvider - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthProvider = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowProvider = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupProvider = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/provider/v1beta3/query.pb.go b/go/node/provider/v1beta3/query.pb.go deleted file mode 100644 index cb79652e..00000000 --- a/go/node/provider/v1beta3/query.pb.go +++ /dev/null @@ -1,1059 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/provider/v1beta3/query.proto - -package v1beta3 - -import ( - context "context" - fmt "fmt" - query "github.com/cosmos/cosmos-sdk/types/query" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - _ "google.golang.org/genproto/googleapis/api/annotations" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// QueryProvidersRequest is request type for the Query/Providers RPC method -type QueryProvidersRequest struct { - Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryProvidersRequest) Reset() { *m = QueryProvidersRequest{} } -func (m *QueryProvidersRequest) String() string { return proto.CompactTextString(m) } -func (*QueryProvidersRequest) ProtoMessage() {} -func (*QueryProvidersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b2720b5eb3b981bc, []int{0} -} -func (m *QueryProvidersRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProvidersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProvidersRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProvidersRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProvidersRequest.Merge(m, src) -} -func (m *QueryProvidersRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryProvidersRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProvidersRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProvidersRequest proto.InternalMessageInfo - -func (m *QueryProvidersRequest) GetPagination() *query.PageRequest { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -type QueryProvidersResponse struct { - Providers Providers `protobuf:"bytes,1,rep,name=providers,proto3,castrepeated=Providers" json:"providers"` - Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` -} - -func (m *QueryProvidersResponse) Reset() { *m = QueryProvidersResponse{} } -func (m *QueryProvidersResponse) String() string { return proto.CompactTextString(m) } -func (*QueryProvidersResponse) ProtoMessage() {} -func (*QueryProvidersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b2720b5eb3b981bc, []int{1} -} -func (m *QueryProvidersResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProvidersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProvidersResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProvidersResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProvidersResponse.Merge(m, src) -} -func (m *QueryProvidersResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryProvidersResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProvidersResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProvidersResponse proto.InternalMessageInfo - -func (m *QueryProvidersResponse) GetProviders() Providers { - if m != nil { - return m.Providers - } - return nil -} - -func (m *QueryProvidersResponse) GetPagination() *query.PageResponse { - if m != nil { - return m.Pagination - } - return nil -} - -// QueryProviderRequest is request type for the Query/Provider RPC method -type QueryProviderRequest struct { - Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner,omitempty"` -} - -func (m *QueryProviderRequest) Reset() { *m = 
QueryProviderRequest{} } -func (m *QueryProviderRequest) String() string { return proto.CompactTextString(m) } -func (*QueryProviderRequest) ProtoMessage() {} -func (*QueryProviderRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_b2720b5eb3b981bc, []int{2} -} -func (m *QueryProviderRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProviderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProviderRequest.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProviderRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProviderRequest.Merge(m, src) -} -func (m *QueryProviderRequest) XXX_Size() int { - return m.Size() -} -func (m *QueryProviderRequest) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProviderRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProviderRequest proto.InternalMessageInfo - -func (m *QueryProviderRequest) GetOwner() string { - if m != nil { - return m.Owner - } - return "" -} - -// QueryProviderResponse is response type for the Query/Provider RPC method -type QueryProviderResponse struct { - Provider Provider `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider"` -} - -func (m *QueryProviderResponse) Reset() { *m = QueryProviderResponse{} } -func (m *QueryProviderResponse) String() string { return proto.CompactTextString(m) } -func (*QueryProviderResponse) ProtoMessage() {} -func (*QueryProviderResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_b2720b5eb3b981bc, []int{3} -} -func (m *QueryProviderResponse) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *QueryProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_QueryProviderResponse.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *QueryProviderResponse) XXX_Merge(src proto.Message) { - xxx_messageInfo_QueryProviderResponse.Merge(m, src) -} -func (m *QueryProviderResponse) XXX_Size() int { - return m.Size() -} -func (m *QueryProviderResponse) XXX_DiscardUnknown() { - xxx_messageInfo_QueryProviderResponse.DiscardUnknown(m) -} - -var xxx_messageInfo_QueryProviderResponse proto.InternalMessageInfo - -func (m *QueryProviderResponse) GetProvider() Provider { - if m != nil { - return m.Provider - } - return Provider{} -} - -func init() { - proto.RegisterType((*QueryProvidersRequest)(nil), "akash.provider.v1beta3.QueryProvidersRequest") - proto.RegisterType((*QueryProvidersResponse)(nil), "akash.provider.v1beta3.QueryProvidersResponse") - proto.RegisterType((*QueryProviderRequest)(nil), "akash.provider.v1beta3.QueryProviderRequest") - proto.RegisterType((*QueryProviderResponse)(nil), "akash.provider.v1beta3.QueryProviderResponse") -} - -func init() { - proto.RegisterFile("akash/provider/v1beta3/query.proto", fileDescriptor_b2720b5eb3b981bc) -} - -var fileDescriptor_b2720b5eb3b981bc = []byte{ - // 449 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x31, 0x6f, 0xd3, 0x40, - 0x14, 0xc7, 0x7d, 0x81, 0xa2, 0xe6, 0x3a, 0x71, 0x0a, 0x55, 0x15, 0x21, 0x37, 0x18, 0x01, 0x2d, - 0x34, 0x77, 0x4a, 0x3a, 0xb2, 0x65, 0x80, 0xb5, 0xb5, 0xc4, 0x02, 0x03, 0x3a, 0xb7, 0xa7, 0xab, - 0x55, 0x7a, 
0xcf, 0xf1, 0x39, 0x89, 0x10, 0x62, 0xe1, 0x13, 0x20, 0x21, 0x16, 0x3e, 0x02, 0x33, - 0x23, 0x1f, 0x20, 0x63, 0x24, 0x16, 0x26, 0x40, 0x09, 0x1f, 0x04, 0xf9, 0xee, 0x1c, 0x93, 0x34, - 0x91, 0xb3, 0xf9, 0xec, 0xff, 0xfb, 0xbf, 0xdf, 0xfb, 0x9f, 0x1f, 0x0e, 0xf8, 0x25, 0xd7, 0x17, - 0x2c, 0x49, 0x61, 0x18, 0x9f, 0x8b, 0x94, 0x0d, 0x3b, 0x91, 0xc8, 0xf8, 0x31, 0xeb, 0x0f, 0x44, - 0xfa, 0x96, 0x26, 0x29, 0x64, 0x40, 0x76, 0x8d, 0x86, 0x16, 0x1a, 0xea, 0x34, 0xcd, 0x86, 0x04, - 0x09, 0x46, 0xc2, 0xf2, 0x27, 0xab, 0x6e, 0xde, 0x95, 0x00, 0xf2, 0x8d, 0x60, 0x3c, 0x89, 0x19, - 0x57, 0x0a, 0x32, 0x9e, 0xc5, 0xa0, 0xb4, 0xfb, 0xfa, 0xf8, 0x0c, 0xf4, 0x15, 0x68, 0x16, 0x71, - 0x2d, 0x6c, 0x13, 0xd7, 0xb2, 0xc3, 0x12, 0x2e, 0x63, 0x65, 0xc4, 0x4e, 0xfb, 0x60, 0x0d, 0xdb, - 0x1c, 0xc4, 0xc8, 0x82, 0xd7, 0xf8, 0xce, 0x69, 0x6e, 0x74, 0xe2, 0x5e, 0xeb, 0x50, 0xf4, 0x07, - 0x42, 0x67, 0xe4, 0x19, 0xc6, 0xa5, 0xe7, 0x1e, 0x6a, 0xa1, 0x83, 0x9d, 0xee, 0x43, 0x6a, 0x01, - 0x68, 0x0e, 0x40, 0xed, 0x94, 0x0e, 0x80, 0x9e, 0x70, 0x29, 0x5c, 0x6d, 0xf8, 0x5f, 0x65, 0xf0, - 0x0d, 0xe1, 0xdd, 0xe5, 0x0e, 0x3a, 0x01, 0xa5, 0x05, 0x39, 0xc5, 0xf5, 0x82, 0x46, 0xef, 0xa1, - 0xd6, 0x8d, 0x83, 0x9d, 0x6e, 0x8b, 0xae, 0x8e, 0x8b, 0x16, 0xd5, 0xbd, 0xdb, 0xe3, 0x5f, 0xfb, - 0xde, 0xd7, 0xdf, 0xfb, 0xf5, 0xd2, 0xaf, 0x74, 0x21, 0xcf, 0x17, 0xa8, 0x6b, 0x86, 0xfa, 0x51, - 0x25, 0xb5, 0xe5, 0x59, 0xc0, 0x3e, 0xc2, 0x8d, 0x05, 0xea, 0x22, 0x96, 0x06, 0xde, 0x82, 0x91, - 0x12, 0xa9, 0x49, 0xa4, 0x1e, 0xda, 0x43, 0xf0, 0x6a, 0x29, 0xc5, 0xf9, 0x88, 0x3d, 0xbc, 0x5d, - 0xc0, 0xb9, 0x0c, 0xab, 0x27, 0xbc, 0x99, 0x4f, 0x18, 0xce, 0xeb, 0xba, 0xdf, 0x6b, 0x78, 0xcb, - 0xb8, 0x93, 0xcf, 0x08, 0x97, 0x63, 0x93, 0xf6, 0x3a, 0xa7, 0x95, 0x17, 0xda, 0xa4, 0x9b, 0xca, - 0x2d, 0x7a, 0x70, 0xf8, 0xe1, 0xc7, 0xdf, 0x4f, 0xb5, 0xfb, 0xe4, 0x1e, 0xab, 0xf8, 0x93, 0x34, - 0xf9, 0x82, 0xf0, 0x76, 0x61, 0x40, 0x8e, 0x36, 0xea, 0x53, 0x50, 0xb5, 0x37, 0x54, 0x3b, 0xa8, - 0x8e, 0x81, 0x7a, 0x42, 0x0e, 0x2b, 0xa1, 0xd8, 0x3b, 0x73, 0x35, 0xef, 0x7b, 0x2f, 0xc6, 0x53, - 0x1f, 0x4d, 0xa6, 0x3e, 0xfa, 0x33, 0xf5, 0xd1, 0xc7, 0x99, 0xef, 0x4d, 0x66, 0xbe, 0xf7, 0x73, - 0xe6, 0x7b, 0x2f, 0x9f, 0xca, 0x38, 0xbb, 0x18, 0x44, 0xf4, 0x0c, 0xae, 0xac, 0x5d, 0x5b, 0x89, - 0x6c, 0x04, 0xe9, 0xa5, 0x3b, 0xe5, 0x4b, 0x28, 0x81, 0x29, 0x38, 0x17, 0xd7, 0x1a, 0x45, 0xb7, - 0xcc, 0xfe, 0x1c, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xa1, 0x89, 0x43, 0x69, 0x04, 0x04, 0x00, - 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
-type QueryClient interface { - // Providers queries providers - Providers(ctx context.Context, in *QueryProvidersRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) - // Provider queries provider details - Provider(ctx context.Context, in *QueryProviderRequest, opts ...grpc.CallOption) (*QueryProviderResponse, error) -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -func (c *queryClient) Providers(ctx context.Context, in *QueryProvidersRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { - out := new(QueryProvidersResponse) - err := c.cc.Invoke(ctx, "/akash.provider.v1beta3.Query/Providers", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *queryClient) Provider(ctx context.Context, in *QueryProviderRequest, opts ...grpc.CallOption) (*QueryProviderResponse, error) { - out := new(QueryProviderResponse) - err := c.cc.Invoke(ctx, "/akash.provider.v1beta3.Query/Provider", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// QueryServer is the server API for Query service. -type QueryServer interface { - // Providers queries providers - Providers(context.Context, *QueryProvidersRequest) (*QueryProvidersResponse, error) - // Provider queries provider details - Provider(context.Context, *QueryProviderRequest) (*QueryProviderResponse, error) -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. -type UnimplementedQueryServer struct { -} - -func (*UnimplementedQueryServer) Providers(ctx context.Context, req *QueryProvidersRequest) (*QueryProvidersResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Providers not implemented") -} -func (*UnimplementedQueryServer) Provider(ctx context.Context, req *QueryProviderRequest) (*QueryProviderResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Provider not implemented") -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -func _Query_Providers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryProvidersRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Providers(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.provider.v1beta3.Query/Providers", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Providers(ctx, req.(*QueryProvidersRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Query_Provider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(QueryProviderRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(QueryServer).Provider(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/akash.provider.v1beta3.Query/Provider", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(QueryServer).Provider(ctx, req.(*QueryProviderRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.provider.v1beta3.Query", - HandlerType: (*QueryServer)(nil), - Methods: 
[]grpc.MethodDesc{ - { - MethodName: "Providers", - Handler: _Query_Providers_Handler, - }, - { - MethodName: "Provider", - Handler: _Query_Provider_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/provider/v1beta3/query.proto", -} - -func (m *QueryProvidersRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProvidersRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProvidersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryProvidersResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProvidersResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProvidersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Pagination != nil { - { - size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if len(m.Providers) > 0 { - for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *QueryProviderRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProviderRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProviderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Owner) > 0 { - i -= len(m.Owner) - copy(dAtA[i:], m.Owner) - i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *QueryProviderResponse) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *QueryProviderResponse) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *QueryProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Provider.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintQuery(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { - offset -= sovQuery(v) - base := offset - 
for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *QueryProvidersRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryProvidersResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Providers) > 0 { - for _, e := range m.Providers { - l = e.Size() - n += 1 + l + sovQuery(uint64(l)) - } - } - if m.Pagination != nil { - l = m.Pagination.Size() - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryProviderRequest) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Owner) - if l > 0 { - n += 1 + l + sovQuery(uint64(l)) - } - return n -} - -func (m *QueryProviderResponse) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Provider.Size() - n += 1 + l + sovQuery(uint64(l)) - return n -} - -func sovQuery(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozQuery(x uint64) (n int) { - return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *QueryProvidersRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProvidersRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProvidersRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageRequest{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryProvidersResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProvidersResponse: wiretype end group for non-group") - } - if 
fieldNum <= 0 { - return fmt.Errorf("proto: QueryProvidersResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Providers = append(m.Providers, Provider{}) - if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pagination == nil { - m.Pagination = &query.PageResponse{} - } - if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryProviderRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProviderRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProviderRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Owner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QueryProviderResponse) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QueryProviderResponse: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QueryProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowQuery - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthQuery - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthQuery - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipQuery(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthQuery - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipQuery(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowQuery - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthQuery - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupQuery - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthQuery - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/provider/v1beta3/query.pb.gw.go b/go/node/provider/v1beta3/query.pb.gw.go deleted file mode 100644 index 
443f15b6..00000000 --- a/go/node/provider/v1beta3/query.pb.gw.go +++ /dev/null @@ -1,272 +0,0 @@ -// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. -// source: akash/provider/v1beta3/query.proto - -/* -Package v1beta3 is a reverse proxy. - -It translates gRPC into RESTful JSON APIs. -*/ -package v1beta3 - -import ( - "context" - "io" - "net/http" - - "github.com/golang/protobuf/descriptor" - "github.com/golang/protobuf/proto" - "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/grpc-ecosystem/grpc-gateway/utilities" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -// Suppress "imported and not used" errors -var _ codes.Code -var _ io.Reader -var _ status.Status -var _ = runtime.String -var _ = utilities.NewDoubleArray -var _ = descriptor.ForMessage -var _ = metadata.Join - -var ( - filter_Query_Providers_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} -) - -func request_Query_Providers_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProvidersRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Providers_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := client.Providers(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Providers_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProvidersRequest - var metadata runtime.ServerMetadata - - if err := req.ParseForm(); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Providers_0); err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) - } - - msg, err := server.Providers(ctx, &protoReq) - return msg, metadata, err - -} - -func request_Query_Provider_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProviderRequest - var metadata runtime.ServerMetadata - - var ( - val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["owner"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") - } - - protoReq.Owner, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) - } - - msg, err := client.Provider(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) - return msg, metadata, err - -} - -func local_request_Query_Provider_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq QueryProviderRequest - var metadata runtime.ServerMetadata - - var ( - 
val string - ok bool - err error - _ = err - ) - - val, ok = pathParams["owner"] - if !ok { - return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") - } - - protoReq.Owner, err = runtime.String(val) - - if err != nil { - return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) - } - - msg, err := server.Provider(ctx, &protoReq) - return msg, metadata, err - -} - -// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". -// UnaryRPC :call QueryServer directly. -// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. -// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. -func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { - - mux.Handle("GET", pattern_Query_Providers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Providers_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Providers_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Provider_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - var stream runtime.ServerTransportStream - ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := local_request_Query_Provider_0(rctx, inboundMarshaler, server, req, pathParams) - md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Provider_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - return nil -} - -// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but -// automatically dials to "endpoint" and closes the connection when "ctx" gets done. -func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { - conn, err := grpc.Dial(endpoint, opts...) 
- if err != nil { - return err - } - defer func() { - if err != nil { - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - return - } - go func() { - <-ctx.Done() - if cerr := conn.Close(); cerr != nil { - grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) - } - }() - }() - - return RegisterQueryHandler(ctx, mux, conn) -} - -// RegisterQueryHandler registers the http handlers for service Query to "mux". -// The handlers forward requests to the grpc endpoint over "conn". -func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { - return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) -} - -// RegisterQueryHandlerClient registers the http handlers for service Query -// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". -// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" -// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in -// "QueryClient" to call the correct interceptors. -func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { - - mux.Handle("GET", pattern_Query_Providers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Providers_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Providers_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) - - }) - - mux.Handle("GET", pattern_Query_Provider_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { - ctx, cancel := context.WithCancel(req.Context()) - defer cancel() - inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - resp, md, err := request_Query_Provider_0(rctx, inboundMarshaler, client, req, pathParams) - ctx = runtime.NewServerMetadataContext(ctx, md) - if err != nil { - runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) - return - } - - forward_Query_Provider_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
- - }) - - return nil -} - -var ( - pattern_Query_Providers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"akash", "provider", "v1beta3", "providers"}, "", runtime.AssumeColonVerbOpt(true))) - - pattern_Query_Provider_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"akash", "provider", "v1beta3", "providers", "owner"}, "", runtime.AssumeColonVerbOpt(true))) -) - -var ( - forward_Query_Providers_0 = runtime.ForwardResponseMessage - - forward_Query_Provider_0 = runtime.ForwardResponseMessage -) diff --git a/go/node/provider/v1beta3/types.go b/go/node/provider/v1beta3/types.go deleted file mode 100644 index 28bc0352..00000000 --- a/go/node/provider/v1beta3/types.go +++ /dev/null @@ -1,67 +0,0 @@ -package v1beta3 - -import ( - "bytes" - "fmt" - "net/url" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -// String implements the Stringer interface for a Provider object. -func (p Provider) String() string { - res := fmt.Sprintf(`Deployment - Owner: %s - HostURI: %s - Attributes: %v - `, p.Owner, p.HostURI, p.Attributes) - - if !p.Info.IsEmpty() { - res += fmt.Sprintf("Info: %v\n", p.Info) - } - return res -} - -// Providers is the collection of Provider -type Providers []Provider - -// String implements the Stringer interface for a Providers object. -func (obj Providers) String() string { - var buf bytes.Buffer - - const sep = "\n\n" - - for _, p := range obj { - buf.WriteString(p.String()) - buf.WriteString(sep) - } - - if len(obj) > 0 { - buf.Truncate(buf.Len() - len(sep)) - } - - return buf.String() -} - -// Address implements provider and returns owner of provider -func (p *Provider) Address() sdk.AccAddress { - owner, err := sdk.AccAddressFromBech32(p.Owner) - if err != nil { - panic(err) - } - - return owner -} - -func (m ProviderInfo) IsEmpty() bool { - return m.EMail == "" && m.Website == "" -} - -func (m ProviderInfo) Validate() error { - if m.Website != "" { - if _, err := url.Parse(m.Website); err != nil { - return ErrInvalidInfoWebsite - } - } - return nil -} diff --git a/go/node/provider/v1beta4/codec.go b/go/node/provider/v1beta4/codec.go new file mode 100644 index 00000000..9a8fe777 --- /dev/null +++ b/go/node/provider/v1beta4/codec.go @@ -0,0 +1,40 @@ +package v1beta4 + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +var ( + // ModuleCdc references the global x/provider module codec. Note, the codec should + // ONLY be used in certain instances of tests and for JSON encoding as Amino is + // still used for that purpose. + // + // The actual codec used for serialization should be provided to x/provider and + // defined at the application level. 
+ // + // Deprecated: ModuleCdc use is deprecated + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) + +// RegisterLegacyAminoCodec register concrete types on codec +// +// Deprecated: RegisterLegacyAminoCodec is deprecated +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + cdc.RegisterConcrete(&MsgCreateProvider{}, ModuleName+"/"+msgTypeCreateProvider, nil) + cdc.RegisterConcrete(&MsgUpdateProvider{}, ModuleName+"/"+msgTypeUpdateProvider, nil) + cdc.RegisterConcrete(&MsgDeleteProvider{}, ModuleName+"/"+msgTypeDeleteProvider, nil) +} + +// RegisterInterfaces registers the x/provider interfaces types with the interface registry +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), + &MsgCreateProvider{}, + &MsgUpdateProvider{}, + &MsgDeleteProvider{}, + ) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} diff --git a/go/node/provider/v1beta4/errors.go b/go/node/provider/v1beta4/errors.go new file mode 100644 index 00000000..3ee857dd --- /dev/null +++ b/go/node/provider/v1beta4/errors.go @@ -0,0 +1,42 @@ +package v1beta4 + +import ( + sdkerrors "cosmossdk.io/errors" +) + +const ( + errInvalidProviderURI uint32 = iota + 1 + errNotAbsProviderURI + errProviderNotFound + errProviderExists + errInvalidAddress + errAttributes + errIncompatibleAttributes + errInvalidInfoWebsite +) + +var ( + // ErrInvalidProviderURI register error code for invalid provider uri + ErrInvalidProviderURI = sdkerrors.Register(ModuleName, errInvalidProviderURI, "invalid provider: invalid host uri") + + // ErrNotAbsProviderURI register error code for not absolute provider uri + ErrNotAbsProviderURI = sdkerrors.Register(ModuleName, errNotAbsProviderURI, "invalid provider: not absolute host uri") + + // ErrProviderNotFound provider not found + ErrProviderNotFound = sdkerrors.Register(ModuleName, errProviderNotFound, "invalid provider: address not found") + + // ErrProviderExists provider already exists + ErrProviderExists = sdkerrors.Register(ModuleName, errProviderExists, "invalid provider: already exists") + + // ErrInvalidAddress invalid provider address + ErrInvalidAddress = sdkerrors.Register(ModuleName, errInvalidAddress, "invalid address") + + // ErrAttributes error code for provider attribute problems + ErrAttributes = sdkerrors.Register(ModuleName, errAttributes, "attribute specification error") + + // ErrIncompatibleAttributes error code for attributes update + ErrIncompatibleAttributes = sdkerrors.Register(ModuleName, errIncompatibleAttributes, "attributes cannot be changed") + + // ErrInvalidInfoWebsite register error code for invalid info website + ErrInvalidInfoWebsite = sdkerrors.Register(ModuleName, errInvalidInfoWebsite, "invalid provider: invalid info website") +) diff --git a/go/node/provider/v1beta4/event.pb.go b/go/node/provider/v1beta4/event.pb.go new file mode 100644 index 00000000..d9711e1c --- /dev/null +++ b/go/node/provider/v1beta4/event.pb.go @@ -0,0 +1,667 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/provider/v1beta4/event.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// EventProviderCreated defines an SDK message for provider created event +type EventProviderCreated struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` +} + +func (m *EventProviderCreated) Reset() { *m = EventProviderCreated{} } +func (m *EventProviderCreated) String() string { return proto.CompactTextString(m) } +func (*EventProviderCreated) ProtoMessage() {} +func (*EventProviderCreated) Descriptor() ([]byte, []int) { + return fileDescriptor_03bf12ee18bdd2de, []int{0} +} +func (m *EventProviderCreated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventProviderCreated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventProviderCreated.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventProviderCreated) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventProviderCreated.Merge(m, src) +} +func (m *EventProviderCreated) XXX_Size() int { + return m.Size() +} +func (m *EventProviderCreated) XXX_DiscardUnknown() { + xxx_messageInfo_EventProviderCreated.DiscardUnknown(m) +} + +var xxx_messageInfo_EventProviderCreated proto.InternalMessageInfo + +func (m *EventProviderCreated) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +// EventProviderUpdated defines an SDK message for provider updated event +type EventProviderUpdated struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` +} + +func (m *EventProviderUpdated) Reset() { *m = EventProviderUpdated{} } +func (m *EventProviderUpdated) String() string { return proto.CompactTextString(m) } +func (*EventProviderUpdated) ProtoMessage() {} +func (*EventProviderUpdated) Descriptor() ([]byte, []int) { + return fileDescriptor_03bf12ee18bdd2de, []int{1} +} +func (m *EventProviderUpdated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventProviderUpdated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventProviderUpdated.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventProviderUpdated) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventProviderUpdated.Merge(m, src) +} +func (m *EventProviderUpdated) XXX_Size() int { + return m.Size() +} +func (m *EventProviderUpdated) XXX_DiscardUnknown() { + xxx_messageInfo_EventProviderUpdated.DiscardUnknown(m) +} + +var xxx_messageInfo_EventProviderUpdated proto.InternalMessageInfo + +func (m *EventProviderUpdated) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +// EventProviderDeleted defines an SDK message for provider deleted event +type EventProviderDeleted struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` +} + +func (m *EventProviderDeleted) Reset() { *m = EventProviderDeleted{} } +func (m *EventProviderDeleted) String() string { return 
proto.CompactTextString(m) } +func (*EventProviderDeleted) ProtoMessage() {} +func (*EventProviderDeleted) Descriptor() ([]byte, []int) { + return fileDescriptor_03bf12ee18bdd2de, []int{2} +} +func (m *EventProviderDeleted) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventProviderDeleted) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EventProviderDeleted.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EventProviderDeleted) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventProviderDeleted.Merge(m, src) +} +func (m *EventProviderDeleted) XXX_Size() int { + return m.Size() +} +func (m *EventProviderDeleted) XXX_DiscardUnknown() { + xxx_messageInfo_EventProviderDeleted.DiscardUnknown(m) +} + +var xxx_messageInfo_EventProviderDeleted proto.InternalMessageInfo + +func (m *EventProviderDeleted) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func init() { + proto.RegisterType((*EventProviderCreated)(nil), "akash.provider.v1beta4.EventProviderCreated") + proto.RegisterType((*EventProviderUpdated)(nil), "akash.provider.v1beta4.EventProviderUpdated") + proto.RegisterType((*EventProviderDeleted)(nil), "akash.provider.v1beta4.EventProviderDeleted") +} + +func init() { + proto.RegisterFile("akash/provider/v1beta4/event.proto", fileDescriptor_03bf12ee18bdd2de) +} + +var fileDescriptor_03bf12ee18bdd2de = []byte{ + // 243 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4a, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x2f, 0x28, 0xca, 0x2f, 0xcb, 0x4c, 0x49, 0x2d, 0xd2, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, + 0x49, 0x34, 0xd1, 0x4f, 0x2d, 0x4b, 0xcd, 0x2b, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, + 0x03, 0xab, 0xd1, 0x83, 0xa9, 0xd1, 0x83, 0xaa, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, + 0xd1, 0x07, 0xb1, 0x20, 0xaa, 0xa5, 0x24, 0x93, 0xf3, 0x8b, 0x73, 0xf3, 0x8b, 0xe3, 0x21, 0x12, + 0x10, 0x0e, 0x44, 0x4a, 0x29, 0x9e, 0x4b, 0xc4, 0x15, 0x64, 0x6e, 0x00, 0xd4, 0x24, 0xe7, 0xa2, + 0xd4, 0xc4, 0x92, 0xd4, 0x14, 0x21, 0x77, 0x2e, 0xd6, 0xfc, 0xf2, 0xbc, 0xd4, 0x22, 0x09, 0x46, + 0x05, 0x46, 0x0d, 0x4e, 0x27, 0xc3, 0x57, 0xf7, 0xe4, 0x21, 0x02, 0x9f, 0xee, 0xc9, 0xf3, 0x54, + 0x26, 0xe6, 0xe6, 0x58, 0x29, 0x81, 0xb9, 0x4a, 0x97, 0xb6, 0xe8, 0x8a, 0x40, 0x4d, 0x74, 0x4c, + 0x49, 0x29, 0x4a, 0x2d, 0x2e, 0x0e, 0x2e, 0x29, 0xca, 0xcc, 0x4b, 0x0f, 0x82, 0x28, 0xc7, 0xb0, + 0x20, 0xb4, 0x20, 0x85, 0xb6, 0x16, 0xb8, 0xa4, 0xe6, 0xa4, 0x52, 0xd3, 0x02, 0x27, 0xbb, 0x13, + 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, + 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0x52, 0x29, 0xc8, 0x4e, 0xd7, 0x4b, 0xcc, + 0x2e, 0xd1, 0x4b, 0x49, 0x2d, 0xd3, 0x4f, 0xcf, 0xd7, 0xcf, 0xcb, 0x4f, 0x49, 0xc5, 0x88, 0xb7, + 0x24, 0x36, 0x70, 0x48, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd7, 0xf8, 0x9f, 0x64, 0xd8, + 0x01, 0x00, 0x00, +} + +func (m *EventProviderCreated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventProviderCreated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventProviderCreated) MarshalToSizedBuffer(dAtA []byte) (int, error) { 
+ i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintEvent(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EventProviderUpdated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventProviderUpdated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventProviderUpdated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintEvent(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EventProviderDeleted) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventProviderDeleted) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventProviderDeleted) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintEvent(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintEvent(dAtA []byte, offset int, v uint64) int { + offset -= sovEvent(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EventProviderCreated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovEvent(uint64(l)) + } + return n +} + +func (m *EventProviderUpdated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovEvent(uint64(l)) + } + return n +} + +func (m *EventProviderDeleted) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovEvent(uint64(l)) + } + return n +} + +func sovEvent(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEvent(x uint64) (n int) { + return sovEvent(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *EventProviderCreated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventProviderCreated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventProviderCreated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventProviderUpdated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventProviderUpdated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventProviderUpdated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventProviderDeleted) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventProviderDeleted: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventProviderDeleted: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEvent + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) 
+ if intStringLen < 0 { + return ErrInvalidLengthEvent + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEvent + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipEvent(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEvent + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEvent(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEvent + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEvent + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEvent + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEvent + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEvent = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEvent = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEvent = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/provider/v1beta4/genesis.pb.go b/go/node/provider/v1beta4/genesis.pb.go new file mode 100644 index 00000000..5d571439 --- /dev/null +++ b/go/node/provider/v1beta4/genesis.pb.go @@ -0,0 +1,333 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/provider/v1beta4/genesis.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the basic genesis state used by provider module +type GenesisState struct { + Providers Providers `protobuf:"bytes,1,rep,name=providers,proto3,castrepeated=Providers" json:"providers" yaml:"providers"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_045bfc9d93eb382e, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetProviders() Providers { + if m != nil { + return m.Providers + } + return nil +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "akash.provider.v1beta4.GenesisState") +} + +func init() { + proto.RegisterFile("akash/provider/v1beta4/genesis.proto", fileDescriptor_045bfc9d93eb382e) +} + +var fileDescriptor_045bfc9d93eb382e = []byte{ + // 220 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x2f, 0x28, 0xca, 0x2f, 0xcb, 0x4c, 0x49, 0x2d, 0xd2, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, + 0x49, 0x34, 0xd1, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, + 0x17, 0x12, 0x03, 0xab, 0xd2, 0x83, 0xa9, 0xd2, 0x83, 0xaa, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, + 0x07, 0x2b, 0xd1, 0x07, 0xb1, 0x20, 0xaa, 0xa5, 0x54, 0x71, 0x98, 0x09, 0xd7, 0x0e, 0x56, 0xa6, + 0xd4, 0xc4, 0xc8, 0xc5, 0xe3, 0x0e, 0xb1, 0x26, 0xb8, 0x24, 0xb1, 0x24, 0x55, 0xa8, 0x88, 0x8b, + 0x13, 0xa6, 0xa4, 0x58, 0x82, 0x51, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x41, 0x0f, 0xbb, 0xcd, 0x7a, + 0x01, 0x50, 0x01, 0x27, 0xa3, 0x13, 0xf7, 0xe4, 0x19, 0x5e, 0xdd, 0x93, 0x47, 0x68, 0xfd, 0x74, + 0x4f, 0x5e, 0xa0, 0x32, 0x31, 0x37, 0xc7, 0x4a, 0x09, 0x2e, 0xa4, 0xb4, 0xea, 0xbe, 0x3c, 0x27, + 0x4c, 0x4b, 0x71, 0x10, 0x42, 0xad, 0x93, 0xdd, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, + 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, + 0x31, 0x44, 0xa9, 0x14, 0x64, 0xa7, 0xeb, 0x25, 0x66, 0x97, 0xe8, 0xa5, 0xa4, 0x96, 0xe9, 0xa7, + 0xe7, 0xeb, 0xe7, 0xe5, 0xa7, 0xa4, 0x62, 0xf8, 0x29, 0x89, 0x0d, 0xec, 0x17, 0x63, 0x40, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xe2, 0x7a, 0x51, 0x2b, 0x48, 0x01, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ 
= l + if len(m.Providers) > 0 { + for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Providers) > 0 { + for _, e := range m.Providers { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Providers = append(m.Providers, Provider{}) + if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 
0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/provider/v1beta4/key.go b/go/node/provider/v1beta4/key.go new file mode 100644 index 00000000..c39d13f8 --- /dev/null +++ b/go/node/provider/v1beta4/key.go @@ -0,0 +1,16 @@ +package v1beta4 + +const ( + // ModuleName is the module name constant used in many places + ModuleName = "provider" + + // StoreKey is the store key string for provider + StoreKey = ModuleName + + // RouterKey is the message route for provider + RouterKey = ModuleName +) + +func ProviderPrefix() []byte { + return []byte{0x01} +} diff --git a/go/node/provider/v1beta4/msg.pb.go b/go/node/provider/v1beta4/msg.pb.go new file mode 100644 index 00000000..986bb01b --- /dev/null +++ b/go/node/provider/v1beta4/msg.pb.go @@ -0,0 +1,1375 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/provider/v1beta4/msg.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + pkg_akt_dev_go_node_types_attributes_v1 "pkg.akt.dev/go/node/types/attributes/v1" + v1 "pkg.akt.dev/go/node/types/attributes/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgCreateProvider defines an SDK message for creating a provider +type MsgCreateProvider struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + HostURI string `protobuf:"bytes,2,opt,name=host_uri,json=hostUri,proto3" json:"host_uri" yaml:"host_uri"` + Attributes pkg_akt_dev_go_node_types_attributes_v1.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=pkg.akt.dev/go/node/types/attributes/v1.Attributes" json:"attributes" yaml:"attributes"` + Info Info `protobuf:"bytes,4,opt,name=info,proto3" json:"info" yaml:"info"` +} + +func (m *MsgCreateProvider) Reset() { *m = MsgCreateProvider{} } +func (m *MsgCreateProvider) String() string { return proto.CompactTextString(m) } +func (*MsgCreateProvider) ProtoMessage() {} +func (*MsgCreateProvider) Descriptor() ([]byte, []int) { + return fileDescriptor_5c874c91147ead42, []int{0} +} +func (m *MsgCreateProvider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateProvider.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateProvider) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateProvider.Merge(m, src) +} +func (m *MsgCreateProvider) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateProvider) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateProvider.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateProvider proto.InternalMessageInfo + +func (m *MsgCreateProvider) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *MsgCreateProvider) GetHostURI() string { + if m != nil { + return m.HostURI + } + return "" +} + +func (m *MsgCreateProvider) GetAttributes() pkg_akt_dev_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *MsgCreateProvider) GetInfo() Info { + if m != nil { + return m.Info + } + return Info{} +} + +// MsgCreateProviderResponse defines the Msg/CreateProvider response type. 
+type MsgCreateProviderResponse struct { +} + +func (m *MsgCreateProviderResponse) Reset() { *m = MsgCreateProviderResponse{} } +func (m *MsgCreateProviderResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateProviderResponse) ProtoMessage() {} +func (*MsgCreateProviderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c874c91147ead42, []int{1} +} +func (m *MsgCreateProviderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateProviderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateProviderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateProviderResponse.Merge(m, src) +} +func (m *MsgCreateProviderResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateProviderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateProviderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateProviderResponse proto.InternalMessageInfo + +// MsgUpdateProvider defines an SDK message for updating a provider +type MsgUpdateProvider struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + HostURI string `protobuf:"bytes,2,opt,name=host_uri,json=hostUri,proto3" json:"host_uri" yaml:"host_uri"` + Attributes pkg_akt_dev_go_node_types_attributes_v1.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=pkg.akt.dev/go/node/types/attributes/v1.Attributes" json:"attributes" yaml:"attributes"` + Info Info `protobuf:"bytes,4,opt,name=info,proto3" json:"info" yaml:"info"` +} + +func (m *MsgUpdateProvider) Reset() { *m = MsgUpdateProvider{} } +func (m *MsgUpdateProvider) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateProvider) ProtoMessage() {} +func (*MsgUpdateProvider) Descriptor() ([]byte, []int) { + return fileDescriptor_5c874c91147ead42, []int{2} +} +func (m *MsgUpdateProvider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateProvider.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateProvider) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateProvider.Merge(m, src) +} +func (m *MsgUpdateProvider) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateProvider) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateProvider.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateProvider proto.InternalMessageInfo + +func (m *MsgUpdateProvider) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *MsgUpdateProvider) GetHostURI() string { + if m != nil { + return m.HostURI + } + return "" +} + +func (m *MsgUpdateProvider) GetAttributes() pkg_akt_dev_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *MsgUpdateProvider) GetInfo() Info { + if m != nil { + return m.Info + } + return Info{} +} + +// MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. 
+type MsgUpdateProviderResponse struct { +} + +func (m *MsgUpdateProviderResponse) Reset() { *m = MsgUpdateProviderResponse{} } +func (m *MsgUpdateProviderResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateProviderResponse) ProtoMessage() {} +func (*MsgUpdateProviderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c874c91147ead42, []int{3} +} +func (m *MsgUpdateProviderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateProviderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateProviderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateProviderResponse.Merge(m, src) +} +func (m *MsgUpdateProviderResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateProviderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateProviderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateProviderResponse proto.InternalMessageInfo + +// MsgDeleteProvider defines an SDK message for deleting a provider +type MsgDeleteProvider struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` +} + +func (m *MsgDeleteProvider) Reset() { *m = MsgDeleteProvider{} } +func (m *MsgDeleteProvider) String() string { return proto.CompactTextString(m) } +func (*MsgDeleteProvider) ProtoMessage() {} +func (*MsgDeleteProvider) Descriptor() ([]byte, []int) { + return fileDescriptor_5c874c91147ead42, []int{4} +} +func (m *MsgDeleteProvider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDeleteProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDeleteProvider.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDeleteProvider) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDeleteProvider.Merge(m, src) +} +func (m *MsgDeleteProvider) XXX_Size() int { + return m.Size() +} +func (m *MsgDeleteProvider) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDeleteProvider.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDeleteProvider proto.InternalMessageInfo + +func (m *MsgDeleteProvider) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +// MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. 
+type MsgDeleteProviderResponse struct { +} + +func (m *MsgDeleteProviderResponse) Reset() { *m = MsgDeleteProviderResponse{} } +func (m *MsgDeleteProviderResponse) String() string { return proto.CompactTextString(m) } +func (*MsgDeleteProviderResponse) ProtoMessage() {} +func (*MsgDeleteProviderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_5c874c91147ead42, []int{5} +} +func (m *MsgDeleteProviderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDeleteProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDeleteProviderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDeleteProviderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDeleteProviderResponse.Merge(m, src) +} +func (m *MsgDeleteProviderResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgDeleteProviderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDeleteProviderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDeleteProviderResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgCreateProvider)(nil), "akash.provider.v1beta4.MsgCreateProvider") + proto.RegisterType((*MsgCreateProviderResponse)(nil), "akash.provider.v1beta4.MsgCreateProviderResponse") + proto.RegisterType((*MsgUpdateProvider)(nil), "akash.provider.v1beta4.MsgUpdateProvider") + proto.RegisterType((*MsgUpdateProviderResponse)(nil), "akash.provider.v1beta4.MsgUpdateProviderResponse") + proto.RegisterType((*MsgDeleteProvider)(nil), "akash.provider.v1beta4.MsgDeleteProvider") + proto.RegisterType((*MsgDeleteProviderResponse)(nil), "akash.provider.v1beta4.MsgDeleteProviderResponse") +} + +func init() { proto.RegisterFile("akash/provider/v1beta4/msg.proto", fileDescriptor_5c874c91147ead42) } + +var fileDescriptor_5c874c91147ead42 = []byte{ + // 495 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x54, 0x4f, 0x6b, 0x13, 0x41, + 0x1c, 0xdd, 0x35, 0xd5, 0xea, 0x44, 0x94, 0x2e, 0x45, 0xd3, 0x54, 0x76, 0xc2, 0xaa, 0x10, 0x0a, + 0xce, 0x90, 0xe8, 0xa9, 0x07, 0xa1, 0x51, 0xd1, 0x1e, 0x0a, 0xb2, 0x92, 0x8b, 0x20, 0x65, 0xe2, + 0x4e, 0xb7, 0x4b, 0x92, 0x9d, 0x65, 0x66, 0xba, 0xd2, 0xab, 0x9f, 0xc0, 0x8f, 0x20, 0x5e, 0x04, + 0x4f, 0x1e, 0xfc, 0x10, 0x3d, 0x16, 0x4f, 0x9e, 0x46, 0x49, 0x0e, 0x4a, 0x8e, 0xf9, 0x04, 0xb2, + 0x33, 0xfb, 0xa7, 0xd1, 0xf6, 0xe8, 0x41, 0xe8, 0x6d, 0x7e, 0xbf, 0xdf, 0xfb, 0xbd, 0x79, 0xfb, + 0xde, 0x32, 0xa0, 0x45, 0x86, 0x44, 0xec, 0xe3, 0x84, 0xb3, 0x34, 0x0a, 0x28, 0xc7, 0x69, 0x67, + 0x40, 0x25, 0x79, 0x80, 0xc7, 0x22, 0x44, 0x09, 0x67, 0x92, 0x39, 0x37, 0x34, 0x02, 0x15, 0x08, + 0x94, 0x23, 0x9a, 0xab, 0x21, 0x0b, 0x99, 0x86, 0xe0, 0xec, 0x64, 0xd0, 0xcd, 0x9b, 0xaf, 0x99, + 0x18, 0x33, 0x91, 0xed, 0xe3, 0xb4, 0x53, 0xd1, 0x34, 0xd7, 0xcc, 0x60, 0xd7, 0x6c, 0x98, 0x22, + 0x1f, 0xb5, 0x8d, 0x86, 0x01, 0x11, 0x14, 0x13, 0x29, 0x79, 0x34, 0x38, 0x90, 0x54, 0x64, 0xeb, + 0x65, 0x95, 0x23, 0xef, 0x9e, 0xa1, 0xb6, 0x14, 0xa7, 0x61, 0xde, 0xc7, 0x1a, 0x58, 0xd9, 0x11, + 0xe1, 0x23, 0x4e, 0x89, 0xa4, 0xcf, 0xf3, 0x99, 0xf3, 0x14, 0x5c, 0x64, 0x6f, 0x62, 0xca, 0x1b, + 0x76, 0xcb, 0x6e, 0x5f, 0xe9, 0x75, 0x66, 0x0a, 0x9a, 0xc6, 0x5c, 0xc1, 0xab, 0x87, 0x64, 0x3c, + 0xda, 0xf4, 0x74, 0xe9, 0x7d, 0xfd, 0x72, 0x6f, 0x35, 0x17, 0xb8, 0x15, 0x04, 0x9c, 0x0a, 0xf1, + 0x42, 0xf2, 0x28, 0x0e, 0x7d, 0x03, 
0x77, 0x9e, 0x80, 0xcb, 0xfb, 0x4c, 0xc8, 0xdd, 0x03, 0x1e, + 0x35, 0x2e, 0x68, 0xae, 0x8d, 0x89, 0x82, 0xcb, 0xcf, 0x98, 0x90, 0x7d, 0x7f, 0x7b, 0xa6, 0x60, + 0x39, 0x9e, 0x2b, 0x78, 0xdd, 0x30, 0x17, 0x1d, 0xcf, 0x5f, 0xce, 0x8e, 0x7d, 0x1e, 0x39, 0x1f, + 0x6c, 0x00, 0xaa, 0xcf, 0x6d, 0xd4, 0x5a, 0xb5, 0x76, 0xbd, 0x7b, 0x1b, 0x19, 0xbb, 0x33, 0x33, + 0x50, 0x35, 0x45, 0x69, 0x07, 0x6d, 0x15, 0x55, 0xef, 0xd5, 0x91, 0x82, 0xd6, 0x4c, 0xc1, 0x13, + 0xeb, 0x73, 0x05, 0x57, 0xcc, 0x4d, 0x55, 0xcf, 0xfb, 0xf4, 0x1d, 0x76, 0x93, 0x61, 0x88, 0xc8, + 0x50, 0xa2, 0x80, 0xa6, 0x38, 0x64, 0x38, 0x66, 0x01, 0xc5, 0xf2, 0x30, 0xa1, 0x62, 0xd1, 0xea, + 0x8a, 0x5d, 0xf8, 0x27, 0x68, 0x9d, 0x1d, 0xb0, 0x14, 0xc5, 0x7b, 0xac, 0xb1, 0xd4, 0xb2, 0xdb, + 0xf5, 0xee, 0x2d, 0x74, 0xfa, 0xcf, 0x80, 0xb6, 0xe3, 0x3d, 0xd6, 0x5b, 0xcf, 0x65, 0xe9, 0x8d, + 0xb9, 0x82, 0x75, 0x23, 0x28, 0xab, 0x3c, 0x5f, 0x37, 0x37, 0xaf, 0xfd, 0x7a, 0x0f, 0xad, 0xb7, + 0x3f, 0x3f, 0x6f, 0x18, 0x2b, 0xbd, 0x75, 0xb0, 0xf6, 0x57, 0x50, 0x3e, 0x15, 0x09, 0x8b, 0x05, + 0x2d, 0x62, 0xec, 0x27, 0xc1, 0x79, 0x8c, 0xff, 0x43, 0x8c, 0x8b, 0x41, 0x95, 0x31, 0x8e, 0x74, + 0x8a, 0x8f, 0xe9, 0x88, 0xfe, 0x83, 0x14, 0xcf, 0x90, 0xb2, 0x78, 0x5b, 0x21, 0xa5, 0xf7, 0xf0, + 0x68, 0xe2, 0xda, 0xc7, 0x13, 0xd7, 0xfe, 0x31, 0x71, 0xed, 0x77, 0x53, 0xd7, 0x3a, 0x9e, 0xba, + 0xd6, 0xb7, 0xa9, 0x6b, 0xbd, 0xbc, 0x73, 0x9a, 0xc1, 0x7f, 0xbe, 0x33, 0x83, 0x4b, 0xfa, 0x7d, + 0xb9, 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xd8, 0xb1, 0x80, 0x88, 0x36, 0x05, 0x00, 0x00, +} + +func (m *MsgCreateProvider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateProvider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.HostURI) > 0 { + i -= len(m.HostURI) + copy(dAtA[i:], m.HostURI) + i = encodeVarintMsg(dAtA, i, uint64(len(m.HostURI))) + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintMsg(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateProviderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateProviderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUpdateProvider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateProvider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.HostURI) > 0 { + i -= len(m.HostURI) + copy(dAtA[i:], m.HostURI) + i = encodeVarintMsg(dAtA, i, uint64(len(m.HostURI))) + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintMsg(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateProviderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateProviderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgDeleteProvider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDeleteProvider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDeleteProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintMsg(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgDeleteProviderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDeleteProviderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDeleteProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintMsg(dAtA []byte, offset int, v uint64) int { + offset -= sovMsg(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgCreateProvider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovMsg(uint64(l)) + } + l = len(m.HostURI) + if l > 0 { + n += 1 + l + sovMsg(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovMsg(uint64(l)) + } + } + l = m.Info.Size() + n += 1 + l + sovMsg(uint64(l)) + return n +} + +func (m 
*MsgCreateProviderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpdateProvider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovMsg(uint64(l)) + } + l = len(m.HostURI) + if l > 0 { + n += 1 + l + sovMsg(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovMsg(uint64(l)) + } + } + l = m.Info.Size() + n += 1 + l + sovMsg(uint64(l)) + return n +} + +func (m *MsgUpdateProviderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgDeleteProvider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovMsg(uint64(l)) + } + return n +} + +func (m *MsgDeleteProviderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovMsg(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMsg(x uint64) (n int) { + return sovMsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgCreateProvider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateProvider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateProvider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthMsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateProviderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateProviderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateProvider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateProvider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateProvider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMsg + } + postIndex := 
iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateProviderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateProviderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDeleteProvider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDeleteProvider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDeleteProvider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDeleteProviderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDeleteProviderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDeleteProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMsg(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return 0, ErrIntOverflowMsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMsg + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMsg + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMsg + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMsg = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMsg = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMsg = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/provider/v1beta4/msgs.go b/go/node/provider/v1beta4/msgs.go new file mode 100644 index 00000000..b1f9b46a --- /dev/null +++ b/go/node/provider/v1beta4/msgs.go @@ -0,0 +1,209 @@ +package v1beta4 + +import ( + "net/url" + "reflect" + "regexp" + + cerrors "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + attr "pkg.akt.dev/go/node/types/attributes/v1" +) + +var ( + msgTypeCreateProvider = "" + msgTypeUpdateProvider = "" + msgTypeDeleteProvider = "" +) + +var ( + _ sdk.Msg = &MsgCreateProvider{} + _ sdk.Msg = &MsgUpdateProvider{} + _ sdk.Msg = &MsgDeleteProvider{} +) + +var ( + attributeNameRegexp = regexp.MustCompile(attr.AttributeNameRegexpString) +) + +func init() { + msgTypeCreateProvider = reflect.TypeOf(&MsgCreateProvider{}).Elem().Name() + msgTypeUpdateProvider = reflect.TypeOf(&MsgUpdateProvider{}).Elem().Name() + msgTypeDeleteProvider = reflect.TypeOf(&MsgDeleteProvider{}).Elem().Name() +} + +// NewMsgCreateProvider creates a new MsgCreateProvider instance +func NewMsgCreateProvider(owner sdk.AccAddress, hostURI string, attributes attr.Attributes) *MsgCreateProvider { + return &MsgCreateProvider{ + Owner: owner.String(), + HostURI: hostURI, + Attributes: attributes, + } +} + +// Type implements the sdk.Msg interface +func (msg *MsgCreateProvider) Type() string { return msgTypeCreateProvider } + +// ValidateBasic does basic validation of a HostURI +func (msg *MsgCreateProvider) ValidateBasic() error { + if err := validateProviderURI(msg.HostURI); err != nil { + return err + } + if _, err := sdk.AccAddressFromBech32(msg.Owner); err != nil { + return cerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Provider Address") + } + if err := msg.Attributes.ValidateWithRegex(attributeNameRegexp); err != nil { + return err + } + if err := msg.Info.Validate(); err != nil { + return err + } + return nil +} + +// GetSigners defines whose signature is required +func (msg *MsgCreateProvider) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// NewMsgUpdateProvider creates a new MsgUpdateProvider instance +func NewMsgUpdateProvider(owner sdk.AccAddress, hostURI string, attributes attr.Attributes) *MsgUpdateProvider { + return &MsgUpdateProvider{ + Owner: owner.String(), + HostURI: hostURI, + Attributes: attributes, + } +} + +// Type implements the sdk.Msg interface +func (msg *MsgUpdateProvider) Type() string { return msgTypeUpdateProvider } + +// ValidateBasic does basic validation of a ProviderURI +func (msg *MsgUpdateProvider) ValidateBasic() error { + if err 
:= validateProviderURI(msg.HostURI); err != nil { + return err + } + if _, err := sdk.AccAddressFromBech32(msg.Owner); err != nil { + return cerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgUpdate: Invalid Provider Address") + } + if err := msg.Attributes.ValidateWithRegex(attributeNameRegexp); err != nil { + return err + } + if err := msg.Info.Validate(); err != nil { + return err + } + return nil +} + +// GetSigners defines whose signature is required +func (msg *MsgUpdateProvider) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// NewMsgDeleteProvider creates a new MsgDeleteProvider instance +func NewMsgDeleteProvider(owner sdk.AccAddress) *MsgDeleteProvider { + return &MsgDeleteProvider{ + Owner: owner.String(), + } +} + +// Type implements the sdk.Msg interface +func (msg *MsgDeleteProvider) Type() string { return msgTypeDeleteProvider } + +// ValidateBasic does basic validation +func (msg *MsgDeleteProvider) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(msg.Owner); err != nil { + return cerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgDelete: Invalid Provider Address") + } + return nil +} + +// GetSigners defines whose signature is required +func (msg *MsgDeleteProvider) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +func validateProviderURI(val string) error { + u, err := url.Parse(val) + if err != nil { + return ErrInvalidProviderURI + } + if !u.IsAbs() { + return ErrNotAbsProviderURI.Wrapf("validating %q for absolute URI", val) + } + + if u.Scheme != "https" { + return ErrInvalidProviderURI.Wrapf("scheme in %q should be https", val) + } + + if u.Host == "" { + return ErrInvalidProviderURI.Wrapf("validating %q for valid host", val) + } + + if u.Path != "" { + return ErrInvalidProviderURI.Wrapf("path in %q should be empty", val) + } + + return nil +} + +// ============= GetSignBytes ============= +// ModuleCdc is defined in codec.go +// TODO @troian to check if we need them at all + +// GetSignBytes encodes the message for signing +// +// Deprecated: GetSignBytes is deprecated +func (msg *MsgCreateProvider) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg)) +} + +// GetSignBytes encodes the message for signing +// +// Deprecated: GetSignBytes is deprecated +func (msg *MsgUpdateProvider) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg)) +} + +// GetSignBytes encodes the message for signing +// +// Deprecated: GetSignBytes is deprecated +func (msg *MsgDeleteProvider) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(msg)) +} + +// ============= Route ============= +// ModuleCdc is defined in codec.go +// TODO @troian to check if we need them at all since sdk.Msg does not have Route defined anymore + +// Route implements the sdk.Msg interface +// +// Deprecated: Route is deprecated +func (msg *MsgCreateProvider) Route() string { return RouterKey } + +// Route implements the sdk.Msg interface +// +// Deprecated: Route is deprecated +func (msg *MsgUpdateProvider) Route() string { return RouterKey } + +// Route implements the sdk.Msg interface +// +// Deprecated: Route is deprecated +func (msg *MsgDeleteProvider) Route() string { return RouterKey } diff --git a/go/node/provider/v1beta4/msgs_test.go b/go/node/provider/v1beta4/msgs_test.go new file mode 100644 index
00000000..503b80fd --- /dev/null +++ b/go/node/provider/v1beta4/msgs_test.go @@ -0,0 +1,254 @@ +package v1beta4 + +import ( + "errors" + "fmt" + "net/url" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + attr "pkg.akt.dev/go/node/types/attributes/v1" +) + +func TestConfigPath(t *testing.T) { + type testConfigPath struct { + path string + expErr error + } + tests := []testConfigPath{ + { + path: "foo.yaml", + expErr: ErrNotAbsProviderURI, + }, + { + path: "localhost", + expErr: ErrNotAbsProviderURI, + }, + { + path: "localhost/foo", + expErr: ErrNotAbsProviderURI, + }, + { + path: "localhost:80", + expErr: ErrInvalidProviderURI, + }, + { + path: "localhost:80/foo", + expErr: ErrInvalidProviderURI, + }, + { + path: "127.0.0.1", + expErr: ErrNotAbsProviderURI, + }, + { + path: "127.0.0.1/foo", + expErr: ErrNotAbsProviderURI, + }, + { + path: "127.0.0.1:80", + expErr: ErrInvalidProviderURI, + }, + { + path: "127.0.0.1:80/foo", + expErr: ErrInvalidProviderURI, + }, + { + path: "file:///foo.yaml", + expErr: ErrInvalidProviderURI, + }, + { + path: "https://localhost", + expErr: nil, + }, + { + path: "http://localhost/foo", + expErr: ErrInvalidProviderURI, + }, + { + path: "https://localhost:80", + expErr: nil, + }, + { + path: "http://localhost:80/foo", + expErr: ErrInvalidProviderURI, + }, + { + path: "http://localhost:3001/", + expErr: ErrInvalidProviderURI, + }, + { + path: "https://localhost:80", + expErr: nil, + }, + { + path: "https://localhost:80/foo", + expErr: ErrInvalidProviderURI, + }, + } + + for i, testUnit := range tests { + closure := func(test testConfigPath) func(t *testing.T) { + testFunc := func(t *testing.T) { + err := validateProviderURI(test.path) + if test.expErr != nil && !errors.Is(err, test.expErr) || + err != nil && test.expErr == nil { + t.Errorf("unexpected error occurred: %v", err) + + _, err := url.Parse(test.path) + if err != nil { + t.Errorf("url.Parse() of %q err: %v", test.path, err) + } + } + } + return testFunc + } + tf := closure(testUnit) + t.Run(fmt.Sprintf("%d->%q", i, testUnit.path), tf) + } +} + +type providerTestParams struct { + msg Provider + expErr error + delErr error +} + +func (test providerTestParams) testCreate() func(t *testing.T) { + msg := MsgCreateProvider{ + Owner: test.msg.Owner, + HostURI: test.msg.HostURI, + Attributes: test.msg.Attributes, + } + vErr := msg.ValidateBasic() + return func(t *testing.T) { + if test.expErr != nil && !errors.Is(vErr, test.expErr) { + t.Errorf("error expected: '%v' VS: %v", test.expErr, vErr) + return + } + sb := msg.GetSignBytes() + if len(sb) == 0 { + t.Error("no signed bytes returned") + } + } +} + +func (test providerTestParams) testUpdate() func(t *testing.T) { + msg := MsgUpdateProvider{ + Owner: test.msg.Owner, + HostURI: test.msg.HostURI, + Attributes: test.msg.Attributes, + } + vErr := msg.ValidateBasic() + return func(t *testing.T) { + if test.expErr != nil && !errors.Is(vErr, test.expErr) { + t.Errorf("error expected: '%v' VS: %v", test.expErr, vErr) + return + } + sb := msg.GetSignBytes() + if len(sb) == 0 { + t.Error("no signed bytes returned") + } + } +} + +func (test providerTestParams) testDelete() func(t *testing.T) { + msg := MsgDeleteProvider{ + Owner: test.msg.Owner, + } + vErr := msg.ValidateBasic() + return func(t *testing.T) { + if test.delErr != nil && !errors.Is(vErr, test.delErr) { + t.Errorf("error expected: '%v' VS: %v", test.delErr, vErr) + return + } + sb := msg.GetSignBytes() + if len(sb) == 0 { + t.Error("no
signed bytes returned") + } + } +} + +var msgCreateTests = []providerTestParams{ + { + msg: Provider{ + Owner: sdk.AccAddress("hihi").String(), + HostURI: "https://localhost:3001", + Attributes: attr.Attributes{ + attr.Attribute{ + Key: "hihi", + Value: "neh", + }, + }, + }, + expErr: nil, + }, + { + msg: Provider{ + Owner: sdk.AccAddress("hihi").String(), + HostURI: "https://localhost:3001", + Attributes: attr.Attributes{ + attr.Attribute{ + Key: "hihi*", + Value: "neh", + }, + }, + }, + expErr: attr.ErrInvalidAttributeKey, + }, + { + msg: Provider{ + Owner: sdk.AccAddress("").String(), + HostURI: "https://localhost:3001", + Attributes: attr.Attributes{ + attr.Attribute{ + Key: "hihi", + Value: "neh", + }, + }, + }, + expErr: sdkerrors.ErrInvalidAddress, + delErr: sdkerrors.ErrInvalidAddress, + }, + { + msg: Provider{ + Owner: sdk.AccAddress("hihi").String(), + HostURI: "ht tp://foo.com", + Attributes: attr.Attributes{ + attr.Attribute{ + Key: "hihi", + Value: "neh", + }, + }, + }, + expErr: ErrInvalidProviderURI, + }, + { + msg: Provider{ + Owner: sdk.AccAddress("hihi").String(), + HostURI: "", + Attributes: attr.Attributes{ + attr.Attribute{ + Key: "hihi", + Value: "neh", + }, + }, + }, + expErr: ErrNotAbsProviderURI, + }, +} + +func TestMsgStarValidation(t *testing.T) { + for i, test := range msgCreateTests { + main := func(test providerTestParams) func(t *testing.T) { + return func(t *testing.T) { + t.Run("msg-create", test.testCreate()) + t.Run("msg-update", test.testUpdate()) + t.Run("msg-delete", test.testDelete()) + } + } + f := main(test) + t.Run(fmt.Sprintf("%d", i), f) + } +} diff --git a/go/node/provider/v1beta4/provider.pb.go b/go/node/provider/v1beta4/provider.pb.go new file mode 100644 index 00000000..442e3e58 --- /dev/null +++ b/go/node/provider/v1beta4/provider.pb.go @@ -0,0 +1,728 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/provider/v1beta4/provider.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + pkg_akt_dev_go_node_types_attributes_v1 "pkg.akt.dev/go/node/types/attributes/v1" + v1 "pkg.akt.dev/go/node/types/attributes/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Info +type Info struct { + EMail string `protobuf:"bytes,1,opt,name=email,proto3" json:"email" yaml:"email"` + Website string `protobuf:"bytes,2,opt,name=website,proto3" json:"website" yaml:"website"` +} + +func (m *Info) Reset() { *m = Info{} } +func (m *Info) String() string { return proto.CompactTextString(m) } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { + return fileDescriptor_cbb1622664c70e47, []int{0} +} +func (m *Info) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Info) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Info.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Info) XXX_Merge(src proto.Message) { + xxx_messageInfo_Info.Merge(m, src) +} +func (m *Info) XXX_Size() int { + return m.Size() +} +func (m *Info) XXX_DiscardUnknown() { + xxx_messageInfo_Info.DiscardUnknown(m) +} + +var xxx_messageInfo_Info proto.InternalMessageInfo + +func (m *Info) GetEMail() string { + if m != nil { + return m.EMail + } + return "" +} + +func (m *Info) GetWebsite() string { + if m != nil { + return m.Website + } + return "" +} + +// Provider stores owner and host details +type Provider struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + HostURI string `protobuf:"bytes,2,opt,name=host_uri,json=hostUri,proto3" json:"host_uri" yaml:"host_uri"` + Attributes pkg_akt_dev_go_node_types_attributes_v1.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=pkg.akt.dev/go/node/types/attributes/v1.Attributes" json:"attributes" yaml:"attributes"` + Info Info `protobuf:"bytes,4,opt,name=info,proto3" json:"info" yaml:"info"` +} + +func (m *Provider) Reset() { *m = Provider{} } +func (*Provider) ProtoMessage() {} +func (*Provider) Descriptor() ([]byte, []int) { + return fileDescriptor_cbb1622664c70e47, []int{1} +} +func (m *Provider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Provider.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Provider) XXX_Merge(src proto.Message) { + xxx_messageInfo_Provider.Merge(m, src) +} +func (m *Provider) XXX_Size() int { + return m.Size() +} +func (m *Provider) XXX_DiscardUnknown() { + xxx_messageInfo_Provider.DiscardUnknown(m) +} + +var xxx_messageInfo_Provider proto.InternalMessageInfo + +func (m *Provider) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *Provider) GetHostURI() string { + if m != nil { + return m.HostURI + } + return "" +} + +func (m *Provider) GetAttributes() pkg_akt_dev_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Provider) GetInfo() Info { + if m != nil { + return m.Info + } + return Info{} +} + +func init() { + proto.RegisterType((*Info)(nil), "akash.provider.v1beta4.Info") + proto.RegisterType((*Provider)(nil), "akash.provider.v1beta4.Provider") +} + +func init() { + proto.RegisterFile("akash/provider/v1beta4/provider.proto", fileDescriptor_cbb1622664c70e47) +} + +var fileDescriptor_cbb1622664c70e47 = []byte{ + // 473 bytes 
of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x4f, 0x6b, 0x13, 0x41, + 0x14, 0xdf, 0x35, 0x89, 0x89, 0x13, 0x51, 0x5c, 0x8a, 0xc4, 0xaa, 0x3b, 0x61, 0x54, 0x08, 0x82, + 0xb3, 0x24, 0x0a, 0x4a, 0x0f, 0x42, 0x03, 0x45, 0x7b, 0x28, 0xc8, 0x4a, 0x2f, 0x82, 0x94, 0x89, + 0x3b, 0xdd, 0x0e, 0x49, 0x76, 0xc2, 0xcc, 0x74, 0x4b, 0xbe, 0x85, 0xc7, 0x1e, 0x8b, 0x47, 0xcf, + 0x7e, 0x88, 0x1e, 0x8b, 0x27, 0x4f, 0xa3, 0x6c, 0x2e, 0x92, 0xe3, 0x7e, 0x02, 0xd9, 0x99, 0x4d, + 0xb6, 0x96, 0xde, 0xe6, 0xf7, 0xe7, 0xcd, 0xef, 0xcd, 0xbc, 0x07, 0x9e, 0x91, 0x31, 0x91, 0x47, + 0xc1, 0x4c, 0xf0, 0x94, 0x45, 0x54, 0x04, 0x69, 0x7f, 0x44, 0x15, 0x79, 0xb5, 0x26, 0xf0, 0x4c, + 0x70, 0xc5, 0xbd, 0xfb, 0xc6, 0x86, 0xd7, 0x6c, 0x69, 0xdb, 0xdc, 0x88, 0x79, 0xcc, 0x8d, 0x25, + 0x28, 0x4e, 0xd6, 0xbd, 0xf9, 0xe0, 0x0b, 0x97, 0x53, 0x2e, 0x0f, 0xac, 0x60, 0x41, 0x29, 0xf5, + 0x6c, 0xde, 0x88, 0x48, 0x1a, 0x10, 0xa5, 0x04, 0x1b, 0x1d, 0x2b, 0x2a, 0x83, 0xb4, 0x5f, 0x21, + 0xeb, 0x44, 0x73, 0x50, 0xdf, 0x4d, 0x0e, 0xb9, 0xf7, 0x06, 0x34, 0xe8, 0x94, 0xb0, 0x49, 0xc7, + 0xed, 0xba, 0xbd, 0x5b, 0x43, 0x94, 0x69, 0xd8, 0xd8, 0xd9, 0x23, 0x6c, 0xb2, 0xd4, 0xd0, 0x2a, + 0xb9, 0x86, 0xb7, 0xe7, 0x64, 0x3a, 0xd9, 0x42, 0x06, 0xa2, 0xd0, 0xd2, 0xde, 0x6b, 0xd0, 0x3c, + 0xa1, 0x23, 0xc9, 0x14, 0xed, 0xdc, 0x30, 0xb5, 0x8f, 0x97, 0x1a, 0xae, 0xa8, 0x5c, 0xc3, 0x3b, + 0xb6, 0xa8, 0x24, 0x50, 0xb8, 0x92, 0xd0, 0x69, 0x0d, 0xb4, 0x3e, 0x94, 0x4f, 0xf5, 0xde, 0x81, + 0x06, 0x3f, 0x49, 0xa8, 0x28, 0xf3, 0xfb, 0x45, 0xac, 0x21, 0xaa, 0x58, 0x03, 0xd1, 0xcf, 0x1f, + 0x2f, 0x36, 0xca, 0xb7, 0x6e, 0x47, 0x91, 0xa0, 0x52, 0x7e, 0x54, 0x82, 0x25, 0x71, 0x68, 0xed, + 0xde, 0x0e, 0x68, 0x1d, 0x71, 0xa9, 0x0e, 0x8e, 0x05, 0x2b, 0xfb, 0x79, 0x9e, 0x69, 0xd8, 0x7c, + 0xcf, 0xa5, 0xda, 0x0f, 0x77, 0x97, 0x1a, 0xae, 0xe5, 0x5c, 0xc3, 0xbb, 0xf6, 0xe6, 0x15, 0x83, + 0xc2, 0x66, 0x71, 0xdc, 0x17, 0xcc, 0xfb, 0xe6, 0x02, 0x50, 0xfd, 0x5c, 0xa7, 0xd6, 0xad, 0xf5, + 0xda, 0x83, 0x27, 0xd8, 0x0e, 0xa8, 0xf8, 0x57, 0x5c, 0xa9, 0x38, 0xed, 0xe3, 0xed, 0x15, 0x1a, + 0x7e, 0x3e, 0xd7, 0xd0, 0x59, 0x6a, 0x78, 0xa9, 0x3c, 0xd7, 0xf0, 0x9e, 0x4d, 0xaa, 0x38, 0xf4, + 0xfd, 0x37, 0x1c, 0xcc, 0xc6, 0x31, 0x26, 0x63, 0x85, 0x23, 0x9a, 0x06, 0x31, 0x0f, 0x12, 0x1e, + 0xd1, 0x40, 0xcd, 0x67, 0x54, 0xfe, 0x3f, 0xb5, 0xea, 0x76, 0x19, 0x5e, 0xba, 0xd6, 0xdb, 0x03, + 0x75, 0x96, 0x1c, 0xf2, 0x4e, 0xbd, 0xeb, 0xf6, 0xda, 0x83, 0x47, 0xf8, 0xfa, 0xf5, 0xc1, 0xc5, + 0x80, 0x87, 0x0f, 0xcb, 0xb6, 0x4c, 0x45, 0xae, 0x61, 0xdb, 0x36, 0x54, 0x20, 0x14, 0x1a, 0x72, + 0xab, 0x75, 0x7a, 0x06, 0x9d, 0xbf, 0x67, 0xd0, 0x19, 0xbe, 0x3d, 0xcf, 0x7c, 0xf7, 0x22, 0xf3, + 0xdd, 0x3f, 0x99, 0xef, 0x7e, 0x5d, 0xf8, 0xce, 0xc5, 0xc2, 0x77, 0x7e, 0x2d, 0x7c, 0xe7, 0xd3, + 0xd3, 0xeb, 0x5a, 0xbe, 0xba, 0xd7, 0xa3, 0x9b, 0x66, 0xb9, 0x5e, 0xfe, 0x0b, 0x00, 0x00, 0xff, + 0xff, 0x92, 0x2b, 0xb7, 0x2e, 0xf8, 0x02, 0x00, 0x00, +} + +func (m *Info) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Info) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Info) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Website) > 0 { + i -= len(m.Website) + copy(dAtA[i:], m.Website) + i = encodeVarintProvider(dAtA, i, uint64(len(m.Website))) + i-- + dAtA[i] = 0x12 + } + if 
len(m.EMail) > 0 { + i -= len(m.EMail) + copy(dAtA[i:], m.EMail) + i = encodeVarintProvider(dAtA, i, uint64(len(m.EMail))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Provider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Provider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Provider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProvider(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProvider(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.HostURI) > 0 { + i -= len(m.HostURI) + copy(dAtA[i:], m.HostURI) + i = encodeVarintProvider(dAtA, i, uint64(len(m.HostURI))) + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintProvider(dAtA []byte, offset int, v uint64) int { + offset -= sovProvider(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Info) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.EMail) + if l > 0 { + n += 1 + l + sovProvider(uint64(l)) + } + l = len(m.Website) + if l > 0 { + n += 1 + l + sovProvider(uint64(l)) + } + return n +} + +func (m *Provider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovProvider(uint64(l)) + } + l = len(m.HostURI) + if l > 0 { + n += 1 + l + sovProvider(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovProvider(uint64(l)) + } + } + l = m.Info.Size() + n += 1 + l + sovProvider(uint64(l)) + return n +} + +func sovProvider(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozProvider(x uint64) (n int) { + return sovProvider(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Info) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Info: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Info: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EMail", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EMail = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Website", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Website = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProvider(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProvider + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Provider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Provider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Provider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProvider(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProvider + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipProvider(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProvider + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProvider + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProvider + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthProvider + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupProvider + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthProvider + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthProvider = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowProvider = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupProvider = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/provider/v1beta4/query.pb.go b/go/node/provider/v1beta4/query.pb.go new file mode 100644 index 00000000..92a4a195 --- /dev/null +++ b/go/node/provider/v1beta4/query.pb.go @@ -0,0 +1,1058 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: akash/provider/v1beta4/query.proto + +package v1beta4 + +import ( + context "context" + fmt "fmt" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryProvidersRequest is request type for the Query/Providers RPC method +type QueryProvidersRequest struct { + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryProvidersRequest) Reset() { *m = QueryProvidersRequest{} } +func (m *QueryProvidersRequest) String() string { return proto.CompactTextString(m) } +func (*QueryProvidersRequest) ProtoMessage() {} +func (*QueryProvidersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fc667e24f0c91e71, []int{0} +} +func (m *QueryProvidersRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryProvidersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryProvidersRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryProvidersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProvidersRequest.Merge(m, src) +} +func (m *QueryProvidersRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryProvidersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProvidersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProvidersRequest proto.InternalMessageInfo + +func (m *QueryProvidersRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryProvidersResponse is response type for the Query/Providers RPC method +type QueryProvidersResponse struct { + Providers Providers `protobuf:"bytes,1,rep,name=providers,proto3,castrepeated=Providers" json:"providers"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryProvidersResponse) Reset() { *m = QueryProvidersResponse{} } +func (m *QueryProvidersResponse) String() string { return proto.CompactTextString(m) } +func (*QueryProvidersResponse) ProtoMessage() {} +func (*QueryProvidersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_fc667e24f0c91e71, []int{1} +} +func (m *QueryProvidersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryProvidersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryProvidersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} 
+func (m *QueryProvidersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProvidersResponse.Merge(m, src) +} +func (m *QueryProvidersResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryProvidersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProvidersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProvidersResponse proto.InternalMessageInfo + +func (m *QueryProvidersResponse) GetProviders() Providers { + if m != nil { + return m.Providers + } + return nil +} + +func (m *QueryProvidersResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryProviderRequest is request type for the Query/Provider RPC method +type QueryProviderRequest struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner,omitempty"` +} + +func (m *QueryProviderRequest) Reset() { *m = QueryProviderRequest{} } +func (m *QueryProviderRequest) String() string { return proto.CompactTextString(m) } +func (*QueryProviderRequest) ProtoMessage() {} +func (*QueryProviderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fc667e24f0c91e71, []int{2} +} +func (m *QueryProviderRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryProviderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryProviderRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryProviderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProviderRequest.Merge(m, src) +} +func (m *QueryProviderRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryProviderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProviderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProviderRequest proto.InternalMessageInfo + +func (m *QueryProviderRequest) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +// QueryProviderResponse is response type for the Query/Provider RPC method +type QueryProviderResponse struct { + Provider Provider `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider"` +} + +func (m *QueryProviderResponse) Reset() { *m = QueryProviderResponse{} } +func (m *QueryProviderResponse) String() string { return proto.CompactTextString(m) } +func (*QueryProviderResponse) ProtoMessage() {} +func (*QueryProviderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_fc667e24f0c91e71, []int{3} +} +func (m *QueryProviderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryProviderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryProviderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProviderResponse.Merge(m, src) +} +func (m *QueryProviderResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryProviderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProviderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProviderResponse proto.InternalMessageInfo + +func (m *QueryProviderResponse) GetProvider() Provider { + if m != nil { + return m.Provider + } + return Provider{} +} + +func init() { + 
proto.RegisterType((*QueryProvidersRequest)(nil), "akash.provider.v1beta4.QueryProvidersRequest") + proto.RegisterType((*QueryProvidersResponse)(nil), "akash.provider.v1beta4.QueryProvidersResponse") + proto.RegisterType((*QueryProviderRequest)(nil), "akash.provider.v1beta4.QueryProviderRequest") + proto.RegisterType((*QueryProviderResponse)(nil), "akash.provider.v1beta4.QueryProviderResponse") +} + +func init() { + proto.RegisterFile("akash/provider/v1beta4/query.proto", fileDescriptor_fc667e24f0c91e71) +} + +var fileDescriptor_fc667e24f0c91e71 = []byte{ + // 437 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3d, 0x6f, 0xda, 0x40, + 0x18, 0xc7, 0x7d, 0xb4, 0x54, 0x70, 0x4c, 0x3d, 0x51, 0x84, 0x50, 0x65, 0xa8, 0xfb, 0x06, 0x2d, + 0xdc, 0x09, 0xda, 0xb9, 0x03, 0x43, 0xbb, 0x82, 0xc7, 0x76, 0xa8, 0x8e, 0x72, 0xba, 0x5a, 0xb4, + 0x3e, 0xe3, 0x33, 0x54, 0x55, 0xd5, 0x25, 0x9f, 0x20, 0x52, 0x94, 0x25, 0x1f, 0x21, 0x73, 0xc6, + 0x7c, 0x00, 0x46, 0xa4, 0x2c, 0x99, 0x92, 0x08, 0xf2, 0x41, 0x22, 0x9f, 0xcf, 0x38, 0x10, 0x90, + 0xd9, 0xfc, 0xf2, 0x7b, 0xfe, 0xcf, 0xef, 0x79, 0xce, 0x86, 0x16, 0x1d, 0x51, 0xf9, 0x93, 0x78, + 0xbe, 0x98, 0x3a, 0x43, 0xe6, 0x93, 0x69, 0x7b, 0xc0, 0x02, 0xfa, 0x91, 0x8c, 0x27, 0xcc, 0xff, + 0x8b, 0x3d, 0x5f, 0x04, 0x02, 0x95, 0x14, 0x83, 0x63, 0x06, 0x6b, 0xa6, 0x52, 0xe4, 0x82, 0x0b, + 0x85, 0x90, 0xf0, 0x2a, 0xa2, 0x2b, 0xcf, 0xb9, 0x10, 0xfc, 0x17, 0x23, 0xd4, 0x73, 0x08, 0x75, + 0x5d, 0x11, 0xd0, 0xc0, 0x11, 0xae, 0xd4, 0x6f, 0xdf, 0xfd, 0x10, 0xf2, 0xb7, 0x90, 0x64, 0x40, + 0x25, 0x8b, 0x9a, 0xe8, 0x96, 0x6d, 0xe2, 0x51, 0xee, 0xb8, 0x0a, 0xd6, 0xec, 0xeb, 0x1d, 0x6e, + 0x2b, 0x11, 0x85, 0x59, 0xdf, 0xe1, 0xb3, 0x7e, 0x18, 0xd4, 0xd3, 0x8f, 0xa5, 0xcd, 0xc6, 0x13, + 0x26, 0x03, 0xf4, 0x19, 0xc2, 0x24, 0xb3, 0x0c, 0x6a, 0xa0, 0x5e, 0xe8, 0xbc, 0xc1, 0x91, 0x00, + 0x0e, 0x05, 0x70, 0x34, 0xa5, 0x16, 0xc0, 0x3d, 0xca, 0x99, 0xae, 0xb5, 0xef, 0x55, 0x5a, 0x67, + 0x00, 0x96, 0x36, 0x3b, 0x48, 0x4f, 0xb8, 0x92, 0xa1, 0x3e, 0xcc, 0xc7, 0x36, 0xb2, 0x0c, 0x6a, + 0x8f, 0xea, 0x85, 0x4e, 0x0d, 0x6f, 0x5f, 0x17, 0x8e, 0xab, 0xbb, 0x4f, 0x67, 0x57, 0x55, 0xe3, + 0xf4, 0xba, 0x9a, 0x4f, 0xf2, 0x92, 0x14, 0xf4, 0x65, 0xcd, 0x3a, 0xa3, 0xac, 0xdf, 0xa6, 0x5a, + 0x47, 0x3e, 0x6b, 0xda, 0x4d, 0x58, 0x5c, 0xb3, 0x8e, 0xd7, 0x52, 0x84, 0x59, 0xf1, 0xc7, 0x65, + 0xbe, 0xda, 0x48, 0xde, 0x8e, 0x6e, 0xac, 0x6f, 0x1b, 0x5b, 0x5c, 0x8d, 0xd8, 0x85, 0xb9, 0x58, + 0x4e, 0xef, 0x30, 0x7d, 0xc2, 0xc7, 0xe1, 0x84, 0xf6, 0xaa, 0xae, 0x73, 0x9e, 0x81, 0x59, 0x95, + 0x8e, 0x8e, 0x01, 0x4c, 0xc6, 0x46, 0xad, 0x5d, 0x49, 0x5b, 0x0f, 0xb4, 0x82, 0xf7, 0xc5, 0x23, + 0x75, 0xab, 0x71, 0x70, 0x71, 0x7b, 0x94, 0x79, 0x89, 0x5e, 0x90, 0x94, 0x2f, 0x49, 0xa2, 0x13, + 0x00, 0x73, 0x71, 0x00, 0x6a, 0xee, 0xd5, 0x27, 0xb6, 0x6a, 0xed, 0x49, 0x6b, 0xa9, 0xb6, 0x92, + 0x7a, 0x8f, 0x1a, 0xa9, 0x52, 0xe4, 0x9f, 0x3a, 0x9a, 0xff, 0xdd, 0x4f, 0xb3, 0x85, 0x09, 0xe6, + 0x0b, 0x13, 0xdc, 0x2c, 0x4c, 0x70, 0xb8, 0x34, 0x8d, 0xf9, 0xd2, 0x34, 0x2e, 0x97, 0xa6, 0xf1, + 0xf5, 0x95, 0x37, 0xe2, 0x98, 0x8e, 0x02, 0x3c, 0x64, 0x53, 0xc2, 0x05, 0x71, 0xc5, 0x90, 0x3d, + 0x48, 0x1c, 0x3c, 0x51, 0x3f, 0xca, 0x87, 0xbb, 0x00, 0x00, 0x00, 0xff, 0xff, 0x18, 0x29, 0x53, + 0xbb, 0xed, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Providers queries providers + Providers(ctx context.Context, in *QueryProvidersRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) + // Provider queries provider details + Provider(ctx context.Context, in *QueryProviderRequest, opts ...grpc.CallOption) (*QueryProviderResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Providers(ctx context.Context, in *QueryProvidersRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { + out := new(QueryProvidersResponse) + err := c.cc.Invoke(ctx, "/akash.provider.v1beta4.Query/Providers", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Provider(ctx context.Context, in *QueryProviderRequest, opts ...grpc.CallOption) (*QueryProviderResponse, error) { + out := new(QueryProviderResponse) + err := c.cc.Invoke(ctx, "/akash.provider.v1beta4.Query/Provider", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Providers queries providers + Providers(context.Context, *QueryProvidersRequest) (*QueryProvidersResponse, error) + // Provider queries provider details + Provider(context.Context, *QueryProviderRequest) (*QueryProviderResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Providers(ctx context.Context, req *QueryProvidersRequest) (*QueryProvidersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Providers not implemented") +} +func (*UnimplementedQueryServer) Provider(ctx context.Context, req *QueryProviderRequest) (*QueryProviderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Provider not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Providers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryProvidersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Providers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.provider.v1beta4.Query/Providers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Providers(ctx, req.(*QueryProvidersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Provider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryProviderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Provider(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.provider.v1beta4.Query/Provider", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Provider(ctx, req.(*QueryProviderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.provider.v1beta4.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Providers", + Handler: _Query_Providers_Handler, + }, + { + MethodName: "Provider", + Handler: _Query_Provider_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/provider/v1beta4/query.proto", +} + +func (m *QueryProvidersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryProvidersRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryProvidersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryProvidersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryProvidersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryProvidersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, 
err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Providers) > 0 { + for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryProviderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryProviderRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryProviderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryProviderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryProviderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Provider.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryProvidersRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryProvidersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Providers) > 0 { + for _, e := range m.Providers { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryProviderRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryProviderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Provider.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryProvidersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryProvidersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryProvidersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryProvidersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryProvidersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryProvidersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Providers = append(m.Providers, Provider{}) + if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := 
m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryProviderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryProviderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryProviderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryProviderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryProviderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/provider/v1beta4/query.pb.gw.go b/go/node/provider/v1beta4/query.pb.gw.go new file mode 100644 index 00000000..da67c303 --- /dev/null +++ b/go/node/provider/v1beta4/query.pb.gw.go @@ -0,0 +1,272 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: akash/provider/v1beta4/query.proto + +/* +Package v1beta4 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package v1beta4 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +var ( + filter_Query_Providers_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Providers_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryProvidersRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Providers_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Providers(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Providers_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryProvidersRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Providers_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Providers(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_Provider_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryProviderRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["owner"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") + } + + protoReq.Owner, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) + } + + msg, err := client.Provider(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Provider_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryProviderRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["owner"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") + } + + protoReq.Owner, err = runtime.String(val) + + if err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) + } + + msg, err := server.Provider(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Providers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Providers_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Providers_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Provider_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Provider_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Provider_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". 
+// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Providers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Providers_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Providers_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Provider_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Provider_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Provider_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Query_Providers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"akash", "provider", "v1beta4", "providers"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_Provider_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"akash", "provider", "v1beta4", "providers", "owner"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_Providers_0 = runtime.ForwardResponseMessage + + forward_Query_Provider_0 = runtime.ForwardResponseMessage +) diff --git a/go/node/provider/v1beta4/service.pb.go b/go/node/provider/v1beta4/service.pb.go new file mode 100644 index 00000000..a7b50e43 --- /dev/null +++ b/go/node/provider/v1beta4/service.pb.go @@ -0,0 +1,208 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: akash/provider/v1beta4/service.proto + +package v1beta4 + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("akash/provider/v1beta4/service.proto", fileDescriptor_3b4eb524c9b29aec) +} + +var fileDescriptor_3b4eb524c9b29aec = []byte{ + // 237 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x2f, 0x28, 0xca, 0x2f, 0xcb, 0x4c, 0x49, 0x2d, 0xd2, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, + 0x49, 0x34, 0xd1, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, + 0x17, 0x12, 0x03, 0xab, 0xd2, 0x83, 0xa9, 0xd2, 0x83, 0xaa, 0x92, 0x12, 0x4f, 0xce, 0x2f, 0xce, + 0xcd, 0x2f, 0xd6, 0xcf, 0x2d, 0x4e, 0xd7, 0x2f, 0x33, 0x04, 0x51, 0x10, 0x0d, 0x52, 0x0a, 0x38, + 0x8c, 0x85, 0xab, 0x30, 0xba, 0xc3, 0xc4, 0xc5, 0xec, 0x5b, 0x9c, 0x2e, 0x94, 0xc7, 0xc5, 0xe7, + 0x5c, 0x94, 0x9a, 0x58, 0x92, 0x1a, 0x00, 0x55, 0x2b, 0xa4, 0xa9, 0x87, 0xdd, 0x36, 0x3d, 0xdf, + 0xe2, 0x74, 0x54, 0xa5, 0x52, 0x86, 0x44, 0x2b, 0x0d, 0x4a, 0x2d, 0x2e, 0xc8, 0xcf, 0x2b, 0x4e, + 0x05, 0xd9, 0x17, 0x5a, 0x90, 0x42, 0xac, 0x7d, 0xa8, 0x4a, 0xf1, 0xda, 0x87, 0xaa, 0x14, 0xd9, + 0x3e, 0x97, 0xd4, 0x9c, 0x54, 0x22, 0xed, 0x43, 0x55, 0x8a, 0xd7, 0x3e, 0x54, 0xa5, 0x30, 0xfb, + 0xa4, 0x58, 0x1b, 0x9e, 0x6f, 0xd0, 0x62, 0x74, 0xb2, 0x3b, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, + 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, + 0x63, 0x39, 0x86, 0x28, 0x95, 0x82, 0xec, 0x74, 0xbd, 0xc4, 0xec, 0x12, 0xbd, 0x94, 0xd4, 0x32, + 0xfd, 0xf4, 0x7c, 0xfd, 0xbc, 0xfc, 0x94, 0x54, 0x8c, 0x88, 0x4a, 0x62, 0x03, 0xc7, 0x92, 0x31, + 0x20, 0x00, 0x00, 0xff, 0xff, 0x2b, 0xd4, 0x25, 0xcf, 0x20, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type MsgClient interface { + // CreateProvider defines a method that creates a provider given the proper inputs + CreateProvider(ctx context.Context, in *MsgCreateProvider, opts ...grpc.CallOption) (*MsgCreateProviderResponse, error) + // UpdateProvider defines a method that updates a provider given the proper inputs + UpdateProvider(ctx context.Context, in *MsgUpdateProvider, opts ...grpc.CallOption) (*MsgUpdateProviderResponse, error) + // DeleteProvider defines a method that deletes a provider given the proper inputs + DeleteProvider(ctx context.Context, in *MsgDeleteProvider, opts ...grpc.CallOption) (*MsgDeleteProviderResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) CreateProvider(ctx context.Context, in *MsgCreateProvider, opts ...grpc.CallOption) (*MsgCreateProviderResponse, error) { + out := new(MsgCreateProviderResponse) + err := c.cc.Invoke(ctx, "/akash.provider.v1beta4.Msg/CreateProvider", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdateProvider(ctx context.Context, in *MsgUpdateProvider, opts ...grpc.CallOption) (*MsgUpdateProviderResponse, error) { + out := new(MsgUpdateProviderResponse) + err := c.cc.Invoke(ctx, "/akash.provider.v1beta4.Msg/UpdateProvider", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) DeleteProvider(ctx context.Context, in *MsgDeleteProvider, opts ...grpc.CallOption) (*MsgDeleteProviderResponse, error) { + out := new(MsgDeleteProviderResponse) + err := c.cc.Invoke(ctx, "/akash.provider.v1beta4.Msg/DeleteProvider", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // CreateProvider defines a method that creates a provider given the proper inputs + CreateProvider(context.Context, *MsgCreateProvider) (*MsgCreateProviderResponse, error) + // UpdateProvider defines a method that updates a provider given the proper inputs + UpdateProvider(context.Context, *MsgUpdateProvider) (*MsgUpdateProviderResponse, error) + // DeleteProvider defines a method that deletes a provider given the proper inputs + DeleteProvider(context.Context, *MsgDeleteProvider) (*MsgDeleteProviderResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
+type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) CreateProvider(ctx context.Context, req *MsgCreateProvider) (*MsgCreateProviderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateProvider not implemented") +} +func (*UnimplementedMsgServer) UpdateProvider(ctx context.Context, req *MsgUpdateProvider) (*MsgUpdateProviderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateProvider not implemented") +} +func (*UnimplementedMsgServer) DeleteProvider(ctx context.Context, req *MsgDeleteProvider) (*MsgDeleteProviderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteProvider not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_CreateProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCreateProvider) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CreateProvider(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.provider.v1beta4.Msg/CreateProvider", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CreateProvider(ctx, req.(*MsgCreateProvider)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdateProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateProvider) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateProvider(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.provider.v1beta4.Msg/UpdateProvider", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateProvider(ctx, req.(*MsgUpdateProvider)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_DeleteProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgDeleteProvider) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).DeleteProvider(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.provider.v1beta4.Msg/DeleteProvider", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).DeleteProvider(ctx, req.(*MsgDeleteProvider)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.provider.v1beta4.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateProvider", + Handler: _Msg_CreateProvider_Handler, + }, + { + MethodName: "UpdateProvider", + Handler: _Msg_UpdateProvider_Handler, + }, + { + MethodName: "DeleteProvider", + Handler: _Msg_DeleteProvider_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/provider/v1beta4/service.proto", +} diff --git a/go/node/provider/v1beta4/types.go b/go/node/provider/v1beta4/types.go new file mode 100644 index 00000000..a5c28743 --- /dev/null +++ b/go/node/provider/v1beta4/types.go @@ -0,0 +1,67 @@ +package v1beta4 + +import ( + "bytes" + "fmt" + "net/url" + + sdk "github.com/cosmos/cosmos-sdk/types" +) 
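The generated files above only provide the wire types, the Msg/Query service plumbing, and the grpc-gateway handlers; nothing in this diff shows a consumer. The sketch below is illustrative only, not part of the diff: the import path github.com/akash-network/akash-api/go/node/provider/v1beta4, the node endpoint, the owner address, and the msgServer/registerMsg names are assumptions, while MsgServer, UnimplementedMsgServer, RegisterMsgServer, NewQueryClient, QueryProvidersRequest and QueryProviderRequest come from the generated code above.

package main

import (
	"context"
	"fmt"

	sdkquery "github.com/cosmos/cosmos-sdk/types/query"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	// Assumed module path for the package generated in this diff.
	providerv1beta4 "github.com/akash-network/akash-api/go/node/provider/v1beta4"
)

// msgServer is a hypothetical MsgServer implementation. Embedding
// UnimplementedMsgServer keeps it forward compatible: any RPC that is not
// overridden answers with codes.Unimplemented instead of failing to compile.
type msgServer struct {
	providerv1beta4.UnimplementedMsgServer
}

// CreateProvider overrides a single RPC of the generated Msg service.
func (s *msgServer) CreateProvider(ctx context.Context, msg *providerv1beta4.MsgCreateProvider) (*providerv1beta4.MsgCreateProviderResponse, error) {
	// A real handler would validate msg and write the provider to module state.
	return &providerv1beta4.MsgCreateProviderResponse{}, nil
}

// registerMsg shows the server-side wiring; *grpc.Server satisfies the
// gogoproto grpc1.Server interface expected by RegisterMsgServer.
func registerMsg(s *grpc.Server) {
	providerv1beta4.RegisterMsgServer(s, &msgServer{})
}

func main() {
	// Client side: dial a node's gRPC endpoint (placeholder address) and use
	// the generated QueryClient to page through providers.
	conn, err := grpc.Dial("grpc.example.node:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	qc := providerv1beta4.NewQueryClient(conn)

	providers, err := qc.Providers(context.Background(), &providerv1beta4.QueryProvidersRequest{
		Pagination: &sdkquery.PageRequest{Limit: 10},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(providers.Providers)

	// Single provider lookup by owner address (placeholder bech32 value).
	one, err := qc.Provider(context.Background(), &providerv1beta4.QueryProviderRequest{Owner: "akash1exampleowner"})
	if err != nil {
		panic(err)
	}
	fmt.Println(one.Provider.HostURI)
}

The same two queries are also reachable over REST once the gateway handlers above are registered (RegisterQueryHandlerFromEndpoint), at the /akash/provider/v1beta4/providers and /akash/provider/v1beta4/providers/{owner} paths defined by pattern_Query_Providers_0 and pattern_Query_Provider_0.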
+ +// String implements the Stringer interface for a Provider object. +func (p Provider) String() string { + res := fmt.Sprintf(`Deployment + Owner: %s + HostURI: %s + Attributes: %v + `, p.Owner, p.HostURI, p.Attributes) + + if !p.Info.IsEmpty() { + res += fmt.Sprintf("Info: %v\n", p.Info) + } + return res +} + +// Providers is the collection of Provider +type Providers []Provider + +// String implements the Stringer interface for a Providers object. +func (obj Providers) String() string { + var buf bytes.Buffer + + const sep = "\n\n" + + for _, p := range obj { + buf.WriteString(p.String()) + buf.WriteString(sep) + } + + if len(obj) > 0 { + buf.Truncate(buf.Len() - len(sep)) + } + + return buf.String() +} + +// Address implements provider and returns owner of provider +func (p *Provider) Address() sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(p.Owner) + if err != nil { + panic(err) + } + + return owner +} + +func (m Info) IsEmpty() bool { + return m.EMail == "" && m.Website == "" +} + +func (m Info) Validate() error { + if m.Website != "" { + if _, err := url.Parse(m.Website); err != nil { + return ErrInvalidInfoWebsite + } + } + return nil +} diff --git a/go/node/staking/v1beta3/codec.go b/go/node/staking/v1beta3/codec.go index cb3f6e09..66e49f14 100644 --- a/go/node/staking/v1beta3/codec.go +++ b/go/node/staking/v1beta3/codec.go @@ -3,22 +3,41 @@ package v1beta3 import ( "github.com/cosmos/cosmos-sdk/codec" cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" ) var ( - amino = codec.NewLegacyAmino() + // amino = codec.NewLegacyAmino() - // ModuleCdc references the global x/provider module codec. Note, the codec should + // ModuleCdc references the global x/astaking module codec. Note, the codec should // ONLY be used in certain instances of tests and for JSON encoding as Amino is // still used for that purpose. // // The actual codec used for serialization should be provided to x/provider and // defined at the application level. 
+ // + // Deprecated: ModuleCdc use is deprecated ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) ) -func init() { - cryptocodec.RegisterCrypto(amino) - amino.Seal() +// func init() { +// cryptocodec.RegisterCrypto(amino) +// amino.Seal() +// } + +// RegisterLegacyAminoCodec register concrete types on codec +// +// Deprecated: RegisterLegacyAminoCodec is deprecated +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + cdc.RegisterConcrete(&MsgUpdateParams{}, "akash-sdk/x/"+ModuleName+"/"+(&MsgUpdateParams{}).Type(), nil) +} + +// RegisterInterfaces registers the x/provider interfaces types with the interface registry +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), + &MsgUpdateParams{}, + ) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) } diff --git a/go/node/staking/v1beta3/genesis.pb.go b/go/node/staking/v1beta3/genesis.pb.go index f9addbca..7a8e296c 100644 --- a/go/node/staking/v1beta3/genesis.pb.go +++ b/go/node/staking/v1beta3/genesis.pb.go @@ -5,8 +5,8 @@ package v1beta3 import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -77,7 +77,7 @@ func init() { } var fileDescriptor_b23589504e747952 = []byte{ - // 227 bytes of a gzipped FileDescriptorProto + // 212 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4e, 0xcc, 0x4e, 0x2c, 0xce, 0xd0, 0x2f, 0x2e, 0x49, 0xcc, 0xce, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd6, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, @@ -86,13 +86,12 @@ var fileDescriptor_b23589504e747952 = []byte{ 0x35, 0x50, 0x29, 0x8d, 0x8b, 0xc7, 0x1d, 0x62, 0x43, 0x70, 0x49, 0x62, 0x49, 0xaa, 0x50, 0x18, 0x17, 0x1b, 0x44, 0x5e, 0x82, 0x51, 0x81, 0x51, 0x83, 0xdb, 0x48, 0x56, 0x0f, 0xab, 0x8d, 0x7a, 0x01, 0x60, 0x45, 0x4e, 0xf2, 0x27, 0xee, 0xc9, 0x33, 0xbc, 0xba, 0x27, 0x0f, 0xd5, 0xf4, 0xe9, - 0x9e, 0x3c, 0x6f, 0x65, 0x62, 0x6e, 0x8e, 0x95, 0x12, 0x84, 0xaf, 0x14, 0x04, 0x95, 0x70, 0x0a, - 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, - 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xab, 0xf4, 0xcc, 0x92, 0x8c, - 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0x5d, 0xba, 0x79, 0xa9, 0x25, 0xe5, 0xf9, 0x45, - 0xd9, 0x50, 0x5e, 0x62, 0x41, 0xa6, 0x7e, 0x7a, 0xbe, 0x7e, 0x5e, 0x7e, 0x4a, 0x2a, 0xba, 0x57, - 0x92, 0xd8, 0xc0, 0x9e, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x6a, 0xe1, 0x9e, 0x3e, 0x3c, - 0x01, 0x00, 0x00, + 0x9e, 0x3c, 0x6f, 0x65, 0x62, 0x6e, 0x8e, 0x95, 0x12, 0x84, 0xaf, 0x14, 0x04, 0x95, 0x70, 0xb2, + 0x3d, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, + 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xe5, 0x82, 0xec, 0x74, 0xbd, + 0xc4, 0xec, 0x12, 0xbd, 0x94, 0xd4, 0x32, 0xfd, 0xf4, 0x7c, 0xfd, 0xbc, 0xfc, 0x94, 0x54, 0x74, + 0x37, 0x27, 0xb1, 0x81, 0x5d, 0x6b, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x70, 0x9d, 0x42, + 0x25, 0x01, 0x00, 0x00, } func (m *GenesisState) Marshal() (dAtA []byte, err error) { diff --git a/go/node/staking/v1beta3/key.go b/go/node/staking/v1beta3/key.go index 068ca446..2782adf2 100644 --- a/go/node/staking/v1beta3/key.go +++ b/go/node/staking/v1beta3/key.go @@ -13,3 +13,7 @@ const ( // 
QuerierRoute is the querier route for gov QuerierRoute = ModuleName ) + +func ParamsPrefix() []byte { + return []byte{0x01} +} diff --git a/go/node/staking/v1beta3/msgs.go b/go/node/staking/v1beta3/msgs.go new file mode 100644 index 00000000..cdc9f4e4 --- /dev/null +++ b/go/node/staking/v1beta3/msgs.go @@ -0,0 +1,66 @@ +package v1beta3 + +import ( + "reflect" + + cerrors "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +var ( + _ sdk.Msg = &MsgUpdateParams{} +) + +var ( + msgTypeUpdateParams = "" +) + +func init() { + msgTypeUpdateParams = reflect.TypeOf(&MsgUpdateParams{}).Elem().Name() +} + +// ====MsgUpdateParams==== + +// Type implements the sdk.Msg interface +func (m *MsgUpdateParams) Type() string { return msgTypeUpdateParams } + +// GetSigners returns the expected signers for a MsgUpdateParams message. +func (m *MsgUpdateParams) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(m.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. +func (m *MsgUpdateParams) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(m.Authority); err != nil { + return cerrors.Wrap(err, "invalid authority address") + } + + if err := m.Params.Validate(); err != nil { + return err + } + + return nil +} + +// ============= GetSignBytes ============= +// ModuleCdc is defined in codec.go +// TODO @troian to check if we need them at all + +// GetSignBytes encodes the message for signing +// +// Deprecated: GetSignBytes is deprecated +func (m *MsgUpdateParams) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(m)) +} + +// ============= Route ============= +// ModuleCdc is defined in codec.go +// TODO @troian to check if we need them at all since sdk.Msg does not not have Route defined anymore + +// Route implements the sdk.Msg interface +// +// Deprecated: Route is deprecated +func (m *MsgUpdateParams) Route() string { + return RouterKey +} diff --git a/go/node/staking/v1beta3/params.go b/go/node/staking/v1beta3/params.go index 2e815882..d1d99797 100644 --- a/go/node/staking/v1beta3/params.go +++ b/go/node/staking/v1beta3/params.go @@ -56,6 +56,8 @@ func (p Params) Validate() error { return nil } +// ParamKeyTable for astaking module +// Deprecated: now params can be accessed via cosmos-sdk staking store func ParamKeyTable() paramtypes.KeyTable { return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) } diff --git a/go/node/staking/v1beta3/params.pb.go b/go/node/staking/v1beta3/params.pb.go index bdb72055..4a737f99 100644 --- a/go/node/staking/v1beta3/params.pb.go +++ b/go/node/staking/v1beta3/params.pb.go @@ -6,8 +6,8 @@ package v1beta3 import ( fmt "fmt" github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -72,7 +72,7 @@ func init() { } var fileDescriptor_2132d2a1c0a6f259 = []byte{ - // 251 bytes of a gzipped FileDescriptorProto + // 245 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4a, 0xcc, 0x4e, 0x2c, 0xce, 0xd0, 0x2f, 0x2e, 0x49, 0xcc, 0xce, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd6, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, @@ -83,12 +83,12 @@ var fileDescriptor_2132d2a1c0a6f259 = []byte{ 0xb8, 0x75, 0x4f, 0x5e, 
0x2d, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f, 0x57, 0x3f, 0x39, 0xbf, 0x38, 0x37, 0xbf, 0x18, 0x4a, 0xe9, 0x16, 0xa7, 0x64, 0xeb, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0xb9, 0xa4, 0x26, 0x7f, 0xba, 0x27, 0x2f, 0x55, 0x99, 0x98, 0x9b, 0x63, 0xa5, 0x84, - 0xc5, 0x48, 0xa5, 0x20, 0xc1, 0xdc, 0xcc, 0x3c, 0x67, 0xb8, 0x60, 0x50, 0x62, 0x49, 0xaa, 0x53, - 0xc8, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, - 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x59, 0x21, 0x59, 0x09, 0xf6, - 0x9a, 0x6e, 0x5e, 0x6a, 0x49, 0x79, 0x7e, 0x51, 0x36, 0x94, 0x97, 0x58, 0x90, 0xa9, 0x9f, 0x9e, - 0xaf, 0x9f, 0x97, 0x9f, 0x92, 0x8a, 0x1e, 0x30, 0x49, 0x6c, 0x60, 0x5f, 0x1a, 0x03, 0x02, 0x00, - 0x00, 0xff, 0xff, 0xf0, 0x3d, 0xdb, 0x62, 0x38, 0x01, 0x00, 0x00, + 0xc5, 0x48, 0xa5, 0x20, 0xc1, 0xdc, 0xcc, 0x3c, 0x67, 0xb8, 0x60, 0x50, 0x62, 0x49, 0xaa, 0x93, + 0xed, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, + 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x29, 0x17, 0x64, 0xa7, 0xeb, + 0x25, 0x66, 0x97, 0xe8, 0xa5, 0xa4, 0x96, 0xe9, 0xa7, 0xe7, 0xeb, 0xe7, 0xe5, 0xa7, 0xa4, 0xa2, + 0x87, 0x40, 0x12, 0x1b, 0xd8, 0x3b, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x89, 0x1e, 0xb4, + 0x26, 0x21, 0x01, 0x00, 0x00, } func (m *Params) Marshal() (dAtA []byte, err error) { diff --git a/go/node/staking/v1beta3/paramsmsg.pb.go b/go/node/staking/v1beta3/paramsmsg.pb.go new file mode 100644 index 00000000..112e4cd9 --- /dev/null +++ b/go/node/staking/v1beta3/paramsmsg.pb.go @@ -0,0 +1,513 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/staking/v1beta3/paramsmsg.proto + +package v1beta3 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgUpdateParams is the Msg/UpdateParams request type. +// +// Since: akash v1.0.0 +type MsgUpdateParams struct { + // authority is the address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // params defines the x/deployment parameters to update. + // + // NOTE: All parameters must be supplied. 
+ Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params"` +} + +func (m *MsgUpdateParams) Reset() { *m = MsgUpdateParams{} } +func (m *MsgUpdateParams) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParams) ProtoMessage() {} +func (*MsgUpdateParams) Descriptor() ([]byte, []int) { + return fileDescriptor_c5e1218effcfafd1, []int{0} +} +func (m *MsgUpdateParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParams.Merge(m, src) +} +func (m *MsgUpdateParams) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParams) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParams.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParams proto.InternalMessageInfo + +func (m *MsgUpdateParams) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgUpdateParams) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +// MsgUpdateParamsResponse defines the response structure for executing a +// MsgUpdateParams message. +// +// Since: akash v1.0.0 +type MsgUpdateParamsResponse struct { +} + +func (m *MsgUpdateParamsResponse) Reset() { *m = MsgUpdateParamsResponse{} } +func (m *MsgUpdateParamsResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParamsResponse) ProtoMessage() {} +func (*MsgUpdateParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c5e1218effcfafd1, []int{1} +} +func (m *MsgUpdateParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParamsResponse.Merge(m, src) +} +func (m *MsgUpdateParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParamsResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgUpdateParams)(nil), "akash.staking.v1beta3.MsgUpdateParams") + proto.RegisterType((*MsgUpdateParamsResponse)(nil), "akash.staking.v1beta3.MsgUpdateParamsResponse") +} + +func init() { + proto.RegisterFile("akash/staking/v1beta3/paramsmsg.proto", fileDescriptor_c5e1218effcfafd1) +} + +var fileDescriptor_c5e1218effcfafd1 = []byte{ + // 316 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x2f, 0x2e, 0x49, 0xcc, 0xce, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, + 0x34, 0xd6, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xce, 0x2d, 0x4e, 0xd7, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x12, 0x05, 0x2b, 0xd3, 0x83, 0x2a, 0xd3, 0x83, 0x2a, 0x93, 0x12, 0x49, 0xcf, 0x4f, + 0xcf, 0x07, 0xab, 0xd0, 0x07, 0xb1, 0x20, 0x8a, 0xa5, 0x04, 0x13, 0x73, 0x33, 
0xf3, 0xf2, 0xf5, + 0xc1, 0x24, 0x54, 0x48, 0x3c, 0x39, 0xbf, 0x38, 0x37, 0xbf, 0x58, 0x3f, 0xb7, 0x18, 0x64, 0x87, + 0x3e, 0xdc, 0x60, 0x29, 0x49, 0x88, 0x44, 0x3c, 0xc4, 0x10, 0x08, 0x07, 0x2a, 0xa5, 0x84, 0xcf, + 0x69, 0x10, 0x35, 0x4a, 0xfb, 0x19, 0xb9, 0xf8, 0x7d, 0x8b, 0xd3, 0x43, 0x0b, 0x52, 0x12, 0x4b, + 0x52, 0x03, 0xc0, 0x32, 0x42, 0x66, 0x5c, 0x9c, 0x89, 0xa5, 0x25, 0x19, 0xf9, 0x45, 0x99, 0x25, + 0x95, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x9c, 0x4e, 0x12, 0x97, 0xb6, 0xe8, 0x8a, 0x40, 0x0d, 0x77, + 0x4c, 0x49, 0x29, 0x4a, 0x2d, 0x2e, 0x0e, 0x2e, 0x29, 0xca, 0xcc, 0x4b, 0x0f, 0x42, 0x28, 0x15, + 0x72, 0xe0, 0x62, 0x83, 0x98, 0x2d, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x6d, 0x24, 0xab, 0x87, 0xd5, + 0xd3, 0x7a, 0x10, 0x6b, 0x9c, 0x38, 0x4f, 0xdc, 0x93, 0x67, 0x58, 0xf1, 0x7c, 0x83, 0x16, 0x63, + 0x10, 0x54, 0x9f, 0x95, 0x51, 0xd3, 0xf3, 0x0d, 0x5a, 0x08, 0x13, 0xbb, 0x9e, 0x6f, 0xd0, 0x92, + 0x87, 0x78, 0xa2, 0x02, 0xee, 0x0d, 0x34, 0xd7, 0x2a, 0x49, 0x72, 0x89, 0xa3, 0x09, 0x05, 0xa5, + 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x3a, 0xd9, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, + 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, + 0x1c, 0x43, 0x94, 0x72, 0x41, 0x76, 0xba, 0x5e, 0x62, 0x76, 0x89, 0x5e, 0x4a, 0x6a, 0x99, 0x7e, + 0x7a, 0xbe, 0x7e, 0x5e, 0x7e, 0x4a, 0x2a, 0x7a, 0x40, 0x25, 0xb1, 0x81, 0x83, 0xc8, 0x18, 0x10, + 0x00, 0x00, 0xff, 0xff, 0x6a, 0x93, 0x4f, 0xd7, 0xe3, 0x01, 0x00, 0x00, +} + +func (m *MsgUpdateParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParamsmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintParamsmsg(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintParamsmsg(dAtA []byte, offset int, v uint64) int { + offset -= sovParamsmsg(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgUpdateParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovParamsmsg(uint64(l)) + } + l = m.Params.Size() + n += 1 + l + sovParamsmsg(uint64(l)) + return n +} + +func (m *MsgUpdateParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovParamsmsg(x uint64) (n int) { + return 
(math_bits.Len64(x|1) + 6) / 7 +} +func sozParamsmsg(x uint64) (n int) { + return sovParamsmsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgUpdateParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParamsmsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParamsmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParamsmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParamsmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParamsmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParamsmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipParamsmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParamsmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > 
l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParamsmsg(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParamsmsg + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParamsmsg + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParamsmsg + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParamsmsg = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParamsmsg = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParamsmsg = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/staking/v1beta3/query.pb.go b/go/node/staking/v1beta3/query.pb.go new file mode 100644 index 00000000..bbc3ce3a --- /dev/null +++ b/go/node/staking/v1beta3/query.pb.go @@ -0,0 +1,537 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/staking/v1beta3/query.proto + +package v1beta3 + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryParamsRequest is the request type for the Query/Params RPC method. 
+type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b58ba6fa3ac0c571, []int{0} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +// QueryParamsResponse is the response type for the Query/Params RPC method. +type QueryParamsResponse struct { + // params defines the parameters of the module. + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b58ba6fa3ac0c571, []int{1} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +func (m *QueryParamsResponse) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*QueryParamsRequest)(nil), "akash.staking.v1beta3.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "akash.staking.v1beta3.QueryParamsResponse") +} + +func init() { proto.RegisterFile("akash/staking/v1beta3/query.proto", fileDescriptor_b58ba6fa3ac0c571) } + +var fileDescriptor_b58ba6fa3ac0c571 = []byte{ + // 291 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x2f, 0x2e, 0x49, 0xcc, 0xce, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, + 0x34, 0xd6, 0x2f, 0x2c, 0x4d, 0x2d, 0xaa, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x05, + 0x2b, 0xd1, 0x83, 0x2a, 0xd1, 0x83, 0x2a, 0x91, 0x92, 0x49, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, + 0x4f, 0x2c, 0xc8, 0xd4, 0x4f, 0xcc, 0xcb, 0xcb, 0x2f, 0x49, 0x2c, 0xc9, 0xcc, 0xcf, 0x2b, 0x86, + 0x68, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x33, 0xf5, 0x41, 0x2c, 0xa8, 
0xa8, 0x60, 0x62, + 0x6e, 0x66, 0x5e, 0xbe, 0x3e, 0x98, 0x84, 0x0a, 0x29, 0x61, 0x77, 0x40, 0x41, 0x62, 0x51, 0x62, + 0x2e, 0xd4, 0x30, 0x25, 0x11, 0x2e, 0xa1, 0x40, 0x90, 0x83, 0x02, 0xc0, 0x82, 0x41, 0xa9, 0x85, + 0xa5, 0xa9, 0xc5, 0x25, 0x4a, 0xe1, 0x5c, 0xc2, 0x28, 0xa2, 0xc5, 0x05, 0xf9, 0x79, 0xc5, 0xa9, + 0x42, 0x0e, 0x5c, 0x6c, 0x10, 0xcd, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xdc, 0x46, 0xb2, 0x7a, 0x58, + 0xdd, 0xaf, 0x07, 0xd1, 0xe6, 0xc4, 0x79, 0xe2, 0x9e, 0x3c, 0xc3, 0x8a, 0xe7, 0x1b, 0xb4, 0x18, + 0x83, 0xa0, 0xfa, 0x8c, 0x26, 0x30, 0x72, 0xb1, 0x82, 0x4d, 0x16, 0x6a, 0x63, 0xe4, 0x62, 0x83, + 0xa8, 0x13, 0xd2, 0xc4, 0x61, 0x0c, 0xa6, 0xc3, 0xa4, 0xb4, 0x88, 0x51, 0x0a, 0x71, 0xad, 0x92, + 0x6a, 0xd3, 0xe5, 0x27, 0x93, 0x99, 0xe4, 0x85, 0x64, 0xf5, 0xf1, 0x85, 0x83, 0x93, 0xed, 0x89, + 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, + 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x29, 0x17, 0x64, 0xa7, 0xeb, 0x25, 0x66, + 0x97, 0xe8, 0xa5, 0xa4, 0x96, 0xe9, 0xa7, 0xe7, 0xeb, 0xe7, 0xe5, 0xa7, 0xa4, 0xa2, 0x9b, 0x92, + 0xc4, 0x06, 0x0e, 0x47, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x14, 0x1d, 0x20, 0xae, 0xee, + 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Params returns the total set of minting parameters. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/akash.staking.v1beta3.Query/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Params returns the total set of minting parameters. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.staking.v1beta3.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.staking.v1beta3.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/staking/v1beta3/query.proto", +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire 
|= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + 
return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/staking/v1beta3/query.pb.gw.go b/go/node/staking/v1beta3/query.pb.gw.go new file mode 100644 index 00000000..bb942a9d --- /dev/null +++ b/go/node/staking/v1beta3/query.pb.gw.go @@ -0,0 +1,153 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: akash/staking/v1beta3/query.proto + +/* +Package v1beta3 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package v1beta3 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.Params(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
+func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"akash", "staking", "v1beta3", "params"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_Params_0 = runtime.ForwardResponseMessage +) diff --git a/go/node/staking/v1beta3/service.pb.go b/go/node/staking/v1beta3/service.pb.go new file mode 100644 index 00000000..650806e1 --- /dev/null +++ b/go/node/staking/v1beta3/service.pb.go @@ -0,0 +1,137 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/staking/v1beta3/service.proto + +package v1beta3 + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("akash/staking/v1beta3/service.proto", fileDescriptor_f6c1e84a476ade99) +} + +var fileDescriptor_f6c1e84a476ade99 = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4e, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x2f, 0x2e, 0x49, 0xcc, 0xce, 0xcc, 0x4b, 0xd7, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, + 0x34, 0xd6, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, + 0x12, 0x05, 0x2b, 0xd2, 0x83, 0x2a, 0xd2, 0x83, 0x2a, 0x92, 0x52, 0xc5, 0xae, 0xb7, 0x20, 0xb1, + 0x28, 0x31, 0xb7, 0x38, 0xb7, 0x38, 0x1d, 0xa2, 0x5b, 0x4a, 0x3c, 0x39, 0xbf, 0x38, 0x37, 0xbf, + 0x58, 0x3f, 0xb7, 0x18, 0xa4, 0x46, 0x1f, 0x2e, 0x61, 0x54, 0xc2, 0xc5, 0xec, 0x5b, 0x9c, 0x2e, + 0x94, 0xc6, 0xc5, 0x13, 0x5a, 0x90, 0x92, 0x58, 0x92, 0x1a, 0x00, 0xd6, 0x28, 0xa4, 0xa6, 0x87, + 0xd5, 0x3a, 0x3d, 0xdf, 0xe2, 0x74, 0x64, 0x75, 0x52, 0x7a, 0xc4, 0xa9, 0x0b, 0x4a, 0x2d, 0x2e, + 0xc8, 0xcf, 0x2b, 0x4e, 0x95, 0x62, 0x6d, 0x78, 0xbe, 0x41, 0x8b, 0xd1, 0xc9, 0xf6, 0xc4, 0x23, + 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, + 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x94, 0x0b, 0xb2, 0xd3, 0xf5, 0x12, 0xb3, 0x4b, + 0xf4, 0x52, 0x52, 0xcb, 0xf4, 0xd3, 0xf3, 0xf5, 0xf3, 0xf2, 0x53, 0x52, 0xd1, 0x7d, 0x97, 0xc4, + 0x06, 0x76, 0xbb, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xc6, 0xcf, 0x0f, 0x1b, 0x39, 0x01, 0x00, + 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // UpdateParams defines a governance operation for updating the x/market module + // parameters. 
The authority is hard-coded to the x/gov module account. + // + // Since: akash v1.0.0 + UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) { + out := new(MsgUpdateParamsResponse) + err := c.cc.Invoke(ctx, "/akash.staking.v1beta3.Msg/UpdateParams", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // UpdateParams defines a governance operation for updating the x/market module + // parameters. The authority is hard-coded to the x/gov module account. + // + // Since: akash v1.0.0 + UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) UpdateParams(ctx context.Context, req *MsgUpdateParams) (*MsgUpdateParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateParams) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateParams(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.staking.v1beta3.Msg/UpdateParams", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateParams(ctx, req.(*MsgUpdateParams)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.staking.v1beta3.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UpdateParams", + Handler: _Msg_UpdateParams_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/staking/v1beta3/service.proto", +} diff --git a/go/node/take/v1/codec.go b/go/node/take/v1/codec.go new file mode 100644 index 00000000..718c4252 --- /dev/null +++ b/go/node/take/v1/codec.go @@ -0,0 +1,44 @@ +package v1 + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +var ( + // amino = codec.NewLegacyAmino() + + // ModuleCdc references the global x/deployment module codec. Note, the codec should + // ONLY be used in certain instances of tests and for JSON encoding as Amino is + // still used for that purpose. + // + // The actual codec used for serialization should be provided to x/deployment and + // defined at the application level. 
+ // + // Deprecated: ModuleCdc use is deprecated + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) + +// func init() { +// RegisterLegacyAminoCodec(amino) +// cryptocodec.RegisterCrypto(amino) +// amino.Seal() +// } + +// RegisterLegacyAminoCodec register concrete types on codec +// +// Deprecated: RegisterLegacyAminoCodec is deprecated +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + cdc.RegisterConcrete(&MsgUpdateParams{}, "akash-sdk/x/"+ModuleName+"/"+(&MsgUpdateParams{}).Type(), nil) +} + +// RegisterInterfaces registers the x/provider interfaces types with the interface registry +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), + &MsgUpdateParams{}, + ) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} diff --git a/go/node/take/v1beta3/denom_take_rate.go b/go/node/take/v1/denom_take_rate.go similarity index 95% rename from go/node/take/v1beta3/denom_take_rate.go rename to go/node/take/v1/denom_take_rate.go index 1b18485f..349272c5 100644 --- a/go/node/take/v1beta3/denom_take_rate.go +++ b/go/node/take/v1/denom_take_rate.go @@ -1,4 +1,4 @@ -package v1beta3 +package v1 import ( "sort" diff --git a/go/node/take/v1/errors.go b/go/node/take/v1/errors.go new file mode 100644 index 00000000..db9697d6 --- /dev/null +++ b/go/node/take/v1/errors.go @@ -0,0 +1,14 @@ +package v1 + +import ( + sdkerrors "cosmossdk.io/errors" +) + +const ( + errInvalidParam uint32 = iota + 1 +) + +var ( + // ErrInvalidParam indicates an invalid chain parameter + ErrInvalidParam = sdkerrors.Register(ModuleName, errInvalidParam, "parameter invalid") +) diff --git a/go/node/take/v1/genesis.pb.go b/go/node/take/v1/genesis.pb.go new file mode 100644 index 00000000..2da83065 --- /dev/null +++ b/go/node/take/v1/genesis.pb.go @@ -0,0 +1,321 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/take/v1/genesis.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState stores slice of genesis deployment instance +type GenesisState struct { + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params" yaml:"params"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_7e42c7967782a711, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "akash.take.v1.GenesisState") +} + +func init() { proto.RegisterFile("akash/take/v1/genesis.proto", fileDescriptor_7e42c7967782a711) } + +var fileDescriptor_7e42c7967782a711 = []byte{ + // 203 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x2f, 0x49, 0xcc, 0x4e, 0xd5, 0x2f, 0x33, 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, + 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x05, 0x4b, 0xea, 0x81, 0x24, 0xf5, 0xca, + 0x0c, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x32, 0xfa, 0x20, 0x16, 0x44, 0x91, 0x94, 0x14, + 0xaa, 0x09, 0x05, 0x89, 0x45, 0x89, 0xb9, 0x50, 0x03, 0x94, 0xe2, 0xb8, 0x78, 0xdc, 0x21, 0x26, + 0x06, 0x97, 0x24, 0x96, 0xa4, 0x0a, 0xf9, 0x71, 0xb1, 0x41, 0xe4, 0x25, 0x18, 0x15, 0x18, 0x35, + 0xb8, 0x8d, 0x44, 0xf5, 0x50, 0x6c, 0xd0, 0x0b, 0x00, 0x4b, 0x3a, 0xc9, 0x9f, 0xb8, 0x27, 0xcf, + 0xf0, 0xea, 0x9e, 0x3c, 0x54, 0xf1, 0xa7, 0x7b, 0xf2, 0xbc, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, + 0x10, 0xbe, 0x52, 0x10, 0x54, 0xc2, 0xc9, 0xf4, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, + 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, + 0x18, 0xa2, 0xa4, 0x0b, 0xb2, 0xd3, 0xf5, 0x12, 0xb3, 0x4b, 0xf4, 0x52, 0x52, 0xcb, 0xf4, 0xd3, + 0xf3, 0xf5, 0xf3, 0xf2, 0x53, 0x52, 0x61, 0x6e, 0x4c, 0x62, 0x03, 0xbb, 0xce, 0x18, 0x10, 0x00, + 0x00, 0xff, 0xff, 0x6f, 0x75, 0x79, 0x5c, 0xfd, 0x00, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if 
iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/take/v1/key.go b/go/node/take/v1/key.go new file mode 100644 index 00000000..c55fdb89 --- /dev/null +++ b/go/node/take/v1/key.go @@ -0,0 +1,16 @@ +package v1 + +const ( + // ModuleName is the module name constant used in many places + ModuleName = "take" + + // StoreKey is the store key string for deployment + StoreKey = ModuleName + + // RouterKey is the message route for deployment + RouterKey = ModuleName +) + +func ParamsPrefix() []byte { + return []byte{0x01} +} diff --git a/go/node/take/v1/msgs.go b/go/node/take/v1/msgs.go new file mode 100644 index 00000000..5c89ad23 --- /dev/null +++ b/go/node/take/v1/msgs.go @@ -0,0 +1,66 @@ +package v1 + +import ( + "reflect" + + cerrors "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +var ( + _ sdk.Msg = &MsgUpdateParams{} +) + +var ( + msgTypeUpdateParams = "" +) + +func init() { + msgTypeUpdateParams = reflect.TypeOf(&MsgUpdateParams{}).Elem().Name() +} + +// ====MsgUpdateParams==== + +// Type implements the sdk.Msg interface +func (m *MsgUpdateParams) Type() string { return msgTypeUpdateParams } + +// GetSigners returns the expected signers for a MsgUpdateParams message. +func (m *MsgUpdateParams) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(m.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. +func (m *MsgUpdateParams) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(m.Authority); err != nil { + return cerrors.Wrap(err, "invalid authority address") + } + + if err := m.Params.Validate(); err != nil { + return err + } + + return nil +} + +// ============= GetSignBytes ============= +// ModuleCdc is defined in codec.go +// TODO @troian to check if we need them at all + +// GetSignBytes encodes the message for signing +// +// Deprecated: GetSignBytes is deprecated +func (m *MsgUpdateParams) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(m)) +} + +// ============= Route ============= +// ModuleCdc is defined in codec.go +// TODO @troian to check if we need them at all since sdk.Msg does not not have Route defined anymore + +// Route implements the sdk.Msg interface +// +// Deprecated: Route is deprecated +func (m *MsgUpdateParams) Route() string { + return RouterKey +} diff --git a/go/node/take/v1/params.go b/go/node/take/v1/params.go new file mode 100644 index 00000000..0d0a9c4b --- /dev/null +++ b/go/node/take/v1/params.go @@ -0,0 +1,89 @@ +package v1 + +import ( + "fmt" + + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" +) + +var _ paramtypes.ParamSet = (*Params)(nil) + +var ( + KeyDefaultTakeRate = []byte("DefaultTakeRate") + KeyDenomTakeRates = []byte("DenomTakeRates") +) + +// ParamKeyTable for take module +// Deprecated: now params can be accessed on key `0x01` on the take store. 
+func ParamKeyTable() paramtypes.KeyTable { + return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) +} + +func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { + return paramtypes.ParamSetPairs{ + paramtypes.NewParamSetPair(KeyDefaultTakeRate, &p.DefaultTakeRate, validateTakeRate), + paramtypes.NewParamSetPair(KeyDenomTakeRates, &p.DenomTakeRates, validateDenomTakeRates), + } +} + +func DefaultParams() Params { + return Params{ + DefaultTakeRate: 20, + DenomTakeRates: DenomTakeRates{ + { + Denom: "uakt", + Rate: 2, + }, + }, + } +} + +func (p Params) Validate() error { + if err := validateTakeRate(p.DefaultTakeRate); err != nil { + return err + } + if err := validateDenomTakeRates(p.DenomTakeRates); err != nil { + return err + } + + return nil +} + +func validateTakeRate(i interface{}) error { + val, ok := i.(uint32) + if !ok { + return ErrInvalidParam.Wrapf("%T", i) + } + if val > 100 { + return fmt.Errorf("invalid Take Rate (%#v)", val) + } + return nil +} + +func validateDenomTakeRates(i interface{}) error { + takeRates, ok := i.(DenomTakeRates) + if !ok { + return ErrInvalidParam.Wrapf("%T", i) + } + + check := make(map[string]uint32) + + for k, v := range takeRates { + if _, exists := check[v.Denom]; exists { + return fmt.Errorf("duplicate Denom Take Rate (%#v)", v) + } + + check[v.Denom] = v.Rate + + if v.Rate > 100 { + return fmt.Errorf("invalid Denom Take Rate (%v=%#v)", k, v) + } + } + + // uakt must always be present + if _, exists := check["uakt"]; !exists { + return fmt.Errorf("invalid Denom Take Rate - uakt must be present") + } + + return nil +} diff --git a/go/node/take/v1/params.pb.go b/go/node/take/v1/params.pb.go new file mode 100644 index 00000000..9dc11ecc --- /dev/null +++ b/go/node/take/v1/params.pb.go @@ -0,0 +1,581 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/take/v1/params.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// DenomTakeRate describes take rate for specified denom +type DenomTakeRate struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom" yaml:"denom"` + Rate uint32 `protobuf:"varint,2,opt,name=rate,proto3" json:"rate" yaml:"rate"` +} + +func (m *DenomTakeRate) Reset() { *m = DenomTakeRate{} } +func (m *DenomTakeRate) String() string { return proto.CompactTextString(m) } +func (*DenomTakeRate) ProtoMessage() {} +func (*DenomTakeRate) Descriptor() ([]byte, []int) { + return fileDescriptor_e97420d932342ed5, []int{0} +} +func (m *DenomTakeRate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DenomTakeRate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DenomTakeRate.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DenomTakeRate) XXX_Merge(src proto.Message) { + xxx_messageInfo_DenomTakeRate.Merge(m, src) +} +func (m *DenomTakeRate) XXX_Size() int { + return m.Size() +} +func (m *DenomTakeRate) XXX_DiscardUnknown() { + xxx_messageInfo_DenomTakeRate.DiscardUnknown(m) +} + +var xxx_messageInfo_DenomTakeRate proto.InternalMessageInfo + +func (m *DenomTakeRate) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *DenomTakeRate) GetRate() uint32 { + if m != nil { + return m.Rate + } + return 0 +} + +// Params defines the parameters for the x/take package +type Params struct { + // denom -> % take rate + DenomTakeRates DenomTakeRates `protobuf:"bytes,1,rep,name=denom_take_rates,json=denomTakeRates,proto3,castrepeated=DenomTakeRates" json:"denom_take_rates" yaml:"denom_take_rates"` + DefaultTakeRate uint32 `protobuf:"varint,2,opt,name=default_take_rate,json=defaultTakeRate,proto3" json:"default_take_rate" yaml:"default_take_rate"` +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_e97420d932342ed5, []int{1} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func (m *Params) GetDenomTakeRates() DenomTakeRates { + if m != nil { + return m.DenomTakeRates + } + return nil +} + +func (m *Params) GetDefaultTakeRate() uint32 { + if m != nil { + return m.DefaultTakeRate + } + return 0 +} + +func init() { + proto.RegisterType((*DenomTakeRate)(nil), "akash.take.v1.DenomTakeRate") + proto.RegisterType((*Params)(nil), "akash.take.v1.Params") +} + +func init() { proto.RegisterFile("akash/take/v1/params.proto", fileDescriptor_e97420d932342ed5) } + +var fileDescriptor_e97420d932342ed5 = []byte{ + // 347 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 
0x92, 0xc1, 0x4a, 0xeb, 0x40, + 0x14, 0x86, 0x33, 0xbd, 0x6d, 0xe1, 0x4e, 0x6f, 0xdb, 0x7b, 0xc3, 0x05, 0x43, 0x95, 0x9c, 0x32, + 0xab, 0xae, 0x26, 0xd4, 0x22, 0x88, 0xcb, 0xd0, 0x07, 0x90, 0x20, 0x08, 0x6e, 0xca, 0x48, 0xc6, + 0x08, 0x69, 0x3b, 0xa1, 0x89, 0x05, 0x37, 0x3e, 0x83, 0x0f, 0x20, 0xb8, 0xf7, 0x49, 0xba, 0xec, + 0xd2, 0xd5, 0x28, 0xd3, 0x5d, 0x96, 0x79, 0x02, 0x99, 0x89, 0xc5, 0xa6, 0xdd, 0xe5, 0xfc, 0xdf, + 0xf9, 0x7f, 0xfe, 0x43, 0x06, 0xf7, 0x58, 0xcc, 0xd2, 0x7b, 0x2f, 0x63, 0x31, 0xf7, 0x96, 0x43, + 0x2f, 0x61, 0x0b, 0x36, 0x4b, 0x69, 0xb2, 0x10, 0x99, 0xb0, 0xdb, 0x86, 0x51, 0xcd, 0xe8, 0x72, + 0xd8, 0xfb, 0x1f, 0x89, 0x48, 0x18, 0xe2, 0xe9, 0xaf, 0x72, 0x89, 0x3c, 0xe1, 0xf6, 0x98, 0xcf, + 0xc5, 0xec, 0x8a, 0xc5, 0x3c, 0x60, 0x19, 0xb7, 0xcf, 0x71, 0x23, 0xd4, 0x82, 0x83, 0xfa, 0x68, + 0xf0, 0xdb, 0x27, 0x4a, 0x42, 0xc3, 0x6c, 0xe4, 0x12, 0x4a, 0x52, 0x48, 0xf8, 0xf3, 0xc8, 0x66, + 0xd3, 0x0b, 0x62, 0x46, 0x12, 0x94, 0xb2, 0x3d, 0xc2, 0xf5, 0x05, 0xcb, 0xb8, 0x53, 0xeb, 0xa3, + 0x41, 0xdb, 0x07, 0x25, 0xa1, 0xae, 0x13, 0x73, 0x09, 0x46, 0x2f, 0x24, 0xb4, 0x4a, 0x9b, 0x9e, + 0x48, 0x60, 0x44, 0xf2, 0x52, 0xc3, 0xcd, 0x4b, 0xd3, 0xda, 0x7e, 0x45, 0xf8, 0xaf, 0x49, 0x9a, + 0xe8, 0xca, 0x13, 0x8d, 0x53, 0x07, 0xf5, 0x7f, 0x0d, 0x5a, 0xa7, 0x27, 0xb4, 0x72, 0x0b, 0xad, + 0x54, 0xf6, 0xaf, 0x57, 0x12, 0x2c, 0x25, 0xa1, 0x53, 0x91, 0xd3, 0x5c, 0xc2, 0x41, 0x5e, 0x21, + 0xe1, 0x68, 0xa7, 0xfb, 0x0e, 0x21, 0x6f, 0x1f, 0xfb, 0x01, 0x41, 0x27, 0xac, 0xcc, 0x76, 0x82, + 0xff, 0x85, 0xfc, 0x8e, 0x3d, 0x4c, 0xb3, 0x1f, 0xe3, 0xf7, 0xb9, 0x63, 0x25, 0xa1, 0x3b, 0x2e, + 0xe1, 0xd6, 0x90, 0x4b, 0x38, 0xdc, 0x2f, 0x24, 0x38, 0xdb, 0x06, 0x7b, 0x88, 0x04, 0xdd, 0xb0, + 0x9a, 0xe0, 0x9f, 0xad, 0x94, 0x8b, 0xd6, 0xca, 0x45, 0x9f, 0xca, 0x45, 0xcf, 0x1b, 0xd7, 0x5a, + 0x6f, 0x5c, 0xeb, 0x7d, 0xe3, 0x5a, 0x37, 0xc7, 0x49, 0x1c, 0x51, 0x16, 0x67, 0x34, 0xe4, 0x4b, + 0x2f, 0x12, 0xde, 0x5c, 0x84, 0x7c, 0xfb, 0x0e, 0x6e, 0x9b, 0xe6, 0xe7, 0x8e, 0xbe, 0x02, 0x00, + 0x00, 0xff, 0xff, 0xeb, 0x73, 0xb3, 0xfb, 0x1f, 0x02, 0x00, 0x00, +} + +func (m *DenomTakeRate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DenomTakeRate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DenomTakeRate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Rate != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.Rate)) + i-- + dAtA[i] = 0x10 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintParams(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DefaultTakeRate != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.DefaultTakeRate)) + i-- + dAtA[i] = 0x10 + } + if len(m.DenomTakeRates) > 0 { + for iNdEx := len(m.DenomTakeRates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.DenomTakeRates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DenomTakeRate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovParams(uint64(l)) + } + if m.Rate != 0 { + n += 1 + sovParams(uint64(m.Rate)) + } + return n +} + +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DenomTakeRates) > 0 { + for _, e := range m.DenomTakeRates { + l = e.Size() + n += 1 + l + sovParams(uint64(l)) + } + } + if m.DefaultTakeRate != 0 { + n += 1 + sovParams(uint64(m.DefaultTakeRate)) + } + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *DenomTakeRate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DenomTakeRate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DenomTakeRate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Rate", wireType) + } + m.Rate = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Rate |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DenomTakeRates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DenomTakeRates = append(m.DenomTakeRates, DenomTakeRate{}) + if err := m.DenomTakeRates[len(m.DenomTakeRates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultTakeRate", wireType) + } + m.DefaultTakeRate = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DefaultTakeRate |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/take/v1/paramsmsg.pb.go b/go/node/take/v1/paramsmsg.pb.go new file 
mode 100644 index 00000000..9d8b2eda --- /dev/null +++ b/go/node/take/v1/paramsmsg.pb.go @@ -0,0 +1,508 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/take/v1/paramsmsg.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgUpdateParams is the Msg/UpdateParams request type. +// +// Since: akash v1.0.0 +type MsgUpdateParams struct { + // authority is the address of the governance account. + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` + // params defines the x/take parameters to update. + // + // NOTE: All parameters must be supplied. + Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params"` +} + +func (m *MsgUpdateParams) Reset() { *m = MsgUpdateParams{} } +func (m *MsgUpdateParams) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParams) ProtoMessage() {} +func (*MsgUpdateParams) Descriptor() ([]byte, []int) { + return fileDescriptor_4006bfa904e2ca4b, []int{0} +} +func (m *MsgUpdateParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParams.Merge(m, src) +} +func (m *MsgUpdateParams) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParams) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParams.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParams proto.InternalMessageInfo + +func (m *MsgUpdateParams) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgUpdateParams) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +// MsgUpdateParamsResponse defines the response structure for executing a +// MsgUpdateParams message.
+// +// Since: akash v1.0.0 +type MsgUpdateParamsResponse struct { +} + +func (m *MsgUpdateParamsResponse) Reset() { *m = MsgUpdateParamsResponse{} } +func (m *MsgUpdateParamsResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateParamsResponse) ProtoMessage() {} +func (*MsgUpdateParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4006bfa904e2ca4b, []int{1} +} +func (m *MsgUpdateParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateParamsResponse.Merge(m, src) +} +func (m *MsgUpdateParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateParamsResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgUpdateParams)(nil), "akash.take.v1.MsgUpdateParams") + proto.RegisterType((*MsgUpdateParamsResponse)(nil), "akash.take.v1.MsgUpdateParamsResponse") +} + +func init() { proto.RegisterFile("akash/take/v1/paramsmsg.proto", fileDescriptor_4006bfa904e2ca4b) } + +var fileDescriptor_4006bfa904e2ca4b = []byte{ + // 278 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4d, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x2f, 0x49, 0xcc, 0x4e, 0xd5, 0x2f, 0x33, 0xd4, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, + 0xce, 0x2d, 0x4e, 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x05, 0x4b, 0xeb, 0x81, 0xa4, + 0xf5, 0xca, 0x0c, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, 0x32, 0xfa, 0x20, 0x16, 0x44, 0x91, + 0x94, 0x78, 0x72, 0x7e, 0x71, 0x6e, 0x7e, 0xb1, 0x7e, 0x6e, 0x71, 0x3a, 0xc8, 0x0c, 0xb8, 0x6e, + 0x29, 0x49, 0x88, 0x44, 0x3c, 0x44, 0x07, 0x84, 0x03, 0x95, 0x92, 0xc2, 0x66, 0x2f, 0x44, 0x4e, + 0xa9, 0x8f, 0x91, 0x8b, 0xdf, 0xb7, 0x38, 0x3d, 0xb4, 0x20, 0x25, 0xb1, 0x24, 0x35, 0x00, 0x2c, + 0x23, 0x64, 0xc6, 0xc5, 0x99, 0x58, 0x5a, 0x92, 0x91, 0x5f, 0x94, 0x59, 0x52, 0x29, 0xc1, 0xa8, + 0xc0, 0xa8, 0xc1, 0xe9, 0x24, 0x71, 0x69, 0x8b, 0xae, 0x08, 0xd4, 0x50, 0xc7, 0x94, 0x94, 0xa2, + 0xd4, 0xe2, 0xe2, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0xf4, 0x20, 0x84, 0x52, 0x21, 0x63, 0x2e, 0x36, + 0x88, 0xd9, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xdc, 0x46, 0xa2, 0x7a, 0x28, 0x3e, 0xd2, 0x83, 0x18, + 0xef, 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x54, 0xa9, 0x15, 0x5f, 0xd3, 0xf3, 0x0d, 0x5a, + 0x08, 0x43, 0x94, 0x24, 0xb9, 0xc4, 0xd1, 0xdc, 0x13, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, + 0xea, 0x64, 0x7a, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, + 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0xd2, 0x05, 0xd9, + 0xe9, 0x7a, 0x89, 0xd9, 0x25, 0x7a, 0x29, 0xa9, 0x65, 0xfa, 0xe9, 0xf9, 0xfa, 0x79, 0xf9, 0x29, + 0xa9, 0x30, 0xff, 0x26, 0xb1, 0x81, 0x7d, 0x6a, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xdf, 0x32, + 0xe7, 0xe1, 0x7f, 0x01, 0x00, 0x00, +} + +func (m *MsgUpdateParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + 
+func (m *MsgUpdateParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParamsmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintParamsmsg(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintParamsmsg(dAtA []byte, offset int, v uint64) int { + offset -= sovParamsmsg(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgUpdateParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovParamsmsg(uint64(l)) + } + l = m.Params.Size() + n += 1 + l + sovParamsmsg(uint64(l)) + return n +} + +func (m *MsgUpdateParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovParamsmsg(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParamsmsg(x uint64) (n int) { + return sovParamsmsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgUpdateParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParamsmsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParamsmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParamsmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParamsmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParamsmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParamsmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipParamsmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParamsmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParamsmsg(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParamsmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParamsmsg + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParamsmsg + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParamsmsg + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParamsmsg = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParamsmsg = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParamsmsg = fmt.Errorf("proto: unexpected end of group") +) diff --git 
a/go/node/take/v1/query.pb.go b/go/node/take/v1/query.pb.go new file mode 100644 index 00000000..5e455655 --- /dev/null +++ b/go/node/take/v1/query.pb.go @@ -0,0 +1,536 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/take/v1/query.proto + +package v1 + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/tx/amino" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryParamsRequest is the request type for the Query/Params RPC method. +type QueryParamsRequest struct { +} + +func (m *QueryParamsRequest) Reset() { *m = QueryParamsRequest{} } +func (m *QueryParamsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryParamsRequest) ProtoMessage() {} +func (*QueryParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_1009782dd529735e, []int{0} +} +func (m *QueryParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsRequest.Merge(m, src) +} +func (m *QueryParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsRequest proto.InternalMessageInfo + +// QueryParamsResponse is the response type for the Query/Params RPC method. +type QueryParamsResponse struct { + // params defines the parameters of the module. 
+ Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` +} + +func (m *QueryParamsResponse) Reset() { *m = QueryParamsResponse{} } +func (m *QueryParamsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryParamsResponse) ProtoMessage() {} +func (*QueryParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_1009782dd529735e, []int{1} +} +func (m *QueryParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryParamsResponse.Merge(m, src) +} +func (m *QueryParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryParamsResponse proto.InternalMessageInfo + +func (m *QueryParamsResponse) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*QueryParamsRequest)(nil), "akash.take.v1.QueryParamsRequest") + proto.RegisterType((*QueryParamsResponse)(nil), "akash.take.v1.QueryParamsResponse") +} + +func init() { proto.RegisterFile("akash/take/v1/query.proto", fileDescriptor_1009782dd529735e) } + +var fileDescriptor_1009782dd529735e = []byte{ + // 279 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x2f, 0x49, 0xcc, 0x4e, 0xd5, 0x2f, 0x33, 0xd4, 0x2f, 0x2c, 0x4d, 0x2d, 0xaa, 0xd4, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x05, 0x4b, 0xe9, 0x81, 0xa4, 0xf4, 0xca, 0x0c, 0xa5, + 0x64, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x13, 0x0b, 0x32, 0xf5, 0x13, 0xf3, 0xf2, 0xf2, + 0x4b, 0x12, 0x4b, 0x32, 0xf3, 0xf3, 0x8a, 0x21, 0x8a, 0xa5, 0x44, 0xd2, 0xf3, 0xd3, 0xf3, 0xc1, + 0x4c, 0x7d, 0x10, 0x0b, 0x2a, 0x2a, 0x98, 0x98, 0x9b, 0x99, 0x97, 0xaf, 0x0f, 0x26, 0xa1, 0x42, + 0x52, 0xa8, 0x16, 0x16, 0x24, 0x16, 0x25, 0xe6, 0x42, 0x0d, 0x51, 0x12, 0xe1, 0x12, 0x0a, 0x04, + 0x39, 0x20, 0x00, 0x2c, 0x18, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0xa2, 0xe4, 0xcf, 0x25, 0x8c, + 0x22, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0x64, 0xc1, 0xc5, 0x06, 0xd1, 0x2c, 0xc1, 0xa8, + 0xc0, 0xa8, 0xc1, 0x6d, 0x24, 0xaa, 0x87, 0xe2, 0x5e, 0x3d, 0x88, 0x72, 0x27, 0xce, 0x13, 0xf7, + 0xe4, 0x19, 0x56, 0x3c, 0xdf, 0xa0, 0xc5, 0x18, 0x04, 0x55, 0x6f, 0x54, 0xce, 0xc5, 0x0a, 0x36, + 0x50, 0x28, 0x8f, 0x8b, 0x0d, 0xa2, 0x4a, 0x48, 0x11, 0x4d, 0x33, 0xa6, 0x33, 0xa4, 0x94, 0xf0, + 0x29, 0x81, 0xb8, 0x49, 0x49, 0xb6, 0xe9, 0xf2, 0x93, 0xc9, 0x4c, 0xe2, 0x42, 0xa2, 0xfa, 0xd8, + 0x7c, 0xe9, 0x64, 0x7a, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, + 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0xd2, 0x05, + 0xd9, 0xe9, 0x7a, 0x89, 0xd9, 0x25, 0x7a, 0x29, 0xa9, 0x65, 0xfa, 0xe9, 0xf9, 0xfa, 0x79, 0xf9, + 0x29, 0xa9, 0x30, 0xdd, 0x49, 0x6c, 0xe0, 0xd0, 0x31, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x37, + 0x0c, 0xfd, 0x91, 0xac, 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Params returns the total set of take module parameters. + Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Params(ctx context.Context, in *QueryParamsRequest, opts ...grpc.CallOption) (*QueryParamsResponse, error) { + out := new(QueryParamsResponse) + err := c.cc.Invoke(ctx, "/akash.take.v1.Query/Params", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Params returns the total set of take module parameters. + Params(context.Context, *QueryParamsRequest) (*QueryParamsResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Params(ctx context.Context, req *QueryParamsRequest) (*QueryParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Params not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Params_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryParamsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Params(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.take.v1.Query/Params", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Params(ctx, req.(*QueryParamsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.take.v1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Params", + Handler: _Query_Params_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/take/v1/query.proto", +} + +func (m *QueryParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil 
{ + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/take/v1/query.pb.gw.go b/go/node/take/v1/query.pb.gw.go new file mode 100644 index 00000000..5367d8e2 --- /dev/null +++ b/go/node/take/v1/query.pb.gw.go @@ -0,0 +1,153 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: akash/take/v1/query.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := client.Params(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Params_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryParamsRequest + var metadata runtime.ServerMetadata + + msg, err := server.Params(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Params_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Params_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Params_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Params_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Query_Params_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"akash", "take", "v1", "params"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_Params_0 = runtime.ForwardResponseMessage +) diff --git a/go/node/take/v1/service.pb.go b/go/node/take/v1/service.pb.go new file mode 100644 index 00000000..a92e2437 --- /dev/null +++ b/go/node/take/v1/service.pb.go @@ -0,0 +1,134 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/take/v1/service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { proto.RegisterFile("akash/take/v1/service.proto", fileDescriptor_a49d07398034c31e) } + +var fileDescriptor_a49d07398034c31e = []byte{ + // 200 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x2f, 0x49, 0xcc, 0x4e, 0xd5, 0x2f, 0x33, 0xd4, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, + 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x05, 0x4b, 0xea, 0x81, 0x24, 0xf5, 0xca, + 0x0c, 0xa5, 0xc4, 0x93, 0xf3, 0x8b, 0x73, 0xf3, 0x8b, 0xf5, 0x73, 0x8b, 0xd3, 0x41, 0x6a, 0x73, + 0x8b, 0xd3, 0x21, 0xea, 0xa4, 0x64, 0x51, 0x0d, 0x29, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0x86, 0x4b, + 0x1b, 0xa5, 0x70, 0x31, 0xfb, 0x16, 0xa7, 0x0b, 0x85, 0x71, 0xf1, 0x84, 0x16, 0xa4, 0x24, 0x96, + 0xa4, 0x06, 0x80, 0xe5, 0x85, 0xe4, 0xf4, 0x50, 0x8c, 0xd7, 0xf3, 0x2d, 0x4e, 0x47, 0x96, 0x97, + 0x52, 0xc3, 0x2f, 0x1f, 0x94, 0x5a, 0x5c, 0x90, 0x9f, 0x57, 0x9c, 0x2a, 0xc5, 0xda, 0xf0, 0x7c, + 0x83, 0x16, 0xa3, 0x93, 0xe9, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, + 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x49, + 0x17, 0x64, 0xa7, 0xeb, 0x25, 0x66, 0x97, 0xe8, 0xa5, 0xa4, 0x96, 0xe9, 0xa7, 0xe7, 0xeb, 0xe7, + 0xe5, 0xa7, 0xa4, 0xc2, 0x1c, 0x9b, 0xc4, 0x06, 0x76, 0xa3, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, + 0xfa, 0xd6, 0x67, 0x06, 0x09, 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // UpdateParams defines a governance operation for updating the x/market module + // parameters. The authority is hard-coded to the x/gov module account. + // + // Since: akash v1.0.0 + UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) UpdateParams(ctx context.Context, in *MsgUpdateParams, opts ...grpc.CallOption) (*MsgUpdateParamsResponse, error) { + out := new(MsgUpdateParamsResponse) + err := c.cc.Invoke(ctx, "/akash.take.v1.Msg/UpdateParams", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // UpdateParams defines a governance operation for updating the x/market module + // parameters. The authority is hard-coded to the x/gov module account. + // + // Since: akash v1.0.0 + UpdateParams(context.Context, *MsgUpdateParams) (*MsgUpdateParamsResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
+type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) UpdateParams(ctx context.Context, req *MsgUpdateParams) (*MsgUpdateParamsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateParams not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_UpdateParams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateParams) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateParams(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.take.v1.Msg/UpdateParams", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateParams(ctx, req.(*MsgUpdateParams)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.take.v1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "UpdateParams", + Handler: _Msg_UpdateParams_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/take/v1/service.proto", +} diff --git a/go/node/take/v1beta3/codec.go b/go/node/take/v1beta3/codec.go deleted file mode 100644 index a3f92173..00000000 --- a/go/node/take/v1beta3/codec.go +++ /dev/null @@ -1,32 +0,0 @@ -package v1beta3 - -import ( - "github.com/cosmos/cosmos-sdk/codec" - cdctypes "github.com/cosmos/cosmos-sdk/codec/types" - cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" -) - -var ( - amino = codec.NewLegacyAmino() - - // ModuleCdc references the global x/deployment module codec. Note, the codec should - // ONLY be used in certain instances of tests and for JSON encoding as Amino is - // still used for that purpose. - // - // The actual codec used for serialization should be provided to x/deployment and - // defined at the application level. - ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) -) - -func init() { - RegisterLegacyAminoCodec(amino) - cryptocodec.RegisterCrypto(amino) - amino.Seal() -} - -func RegisterLegacyAminoCodec(_ *codec.LegacyAmino) { -} - -// RegisterInterfaces registers the x/deployment interfaces types with the interface registry -func RegisterInterfaces(_ cdctypes.InterfaceRegistry) { -} diff --git a/go/node/take/v1beta3/errors.go b/go/node/take/v1beta3/errors.go deleted file mode 100644 index 61da271a..00000000 --- a/go/node/take/v1beta3/errors.go +++ /dev/null @@ -1,14 +0,0 @@ -package v1beta3 - -import ( - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - errInvalidParam uint32 = iota + 1 -) - -var ( - // ErrInvalidParam indicates an invalid chain parameter - ErrInvalidParam = sdkerrors.Register(ModuleName, errInvalidParam, "parameter invalid") -) diff --git a/go/node/take/v1beta3/genesis.pb.go b/go/node/take/v1beta3/genesis.pb.go deleted file mode 100644 index 5b110f14..00000000 --- a/go/node/take/v1beta3/genesis.pb.go +++ /dev/null @@ -1,322 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/take/v1beta3/genesis.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GenesisState stores slice of genesis deployment instance -type GenesisState struct { - Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params" yaml:"params"` -} - -func (m *GenesisState) Reset() { *m = GenesisState{} } -func (m *GenesisState) String() string { return proto.CompactTextString(m) } -func (*GenesisState) ProtoMessage() {} -func (*GenesisState) Descriptor() ([]byte, []int) { - return fileDescriptor_58ffc0750201d0e9, []int{0} -} -func (m *GenesisState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GenesisState) XXX_Merge(src proto.Message) { - xxx_messageInfo_GenesisState.Merge(m, src) -} -func (m *GenesisState) XXX_Size() int { - return m.Size() -} -func (m *GenesisState) XXX_DiscardUnknown() { - xxx_messageInfo_GenesisState.DiscardUnknown(m) -} - -var xxx_messageInfo_GenesisState proto.InternalMessageInfo - -func (m *GenesisState) GetParams() Params { - if m != nil { - return m.Params - } - return Params{} -} - -func init() { - proto.RegisterType((*GenesisState)(nil), "akash.take.v1beta3.GenesisState") -} - -func init() { proto.RegisterFile("akash/take/v1beta3/genesis.proto", fileDescriptor_58ffc0750201d0e9) } - -var fileDescriptor_58ffc0750201d0e9 = []byte{ - // 224 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x2f, 0x49, 0xcc, 0x4e, 0xd5, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd6, 0x4f, - 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x02, 0xab, - 0xd0, 0x03, 0xa9, 0xd0, 0x83, 0xaa, 0x90, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x4b, 0xeb, 0x83, - 0x58, 0x10, 0x95, 0x52, 0xf2, 0x58, 0xcc, 0x2a, 0x48, 0x2c, 0x4a, 0xcc, 0x85, 0x1a, 0xa5, 0x94, - 0xcc, 0xc5, 0xe3, 0x0e, 0x31, 0x3b, 0xb8, 0x24, 0xb1, 0x24, 0x55, 0x28, 0x98, 0x8b, 0x0d, 0x22, - 0x2f, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x6d, 0x24, 0xa5, 0x87, 0x69, 0x97, 0x5e, 0x00, 0x58, 0x85, - 0x93, 0xfc, 0x89, 0x7b, 0xf2, 0x0c, 0xaf, 0xee, 0xc9, 0x43, 0x75, 0x7c, 0xba, 0x27, 0xcf, 0x5b, - 0x99, 0x98, 0x9b, 0x63, 0xa5, 0x04, 0xe1, 0x2b, 0x05, 0x41, 0x25, 0x9c, 0x02, 0x4f, 0x3c, 0x92, - 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, - 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0xca, 0x3c, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, - 0x39, 0x3f, 0x57, 0x1f, 0x6c, 0x91, 0x6e, 0x5e, 0x6a, 0x49, 0x79, 0x7e, 0x51, 0x36, 0x94, 0x97, - 0x58, 0x90, 0xa9, 0x9f, 0x9e, 0xaf, 0x9f, 0x97, 0x9f, 0x92, 0x8a, 0xe2, 0x89, 0x24, 0x36, 0xb0, - 0xf3, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa4, 0xf9, 0x7d, 0xe7, 0x2d, 0x01, 0x00, 0x00, -} - -func (m *GenesisState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - 
return dAtA[:n], nil -} - -func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenesis(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Params.Size() - n += 1 + l + sovGenesis(uint64(l)) - return n -} - -func sovGenesis(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenesis(x uint64) (n int) { - return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GenesisState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenesis(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenesis - 
} - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenesis - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenesis - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenesis - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/take/v1beta3/key.go b/go/node/take/v1beta3/key.go deleted file mode 100644 index 574b59e3..00000000 --- a/go/node/take/v1beta3/key.go +++ /dev/null @@ -1,12 +0,0 @@ -package v1beta3 - -const ( - // ModuleName is the module name constant used in many places - ModuleName = "take" - - // StoreKey is the store key string for deployment - StoreKey = ModuleName - - // RouterKey is the message route for deployment - RouterKey = ModuleName -) diff --git a/go/node/take/v1beta3/params.go b/go/node/take/v1beta3/params.go deleted file mode 100644 index 9928c9c9..00000000 --- a/go/node/take/v1beta3/params.go +++ /dev/null @@ -1,88 +0,0 @@ -package v1beta3 - -import ( - "fmt" - - paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" - "github.com/pkg/errors" -) - -var _ paramtypes.ParamSet = (*Params)(nil) - -const ( - keyDefaultTakeRate = "DefaultTakeRate" - keyDenomTakeRates = "DenomTakeRates" -) - -func ParamKeyTable() paramtypes.KeyTable { - return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) -} - -func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { - return paramtypes.ParamSetPairs{ - paramtypes.NewParamSetPair([]byte(keyDefaultTakeRate), &p.DefaultTakeRate, validateTakeRate), - paramtypes.NewParamSetPair([]byte(keyDenomTakeRates), &p.DenomTakeRates, validateDenomTakeRates), - } -} - -func DefaultParams() Params { - return Params{ - DefaultTakeRate: 20, - DenomTakeRates: DenomTakeRates{ - { - Denom: "uakt", - Rate: 2, - }, - }, - } -} - -func (p Params) Validate() error { - if err := validateTakeRate(p.DefaultTakeRate); err != nil { - return err - } - if err := validateDenomTakeRates(p.DenomTakeRates); err != nil { - return err - } - - return nil -} - -func validateTakeRate(i interface{}) error { - val, ok := i.(uint32) - if !ok { - return errors.Wrapf(ErrInvalidParam, "%T", i) - } - if val > 100 { - return fmt.Errorf("invalid Take Rate (%#v)", val) - } - return nil -} - -func validateDenomTakeRates(i interface{}) error { - takeRates, ok := i.(DenomTakeRates) - if !ok { - return errors.Wrapf(ErrInvalidParam, "%T", i) - } - - check := make(map[string]uint32) - - for k, v := range takeRates { - if _, exists := check[v.Denom]; exists { - return fmt.Errorf("duplicate Denom Take Rate (%#v)", v) - } - - check[v.Denom] = v.Rate - - if v.Rate > 100 { - return fmt.Errorf("invalid Denom Take Rate (%v=%#v)", k, v) - } - } - - // uakt must always be present - if _, exists := check["uakt"]; !exists { - return fmt.Errorf("invalid Denom Take Rate - uakt must be present") - } - - return nil -} diff --git a/go/node/take/v1beta3/params.pb.go b/go/node/take/v1beta3/params.pb.go deleted file mode 100644 index b43aa0f0..00000000 --- a/go/node/take/v1beta3/params.pb.go +++ 
/dev/null @@ -1,582 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/take/v1beta3/params.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// DenomTakeRate describes take rate for specified denom -type DenomTakeRate struct { - Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom" yaml:"denom"` - Rate uint32 `protobuf:"varint,2,opt,name=rate,proto3" json:"rate" yaml:"rate"` -} - -func (m *DenomTakeRate) Reset() { *m = DenomTakeRate{} } -func (m *DenomTakeRate) String() string { return proto.CompactTextString(m) } -func (*DenomTakeRate) ProtoMessage() {} -func (*DenomTakeRate) Descriptor() ([]byte, []int) { - return fileDescriptor_4c72a477131e48ec, []int{0} -} -func (m *DenomTakeRate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DenomTakeRate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_DenomTakeRate.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *DenomTakeRate) XXX_Merge(src proto.Message) { - xxx_messageInfo_DenomTakeRate.Merge(m, src) -} -func (m *DenomTakeRate) XXX_Size() int { - return m.Size() -} -func (m *DenomTakeRate) XXX_DiscardUnknown() { - xxx_messageInfo_DenomTakeRate.DiscardUnknown(m) -} - -var xxx_messageInfo_DenomTakeRate proto.InternalMessageInfo - -func (m *DenomTakeRate) GetDenom() string { - if m != nil { - return m.Denom - } - return "" -} - -func (m *DenomTakeRate) GetRate() uint32 { - if m != nil { - return m.Rate - } - return 0 -} - -// Params defines the parameters for the x/take package -type Params struct { - // denom -> % take rate - DenomTakeRates DenomTakeRates `protobuf:"bytes,1,rep,name=denom_take_rates,json=denomTakeRates,proto3,castrepeated=DenomTakeRates" json:"denom_take_rates" yaml:"denom_take_rates"` - DefaultTakeRate uint32 `protobuf:"varint,2,opt,name=default_take_rate,json=defaultTakeRate,proto3" json:"default_take_rate" yaml:"default_take_rate"` -} - -func (m *Params) Reset() { *m = Params{} } -func (m *Params) String() string { return proto.CompactTextString(m) } -func (*Params) ProtoMessage() {} -func (*Params) Descriptor() ([]byte, []int) { - return fileDescriptor_4c72a477131e48ec, []int{1} -} -func (m *Params) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Params.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Params) XXX_Merge(src proto.Message) { - xxx_messageInfo_Params.Merge(m, src) -} -func (m *Params) XXX_Size() int { - return m.Size() -} -func (m *Params) XXX_DiscardUnknown() { - xxx_messageInfo_Params.DiscardUnknown(m) -} - -var 
xxx_messageInfo_Params proto.InternalMessageInfo - -func (m *Params) GetDenomTakeRates() DenomTakeRates { - if m != nil { - return m.DenomTakeRates - } - return nil -} - -func (m *Params) GetDefaultTakeRate() uint32 { - if m != nil { - return m.DefaultTakeRate - } - return 0 -} - -func init() { - proto.RegisterType((*DenomTakeRate)(nil), "akash.take.v1beta3.DenomTakeRate") - proto.RegisterType((*Params)(nil), "akash.take.v1beta3.Params") -} - -func init() { proto.RegisterFile("akash/take/v1beta3/params.proto", fileDescriptor_4c72a477131e48ec) } - -var fileDescriptor_4c72a477131e48ec = []byte{ - // 362 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x52, 0x3f, 0x4f, 0xfa, 0x40, - 0x18, 0xee, 0xf1, 0x03, 0x92, 0x5f, 0x11, 0xd0, 0xc6, 0xc4, 0xc6, 0xa1, 0x2f, 0xde, 0xc4, 0x62, - 0x1b, 0x65, 0xd0, 0x38, 0x36, 0x7c, 0x00, 0x6d, 0x4c, 0x4c, 0x5c, 0xc8, 0x61, 0xcf, 0x42, 0x4a, - 0xb9, 0xa6, 0x3d, 0x34, 0x2e, 0x7e, 0x06, 0xbf, 0x82, 0x89, 0x93, 0x9f, 0x84, 0x91, 0xd1, 0xe9, - 0x34, 0x65, 0xeb, 0xd8, 0x4f, 0x60, 0x7a, 0x85, 0x48, 0x61, 0xbb, 0xe7, 0xcf, 0xfb, 0xe4, 0x79, - 0xef, 0x4e, 0x05, 0xe2, 0x93, 0x78, 0x64, 0x71, 0xe2, 0x53, 0xeb, 0xe9, 0x6c, 0x48, 0x39, 0xe9, - 0x59, 0x21, 0x89, 0x48, 0x10, 0x9b, 0x61, 0xc4, 0x38, 0xd3, 0x34, 0x69, 0x30, 0x73, 0x83, 0xb9, - 0x32, 0x1c, 0x1f, 0x7a, 0xcc, 0x63, 0x52, 0xb6, 0xf2, 0x53, 0xe1, 0xc4, 0xaf, 0x6a, 0xb3, 0x4f, - 0xa7, 0x2c, 0xb8, 0x25, 0x3e, 0x75, 0x08, 0xa7, 0xda, 0xa5, 0x5a, 0x73, 0x73, 0x42, 0x47, 0x1d, - 0xd4, 0xfd, 0x6f, 0xe3, 0x44, 0x40, 0x4d, 0x3a, 0x52, 0x01, 0x85, 0x92, 0x09, 0xd8, 0x7b, 0x21, - 0xc1, 0xe4, 0x0a, 0x4b, 0x88, 0x9d, 0x82, 0xd6, 0x7a, 0x6a, 0x35, 0x22, 0x9c, 0xea, 0x95, 0x0e, - 0xea, 0x36, 0x6d, 0x48, 0x04, 0x54, 0xf3, 0xc4, 0x54, 0x80, 0xe4, 0x33, 0x01, 0x8d, 0x62, 0x2c, - 0x47, 0xd8, 0x91, 0x24, 0x7e, 0xaf, 0xa8, 0xf5, 0x6b, 0x59, 0x5d, 0xfb, 0x40, 0xea, 0xbe, 0x4c, - 0x1a, 0xe4, 0xbd, 0x07, 0xb9, 0x1c, 0xeb, 0xa8, 0xf3, 0xaf, 0xdb, 0x38, 0x3f, 0x31, 0x77, 0x17, - 0x32, 0x4b, 0xbd, 0xed, 0xbb, 0xb9, 0x00, 0x25, 0x11, 0xd0, 0x2a, 0xd1, 0x71, 0x2a, 0x60, 0x27, - 0x34, 0x13, 0x70, 0xb4, 0xb1, 0xc0, 0x86, 0x82, 0x3f, 0xbf, 0xb7, 0x03, 0x9c, 0x96, 0x5b, 0xc2, - 0x5a, 0xa8, 0x1e, 0xb8, 0xf4, 0x91, 0xcc, 0x26, 0xfc, 0x6f, 0x70, 0xb5, 0x73, 0x3f, 0x11, 0xd0, - 0xee, 0x17, 0xe2, 0x7a, 0x20, 0x15, 0xb0, 0xeb, 0xcf, 0x04, 0xe8, 0xeb, 0x06, 0x5b, 0x12, 0x76, - 0xda, 0x6e, 0x39, 0xc1, 0xbe, 0x99, 0x27, 0x06, 0x5a, 0x24, 0x06, 0xfa, 0x49, 0x0c, 0xf4, 0xb6, - 0x34, 0x94, 0xc5, 0xd2, 0x50, 0xbe, 0x96, 0x86, 0x72, 0x7f, 0xe1, 0x8d, 0xf9, 0x68, 0x36, 0x34, - 0x1f, 0x58, 0x60, 0xc9, 0x1b, 0x3a, 0x9d, 0x52, 0xfe, 0xcc, 0x22, 0x7f, 0x85, 0x48, 0x38, 0xb6, - 0x3c, 0x66, 0x4d, 0x99, 0x4b, 0x4b, 0xbf, 0x65, 0x58, 0x97, 0xaf, 0xdf, 0xfb, 0x0d, 0x00, 0x00, - 0xff, 0xff, 0x64, 0x93, 0x15, 0xab, 0x4a, 0x02, 0x00, 0x00, -} - -func (m *DenomTakeRate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DenomTakeRate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DenomTakeRate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Rate != 0 { - i = encodeVarintParams(dAtA, i, uint64(m.Rate)) - i-- - dAtA[i] = 0x10 - } - if len(m.Denom) > 0 { - i -= len(m.Denom) - copy(dAtA[i:], m.Denom) - i = encodeVarintParams(dAtA, i, 
uint64(len(m.Denom))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *Params) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Params) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DefaultTakeRate != 0 { - i = encodeVarintParams(dAtA, i, uint64(m.DefaultTakeRate)) - i-- - dAtA[i] = 0x10 - } - if len(m.DenomTakeRates) > 0 { - for iNdEx := len(m.DenomTakeRates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.DenomTakeRates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintParams(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintParams(dAtA []byte, offset int, v uint64) int { - offset -= sovParams(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *DenomTakeRate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Denom) - if l > 0 { - n += 1 + l + sovParams(uint64(l)) - } - if m.Rate != 0 { - n += 1 + sovParams(uint64(m.Rate)) - } - return n -} - -func (m *Params) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.DenomTakeRates) > 0 { - for _, e := range m.DenomTakeRates { - l = e.Size() - n += 1 + l + sovParams(uint64(l)) - } - } - if m.DefaultTakeRate != 0 { - n += 1 + sovParams(uint64(m.DefaultTakeRate)) - } - return n -} - -func sovParams(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozParams(x uint64) (n int) { - return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *DenomTakeRate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DenomTakeRate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DenomTakeRate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthParams - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthParams - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Denom = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Rate", wireType) - } - m.Rate = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Rate |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipParams(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Params) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Params: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DenomTakeRates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthParams - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthParams - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DenomTakeRates = append(m.DenomTakeRates, DenomTakeRate{}) - if err := m.DenomTakeRates[len(m.DenomTakeRates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultTakeRate", wireType) - } - m.DefaultTakeRate = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowParams - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DefaultTakeRate |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipParams(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthParams - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipParams(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowParams - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { 
- break - } - } - if length < 0 { - return 0, ErrInvalidLengthParams - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupParams - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthParams - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/take/v1beta3/query.pb.go b/go/node/take/v1beta3/query.pb.go deleted file mode 100644 index e290d25f..00000000 --- a/go/node/take/v1beta3/query.pb.go +++ /dev/null @@ -1,81 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/take/v1beta3/query.proto - -package v1beta3 - -import ( - context "context" - fmt "fmt" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" - grpc "google.golang.org/grpc" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func init() { proto.RegisterFile("akash/take/v1beta3/query.proto", fileDescriptor_d53d30cc323d5fb3) } - -var fileDescriptor_d53d30cc323d5fb3 = []byte{ - // 143 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x2f, 0x49, 0xcc, 0x4e, 0xd5, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd6, 0x2f, - 0x2c, 0x4d, 0x2d, 0xaa, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x02, 0xcb, 0xeb, 0x81, - 0xe4, 0xf5, 0xa0, 0xf2, 0x46, 0xec, 0x5c, 0xac, 0x81, 0x20, 0x25, 0x4e, 0x81, 0x27, 0x1e, 0xc9, - 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, - 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0x65, 0x9e, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, - 0x9c, 0x9f, 0xab, 0x0f, 0x36, 0x41, 0x37, 0x2f, 0xb5, 0xa4, 0x3c, 0xbf, 0x28, 0x1b, 0xca, 0x4b, - 0x2c, 0xc8, 0xd4, 0x4f, 0xcf, 0xd7, 0xcf, 0xcb, 0x4f, 0x49, 0x45, 0xb1, 0x3b, 0x89, 0x0d, 0x6c, - 0xad, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xb4, 0x98, 0x8b, 0xe8, 0x98, 0x00, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// QueryClient is the client API for Query service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type QueryClient interface { -} - -type queryClient struct { - cc grpc1.ClientConn -} - -func NewQueryClient(cc grpc1.ClientConn) QueryClient { - return &queryClient{cc} -} - -// QueryServer is the server API for Query service. 
-type QueryServer interface { -} - -// UnimplementedQueryServer can be embedded to have forward compatible implementations. -type UnimplementedQueryServer struct { -} - -func RegisterQueryServer(s grpc1.Server, srv QueryServer) { - s.RegisterService(&_Query_serviceDesc, srv) -} - -var _Query_serviceDesc = grpc.ServiceDesc{ - ServiceName: "akash.take.v1beta3.Query", - HandlerType: (*QueryServer)(nil), - Methods: []grpc.MethodDesc{}, - Streams: []grpc.StreamDesc{}, - Metadata: "akash/take/v1beta3/query.proto", -} diff --git a/go/node/types/attributes/v1/attribute.go b/go/node/types/attributes/v1/attribute.go new file mode 100644 index 00000000..be0330b5 --- /dev/null +++ b/go/node/types/attributes/v1/attribute.go @@ -0,0 +1,379 @@ +package v1 + +import ( + "path/filepath" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + + "gopkg.in/yaml.v3" + + cerrors "cosmossdk.io/errors" +) + +const ( + moduleName = "akash" + AttributeNameRegexpStringWildcard = `^([a-zA-Z][\w\/\.\-]{1,126}[\w\*]?)$` + AttributeNameRegexpString = `^([a-zA-Z][\w\/\.\-]{1,126})$` +) + +const ( + errAttributesDuplicateKeys uint32 = iota + 1 + errInvalidAttributeKey +) + +var ( + ErrAttributesDuplicateKeys = cerrors.Register(moduleName, errAttributesDuplicateKeys, "attributes cannot have duplicate keys") + ErrInvalidAttributeKey = cerrors.Register(moduleName, errInvalidAttributeKey, "attribute key does not match regexp") +) + +var ( + attributeNameRegexpWildcard = regexp.MustCompile(AttributeNameRegexpStringWildcard) +) + +/* +Attributes purpose of using this type in favor of Cosmos's sdk.Attribute is +ability to later extend it with operators to support querying on things like +cpu/memory/storage attributes +At this moment type though is same as sdk.Attributes but all akash libraries were +turned to use a new one +*/ +type Attributes []Attribute + +var _ sort.Interface = (*Attributes)(nil) + +type AttributesGroup []Attributes + +type AttributeValue interface { + AsBool() (bool, bool) + AsString() (string, bool) +} + +type attributeValue struct { + value string +} + +func (val attributeValue) AsBool() (bool, bool) { + if val.value == "" { + return false, false + } + + res, err := strconv.ParseBool(val.value) + if err != nil { + return false, false + } + + return res, true +} + +func (val attributeValue) AsString() (string, bool) { + if val.value == "" { + return "", false + } + + return val.value, true +} + +func (m PlacementRequirements) Dup() PlacementRequirements { + return PlacementRequirements{ + SignedBy: m.SignedBy, + Attributes: m.Attributes.Dup(), + } +} + +func NewStringAttribute(key, val string) Attribute { + return Attribute{ + Key: key, + Value: val, + } +} + +func (m *Attribute) String() string { + res, _ := yaml.Marshal(m) + return string(res) +} + +func (m *Attribute) Equal(rhs *Attribute) bool { + return reflect.DeepEqual(m, rhs) +} + +func (m Attribute) SubsetOf(rhs Attribute) bool { + match, _ := filepath.Match(m.Key, rhs.Key) + + return match && (m.Value == rhs.Value) +} + +func (attr Attributes) Len() int { + return len(attr) +} + +func (attr Attributes) Swap(i, j int) { + attr[i], attr[j] = attr[j], attr[i] +} + +func (attr Attributes) Less(i, j int) bool { + return attr[i].Key < attr[j].Key +} + +func (attr Attributes) Validate() error { + return attr.ValidateWithRegex(attributeNameRegexpWildcard) +} + +func (attr Attributes) ValidateWithRegex(r *regexp.Regexp) error { + store := make(map[string]bool) + + for i := range attr { + if !r.MatchString(attr[i].Key) { + return ErrInvalidAttributeKey + 
} + + if _, ok := store[attr[i].Key]; ok { + return ErrAttributesDuplicateKeys + } + + store[attr[i].Key] = true + } + + return nil +} + +func (attr Attributes) Dup() Attributes { + if attr == nil { + return nil + } + + res := make(Attributes, 0, len(attr)) + + for _, pair := range attr { + res = append(res, Attribute{ + Key: pair.Key, + Value: pair.Value, + }) + } + + return res +} + +// AttributesSubsetOf check if a is subset of b +// nolint: gofmt +// For example there are two yaml files being converted into these attributes +// example 1: a is subset of b +// --- +// // a +// attributes: +// +// region: +// - us-east-1 +// +// --- +// b +// attributes: +// +// region: +// - us-east-1 +// - us-east-2 +// +// example 2: a is not subset of b +// attributes: +// +// region: +// - us-east-1 +// +// --- +// b +// attributes: +// +// region: +// - us-east-2 +// - us-east-3 +// +// example 3: a is subset of b +// attributes: +// +// region: +// - us-east-2 +// - us-east-3 +// +// --- +// b +// attributes: +// +// region: +// - us-east-2 +func AttributesSubsetOf(a, b Attributes) bool { +loop: + for _, req := range a { + for _, attr := range b { + if req.SubsetOf(attr) { + continue loop + } + } + return false + } + + return true +} + +func AttributesAnyOf(a, b Attributes) bool { + for _, req := range a { + for _, attr := range b { + if req.SubsetOf(attr) { + return true + } + } + } + + return false +} + +func (attr Attributes) SubsetOf(b Attributes) bool { + return AttributesSubsetOf(attr, b) +} + +func (attr Attributes) AnyOf(b Attributes) bool { + return AttributesAnyOf(attr, b) +} + +func (attr Attributes) Find(glob string) AttributeValue { + // todo wildcard + + var val attributeValue + + for i := range attr { + if glob == attr[i].Key { + val.value = attr[i].Value + break + } + } + + return val +} + +func (attr Attributes) Iterate(prefix string, fn func(group, key, value string)) { + for _, item := range attr { + if strings.HasPrefix(item.Key, prefix) { + tokens := strings.SplitAfter(item.Key, "/") + tokens = tokens[1:] + fn(tokens[1], tokens[2], item.Value) + } + } +} + +// GetCapabilitiesGroup +// +// example +// capabilities/storage/1/persistent: true +// capabilities/storage/1/class: io1 +// capabilities/storage/2/persistent: false +// +// nolint: gofmt +// returns +// - - persistent: true +// class: nvme +// - - persistent: false +func (attr Attributes) GetCapabilitiesGroup(prefix string) AttributesGroup { + var res AttributesGroup // nolint:prealloc + + groups := make(map[string]Attributes) + + for _, item := range attr { + if !strings.HasPrefix(item.Key, "capabilities/"+prefix) { + continue + } + + tokens := strings.SplitAfter(strings.TrimPrefix(item.Key, "capabilities/"), "/") + // skip malformed attributes. really? 
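+	// note: a well-formed capability key "capabilities/<prefix>/<group>/<name>"
+	// splits (with SplitAfter) into exactly three tokens; anything else is ignored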
+ if len(tokens) != 3 { + continue + } + + // filter out prefix name + tokens = tokens[1:] + + group := groups[tokens[0]] + if group == nil { + group = Attributes{} + } + + group = append(group, Attribute{ + Key: tokens[1], + Value: item.Value, + }) + + groups[tokens[0]] = group + } + + for _, group := range groups { + res = append(res, group) + } + + return res +} + +func (attr Attributes) GetCapabilitiesMap(prefix string) AttributesGroup { + res := make(AttributesGroup, 0, 1) + groups := make(Attributes, 0, len(attr)) + + for _, item := range attr { + if !strings.HasPrefix(item.Key, "capabilities/"+prefix) { + continue + } + + tokens := strings.Split(strings.TrimPrefix(item.Key, "capabilities/"), "/") + // skip malformed attributes + if len(tokens) < 3 { + continue + } + + // filter out prefix name + tokens = tokens[1:] + + var key string + for i, token := range tokens { + if i == 0 { + key = token + } else { + key += "/" + token + } + } + + groups = append(groups, Attribute{ + Key: key, + Value: item.Value, + }) + } + + res = append(res, groups) + + return res +} + +// IN check if given attributes are in attributes group +// AttributesGroup for storage +// - persistent: true +// class: beta1 +// - persistent: true +// class: beta2 +// +// that +// - persistent: true +// class: beta1 +func (attr Attributes) IN(group AttributesGroup) bool { + for _, group := range group { + if attr.SubsetOf(group) { + return true + } + } + return false +} + +func (attr Attributes) AnyIN(group AttributesGroup) bool { + for _, group := range group { + if attr.AnyOf(group) { + return true + } + } + return false +} diff --git a/go/node/types/attributes/v1/attribute.pb.go b/go/node/types/attributes/v1/attribute.pb.go new file mode 100644 index 00000000..699f1c65 --- /dev/null +++ b/go/node/types/attributes/v1/attribute.pb.go @@ -0,0 +1,812 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/base/attributes/v1/attribute.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
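Editor note, illustrative only: the hand-written helpers added above in go/node/types/attributes/v1/attribute.go (Validate, SubsetOf, GetCapabilitiesGroup) are easiest to follow with a small self-contained sketch. The import path pkg.akt.dev/go/... is taken from the generated code later in this diff; the sample attribute values are made up.

package main

import (
	"fmt"

	attr "pkg.akt.dev/go/node/types/attributes/v1"
)

func main() {
	// Provider-advertised attributes; keys must satisfy the wildcard regexp
	// and be unique, otherwise Validate returns an error.
	provider := attr.Attributes{
		{Key: "region", Value: "us-east-1"},
		{Key: "capabilities/storage/1/persistent", Value: "true"},
		{Key: "capabilities/storage/1/class", Value: "beta2"},
	}
	if err := provider.Validate(); err != nil {
		panic(err)
	}

	// SubsetOf matches each requirement key against provider keys with
	// filepath.Match, so a requirement key may carry a trailing wildcard.
	required := attr.Attributes{{Key: "region", Value: "us-east-1"}}
	fmt.Println(required.SubsetOf(provider)) // true

	// GetCapabilitiesGroup regroups capabilities/<prefix>/<group>/<name>
	// attributes by their group token.
	groups := provider.GetCapabilitiesGroup("storage")
	fmt.Println(len(groups)) // 1 group: persistent=true, class=beta2
}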
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Attribute represents key value pair +type Attribute struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty" yaml:"key"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty" yaml:"value"` +} + +func (m *Attribute) Reset() { *m = Attribute{} } +func (*Attribute) ProtoMessage() {} +func (*Attribute) Descriptor() ([]byte, []int) { + return fileDescriptor_44d6ae5d18e0c0a3, []int{0} +} +func (m *Attribute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Attribute.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Attribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_Attribute.Merge(m, src) +} +func (m *Attribute) XXX_Size() int { + return m.Size() +} +func (m *Attribute) XXX_DiscardUnknown() { + xxx_messageInfo_Attribute.DiscardUnknown(m) +} + +var xxx_messageInfo_Attribute proto.InternalMessageInfo + +// SignedBy represents validation accounts that tenant expects signatures for provider attributes +// AllOf has precedence i.e. if there is at least one entry AnyOf is ignored regardless to how many +// entries there +// this behaviour to be discussed +type SignedBy struct { + // all_of all keys in this list must have signed attributes + AllOf []string `protobuf:"bytes,1,rep,name=all_of,json=allOf,proto3" json:"all_of" yaml:"allOf"` + // any_of at least of of the keys from the list must have signed attributes + AnyOf []string `protobuf:"bytes,2,rep,name=any_of,json=anyOf,proto3" json:"any_of" yaml:"anyOf"` +} + +func (m *SignedBy) Reset() { *m = SignedBy{} } +func (*SignedBy) ProtoMessage() {} +func (*SignedBy) Descriptor() ([]byte, []int) { + return fileDescriptor_44d6ae5d18e0c0a3, []int{1} +} +func (m *SignedBy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignedBy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignedBy.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignedBy) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignedBy.Merge(m, src) +} +func (m *SignedBy) XXX_Size() int { + return m.Size() +} +func (m *SignedBy) XXX_DiscardUnknown() { + xxx_messageInfo_SignedBy.DiscardUnknown(m) +} + +var xxx_messageInfo_SignedBy proto.InternalMessageInfo + +// PlacementRequirements +type PlacementRequirements struct { + // SignedBy list of keys that tenants expect to have signatures from + SignedBy SignedBy `protobuf:"bytes,1,opt,name=signed_by,json=signedBy,proto3" json:"signed_by" yaml:"signed_by"` + // Attribute list of attributes tenant expects from the provider + Attributes Attributes `protobuf:"bytes,2,rep,name=attributes,proto3,castrepeated=Attributes" json:"attributes" yaml:"attributes"` +} + +func (m *PlacementRequirements) Reset() { *m = PlacementRequirements{} } +func (*PlacementRequirements) ProtoMessage() {} +func (*PlacementRequirements) Descriptor() ([]byte, []int) { + return fileDescriptor_44d6ae5d18e0c0a3, []int{2} +} +func (m *PlacementRequirements) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PlacementRequirements) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PlacementRequirements.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PlacementRequirements) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlacementRequirements.Merge(m, src) +} +func (m *PlacementRequirements) XXX_Size() int { + return m.Size() +} +func (m *PlacementRequirements) XXX_DiscardUnknown() { + xxx_messageInfo_PlacementRequirements.DiscardUnknown(m) +} + +var xxx_messageInfo_PlacementRequirements proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Attribute)(nil), "akash.base.attributes.v1.Attribute") + proto.RegisterType((*SignedBy)(nil), "akash.base.attributes.v1.SignedBy") + proto.RegisterType((*PlacementRequirements)(nil), "akash.base.attributes.v1.PlacementRequirements") +} + +func init() { + proto.RegisterFile("akash/base/attributes/v1/attribute.proto", fileDescriptor_44d6ae5d18e0c0a3) +} + +var fileDescriptor_44d6ae5d18e0c0a3 = []byte{ + // 407 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x31, 0xef, 0xd2, 0x40, + 0x18, 0xc6, 0xaf, 0xfc, 0x85, 0xd0, 0xc3, 0x18, 0x6c, 0x34, 0x69, 0x18, 0xee, 0x9a, 0x33, 0x2a, + 0xd3, 0x35, 0x60, 0x5c, 0xd8, 0xec, 0xe6, 0xa4, 0xa9, 0x9b, 0x0e, 0xe4, 0x2a, 0x47, 0x25, 0x2d, + 0x2d, 0x72, 0xa5, 0x49, 0x99, 0x58, 0x4c, 0x1c, 0xfd, 0x08, 0xce, 0x7e, 0x12, 0x46, 0x46, 0x06, + 0x53, 0xb5, 0x6c, 0x8c, 0xfd, 0x04, 0xa6, 0x57, 0x68, 0xfb, 0x1f, 0x98, 0xee, 0xfa, 0xdc, 0xef, + 0xb9, 0xf7, 0x9e, 0xb7, 0x2f, 0x1c, 0x32, 0x8f, 0x89, 0x2f, 0xa6, 0xc3, 0x04, 0x37, 0x59, 0x14, + 0xad, 0x17, 0xce, 0x26, 0xe2, 0xc2, 0x8c, 0x47, 0xf5, 0x17, 0x5d, 0xad, 0xc3, 0x28, 0xd4, 0x74, + 0x49, 0xd2, 0x82, 0xa4, 0x35, 0x49, 0xe3, 0xd1, 0xe0, 0x89, 0x1b, 0xba, 0xa1, 0x84, 0xcc, 0x62, + 0x57, 0xf2, 0xe4, 0x13, 0x54, 0xdf, 0x5c, 0x31, 0xcd, 0x80, 0x77, 0x1e, 0x4f, 0x74, 0xc5, 0x50, + 0x86, 0xaa, 0xf5, 0x28, 0x4f, 0x31, 0x4c, 0xd8, 0xd2, 0x9f, 0x10, 0x8f, 0x27, 0xc4, 0x2e, 0x8e, + 0xb4, 0x17, 0xb0, 0x1d, 0x33, 0x7f, 0xc3, 0xf5, 0x96, 0x64, 0xfa, 0x79, 0x8a, 0x1f, 0x96, 0x8c, + 0x94, 0x89, 0x5d, 0x1e, 0x4f, 0x1e, 0x7c, 0xff, 0x89, 0x01, 0xd9, 0xc2, 0xee, 0x87, 0x85, 0x1b, + 0xf0, 0x99, 0x95, 0x68, 0x23, 0xd8, 0x61, 0xbe, 0x3f, 0x0d, 0xe7, 0xba, 0x62, 0xdc, 0x0d, 0x55, + 0x6b, 0x70, 0x4e, 0xf1, 0x45, 0xa9, 0x2f, 0x61, 0xbe, 0xff, 0x6e, 0x4e, 0xec, 0xb6, 0x5c, 0xa5, + 0x25, 0x48, 0x0a, 0x4b, 0xab, 0x61, 0x91, 0x4a, 0xc3, 0x12, 0x24, 0xa5, 0xa5, 0x58, 0x27, 0xdd, + 0xa2, 0xee, 0xee, 0xb7, 0x01, 0xc8, 0xb7, 0x16, 0x7c, 0xfa, 0xde, 0x67, 0x9f, 0xf9, 0x92, 0x07, + 0x91, 0xcd, 0xbf, 0x6e, 0x16, 0x6b, 0xb9, 0x15, 0xda, 0x1c, 0xaa, 0x42, 0xbe, 0x6a, 0xea, 0x94, + 0x59, 0x7b, 0x63, 0x42, 0x6f, 0xb5, 0x8d, 0x5e, 0x03, 0x58, 0xcf, 0xf7, 0x29, 0x06, 0xe7, 0x14, + 0xd7, 0xe6, 0x3c, 0xc5, 0xfd, 0xf2, 0x11, 0x95, 0x44, 0xec, 0xae, 0xb8, 0x26, 0xde, 0x42, 0x58, + 0x5f, 0x25, 0x23, 0xf4, 0xc6, 0xcf, 0x6e, 0x17, 0xaa, 0x7e, 0x83, 0xf5, 0xfa, 0x52, 0xa9, 0x61, + 0xcf, 0x53, 0xfc, 0xf8, 0x92, 0xb7, 0xd2, 0xc8, 0xaf, 0x3f, 0x18, 0x56, 0x2e, 0x61, 0x37, 0xf0, + 0xba, 0x0f, 0xd6, 0xdb, 0xe3, 0x3f, 0x04, 0x76, 0x19, 0x02, 0xfb, 0x0c, 0x29, 0x87, 0x0c, 0x29, + 0x7f, 0x33, 0xa4, 0xfc, 0x38, 0x21, 0x70, 0x38, 0x21, 0x70, 0x3c, 0x21, 0xf0, 0xf1, 0xe5, 0xca, + 0x73, 0x29, 0xf3, 0x22, 0x3a, 0xe3, 0xb1, 0xe9, 0x86, 0x66, 0x10, 0xce, 0xb8, 0x19, 0x25, 0x2b, + 0x2e, 0xee, 0xcf, 0x9a, 0xd3, 0x91, 0x23, 0xf3, 0xea, 0x7f, 0x00, 0x00, 
0x00, 0xff, 0xff, 0x76, + 0x3a, 0xe6, 0xaf, 0x8e, 0x02, 0x00, 0x00, +} + +func (m *Attribute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Attribute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Attribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintAttribute(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintAttribute(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SignedBy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignedBy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignedBy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AnyOf) > 0 { + for iNdEx := len(m.AnyOf) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AnyOf[iNdEx]) + copy(dAtA[i:], m.AnyOf[iNdEx]) + i = encodeVarintAttribute(dAtA, i, uint64(len(m.AnyOf[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.AllOf) > 0 { + for iNdEx := len(m.AllOf) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllOf[iNdEx]) + copy(dAtA[i:], m.AllOf[iNdEx]) + i = encodeVarintAttribute(dAtA, i, uint64(len(m.AllOf[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PlacementRequirements) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PlacementRequirements) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PlacementRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAttribute(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.SignedBy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAttribute(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintAttribute(dAtA []byte, offset int, v uint64) int { + offset -= sovAttribute(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Attribute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovAttribute(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovAttribute(uint64(l)) + } + return n +} + +func (m *SignedBy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AllOf) > 0 { + for _, s := range m.AllOf { + l = len(s) + n += 1 + l + sovAttribute(uint64(l)) + } + } + if 
len(m.AnyOf) > 0 { + for _, s := range m.AnyOf { + l = len(s) + n += 1 + l + sovAttribute(uint64(l)) + } + } + return n +} + +func (m *PlacementRequirements) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.SignedBy.Size() + n += 1 + l + sovAttribute(uint64(l)) + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovAttribute(uint64(l)) + } + } + return n +} + +func sovAttribute(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAttribute(x uint64) (n int) { + return sovAttribute(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Attribute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Attribute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Attribute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAttribute + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAttribute + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAttribute + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAttribute + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAttribute(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAttribute + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignedBy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignedBy: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: SignedBy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllOf", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAttribute + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAttribute + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllOf = append(m.AllOf, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AnyOf", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAttribute + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAttribute + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AnyOf = append(m.AnyOf, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAttribute(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAttribute + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PlacementRequirements) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PlacementRequirements: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PlacementRequirements: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedBy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAttribute + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAttribute + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SignedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAttribute + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAttribute + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAttribute(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAttribute + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAttribute(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAttribute + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAttribute + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAttribute + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAttribute + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupAttribute + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthAttribute + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthAttribute = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAttribute = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAttribute = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/types/attributes/v1/attribute_test.go b/go/node/types/attributes/v1/attribute_test.go new file mode 100644 index 00000000..113b1d7b --- /dev/null +++ b/go/node/types/attributes/v1/attribute_test.go @@ -0,0 +1,182 @@ +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +type regexTest struct { + runName string + key string + shouldPass bool +} + +func TestAttributes_Validate(t *testing.T) { + attr := Attributes{ + {Key: "key"}, + {Key: "key"}, + } + + require.EqualError(t, attr.Validate(), ErrAttributesDuplicateKeys.Error()) + + // unsupported key symbol + attr = Attributes{ + {Key: "$"}, + } + + require.EqualError(t, attr.Validate(), ErrInvalidAttributeKey.Error()) + + // empty key + attr = Attributes{ + {Key: ""}, + } + + require.EqualError(t, attr.Validate(), ErrInvalidAttributeKey.Error()) + // to long key + attr = Attributes{ + {Key: "sdgkhaeirugaeroigheirghseiargfs3ssdgkhaeirugaeroigheirghseiargfs3sdgkhaeirugaeroigheirghseiargfs3ssdgkhaeirugaeroigheirghseiargfs3"}, + } + + require.EqualError(t, attr.Validate(), 
ErrInvalidAttributeKey.Error()) +} + +func TestAttribute_Equal(t *testing.T) { + attr1 := &Attribute{Key: "key1", Value: "val1"} + attr2 := &Attribute{Key: "key1", Value: "val1"} + attr3 := &Attribute{Key: "key1", Value: "val2"} + + require.True(t, attr1.Equal(attr2)) + require.False(t, attr1.Equal(attr3)) +} + +func TestAttribute_SubsetOf(t *testing.T) { + attr1 := Attribute{Key: "key1", Value: "val1"} + attr2 := Attribute{Key: "key1", Value: "val1"} + attr3 := Attribute{Key: "key1", Value: "val2"} + + require.True(t, attr1.SubsetOf(attr2)) + require.False(t, attr1.SubsetOf(attr3)) +} + +func TestAttribute_AnyOf(t *testing.T) { + attr1 := Attribute{Key: "key1", Value: "val1"} + attr2 := Attribute{Key: "key1", Value: "val1"} + attr3 := Attribute{Key: "key1", Value: "val2"} + + require.True(t, attr1.SubsetOf(attr2)) + require.False(t, attr1.SubsetOf(attr3)) +} + +func TestAttributes_SubsetOf(t *testing.T) { + attr1 := Attributes{ + {Key: "key1", Value: "val1"}, + } + + attr2 := Attributes{ + {Key: "key1", Value: "val1"}, + {Key: "key2", Value: "val2"}, + } + + attr3 := Attributes{ + {Key: "key1", Value: "val1"}, + {Key: "key2", Value: "val2"}, + {Key: "key3", Value: "val3"}, + {Key: "key4", Value: "val4"}, + } + + attr4 := Attributes{ + {Key: "key3", Value: "val3"}, + {Key: "key4", Value: "val4"}, + } + + require.True(t, attr1.SubsetOf(attr2)) + require.True(t, attr2.SubsetOf(attr3)) + require.False(t, attr1.SubsetOf(attr4)) +} + +func TestAttributes_AnyOf(t *testing.T) { + attr1 := Attributes{ + {Key: "key1", Value: "val1"}, + } + + attr2 := Attributes{ + {Key: "key1", Value: "val1"}, + {Key: "key2", Value: "val2"}, + } + + attr3 := Attributes{ + {Key: "key1", Value: "val1"}, + {Key: "key2", Value: "val2"}, + {Key: "key3", Value: "val3"}, + {Key: "key4", Value: "val4"}, + } + + attr4 := Attributes{ + {Key: "key3", Value: "val3"}, + {Key: "key4", Value: "val4"}, + } + + require.True(t, attr1.AnyOf(attr2)) + require.True(t, attr2.AnyOf(attr1)) + require.True(t, attr2.AnyOf(attr3)) + require.False(t, attr1.AnyOf(attr4)) +} + +func TestAttributeRegex(t *testing.T) { + tests := []regexTest{ + { + "arbitrary key", + "key1", + true, + }, + { + "allow trailing wildcard", + "key1*", + true, + }, + { + "allow trailing wildcard", + "key1/*", + true, + }, + { + "leading wildcard is not allowed", + "*key1", + false, + }, + { + "multiple wildcards are not allowed", + "key1**", + false, + }, + { + "wildcards in the middle of key are not allowed", + "key1*/", + false, + }, + { + "wildcards in the middle of key are not allowed", + "key1/*/", + false, + }, + } + + for _, test := range tests { + t.Run(test.runName, func(t *testing.T) { + require.Equal(t, test.shouldPass, attributeNameRegexpWildcard.MatchString(test.key)) + }) + } +} + +func TestAttributes_Dup(t *testing.T) { + attrs := Attributes{ + Attribute{ + Key: "key", + Value: "val", + }, + } + + dAttrs := attrs.Dup() + require.Equal(t, attrs, dAttrs) +} diff --git a/go/node/types/attributes/v1/requirements.go b/go/node/types/attributes/v1/requirements.go new file mode 100644 index 00000000..13b9b640 --- /dev/null +++ b/go/node/types/attributes/v1/requirements.go @@ -0,0 +1,15 @@ +package v1 + +import ( + "gopkg.in/yaml.v3" +) + +func (m *SignedBy) String() string { + res, _ := yaml.Marshal(m) + return string(res) +} + +func (m *PlacementRequirements) String() string { + res, _ := yaml.Marshal(m) + return string(res) +} diff --git a/go/node/types/constants/constants.go b/go/node/types/constants/constants.go index ef99ff5b..d846b8b8 100644 --- 
a/go/node/types/constants/constants.go +++ b/go/node/types/constants/constants.go @@ -2,4 +2,7 @@ package constants const ( DefaultMaxGroupVolumes = 4 + DefaultGas = "auto" + DefaultGasPrices = "0.025uakt" + DefaultGasAdjustment = 1.5 ) diff --git a/go/node/types/resources/v1beta4/cpu.pb.go b/go/node/types/resources/v1beta4/cpu.pb.go new file mode 100644 index 00000000..f7cb1683 --- /dev/null +++ b/go/node/types/resources/v1beta4/cpu.pb.go @@ -0,0 +1,426 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/base/resources/v1beta4/cpu.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + pkg_akt_dev_go_node_types_attributes_v1 "pkg.akt.dev/go/node/types/attributes/v1" + v1 "pkg.akt.dev/go/node/types/attributes/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// CPU stores resource units and cpu config attributes +type CPU struct { + Units ResourceValue `protobuf:"bytes,1,opt,name=units,proto3" json:"units"` + Attributes pkg_akt_dev_go_node_types_attributes_v1.Attributes `protobuf:"bytes,2,rep,name=attributes,proto3,castrepeated=pkg.akt.dev/go/node/types/attributes/v1.Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` +} + +func (m *CPU) Reset() { *m = CPU{} } +func (m *CPU) String() string { return proto.CompactTextString(m) } +func (*CPU) ProtoMessage() {} +func (*CPU) Descriptor() ([]byte, []int) { + return fileDescriptor_a01a52020c12431a, []int{0} +} +func (m *CPU) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CPU) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CPU.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CPU) XXX_Merge(src proto.Message) { + xxx_messageInfo_CPU.Merge(m, src) +} +func (m *CPU) XXX_Size() int { + return m.Size() +} +func (m *CPU) XXX_DiscardUnknown() { + xxx_messageInfo_CPU.DiscardUnknown(m) +} + +var xxx_messageInfo_CPU proto.InternalMessageInfo + +func (m *CPU) GetUnits() ResourceValue { + if m != nil { + return m.Units + } + return ResourceValue{} +} + +func (m *CPU) GetAttributes() pkg_akt_dev_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func init() { + proto.RegisterType((*CPU)(nil), "akash.base.resources.v1beta4.CPU") +} + +func init() { + proto.RegisterFile("akash/base/resources/v1beta4/cpu.proto", fileDescriptor_a01a52020c12431a) +} + +var fileDescriptor_a01a52020c12431a = []byte{ + // 308 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4b, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x2d, + 0xd6, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd1, 0x4f, 0x2e, 0x28, 0xd5, 0x2b, 0x28, 0xca, + 0x2f, 0xc9, 0x17, 0x92, 0x01, 0xab, 0xd3, 0x03, 0xa9, 0xd3, 0x83, 0xab, 0xd3, 0x83, 0xaa, 0x93, 
+ 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd4, 0x07, 0xb1, 0x20, 0x7a, 0xa4, 0x34, 0x90, 0xcc, + 0x4e, 0x2c, 0x29, 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0x01, 0x1b, 0x8e, 0xe0, 0x41, 0x55, 0x1a, 0xe0, + 0x75, 0x05, 0x4c, 0xa4, 0x2c, 0x31, 0xa7, 0x14, 0xaa, 0x43, 0xa9, 0x97, 0x89, 0x8b, 0xd9, 0x39, + 0x20, 0x54, 0xc8, 0x9d, 0x8b, 0xb5, 0x34, 0x2f, 0xb3, 0xa4, 0x58, 0x82, 0x51, 0x81, 0x51, 0x83, + 0xdb, 0x48, 0x5b, 0x0f, 0x9f, 0x3b, 0xf5, 0x82, 0xa0, 0x22, 0x61, 0x20, 0x93, 0x9c, 0x58, 0x4e, + 0xdc, 0x93, 0x67, 0x08, 0x82, 0xe8, 0x17, 0xda, 0xc6, 0xc8, 0xc5, 0x85, 0x70, 0xa4, 0x04, 0x93, + 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x32, 0xb2, 0x71, 0x08, 0x59, 0xbd, 0x32, 0x43, 0x3d, 0x47, 0x18, + 0xcf, 0xa9, 0x10, 0x64, 0xcc, 0xab, 0x7b, 0xf2, 0x22, 0x08, 0x05, 0x3a, 0xf9, 0xb9, 0x99, 0x25, + 0xa9, 0xb9, 0x05, 0x25, 0x95, 0x9f, 0xee, 0xc9, 0x4b, 0x57, 0x26, 0xe6, 0xe6, 0x58, 0x29, 0x61, + 0x93, 0x55, 0x5a, 0x75, 0x5f, 0xde, 0xa8, 0x20, 0x3b, 0x5d, 0x2f, 0x31, 0xbb, 0x44, 0x2f, 0x25, + 0xb5, 0x4c, 0x3f, 0x3d, 0x5f, 0x3f, 0x2f, 0x3f, 0x25, 0x55, 0xbf, 0xa4, 0xb2, 0x20, 0xb5, 0x18, + 0x35, 0xd0, 0x10, 0x36, 0x16, 0x07, 0x21, 0xb9, 0xd4, 0x8a, 0xe5, 0xc5, 0x02, 0x79, 0x46, 0x27, + 0xd7, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, + 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xd2, 0xc6, 0x6d, 0x3e, 0x46, + 0x50, 0x27, 0xb1, 0x81, 0x43, 0xd7, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x5a, 0xa4, 0x9c, + 0x17, 0x02, 0x00, 0x00, +} + +func (this *CPU) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*CPU) + if !ok { + that2, ok := that.(CPU) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Units.Equal(&that1.Units) { + return false + } + if len(this.Attributes) != len(that1.Attributes) { + return false + } + for i := range this.Attributes { + if !this.Attributes[i].Equal(&that1.Attributes[i]) { + return false + } + } + return true +} +func (m *CPU) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CPU) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CPU) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCpu(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Units.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCpu(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintCpu(dAtA []byte, offset int, v uint64) int { + offset -= sovCpu(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CPU) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Units.Size() + n += 1 + l + sovCpu(uint64(l)) + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovCpu(uint64(l)) + } + } + return n +} + +func sovCpu(x uint64) (n int) { + return 
(math_bits.Len64(x|1) + 6) / 7 +} +func sozCpu(x uint64) (n int) { + return sovCpu(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CPU) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCpu + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CPU: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CPU: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Units", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCpu + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCpu + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCpu + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Units.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCpu + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCpu + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCpu + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCpu(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCpu + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCpu(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCpu + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCpu + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCpu + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCpu + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + 
return 0, ErrUnexpectedEndOfGroupCpu + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCpu + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCpu = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCpu = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCpu = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/types/resources/v1beta4/endpoint.go b/go/node/types/resources/v1beta4/endpoint.go new file mode 100644 index 00000000..1192d166 --- /dev/null +++ b/go/node/types/resources/v1beta4/endpoint.go @@ -0,0 +1,29 @@ +package v1beta4 + +import ( + "sort" +) + +type Endpoints []Endpoint + +var _ sort.Interface = (*Endpoints)(nil) + +func (u Endpoints) Dup() Endpoints { + res := make(Endpoints, len(u)) + + copy(res, u) + + return res +} + +func (u Endpoints) Len() int { + return len(u) +} + +func (u Endpoints) Swap(i, j int) { + u[i], u[j] = u[j], u[i] +} + +func (u Endpoints) Less(i, j int) bool { + return u[i].SequenceNumber < u[j].SequenceNumber +} diff --git a/go/node/types/resources/v1beta4/endpoint.pb.go b/go/node/types/resources/v1beta4/endpoint.pb.go new file mode 100644 index 00000000..9a86b830 --- /dev/null +++ b/go/node/types/resources/v1beta4/endpoint.pb.go @@ -0,0 +1,407 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/base/resources/v1beta4/endpoint.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
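Editor note, illustrative only: go/node/types/resources/v1beta4/endpoint.go above makes the new Endpoints slice sortable by SequenceNumber. A minimal sketch, assuming the pkg.akt.dev/go import path used elsewhere in this diff and the Endpoint fields generated in endpoint.pb.go:

package main

import (
	"fmt"
	"sort"

	rtypes "pkg.akt.dev/go/node/types/resources/v1beta4"
)

func main() {
	eps := rtypes.Endpoints{
		{Kind: rtypes.Endpoint_RANDOM_PORT, SequenceNumber: 2},
		{Kind: rtypes.Endpoint_SHARED_HTTP, SequenceNumber: 0},
		{Kind: rtypes.Endpoint_LEASED_IP, SequenceNumber: 1},
	}

	// Dup copies the slice so sorting does not disturb the original ordering.
	sorted := eps.Dup()
	sort.Sort(sorted)

	for _, e := range sorted {
		fmt.Println(e.SequenceNumber, e.Kind) // printed in order 0, 1, 2
	}
}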
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// This describes how the endpoint is implemented when the lease is deployed +type Endpoint_Kind int32 + +const ( + // Describes an endpoint that becomes a Kubernetes Ingress + Endpoint_SHARED_HTTP Endpoint_Kind = 0 + // Describes an endpoint that becomes a Kubernetes NodePort + Endpoint_RANDOM_PORT Endpoint_Kind = 1 + // Describes an endpoint that becomes a leased IP + Endpoint_LEASED_IP Endpoint_Kind = 2 +) + +var Endpoint_Kind_name = map[int32]string{ + 0: "SHARED_HTTP", + 1: "RANDOM_PORT", + 2: "LEASED_IP", +} + +var Endpoint_Kind_value = map[string]int32{ + "SHARED_HTTP": 0, + "RANDOM_PORT": 1, + "LEASED_IP": 2, +} + +func (x Endpoint_Kind) String() string { + return proto.EnumName(Endpoint_Kind_name, int32(x)) +} + +func (Endpoint_Kind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_7476b67ee95ddefb, []int{0, 0} +} + +// Endpoint describes a publicly accessible IP service +type Endpoint struct { + Kind Endpoint_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=akash.base.resources.v1beta4.Endpoint_Kind" json:"kind,omitempty"` + SequenceNumber uint32 `protobuf:"varint,2,opt,name=sequence_number,json=sequenceNumber,proto3" json:"sequence_number" yaml:"sequence_number"` +} + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (m *Endpoint) String() string { return proto.CompactTextString(m) } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_7476b67ee95ddefb, []int{0} +} +func (m *Endpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Endpoint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Endpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoint.Merge(m, src) +} +func (m *Endpoint) XXX_Size() int { + return m.Size() +} +func (m *Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_Endpoint proto.InternalMessageInfo + +func (m *Endpoint) GetKind() Endpoint_Kind { + if m != nil { + return m.Kind + } + return Endpoint_SHARED_HTTP +} + +func (m *Endpoint) GetSequenceNumber() uint32 { + if m != nil { + return m.SequenceNumber + } + return 0 +} + +func init() { + proto.RegisterEnum("akash.base.resources.v1beta4.Endpoint_Kind", Endpoint_Kind_name, Endpoint_Kind_value) + proto.RegisterType((*Endpoint)(nil), "akash.base.resources.v1beta4.Endpoint") +} + +func init() { + proto.RegisterFile("akash/base/resources/v1beta4/endpoint.proto", fileDescriptor_7476b67ee95ddefb) +} + +var fileDescriptor_7476b67ee95ddefb = []byte{ + // 316 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4e, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x2d, + 0xd6, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd1, 0x4f, 0xcd, 0x4b, 0x29, 0xc8, 0xcf, 0xcc, + 0x2b, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x01, 0x2b, 0xd6, 0x03, 0x29, 0xd6, 0x83, + 0x2b, 0xd6, 0x83, 0x2a, 0x96, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd4, 0x07, 0xb1, 0x20, + 0x7a, 0x94, 0xbe, 0x30, 0x72, 0x71, 0xb8, 0x42, 0x8d, 0x11, 0xb2, 0xe7, 0x62, 0xc9, 0xce, 0xcc, + 0x4b, 0x91, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x33, 0xd2, 0xd6, 0xc3, 0x67, 0x9e, 
0x1e, 0x4c, 0x97, + 0x9e, 0x77, 0x66, 0x5e, 0x4a, 0x10, 0x58, 0xa3, 0x50, 0x06, 0x17, 0x7f, 0x71, 0x6a, 0x61, 0x69, + 0x6a, 0x5e, 0x72, 0x6a, 0x7c, 0x5e, 0x69, 0x6e, 0x52, 0x6a, 0x91, 0x04, 0x93, 0x02, 0xa3, 0x06, + 0xaf, 0x93, 0xfd, 0xa3, 0x7b, 0xf2, 0x7c, 0xc1, 0x50, 0x29, 0x3f, 0xb0, 0xcc, 0xab, 0x7b, 0xf2, + 0xe8, 0x8a, 0x3f, 0xdd, 0x93, 0x17, 0xab, 0x4c, 0xcc, 0xcd, 0xb1, 0x52, 0x42, 0x93, 0x50, 0x0a, + 0xe2, 0x2b, 0x46, 0xd1, 0xac, 0x64, 0xce, 0xc5, 0x02, 0xb2, 0x57, 0x88, 0x9f, 0x8b, 0x3b, 0xd8, + 0xc3, 0x31, 0xc8, 0xd5, 0x25, 0xde, 0x23, 0x24, 0x24, 0x40, 0x80, 0x01, 0x24, 0x10, 0xe4, 0xe8, + 0xe7, 0xe2, 0xef, 0x1b, 0x1f, 0xe0, 0x1f, 0x14, 0x22, 0xc0, 0x28, 0xc4, 0xcb, 0xc5, 0xe9, 0xe3, + 0xea, 0x18, 0xec, 0xea, 0x12, 0xef, 0x19, 0x20, 0xc0, 0x64, 0xc5, 0xf2, 0x62, 0x81, 0x3c, 0xa3, + 0x93, 0xeb, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, + 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x69, 0x17, 0x64, 0xa7, + 0xeb, 0x25, 0x66, 0x97, 0xe8, 0xa5, 0xa4, 0x96, 0xe9, 0xa7, 0xe7, 0xeb, 0xe7, 0xe5, 0xa7, 0xa4, + 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, 0x63, 0x46, 0x40, 0x12, 0x1b, 0x38, 0x10, 0x8d, 0x01, 0x01, + 0x00, 0x00, 0xff, 0xff, 0x0e, 0x2c, 0x03, 0x73, 0xa7, 0x01, 0x00, 0x00, +} + +func (this *Endpoint) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Endpoint) + if !ok { + that2, ok := that.(Endpoint) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Kind != that1.Kind { + return false + } + if this.SequenceNumber != that1.SequenceNumber { + return false + } + return true +} +func (m *Endpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SequenceNumber != 0 { + i = encodeVarintEndpoint(dAtA, i, uint64(m.SequenceNumber)) + i-- + dAtA[i] = 0x10 + } + if m.Kind != 0 { + i = encodeVarintEndpoint(dAtA, i, uint64(m.Kind)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintEndpoint(dAtA []byte, offset int, v uint64) int { + offset -= sovEndpoint(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Endpoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != 0 { + n += 1 + sovEndpoint(uint64(m.Kind)) + } + if m.SequenceNumber != 0 { + n += 1 + sovEndpoint(uint64(m.SequenceNumber)) + } + return n +} + +func sovEndpoint(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEndpoint(x uint64) (n int) { + return sovEndpoint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Endpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEndpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + m.Kind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEndpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kind |= Endpoint_Kind(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SequenceNumber", wireType) + } + m.SequenceNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEndpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SequenceNumber |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEndpoint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEndpoint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEndpoint(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEndpoint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEndpoint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEndpoint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEndpoint + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEndpoint + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEndpoint + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEndpoint = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEndpoint = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEndpoint = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/types/resources/v1beta4/gpu.pb.go b/go/node/types/resources/v1beta4/gpu.pb.go new file mode 100644 index 00000000..4b50b2d8 --- /dev/null +++ b/go/node/types/resources/v1beta4/gpu.pb.go @@ -0,0 +1,426 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: akash/base/resources/v1beta4/gpu.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + pkg_akt_dev_go_node_types_attributes_v1 "pkg.akt.dev/go/node/types/attributes/v1" + v1 "pkg.akt.dev/go/node/types/attributes/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GPU stores resource units and cpu config attributes +type GPU struct { + Units ResourceValue `protobuf:"bytes,1,opt,name=units,proto3" json:"units"` + Attributes pkg_akt_dev_go_node_types_attributes_v1.Attributes `protobuf:"bytes,2,rep,name=attributes,proto3,castrepeated=pkg.akt.dev/go/node/types/attributes/v1.Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` +} + +func (m *GPU) Reset() { *m = GPU{} } +func (m *GPU) String() string { return proto.CompactTextString(m) } +func (*GPU) ProtoMessage() {} +func (*GPU) Descriptor() ([]byte, []int) { + return fileDescriptor_9da7ea6b7073ef21, []int{0} +} +func (m *GPU) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GPU) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GPU.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GPU) XXX_Merge(src proto.Message) { + xxx_messageInfo_GPU.Merge(m, src) +} +func (m *GPU) XXX_Size() int { + return m.Size() +} +func (m *GPU) XXX_DiscardUnknown() { + xxx_messageInfo_GPU.DiscardUnknown(m) +} + +var xxx_messageInfo_GPU proto.InternalMessageInfo + +func (m *GPU) GetUnits() ResourceValue { + if m != nil { + return m.Units + } + return ResourceValue{} +} + +func (m *GPU) GetAttributes() pkg_akt_dev_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func init() { + proto.RegisterType((*GPU)(nil), "akash.base.resources.v1beta4.GPU") +} + +func init() { + proto.RegisterFile("akash/base/resources/v1beta4/gpu.proto", fileDescriptor_9da7ea6b7073ef21) +} + +var fileDescriptor_9da7ea6b7073ef21 = []byte{ + // 308 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4b, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x2d, + 0xd6, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd1, 0x4f, 0x2f, 0x28, 0xd5, 0x2b, 0x28, 0xca, + 0x2f, 0xc9, 0x17, 0x92, 0x01, 0xab, 0xd3, 0x03, 0xa9, 0xd3, 0x83, 0xab, 0xd3, 0x83, 0xaa, 0x93, + 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd4, 0x07, 0xb1, 0x20, 0x7a, 0xa4, 0x34, 0x90, 0xcc, + 0x4e, 0x2c, 0x29, 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0x01, 0x1b, 0x8e, 0xe0, 0x41, 0x55, 0x1a, 0xe0, + 0x75, 0x05, 0x4c, 0xa4, 0x2c, 0x31, 0xa7, 0x14, 0xaa, 0x43, 0xa9, 0x97, 0x89, 0x8b, 0xd9, 0x3d, + 0x20, 0x54, 0xc8, 0x9d, 0x8b, 0xb5, 0x34, 0x2f, 0xb3, 0xa4, 0x58, 0x82, 0x51, 0x81, 0x51, 0x83, + 0xdb, 0x48, 0x5b, 0x0f, 0x9f, 0x3b, 0xf5, 0x82, 0xa0, 0x22, 0x61, 0x20, 0x93, 0x9c, 0x58, 0x4e, + 0xdc, 0x93, 0x67, 
0x08, 0x82, 0xe8, 0x17, 0xda, 0xc6, 0xc8, 0xc5, 0x85, 0x70, 0xa4, 0x04, 0x93, + 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x32, 0xb2, 0x71, 0x08, 0x59, 0xbd, 0x32, 0x43, 0x3d, 0x47, 0x18, + 0xcf, 0xa9, 0x10, 0x64, 0xcc, 0xab, 0x7b, 0xf2, 0x22, 0x08, 0x05, 0x3a, 0xf9, 0xb9, 0x99, 0x25, + 0xa9, 0xb9, 0x05, 0x25, 0x95, 0x9f, 0xee, 0xc9, 0x4b, 0x57, 0x26, 0xe6, 0xe6, 0x58, 0x29, 0x61, + 0x93, 0x55, 0x5a, 0x75, 0x5f, 0xde, 0xa8, 0x20, 0x3b, 0x5d, 0x2f, 0x31, 0xbb, 0x44, 0x2f, 0x25, + 0xb5, 0x4c, 0x3f, 0x3d, 0x5f, 0x3f, 0x2f, 0x3f, 0x25, 0x55, 0xbf, 0xa4, 0xb2, 0x20, 0xb5, 0x18, + 0x35, 0xd0, 0x10, 0x36, 0x16, 0x07, 0x21, 0xb9, 0xd4, 0x8a, 0xe5, 0xc5, 0x02, 0x79, 0x46, 0x27, + 0xd7, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, + 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xd2, 0xc6, 0x6d, 0x3e, 0x46, + 0x50, 0x27, 0xb1, 0x81, 0x43, 0xd7, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x4e, 0x30, 0x2d, 0x71, + 0x17, 0x02, 0x00, 0x00, +} + +func (this *GPU) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*GPU) + if !ok { + that2, ok := that.(GPU) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Units.Equal(&that1.Units) { + return false + } + if len(this.Attributes) != len(that1.Attributes) { + return false + } + for i := range this.Attributes { + if !this.Attributes[i].Equal(&that1.Attributes[i]) { + return false + } + } + return true +} +func (m *GPU) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GPU) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GPU) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGpu(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Units.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGpu(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGpu(dAtA []byte, offset int, v uint64) int { + offset -= sovGpu(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GPU) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Units.Size() + n += 1 + l + sovGpu(uint64(l)) + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovGpu(uint64(l)) + } + } + return n +} + +func sovGpu(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGpu(x uint64) (n int) { + return sovGpu(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GPU) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGpu + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum 
:= int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GPU: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GPU: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Units", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGpu + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGpu + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGpu + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Units.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGpu + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGpu + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGpu + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGpu(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGpu + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGpu(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGpu + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGpu + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGpu + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGpu + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGpu + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGpu + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGpu = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGpu = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGpu = fmt.Errorf("proto: unexpected end 
of group") +) diff --git a/go/node/types/resources/v1beta4/memory.pb.go b/go/node/types/resources/v1beta4/memory.pb.go new file mode 100644 index 00000000..69128bf8 --- /dev/null +++ b/go/node/types/resources/v1beta4/memory.pb.go @@ -0,0 +1,427 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/base/resources/v1beta4/memory.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + pkg_akt_dev_go_node_types_attributes_v1 "pkg.akt.dev/go/node/types/attributes/v1" + v1 "pkg.akt.dev/go/node/types/attributes/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Memory stores resource quantity and memory attributes +type Memory struct { + Quantity ResourceValue `protobuf:"bytes,1,opt,name=quantity,proto3" json:"size" yaml:"size"` + Attributes pkg_akt_dev_go_node_types_attributes_v1.Attributes `protobuf:"bytes,2,rep,name=attributes,proto3,castrepeated=pkg.akt.dev/go/node/types/attributes/v1.Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` +} + +func (m *Memory) Reset() { *m = Memory{} } +func (m *Memory) String() string { return proto.CompactTextString(m) } +func (*Memory) ProtoMessage() {} +func (*Memory) Descriptor() ([]byte, []int) { + return fileDescriptor_69cf709b8b3b1702, []int{0} +} +func (m *Memory) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Memory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Memory.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Memory) XXX_Merge(src proto.Message) { + xxx_messageInfo_Memory.Merge(m, src) +} +func (m *Memory) XXX_Size() int { + return m.Size() +} +func (m *Memory) XXX_DiscardUnknown() { + xxx_messageInfo_Memory.DiscardUnknown(m) +} + +var xxx_messageInfo_Memory proto.InternalMessageInfo + +func (m *Memory) GetQuantity() ResourceValue { + if m != nil { + return m.Quantity + } + return ResourceValue{} +} + +func (m *Memory) GetAttributes() pkg_akt_dev_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func init() { + proto.RegisterType((*Memory)(nil), "akash.base.resources.v1beta4.Memory") +} + +func init() { + proto.RegisterFile("akash/base/resources/v1beta4/memory.proto", fileDescriptor_69cf709b8b3b1702) +} + +var fileDescriptor_69cf709b8b3b1702 = []byte{ + // 329 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4c, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x2d, + 0xd6, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd1, 0xcf, 0x4d, 0xcd, 0xcd, 0x2f, 0xaa, 0xd4, + 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x01, 0x2b, 0xd5, 0x03, 0x29, 0xd5, 0x83, 0x2b, 0xd5, + 0x83, 0x2a, 0x95, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd4, 0x07, 0xb1, 0x20, 0x7a, 0xa4, + 0x34, 0x90, 0x8c, 0x4f, 
0x2c, 0x29, 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0x01, 0x9b, 0x8f, 0xe0, 0x41, + 0x55, 0x1a, 0xe0, 0x75, 0x08, 0x4c, 0xa4, 0x2c, 0x31, 0xa7, 0x14, 0xaa, 0x43, 0x69, 0x2d, 0x13, + 0x17, 0x9b, 0x2f, 0xd8, 0x81, 0x42, 0x29, 0x5c, 0x1c, 0x85, 0xa5, 0x89, 0x79, 0x25, 0x99, 0x25, + 0x95, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xdc, 0x46, 0xda, 0x7a, 0xf8, 0x5c, 0xab, 0x17, 0x04, 0x15, + 0x09, 0x03, 0x99, 0xe7, 0x24, 0x7d, 0xe2, 0x9e, 0x3c, 0xc3, 0xab, 0x7b, 0xf2, 0x2c, 0xc5, 0x99, + 0x55, 0xa9, 0x9f, 0xee, 0xc9, 0x73, 0x57, 0x26, 0xe6, 0xe6, 0x58, 0x29, 0x81, 0x78, 0x4a, 0x41, + 0x70, 0x93, 0x85, 0xb6, 0x31, 0x72, 0x71, 0x21, 0x3c, 0x21, 0xc1, 0xa4, 0xc0, 0xac, 0xc1, 0x6d, + 0xa4, 0x8c, 0x6c, 0x11, 0x42, 0x56, 0xaf, 0xcc, 0x50, 0xcf, 0x11, 0xc6, 0x73, 0x2a, 0x84, 0x5a, + 0x20, 0x82, 0x50, 0xa0, 0x93, 0x9f, 0x9b, 0x59, 0x92, 0x9a, 0x5b, 0x50, 0x52, 0xf9, 0xe9, 0x9e, + 0xbc, 0x34, 0xc4, 0x42, 0x6c, 0xb2, 0x4a, 0xab, 0xee, 0xcb, 0x1b, 0x15, 0x64, 0xa7, 0xeb, 0x25, + 0x66, 0x97, 0xe8, 0xa5, 0xa4, 0x96, 0xe9, 0xa7, 0xe7, 0xeb, 0xe7, 0xe5, 0xa7, 0xa4, 0xea, 0x97, + 0x54, 0x16, 0xa4, 0x16, 0xa3, 0x06, 0x2a, 0xc2, 0xc6, 0xe2, 0x20, 0x24, 0x97, 0x5a, 0xb1, 0xbc, + 0x58, 0x20, 0xcf, 0xe8, 0xe4, 0x7a, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, + 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, + 0xda, 0xb8, 0xcd, 0xc7, 0x88, 0x8a, 0x24, 0x36, 0x70, 0xe8, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x4b, 0x67, 0x0d, 0xa6, 0x3a, 0x02, 0x00, 0x00, +} + +func (this *Memory) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Memory) + if !ok { + that2, ok := that.(Memory) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Quantity.Equal(&that1.Quantity) { + return false + } + if len(this.Attributes) != len(that1.Attributes) { + return false + } + for i := range this.Attributes { + if !this.Attributes[i].Equal(&that1.Attributes[i]) { + return false + } + } + return true +} +func (m *Memory) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Memory) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Memory) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMemory(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Quantity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMemory(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintMemory(dAtA []byte, offset int, v uint64) int { + offset -= sovMemory(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Memory) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Quantity.Size() + n += 1 + l + sovMemory(uint64(l)) + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovMemory(uint64(l)) + } + } + 
return n +} + +func sovMemory(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMemory(x uint64) (n int) { + return sovMemory(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Memory) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMemory + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Memory: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Memory: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMemory + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMemory + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMemory + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Quantity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMemory + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMemory + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMemory + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMemory(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMemory + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMemory(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMemory + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMemory + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMemory + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
if length < 0 { + return 0, ErrInvalidLengthMemory + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMemory + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMemory + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMemory = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMemory = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMemory = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/types/resources/v1beta4/resources.go b/go/node/types/resources/v1beta4/resources.go new file mode 100644 index 00000000..27c5381d --- /dev/null +++ b/go/node/types/resources/v1beta4/resources.go @@ -0,0 +1,194 @@ +package v1beta4 + +import ( + "fmt" +) + +type UnitType int + +type Unit interface { + String() string +} + +type ResUnit interface { + Equals(ResUnit) bool + Add(unit ResUnit) bool +} + +type Volumes []Storage + +var _ Unit = (*CPU)(nil) +var _ Unit = (*Memory)(nil) +var _ Unit = (*Storage)(nil) +var _ Unit = (*GPU)(nil) + +func (m Resources) Validate() error { + if m.ID == 0 { + return fmt.Errorf("resources ID must be > 0") + } + + if m.CPU == nil { + return fmt.Errorf("CPU must not be nil") + } + + if m.GPU == nil { + return fmt.Errorf("GPU must not be nil") + } + + if m.Memory == nil { + return fmt.Errorf("memory must not be nil") + } + + if m.Storage == nil { + return fmt.Errorf("storage must not be nil") + } + + if m.Endpoints == nil { + return fmt.Errorf("endpoints must not be nil") + } + + return nil +} + +func (m Resources) Dup() Resources { + res := Resources{ + ID: m.ID, + CPU: m.CPU.Dup(), + GPU: m.GPU.Dup(), + Memory: m.Memory.Dup(), + Storage: m.Storage.Dup(), + Endpoints: m.Endpoints.Dup(), + } + + return res +} + +func (m Resources) In(rhs Resources) bool { + if !m.CPU.Equal(rhs.CPU) || !m.GPU.Equal(rhs.GPU) || + !m.Memory.Equal(rhs.Memory) || !m.Storage.Equal(rhs.Storage) { + return false + } + +loop: + for _, ep := range m.Endpoints { + for _, rep := range rhs.Endpoints { + if ep.Equal(rep) { + continue loop + } + } + + return false + } + + return true +} + +func (m CPU) Dup() *CPU { + return &CPU{ + Units: m.Units.Dup(), + Attributes: m.Attributes.Dup(), + } +} + +func (m Memory) Dup() *Memory { + return &Memory{ + Quantity: m.Quantity.Dup(), + Attributes: m.Attributes.Dup(), + } +} + +func (m Storage) Dup() *Storage { + return &Storage{ + Name: m.Name, + Quantity: m.Quantity.Dup(), + Attributes: m.Attributes.Dup(), + } +} + +func (m GPU) Dup() *GPU { + return &GPU{ + Units: m.Units.Dup(), + Attributes: m.Attributes.Dup(), + } +} + +func (m Volumes) Equal(rhs Volumes) bool { + for i := range m { + if !m[i].Equal(rhs[i]) { + return false + } + } + + return true +} + +func (m Volumes) Dup() Volumes { + res := make(Volumes, 0, len(m)) + + for _, storage := range m { + res = append(res, *storage.Dup()) + } + + return res +} + +func (m *CPU) EqualUnits(that *CPU) bool { + if that == nil { + return m == nil + } else if m == nil { + return false + } + + if !m.Units.Equal(&that.Units) { + return false + } + + return true +} + +func (m *GPU) EqualUnits(that *GPU) bool { + if that == nil { + return m == nil + } else if m == nil { + return false + } + + if !m.Units.Equal(&that.Units) { + return false + } + + return true +} + +func (m *Memory) EqualUnits(that *Memory) bool { + if that == nil { + return m 
== nil + } else if m == nil { + return false + } + + if !m.Quantity.Equal(&that.Quantity) { + return false + } + + return true +} + +func (m Volumes) EqualUnits(that Volumes) bool { + if len(m) != len(that) { + return false + } + + for idx, vol := range m { + if vol.Name != that[idx].Name { + return false + } + + if !vol.Quantity.Equal(&that[idx].Quantity) { + return false + } + + } + + return true +} diff --git a/go/node/types/v1beta3/resources.pb.go b/go/node/types/resources/v1beta4/resources.pb.go similarity index 79% rename from go/node/types/v1beta3/resources.pb.go rename to go/node/types/resources/v1beta4/resources.pb.go index 81ab82d0..f56dfe67 100644 --- a/go/node/types/v1beta3/resources.pb.go +++ b/go/node/types/resources/v1beta4/resources.pb.go @@ -1,12 +1,12 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta3/resources.proto +// source: akash/base/resources/v1beta4/resources.proto -package v1beta3 +package v1beta4 import ( fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" math "math" math_bits "math/bits" @@ -31,14 +31,14 @@ type Resources struct { Memory *Memory `protobuf:"bytes,3,opt,name=memory,proto3" json:"memory,omitempty" yaml:"memory,omitempty"` Storage Volumes `protobuf:"bytes,4,rep,name=storage,proto3,castrepeated=Volumes" json:"storage,omitempty" yaml:"storage,omitempty"` GPU *GPU `protobuf:"bytes,5,opt,name=gpu,proto3" json:"gpu,omitempty" yaml:"gpu,omitempty"` - Endpoints Endpoints `protobuf:"bytes,6,rep,name=endpoints,proto3,castrepeated=Endpoints" json:"endpoints" yaml:"endpoints"` + Endpoints Endpoints `protobuf:"bytes,6,rep,name=endpoints,proto3,castrepeated=Endpoints" json:"endpoints,omitempty" yaml:"endpoints,omitempty"` } func (m *Resources) Reset() { *m = Resources{} } func (m *Resources) String() string { return proto.CompactTextString(m) } func (*Resources) ProtoMessage() {} func (*Resources) Descriptor() ([]byte, []int) { - return fileDescriptor_4e8008eb7299c34f, []int{0} + return fileDescriptor_77fa11db58d9f9a3, []int{0} } func (m *Resources) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -110,45 +110,45 @@ func (m *Resources) GetEndpoints() Endpoints { } func init() { - proto.RegisterType((*Resources)(nil), "akash.base.v1beta3.Resources") + proto.RegisterType((*Resources)(nil), "akash.base.resources.v1beta4.Resources") } func init() { - proto.RegisterFile("akash/base/v1beta3/resources.proto", fileDescriptor_4e8008eb7299c34f) + proto.RegisterFile("akash/base/resources/v1beta4/resources.proto", fileDescriptor_77fa11db58d9f9a3) } -var fileDescriptor_4e8008eb7299c34f = []byte{ - // 478 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xc1, 0x6e, 0xd3, 0x30, - 0x18, 0xc7, 0x9b, 0xb4, 0x74, 0x6a, 0xa6, 0x49, 0x23, 0x9a, 0xb4, 0x50, 0xa6, 0xb8, 0xe4, 0x54, - 0x24, 0x48, 0x44, 0xcb, 0x01, 0xed, 0x84, 0x32, 0xd0, 0xc4, 0x01, 0x69, 0x0a, 0x1a, 0x07, 0x2e, - 0x28, 0x4d, 0x2c, 0x2f, 0xda, 0x12, 0x5b, 0xb1, 0xcd, 0xd4, 0xb7, 0xe0, 0x11, 0x38, 0xf3, 0x24, - 0x3d, 0xf6, 0xc8, 0xc9, 0xa0, 0xf4, 0x82, 0x7a, 0xec, 0x13, 0xa0, 0xc4, 0xce, 0x46, 0x37, 0xc3, - 0x2d, 0xf1, 0xf7, 0xfb, 0xbe, 0x5f, 0xbe, 0x7f, 0x6c, 0x79, 0xf1, 0x65, 0x4c, 0x2f, 0x82, 0x59, - 0x4c, 0x61, 0xf0, 0xe5, 0xc5, 0x0c, 0xb2, 0x78, 0x1a, 0x94, 0x90, 0x62, 0x5e, 0x26, 0x90, 0xfa, - 0xa4, 0xc4, 0x0c, 0xdb, 0x76, 0xc3, 0xf8, 0x35, 0xe3, 0x2b, 0x66, 0x78, 0x80, 
0x30, 0xc2, 0x4d, - 0x39, 0xa8, 0x9f, 0x24, 0x39, 0x3c, 0xd2, 0x4c, 0x4b, 0x08, 0xff, 0x4f, 0x15, 0xdd, 0x54, 0x81, - 0xa6, 0x9a, 0xc3, 0x1c, 0x97, 0x73, 0x05, 0x8c, 0x34, 0x00, 0x65, 0xb8, 0x8c, 0x11, 0x54, 0xc4, - 0x13, 0x0d, 0x01, 0x8b, 0x94, 0xe0, 0xac, 0x60, 0x12, 0xf1, 0x96, 0x3d, 0x6b, 0x10, 0xb5, 0xfb, - 0xd9, 0x4f, 0x2d, 0x33, 0x4b, 0x1d, 0x63, 0x64, 0x8c, 0xf7, 0xc2, 0x47, 0x95, 0x00, 0xe6, 0xbb, - 0x37, 0x6b, 0x01, 0xcc, 0x2c, 0xdd, 0x08, 0x30, 0x98, 0xc7, 0xf9, 0xd5, 0xb1, 0x97, 0xa5, 0x5e, - 0x64, 0x66, 0xa9, 0xfd, 0xd9, 0xea, 0x26, 0x84, 0x3b, 0xe6, 0xc8, 0x18, 0xef, 0x4e, 0x0e, 0xfd, - 0xfb, 0x91, 0xf8, 0x27, 0x67, 0xe7, 0xe1, 0xcb, 0x85, 0x00, 0x46, 0x25, 0x40, 0xf7, 0xe4, 0xec, - 0x7c, 0x2d, 0xc0, 0x5e, 0x42, 0xf8, 0x33, 0x9c, 0x67, 0x0c, 0xe6, 0x84, 0xcd, 0x37, 0x02, 0x1c, - 0xc8, 0xa1, 0x5b, 0xc7, 0x5e, 0x54, 0x4f, 0xb6, 0x91, 0xd5, 0x97, 0xeb, 0x3a, 0xdd, 0xc6, 0x31, - 0xd4, 0x39, 0xde, 0x37, 0x44, 0x38, 0xad, 0x35, 0x6b, 0x01, 0xf6, 0x65, 0xc7, 0x96, 0xe2, 0x50, - 0x2a, 0xee, 0x56, 0xbc, 0x48, 0x8d, 0xb7, 0xaf, 0xad, 0x1d, 0x15, 0x9b, 0xd3, 0x1b, 0x75, 0xc7, - 0xbb, 0x93, 0xc7, 0x3a, 0xd3, 0x07, 0x89, 0x84, 0xaf, 0x17, 0x02, 0x74, 0xd6, 0x02, 0x3c, 0x54, - 0x3d, 0x5b, 0x2e, 0x47, 0xba, 0xee, 0x95, 0xbc, 0xef, 0x3f, 0xc1, 0xce, 0x47, 0x7c, 0xc5, 0x73, - 0x48, 0xa3, 0xd6, 0x56, 0x47, 0x88, 0x08, 0x77, 0x1e, 0xfc, 0x3b, 0xc2, 0xd3, 0xbf, 0x23, 0x3c, - 0x95, 0x11, 0x22, 0x7d, 0x84, 0xe8, 0x4e, 0x84, 0x88, 0x70, 0xbb, 0xb0, 0x06, 0xed, 0xef, 0xa6, - 0x4e, 0xbf, 0xd9, 0xed, 0x48, 0xa7, 0x79, 0xab, 0xa0, 0x70, 0xa2, 0x96, 0xbb, 0x6d, 0xdb, 0x08, - 0xb0, 0x2f, 0x05, 0x37, 0x47, 0xf5, 0x32, 0x83, 0xb6, 0x85, 0x46, 0xb7, 0xec, 0x71, 0xef, 0xf7, - 0x37, 0x60, 0x84, 0xd1, 0xa2, 0x72, 0x8d, 0x65, 0xe5, 0x1a, 0xbf, 0x2a, 0xd7, 0xf8, 0xba, 0x72, - 0x3b, 0xcb, 0x95, 0xdb, 0xf9, 0xb1, 0x72, 0x3b, 0x9f, 0x5e, 0xa1, 0x8c, 0x5d, 0xf0, 0x99, 0x9f, - 0xe0, 0x3c, 0x68, 0x3e, 0xe3, 0x79, 0x01, 0xd9, 0x35, 0x2e, 0x2f, 0xd5, 0x5b, 0x4c, 0xb2, 0x00, - 0xe1, 0xa0, 0xc0, 0x29, 0x0c, 0xd8, 0x9c, 0x40, 0xda, 0xde, 0xda, 0x59, 0xbf, 0xb9, 0xad, 0xd3, - 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x18, 0xb7, 0x7c, 0x65, 0x9f, 0x03, 0x00, 0x00, +var fileDescriptor_77fa11db58d9f9a3 = []byte{ + // 477 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x31, 0x6f, 0xd3, 0x40, + 0x14, 0xc7, 0x73, 0x4e, 0x9b, 0x2a, 0xae, 0x2a, 0x81, 0xa9, 0x84, 0x89, 0x90, 0x2f, 0x58, 0x50, + 0xa5, 0xb4, 0xb2, 0x45, 0xdb, 0xa9, 0x13, 0x72, 0x29, 0x15, 0x03, 0x52, 0x65, 0x54, 0x06, 0x36, + 0x27, 0x3e, 0x1d, 0x56, 0xea, 0xdc, 0xe1, 0x3b, 0x57, 0xca, 0xc8, 0xce, 0xc0, 0xc8, 0xc8, 0xcc, + 0x27, 0xc9, 0xd8, 0x91, 0xe9, 0x40, 0xce, 0x82, 0x32, 0xf6, 0x13, 0x20, 0xfb, 0x2e, 0x4d, 0xdd, + 0x56, 0xa7, 0x6c, 0xd6, 0xbd, 0xdf, 0x7b, 0x3f, 0xbf, 0xbf, 0x9e, 0xb9, 0x1b, 0x0d, 0x23, 0xf6, + 0xd9, 0xef, 0x47, 0x0c, 0xf9, 0x19, 0x62, 0x24, 0xcf, 0x06, 0x88, 0xf9, 0x17, 0xaf, 0xfa, 0x88, + 0x47, 0x07, 0x8b, 0x17, 0x8f, 0x66, 0x84, 0x13, 0xeb, 0x69, 0x45, 0x7b, 0x25, 0xed, 0x2d, 0x6a, + 0x8a, 0xee, 0x6c, 0x62, 0x82, 0x49, 0x05, 0xfa, 0xe5, 0x97, 0xec, 0xe9, 0x6c, 0x69, 0x0d, 0x03, + 0x9a, 0x2f, 0xc5, 0xe1, 0x6b, 0x6e, 0x5b, 0xcb, 0xa5, 0x28, 0x25, 0xd9, 0x58, 0xa1, 0x2f, 0xb5, + 0x28, 0xe3, 0x24, 0x8b, 0x30, 0x52, 0xec, 0x8e, 0x96, 0x45, 0xa3, 0x98, 0x92, 0x64, 0xc4, 0x25, + 0xec, 0xfe, 0x58, 0x35, 0xdb, 0xe1, 0x1c, 0xb2, 0xb6, 0x4d, 0x23, 0x89, 0x6d, 0xd0, 0x05, 0xbd, + 0x8d, 0xe0, 0x49, 0x21, 0xa0, 0xf1, 0xee, 0xcd, 0x4c, 0x40, 0x23, 0x89, 0xaf, 0x04, 0x6c, 0x8f, + 0xa3, 0xf4, 0xfc, 
0xd0, 0x4d, 0x62, 0x37, 0x34, 0x92, 0xd8, 0x4a, 0xcc, 0xe6, 0x80, 0xe6, 0xb6, + 0xd1, 0x05, 0xbd, 0xf5, 0xbd, 0x67, 0x9e, 0x2e, 0x4e, 0xef, 0xe8, 0xf4, 0x2c, 0x38, 0x98, 0x08, + 0x08, 0x0a, 0x01, 0x9b, 0x47, 0xa7, 0x67, 0x33, 0x01, 0x37, 0x06, 0x34, 0xdf, 0x25, 0x69, 0xc2, + 0x51, 0x4a, 0xf9, 0xf8, 0x4a, 0xc0, 0x4d, 0x39, 0xbe, 0xf6, 0xec, 0x86, 0xa5, 0xc3, 0xfa, 0x62, + 0xb6, 0x64, 0x18, 0x76, 0xb3, 0xb2, 0x3d, 0xd7, 0xdb, 0xde, 0x57, 0x6c, 0xb0, 0x5f, 0x0a, 0x67, + 0x02, 0x3e, 0x90, 0xbd, 0x35, 0xd9, 0x63, 0x29, 0xbb, 0x5d, 0x71, 0x43, 0x25, 0xb2, 0xbe, 0x02, + 0x73, 0x4d, 0xa5, 0x6a, 0xaf, 0x74, 0x9b, 0xbd, 0xf5, 0xbd, 0x17, 0x7a, 0xe9, 0x07, 0x09, 0x07, + 0xaf, 0x27, 0x02, 0x36, 0x66, 0x02, 0x3e, 0x54, 0xdd, 0x35, 0xad, 0x2d, 0xb5, 0x77, 0x4a, 0xee, + 0xaf, 0x3f, 0x70, 0xed, 0x23, 0x39, 0xcf, 0x53, 0xc4, 0xc2, 0xb9, 0xb7, 0x4c, 0x18, 0xd3, 0xdc, + 0x5e, 0x5d, 0x26, 0xe1, 0x93, 0x9b, 0x09, 0x9f, 0xc8, 0x84, 0xf1, 0xfd, 0x09, 0xe3, 0x5b, 0x09, + 0x63, 0x9a, 0x5b, 0xdf, 0x80, 0xd9, 0x9e, 0x1f, 0x06, 0xb3, 0x5b, 0xd5, 0xc2, 0x5b, 0x7a, 0xe3, + 0xb1, 0xc2, 0x83, 0xb7, 0x6a, 0xe3, 0x47, 0xd7, 0x03, 0x6a, 0xd6, 0x8e, 0xb4, 0xde, 0x53, 0x2c, + 0xb7, 0x6e, 0xcf, 0xc7, 0xb0, 0x70, 0xf1, 0x03, 0x87, 0x2b, 0xff, 0x7e, 0x42, 0x10, 0x1c, 0x4f, + 0x0a, 0x07, 0x5c, 0x16, 0x0e, 0xf8, 0x5b, 0x38, 0xe0, 0xfb, 0xd4, 0x69, 0x5c, 0x4e, 0x9d, 0xc6, + 0xef, 0xa9, 0xd3, 0xf8, 0xb4, 0x43, 0x87, 0xd8, 0x8b, 0x86, 0xdc, 0x8b, 0xd1, 0x85, 0x8f, 0x89, + 0x3f, 0x22, 0x31, 0xf2, 0xf9, 0x98, 0x22, 0x76, 0xf7, 0xe0, 0xfb, 0xad, 0xea, 0xd0, 0xf7, 0xff, + 0x07, 0x00, 0x00, 0xff, 0xff, 0x97, 0x65, 0x75, 0xe3, 0x20, 0x04, 0x00, 0x00, } func (this *Resources) Equal(that interface{}) bool { diff --git a/go/node/types/v1beta3/resources_test.go b/go/node/types/resources/v1beta4/resources_test.go similarity index 95% rename from go/node/types/v1beta3/resources_test.go rename to go/node/types/resources/v1beta4/resources_test.go index f9a15ab9..e461d5e7 100644 --- a/go/node/types/v1beta3/resources_test.go +++ b/go/node/types/resources/v1beta4/resources_test.go @@ -1,9 +1,11 @@ -package v1beta3 +package v1beta4 import ( "testing" "github.com/stretchr/testify/require" + + attr "pkg.akt.dev/go/node/types/attributes/v1" ) func TestVolumes_Dup(t *testing.T) { @@ -11,7 +13,7 @@ func TestVolumes_Dup(t *testing.T) { Storage{ Name: "default", Quantity: NewResourceValue(100), - Attributes: Attributes{}, + Attributes: attr.Attributes{}, }, } @@ -25,7 +27,7 @@ func TestVolumes_Equal(t *testing.T) { Storage{ Name: "default", Quantity: NewResourceValue(100), - Attributes: Attributes{}, + Attributes: attr.Attributes{}, }, } diff --git a/go/node/types/resources/v1beta4/resourcevalue.go b/go/node/types/resources/v1beta4/resourcevalue.go new file mode 100644 index 00000000..59df72ee --- /dev/null +++ b/go/node/types/resources/v1beta4/resourcevalue.go @@ -0,0 +1,58 @@ +package v1beta4 + +import ( + "errors" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +var ( + ErrOverflow = errors.New("resource value overflow") + ErrCannotSub = errors.New("cannot subtract resources when lhs does not have same units as rhs") + ErrNegativeResult = errors.New("result of subtraction is negative") +) + +/* +ResourceValue the big point of this small change is to ensure math operations on resources +not resulting with negative value which panic on unsigned types as well as overflow which leads to panic too +instead reasonable error is returned. 
+Each resource using this type as value can take extra advantage of it to check upper bounds +For example in SDL v1 CPU units were handled as uint32 and operation like math.MaxUint32 + 2 +would cause application to panic. But nowadays + const CPULimit = math.MaxUint32 + + func (c *CPU) add(rhs CPU) error { + res, err := c.Units.add(rhs.Units) + if err != nil { + return err + } + + if res.Units.Value() > CPULimit { + return ErrOverflow + } + + c.Units = res + + return nil + } +*/ + +func NewResourceValue(val uint64) ResourceValue { + res := ResourceValue{ + Val: sdk.NewIntFromUint64(val), + } + + return res +} + +func (m ResourceValue) Value() uint64 { + return m.Val.Uint64() +} + +func (m ResourceValue) Dup() ResourceValue { + res := ResourceValue{ + Val: sdk.NewIntFromBigInt(m.Val.BigInt()), + } + + return res +} diff --git a/go/node/types/resources/v1beta4/resourcevalue.pb.go b/go/node/types/resources/v1beta4/resourcevalue.pb.go new file mode 100644 index 00000000..7e6108fb --- /dev/null +++ b/go/node/types/resources/v1beta4/resourcevalue.pb.go @@ -0,0 +1,343 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/base/resources/v1beta4/resourcevalue.proto + +package v1beta4 + +import ( + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Unit stores cpu, memory and storage metrics +type ResourceValue struct { + Val github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,1,opt,name=val,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"val"` +} + +func (m *ResourceValue) Reset() { *m = ResourceValue{} } +func (m *ResourceValue) String() string { return proto.CompactTextString(m) } +func (*ResourceValue) ProtoMessage() {} +func (*ResourceValue) Descriptor() ([]byte, []int) { + return fileDescriptor_ba63ba8298019f93, []int{0} +} +func (m *ResourceValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceValue.Merge(m, src) +} +func (m *ResourceValue) XXX_Size() int { + return m.Size() +} +func (m *ResourceValue) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceValue proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ResourceValue)(nil), "akash.base.resources.v1beta4.ResourceValue") +} + +func init() { + proto.RegisterFile("akash/base/resources/v1beta4/resourcevalue.proto", fileDescriptor_ba63ba8298019f93) +} + +var fileDescriptor_ba63ba8298019f93 = []byte{ + // 226 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x48, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x2d, + 0xd6, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0x81, 0x8b, 0x94, 0x25, 0xe6, 0x94, 0xa6, 0xea, + 0x15, 0x14, 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x80, 0x75, 0xe8, 0x81, 0x74, 0xe8, 0xc1, 0x75, 0xe8, + 0x41, 0x75, 0x48, 0x89, 0xa4, 0xe7, 0xa7, 0xe7, 0x83, 0x15, 0xea, 0x83, 0x58, 0x10, 0x3d, 0x4a, + 0xe1, 0x5c, 0xbc, 0x41, 0x50, 0xa5, 0x61, 0x20, 0xa3, 0x84, 0x1c, 0xb8, 0x98, 0xcb, 0x12, 0x73, + 0x24, 0x18, 0x15, 0x18, 0x35, 0x78, 0x9c, 0xf4, 0x4e, 0xdc, 0x93, 0x67, 0xb8, 0x75, 0x4f, 0x5e, + 0x2d, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f, 0x57, 0x3f, 0x39, 0xbf, 0x38, 0x37, + 0xbf, 0x18, 0x4a, 0xe9, 0x16, 0xa7, 0x64, 0xeb, 0x97, 0x54, 0x16, 0xa4, 0x16, 0xeb, 0x79, 0xe6, + 0x95, 0x04, 0x81, 0xb4, 0x5a, 0xb1, 0xbc, 0x58, 0x20, 0xcf, 0xe8, 0xe4, 0x7a, 0xe2, 0x91, 0x1c, + 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, + 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0xda, 0x05, 0xd9, 0xe9, 0x7a, 0x89, 0xd9, 0x25, 0x7a, + 0x29, 0xa9, 0x65, 0xfa, 0xe9, 0xf9, 0xfa, 0x79, 0xf9, 0x29, 0xa9, 0x10, 0x33, 0x30, 0xfd, 0x99, + 0xc4, 0x06, 0x76, 0xa6, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x8f, 0x08, 0x67, 0x0e, 0x01, + 0x00, 0x00, +} + +func (this *ResourceValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceValue) + if !ok { + that2, ok := that.(ResourceValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Val.Equal(that1.Val) { + return false + } + return true +} +func (m *ResourceValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.Val.Size() + i -= size + if _, err := m.Val.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintResourcevalue(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintResourcevalue(dAtA []byte, offset int, v uint64) int { + offset -= sovResourcevalue(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ResourceValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Val.Size() + n += 1 + l + sovResourcevalue(uint64(l)) + return n +} + +func sovResourcevalue(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozResourcevalue(x uint64) (n int) { + return sovResourcevalue(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResourceValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResourcevalue + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResourcevalue + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthResourcevalue + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthResourcevalue + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Val.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResourcevalue(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthResourcevalue + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipResourcevalue(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResourcevalue + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResourcevalue + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } 
+ iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResourcevalue + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthResourcevalue + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupResourcevalue + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthResourcevalue + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthResourcevalue = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowResourcevalue = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupResourcevalue = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/types/resources/v1beta4/storage.pb.go b/go/node/types/resources/v1beta4/storage.pb.go new file mode 100644 index 00000000..ffecbdbb --- /dev/null +++ b/go/node/types/resources/v1beta4/storage.pb.go @@ -0,0 +1,483 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/base/resources/v1beta4/storage.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" + pkg_akt_dev_go_node_types_attributes_v1 "pkg.akt.dev/go/node/types/attributes/v1" + v1 "pkg.akt.dev/go/node/types/attributes/v1" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Storage stores resource quantity and storage attributes +type Storage struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` + Quantity ResourceValue `protobuf:"bytes,2,opt,name=quantity,proto3" json:"size" yaml:"size"` + Attributes pkg_akt_dev_go_node_types_attributes_v1.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=pkg.akt.dev/go/node/types/attributes/v1.Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` +} + +func (m *Storage) Reset() { *m = Storage{} } +func (m *Storage) String() string { return proto.CompactTextString(m) } +func (*Storage) ProtoMessage() {} +func (*Storage) Descriptor() ([]byte, []int) { + return fileDescriptor_b74847f898008a0e, []int{0} +} +func (m *Storage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Storage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Storage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Storage) XXX_Merge(src proto.Message) { + xxx_messageInfo_Storage.Merge(m, src) +} +func (m *Storage) XXX_Size() int { + return m.Size() +} +func (m *Storage) XXX_DiscardUnknown() { + xxx_messageInfo_Storage.DiscardUnknown(m) +} + +var xxx_messageInfo_Storage proto.InternalMessageInfo + +func (m *Storage) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Storage) GetQuantity() ResourceValue { + if m != nil { + return m.Quantity + } + return ResourceValue{} +} + +func (m *Storage) GetAttributes() pkg_akt_dev_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func init() { + proto.RegisterType((*Storage)(nil), "akash.base.resources.v1beta4.Storage") +} + +func init() { + proto.RegisterFile("akash/base/resources/v1beta4/storage.proto", fileDescriptor_b74847f898008a0e) +} + +var fileDescriptor_b74847f898008a0e = []byte{ + // 356 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4a, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x2d, + 0xd6, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd1, 0x2f, 0x2e, 0xc9, 0x2f, 0x4a, 0x4c, 0x4f, + 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x01, 0xab, 0xd5, 0x03, 0xa9, 0xd5, 0x83, 0xab, + 0xd5, 0x83, 0xaa, 0x95, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd4, 0x07, 0xb1, 0x20, 0x7a, + 0xa4, 0x34, 0x90, 0xcc, 0x4f, 0x2c, 0x29, 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0x01, 0x5b, 0x80, 0xe0, + 0x41, 0x55, 0x1a, 0xe0, 0x75, 0x09, 0x4c, 0xa4, 0x2c, 0x31, 0xa7, 0x14, 0xaa, 0x43, 0xe9, 0x36, + 0x13, 0x17, 0x7b, 0x30, 0xc4, 0x85, 0x42, 0xda, 0x5c, 0x2c, 0x79, 0x89, 0xb9, 0xa9, 0x12, 0x8c, + 0x0a, 0x8c, 0x1a, 0x9c, 0x4e, 0xe2, 0xaf, 0xee, 0xc9, 0x83, 0xf9, 0x9f, 0xee, 0xc9, 0x73, 0x57, + 0x26, 0xe6, 0xe6, 0x58, 0x29, 0x81, 0x78, 0x4a, 0x41, 0x60, 0x41, 0xa1, 0x14, 0x2e, 0x8e, 0xc2, + 0xd2, 0xc4, 0xbc, 0x92, 0xcc, 0x92, 0x4a, 0x09, 0x26, 0x05, 0x46, 0x0d, 0x6e, 0x23, 0x6d, 0x3d, + 0x7c, 0x7e, 0xd3, 0x0b, 0x82, 0x8a, 0x84, 0x81, 0x6c, 0x77, 0x92, 0x3e, 0x71, 0x4f, 0x9e, 0x01, + 0x64, 0x43, 0x71, 0x66, 0x15, 0x92, 0x0d, 0x20, 0x9e, 0x52, 0x10, 0xdc, 0x64, 0xa1, 0x6d, 0x8c, + 0x5c, 0x5c, 0x08, 0x2f, 0x4b, 0x30, 0x2b, 0x30, 0x6b, 0x70, 0x1b, 0x29, 0x23, 0x5b, 0x84, 0x90, + 0xd5, 
0x2b, 0x33, 0xd4, 0x73, 0x84, 0xf1, 0x9c, 0x0a, 0xa1, 0x16, 0x88, 0x20, 0x14, 0xe8, 0xe4, + 0xe7, 0x66, 0x96, 0xa4, 0xe6, 0x16, 0x94, 0x54, 0x7e, 0xba, 0x27, 0x2f, 0x0d, 0xb1, 0x10, 0x9b, + 0xac, 0xd2, 0xaa, 0xfb, 0xf2, 0x46, 0x05, 0xd9, 0xe9, 0x7a, 0x89, 0xd9, 0x25, 0x7a, 0x29, 0xa9, + 0x65, 0xfa, 0xe9, 0xf9, 0xfa, 0x79, 0xf9, 0x29, 0xa9, 0xfa, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0xa8, + 0x51, 0x80, 0xb0, 0xb1, 0x38, 0x08, 0xc9, 0xa5, 0x56, 0x2c, 0x2f, 0x16, 0xc8, 0x33, 0x3a, 0xb9, + 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, + 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x36, 0x6e, 0xf3, 0x31, 0x22, + 0x2e, 0x89, 0x0d, 0x1c, 0x57, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcc, 0xa2, 0x92, 0x81, + 0x69, 0x02, 0x00, 0x00, +} + +func (this *Storage) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Storage) + if !ok { + that2, ok := that.(Storage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if !this.Quantity.Equal(&that1.Quantity) { + return false + } + if len(this.Attributes) != len(that1.Attributes) { + return false + } + for i := range this.Attributes { + if !this.Attributes[i].Equal(&that1.Attributes[i]) { + return false + } + } + return true +} +func (m *Storage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Storage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Storage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStorage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.Quantity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStorage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintStorage(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintStorage(dAtA []byte, offset int, v uint64) int { + offset -= sovStorage(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Storage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovStorage(uint64(l)) + } + l = m.Quantity.Size() + n += 1 + l + sovStorage(uint64(l)) + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovStorage(uint64(l)) + } + } + return n +} + +func sovStorage(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozStorage(x uint64) (n int) { + return sovStorage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Storage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Storage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Storage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStorage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStorage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Quantity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStorage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStorage(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStorage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStorage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + 
case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStorage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthStorage + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupStorage + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthStorage + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthStorage = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStorage = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupStorage = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/types/v1beta1/attribute.go b/go/node/types/v1beta1/attribute.go deleted file mode 100644 index 98aace15..00000000 --- a/go/node/types/v1beta1/attribute.go +++ /dev/null @@ -1,138 +0,0 @@ -package v1beta1 - -import ( - "errors" - "reflect" - "regexp" - - "gopkg.in/yaml.v3" -) - -const ( - attributeNameRegexpString = `^[a-zA-Z][\w-]{1,30}[a-zA-Z0-9]$` -) - -var ( - ErrAttributesDuplicateKeys = errors.New("attributes cannot have duplicate keys") - ErrInvalidAttributeKey = errors.New("attribute key does not match regexp " + attributeNameRegexpString) -) - -var ( - attributeNameRegexp = regexp.MustCompile(attributeNameRegexpString) -) - -/* -Attributes purpose of using this type in favor of Cosmos's sdk.Attribute is -ability to later extend it with operators to support querying on things like -cpu/memory/storage attributes -At this moment type though is same as sdk.Attributes but all akash libraries were -turned to use a new one -*/ -type Attributes []Attribute -type AttributeValue string - -func NewStringAttribute(key, val string) Attribute { - return Attribute{ - Key: key, - Value: val, - } -} - -func (m *Attribute) String() string { - res, _ := yaml.Marshal(m) - return string(res) -} - -func (m *Attribute) Equal(rhs *Attribute) bool { - return reflect.DeepEqual(m, rhs) -} - -func (m Attribute) SubsetOf(rhs Attribute) bool { - if m.Key == rhs.Key && m.Value == rhs.Value { - return true - } - - return false -} - -func (attr Attributes) Validate() error { - store := make(map[string]bool) - - for i := range attr { - if !attributeNameRegexp.MatchString(attr[i].Key) { - return ErrInvalidAttributeKey - } - - if _, ok := store[attr[i].Key]; ok { - return ErrAttributesDuplicateKeys - } - - store[attr[i].Key] = true - } - - return nil -} - -// AttributesSubsetOf check if a is subset of that -// For example there are two yaml files being converted into these attributes -// example 1: a is subset of b -// --- -// // a -// // nolint: gofmt -// attributes: -// -// region: -// - us-east-1 -// -// --- -// b -// attributes: -// -// region: -// - us-east-1 -// - us-east-2 -// -// example 2: a is not subset of b -// attributes: -// -// region: -// - us-east-1 -// -// --- -// b -// attributes: -// -// region: -// - us-east-2 -// - us-east-3 -// -// example 3: a is subset of b -// attributes: -// -// region: -// - us-east-2 -// - us-east-3 -// -// --- -// b -// attributes: -// -// region: -// - us-east-2 -func AttributesSubsetOf(a, b Attributes) bool { -loop: - for _, req := range a { - for _, attr := range b { - if req.SubsetOf(attr) { - continue 
loop - } - } - return false - } - - return true -} - -func (attr Attributes) SubsetOf(that Attributes) bool { - return AttributesSubsetOf(attr, that) -} diff --git a/go/node/types/v1beta1/attribute.pb.go b/go/node/types/v1beta1/attribute.pb.go deleted file mode 100644 index 955ea105..00000000 --- a/go/node/types/v1beta1/attribute.pb.go +++ /dev/null @@ -1,812 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta1/attribute.proto - -package v1beta1 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Attribute represents key value pair -type Attribute struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty" yaml:"key"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty" yaml:"value"` -} - -func (m *Attribute) Reset() { *m = Attribute{} } -func (*Attribute) ProtoMessage() {} -func (*Attribute) Descriptor() ([]byte, []int) { - return fileDescriptor_90b8f964cf66c51d, []int{0} -} -func (m *Attribute) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Attribute.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Attribute) XXX_Merge(src proto.Message) { - xxx_messageInfo_Attribute.Merge(m, src) -} -func (m *Attribute) XXX_Size() int { - return m.Size() -} -func (m *Attribute) XXX_DiscardUnknown() { - xxx_messageInfo_Attribute.DiscardUnknown(m) -} - -var xxx_messageInfo_Attribute proto.InternalMessageInfo - -// SignedBy represents validation accounts that tenant expects signatures for provider attributes -// AllOf has precedence i.e. 
if there is at least one entry AnyOf is ignored regardless to how many -// entries there -// this behaviour to be discussed -type SignedBy struct { - // all_of all keys in this list must have signed attributes - AllOf []string `protobuf:"bytes,1,rep,name=all_of,json=allOf,proto3" json:"all_of" yaml:"allOf"` - // any_of at least of of the keys from the list must have signed attributes - AnyOf []string `protobuf:"bytes,2,rep,name=any_of,json=anyOf,proto3" json:"any_of" yaml:"anyOf"` -} - -func (m *SignedBy) Reset() { *m = SignedBy{} } -func (*SignedBy) ProtoMessage() {} -func (*SignedBy) Descriptor() ([]byte, []int) { - return fileDescriptor_90b8f964cf66c51d, []int{1} -} -func (m *SignedBy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SignedBy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SignedBy.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SignedBy) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignedBy.Merge(m, src) -} -func (m *SignedBy) XXX_Size() int { - return m.Size() -} -func (m *SignedBy) XXX_DiscardUnknown() { - xxx_messageInfo_SignedBy.DiscardUnknown(m) -} - -var xxx_messageInfo_SignedBy proto.InternalMessageInfo - -// PlacementRequirements -type PlacementRequirements struct { - // SignedBy list of keys that tenants expect to have signatures from - SignedBy SignedBy `protobuf:"bytes,1,opt,name=signed_by,json=signedBy,proto3" json:"signed_by" yaml:"signed_by"` - // Attribute list of attributes tenant expects from the provider - Attributes []Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes" yaml:"attributes"` -} - -func (m *PlacementRequirements) Reset() { *m = PlacementRequirements{} } -func (*PlacementRequirements) ProtoMessage() {} -func (*PlacementRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_90b8f964cf66c51d, []int{2} -} -func (m *PlacementRequirements) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PlacementRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PlacementRequirements.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PlacementRequirements) XXX_Merge(src proto.Message) { - xxx_messageInfo_PlacementRequirements.Merge(m, src) -} -func (m *PlacementRequirements) XXX_Size() int { - return m.Size() -} -func (m *PlacementRequirements) XXX_DiscardUnknown() { - xxx_messageInfo_PlacementRequirements.DiscardUnknown(m) -} - -var xxx_messageInfo_PlacementRequirements proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Attribute)(nil), "akash.base.v1beta1.Attribute") - proto.RegisterType((*SignedBy)(nil), "akash.base.v1beta1.SignedBy") - proto.RegisterType((*PlacementRequirements)(nil), "akash.base.v1beta1.PlacementRequirements") -} - -func init() { - proto.RegisterFile("akash/base/v1beta1/attribute.proto", fileDescriptor_90b8f964cf66c51d) -} - -var fileDescriptor_90b8f964cf66c51d = []byte{ - // 405 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xbf, 0xce, 0xd3, 0x30, - 0x14, 0xc5, 0x93, 0xef, 0xa3, 0x55, 0xe3, 0x22, 0x54, 0x22, 0x90, 0xaa, 0x0a, 0xec, 0xca, 0x12, - 0xd0, 0x85, 0x58, 0x2d, 0x0b, 0xea, 0x46, 0x5e, 0x00, 0x14, 0xb6, 
0x32, 0x14, 0xa7, 0x75, 0xd3, - 0x28, 0x69, 0x5c, 0x12, 0xa7, 0x28, 0x1b, 0x23, 0x23, 0x8f, 0xc0, 0xe3, 0x74, 0xec, 0xd8, 0x29, - 0x82, 0x74, 0x40, 0xea, 0x98, 0x27, 0x40, 0x71, 0xfe, 0x34, 0x12, 0x4c, 0x76, 0xce, 0x3d, 0xbf, - 0xdc, 0x7b, 0x6c, 0x03, 0x4c, 0x3d, 0x1a, 0x6d, 0x89, 0x4d, 0x23, 0x46, 0x0e, 0x53, 0x9b, 0x09, - 0x3a, 0x25, 0x54, 0x88, 0xd0, 0xb5, 0x63, 0xc1, 0x8c, 0x7d, 0xc8, 0x05, 0xd7, 0x75, 0xe9, 0x31, - 0x0a, 0x8f, 0x51, 0x79, 0x46, 0x4f, 0x1c, 0xee, 0x70, 0x59, 0x26, 0xc5, 0xae, 0x74, 0xe2, 0x4f, - 0x40, 0x7b, 0x57, 0xc3, 0xfa, 0x18, 0xdc, 0x7b, 0x2c, 0x19, 0xaa, 0x63, 0x75, 0xa2, 0x99, 0x8f, - 0xf2, 0x14, 0x81, 0x84, 0xee, 0xfc, 0x39, 0xf6, 0x58, 0x82, 0xad, 0xa2, 0xa4, 0xbf, 0x04, 0x9d, - 0x03, 0xf5, 0x63, 0x36, 0xbc, 0x93, 0x9e, 0x41, 0x9e, 0xa2, 0x87, 0xa5, 0x47, 0xca, 0xd8, 0x2a, - 0xcb, 0xf3, 0x07, 0xdf, 0x7f, 0x22, 0x05, 0x1f, 0x40, 0xef, 0xa3, 0xeb, 0x04, 0x6c, 0x6d, 0x26, - 0xfa, 0x14, 0x74, 0xa9, 0xef, 0x2f, 0xf9, 0x66, 0xa8, 0x8e, 0xef, 0x27, 0x9a, 0x39, 0xba, 0xa6, - 0xa8, 0x52, 0x6e, 0x3f, 0xa1, 0xbe, 0xff, 0x7e, 0x83, 0xad, 0x8e, 0x5c, 0x25, 0x12, 0x24, 0x05, - 0x72, 0xd7, 0x42, 0xa4, 0xd2, 0x42, 0x82, 0xa4, 0x44, 0x8a, 0xb5, 0xea, 0xfb, 0x47, 0x05, 0x4f, - 0x3f, 0xf8, 0x74, 0xc5, 0x76, 0x2c, 0x10, 0x16, 0xfb, 0x12, 0xbb, 0xa1, 0xdc, 0x46, 0xfa, 0x67, - 0xa0, 0x45, 0x72, 0xa2, 0xa5, 0x5d, 0xe6, 0xec, 0xcf, 0x9e, 0x19, 0xff, 0x1e, 0x96, 0x51, 0x8f, - 0x6d, 0xbe, 0x38, 0xa6, 0x48, 0xb9, 0xa6, 0xe8, 0x86, 0xe5, 0x29, 0x1a, 0x94, 0xad, 0x1b, 0x09, - 0x5b, 0xbd, 0xa8, 0xce, 0xb9, 0x01, 0xa0, 0xb9, 0x8d, 0x48, 0x0e, 0xde, 0x9f, 0x3d, 0xff, 0x5f, - 0x8b, 0xe6, 0xd8, 0xcd, 0x57, 0x55, 0x8f, 0x16, 0x98, 0xa7, 0xe8, 0x71, 0x95, 0xaf, 0xd1, 0xb0, - 0xd5, 0x32, 0x94, 0x49, 0xcd, 0xc5, 0xf9, 0x37, 0x54, 0xbe, 0x65, 0x50, 0x39, 0x66, 0x50, 0x3d, - 0x65, 0x50, 0xfd, 0x95, 0x41, 0xf5, 0xc7, 0x05, 0x2a, 0xa7, 0x0b, 0x54, 0xce, 0x17, 0xa8, 0x2c, - 0xde, 0x3a, 0xae, 0xd8, 0xc6, 0xb6, 0xb1, 0xe2, 0x3b, 0x22, 0xa7, 0x78, 0x1d, 0x30, 0xf1, 0x95, - 0x87, 0x5e, 0xf5, 0x45, 0xf7, 0x2e, 0x71, 0x38, 0x09, 0xf8, 0x9a, 0x11, 0x91, 0xec, 0x59, 0x54, - 0x3f, 0x2a, 0xbb, 0x2b, 0x5f, 0xc8, 0x9b, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe6, 0xf3, 0x05, - 0xbe, 0x71, 0x02, 0x00, 0x00, -} - -func (m *Attribute) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Attribute) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Attribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintAttribute(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintAttribute(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SignedBy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SignedBy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SignedBy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.AnyOf) > 0 { - for iNdEx := len(m.AnyOf) - 1; iNdEx >= 0; 
iNdEx-- { - i -= len(m.AnyOf[iNdEx]) - copy(dAtA[i:], m.AnyOf[iNdEx]) - i = encodeVarintAttribute(dAtA, i, uint64(len(m.AnyOf[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.AllOf) > 0 { - for iNdEx := len(m.AllOf) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllOf[iNdEx]) - copy(dAtA[i:], m.AllOf[iNdEx]) - i = encodeVarintAttribute(dAtA, i, uint64(len(m.AllOf[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *PlacementRequirements) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PlacementRequirements) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PlacementRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAttribute(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.SignedBy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAttribute(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintAttribute(dAtA []byte, offset int, v uint64) int { - offset -= sovAttribute(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Attribute) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovAttribute(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovAttribute(uint64(l)) - } - return n -} - -func (m *SignedBy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.AllOf) > 0 { - for _, s := range m.AllOf { - l = len(s) - n += 1 + l + sovAttribute(uint64(l)) - } - } - if len(m.AnyOf) > 0 { - for _, s := range m.AnyOf { - l = len(s) - n += 1 + l + sovAttribute(uint64(l)) - } - } - return n -} - -func (m *PlacementRequirements) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.SignedBy.Size() - n += 1 + l + sovAttribute(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovAttribute(uint64(l)) - } - } - return n -} - -func sovAttribute(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozAttribute(x uint64) (n int) { - return sovAttribute(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Attribute) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Attribute: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Attribute: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - 
var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAttribute(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAttribute - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SignedBy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SignedBy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SignedBy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllOf", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllOf = append(m.AllOf, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AnyOf", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - 
if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AnyOf = append(m.AnyOf, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAttribute(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAttribute - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PlacementRequirements) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PlacementRequirements: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PlacementRequirements: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SignedBy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SignedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAttribute(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAttribute - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAttribute(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAttribute - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAttribute - } - if iNdEx >= l { - return 0, 
io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAttribute - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthAttribute - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupAttribute - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthAttribute - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthAttribute = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAttribute = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupAttribute = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/types/v1beta1/attribute_test.go b/go/node/types/v1beta1/attribute_test.go deleted file mode 100644 index dc76b893..00000000 --- a/go/node/types/v1beta1/attribute_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package v1beta1_test - -import ( - "testing" - - "github.com/stretchr/testify/require" - - types "github.com/akash-network/akash-api/go/node/types/v1beta1" -) - -func TestAttributes_Validate(t *testing.T) { - attr := types.Attributes{ - {Key: "key"}, - {Key: "key"}, - } - - require.EqualError(t, attr.Validate(), types.ErrAttributesDuplicateKeys.Error()) - - // unsupported key symbol - attr = types.Attributes{ - {Key: "$"}, - } - - require.EqualError(t, attr.Validate(), types.ErrInvalidAttributeKey.Error()) - - // empty key - attr = types.Attributes{ - {Key: ""}, - } - - require.EqualError(t, attr.Validate(), types.ErrInvalidAttributeKey.Error()) - // to long key - attr = types.Attributes{ - {Key: "sdgkhaeirugaeroigheirghseiargfs3s"}, - } - - require.EqualError(t, attr.Validate(), types.ErrInvalidAttributeKey.Error()) -} - -func TestAttribute_Equal(t *testing.T) { - attr1 := &types.Attribute{Key: "key1", Value: "val1"} - attr2 := &types.Attribute{Key: "key1", Value: "val1"} - attr3 := &types.Attribute{Key: "key1", Value: "val2"} - - require.True(t, attr1.Equal(attr2)) - require.False(t, attr1.Equal(attr3)) -} - -func TestAttribute_SubsetOf(t *testing.T) { - attr1 := types.Attribute{Key: "key1", Value: "val1"} - attr2 := types.Attribute{Key: "key1", Value: "val1"} - attr3 := types.Attribute{Key: "key1", Value: "val2"} - - require.True(t, attr1.SubsetOf(attr2)) - require.False(t, attr1.SubsetOf(attr3)) -} - -func TestAttributes_SubsetOf(t *testing.T) { - attr1 := types.Attributes{ - {Key: "key1", Value: "val1"}, - } - - attr2 := types.Attributes{ - {Key: "key1", Value: "val1"}, - {Key: "key2", Value: "val2"}, - } - - attr3 := types.Attributes{ - {Key: "key1", Value: "val1"}, - {Key: "key2", Value: "val2"}, - {Key: "key3", Value: "val3"}, - {Key: "key4", Value: "val4"}, - } - - attr4 := types.Attributes{ - {Key: "key3", Value: "val3"}, - {Key: "key4", Value: "val4"}, - } - - require.True(t, attr1.SubsetOf(attr2)) - require.True(t, attr2.SubsetOf(attr3)) - require.False(t, attr1.SubsetOf(attr4)) -} diff --git a/go/node/types/v1beta1/endpoint.pb.go b/go/node/types/v1beta1/endpoint.pb.go deleted file mode 100644 index 3df47cd7..00000000 --- a/go/node/types/v1beta1/endpoint.pb.go +++ /dev/null @@ -1,358 +0,0 @@ -// Code generated by 
protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta1/endpoint.proto - -package v1beta1 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// This describes how the endpoint is implemented when the lease is deployed -type Endpoint_Kind int32 - -const ( - // Describes an endpoint that becomes a Kubernetes Ingress - Endpoint_SHARED_HTTP Endpoint_Kind = 0 - // Describes an endpoint that becomes a Kubernetes NodePort - Endpoint_RANDOM_PORT Endpoint_Kind = 1 -) - -var Endpoint_Kind_name = map[int32]string{ - 0: "SHARED_HTTP", - 1: "RANDOM_PORT", -} - -var Endpoint_Kind_value = map[string]int32{ - "SHARED_HTTP": 0, - "RANDOM_PORT": 1, -} - -func (x Endpoint_Kind) String() string { - return proto.EnumName(Endpoint_Kind_name, int32(x)) -} - -func (Endpoint_Kind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_07fb133899333c18, []int{0, 0} -} - -// Endpoint describes a publicly accessible IP service -type Endpoint struct { - Kind Endpoint_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=akash.base.v1beta1.Endpoint_Kind" json:"kind,omitempty"` -} - -func (m *Endpoint) Reset() { *m = Endpoint{} } -func (m *Endpoint) String() string { return proto.CompactTextString(m) } -func (*Endpoint) ProtoMessage() {} -func (*Endpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_07fb133899333c18, []int{0} -} -func (m *Endpoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Endpoint.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Endpoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_Endpoint.Merge(m, src) -} -func (m *Endpoint) XXX_Size() int { - return m.Size() -} -func (m *Endpoint) XXX_DiscardUnknown() { - xxx_messageInfo_Endpoint.DiscardUnknown(m) -} - -var xxx_messageInfo_Endpoint proto.InternalMessageInfo - -func (m *Endpoint) GetKind() Endpoint_Kind { - if m != nil { - return m.Kind - } - return Endpoint_SHARED_HTTP -} - -func init() { - proto.RegisterEnum("akash.base.v1beta1.Endpoint_Kind", Endpoint_Kind_name, Endpoint_Kind_value) - proto.RegisterType((*Endpoint)(nil), "akash.base.v1beta1.Endpoint") -} - -func init() { proto.RegisterFile("akash/base/v1beta1/endpoint.proto", fileDescriptor_07fb133899333c18) } - -var fileDescriptor_07fb133899333c18 = []byte{ - // 240 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0x4f, - 0xcd, 0x4b, 0x29, 0xc8, 0xcf, 0xcc, 0x2b, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x02, - 0x2b, 0xd1, 0x03, 0x29, 0xd1, 0x83, 0x2a, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x4b, 0xeb, - 0x83, 0x58, 0x10, 0x95, 0x4a, 0x85, 0x5c, 0x1c, 0xae, 0x50, 
0xbd, 0x42, 0xa6, 0x5c, 0x2c, 0xd9, - 0x99, 0x79, 0x29, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x7c, 0x46, 0x8a, 0x7a, 0x98, 0x86, 0xe8, 0xc1, - 0xd4, 0xea, 0x79, 0x67, 0xe6, 0xa5, 0x04, 0x81, 0x95, 0x2b, 0x69, 0x70, 0xb1, 0x80, 0x78, 0x42, - 0xfc, 0x5c, 0xdc, 0xc1, 0x1e, 0x8e, 0x41, 0xae, 0x2e, 0xf1, 0x1e, 0x21, 0x21, 0x01, 0x02, 0x0c, - 0x20, 0x81, 0x20, 0x47, 0x3f, 0x17, 0x7f, 0xdf, 0xf8, 0x00, 0xff, 0xa0, 0x10, 0x01, 0x46, 0x2b, - 0x96, 0x17, 0x0b, 0xe4, 0x19, 0x9d, 0x82, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, - 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, - 0x21, 0xca, 0x22, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f, 0x57, 0x1f, 0x6c, 0xb9, - 0x6e, 0x5e, 0x6a, 0x49, 0x79, 0x7e, 0x51, 0x36, 0x94, 0x97, 0x58, 0x90, 0xa9, 0x9f, 0x9e, 0xaf, - 0x9f, 0x97, 0x9f, 0x92, 0xaa, 0x5f, 0x52, 0x59, 0x90, 0x5a, 0x0c, 0xf3, 0x7f, 0x12, 0x1b, 0xd8, - 0x37, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x54, 0xa4, 0xd2, 0x76, 0x1c, 0x01, 0x00, 0x00, -} - -func (this *Endpoint) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Endpoint) - if !ok { - that2, ok := that.(Endpoint) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Kind != that1.Kind { - return false - } - return true -} -func (m *Endpoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Kind != 0 { - i = encodeVarintEndpoint(dAtA, i, uint64(m.Kind)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintEndpoint(dAtA []byte, offset int, v uint64) int { - offset -= sovEndpoint(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Endpoint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Kind != 0 { - n += 1 + sovEndpoint(uint64(m.Kind)) - } - return n -} - -func sovEndpoint(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozEndpoint(x uint64) (n int) { - return sovEndpoint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Endpoint) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEndpoint - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - m.Kind = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEndpoint - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ 
- m.Kind |= Endpoint_Kind(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipEndpoint(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEndpoint - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipEndpoint(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEndpoint - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEndpoint - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEndpoint - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthEndpoint - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupEndpoint - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthEndpoint - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthEndpoint = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowEndpoint = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupEndpoint = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/types/v1beta1/requirements.go b/go/node/types/v1beta1/requirements.go deleted file mode 100644 index 1e463bd0..00000000 --- a/go/node/types/v1beta1/requirements.go +++ /dev/null @@ -1,15 +0,0 @@ -package v1beta1 - -import ( - "gopkg.in/yaml.v3" -) - -func (m *SignedBy) String() string { - res, _ := yaml.Marshal(m) - return string(res) -} - -func (m *PlacementRequirements) String() string { - res, _ := yaml.Marshal(m) - return string(res) -} diff --git a/go/node/types/v1beta1/resource.go b/go/node/types/v1beta1/resource.go deleted file mode 100644 index 6e8aac92..00000000 --- a/go/node/types/v1beta1/resource.go +++ /dev/null @@ -1,303 +0,0 @@ -package v1beta1 - -import ( - "reflect" - - cosmos "github.com/cosmos/cosmos-sdk/types" -) - -type UnitType int - -type Unit interface { - String() string - equals(Unit) bool - add(Unit) error - sub(Unit) error - le(Unit) bool -} - -type ResUnit interface { - Equals(ResUnit) bool - Add(unit ResUnit) bool -} - -// Resources stores Unit details and Count value -type Resources struct { - Resources ResourceUnits `json:"resources"` - Count uint32 `json:"count"` -} - -// ResourceGroup is the interface that wraps GetName and GetResources methods -type ResourceGroup interface { - GetName() string - GetResources() []Resources -} - -var _ Unit = (*CPU)(nil) -var _ Unit = (*Memory)(nil) -var _ Unit = (*Storage)(nil) - -func (m ResourceUnits) deepcopy() ResourceUnits { - res := ResourceUnits{} - - if m.CPU != nil { - res.CPU = &CPU{ - Units: 
m.CPU.Units, - } - res.CPU.Attributes = make([]Attribute, len(m.CPU.Attributes)) - copy(res.CPU.Attributes, m.CPU.Attributes) - } else { - res.CPU = &CPU{ - Units: ResourceValue{ - Val: cosmos.NewInt(0), - }, - } - } - - if m.Memory != nil { - res.Memory = &Memory{ - Quantity: m.Memory.Quantity, - } - res.Memory.Attributes = make([]Attribute, len(m.Memory.Attributes)) - copy(res.Memory.Attributes, m.Memory.Attributes) - } else { - res.Memory = &Memory{ - Quantity: ResourceValue{ - Val: cosmos.NewInt(0), - }, - } - } - - if m.Storage != nil { - res.Storage = &Storage{ - Quantity: m.Storage.Quantity, - Attributes: nil, - } - res.Storage.Attributes = make([]Attribute, len(m.Storage.Attributes)) - copy(res.Storage.Attributes, m.Storage.Attributes) - } else { - res.Storage = &Storage{ - Quantity: ResourceValue{ - Val: cosmos.NewInt(0), - }, - } - } - - res.Endpoints = make([]Endpoint, len(m.Endpoints)) - copy(res.Endpoints, m.Endpoints) - - return res -} - -// AddUnit it rather searches for existing entry of the same type and sums values -// if type not found it appends -func (m ResourceUnits) Add(rhs ResourceUnits) (ResourceUnits, error) { - // Make a deep copy - res := m.deepcopy() - - if err := res.CPU.add(rhs.CPU); err != nil { - return ResourceUnits{}, err - } - - if err := res.Memory.add(rhs.Memory); err != nil { - return ResourceUnits{}, err - } - - if err := res.Storage.add(rhs.Storage); err != nil { - return ResourceUnits{}, err - } - - return res, nil -} - -// Sub tbd -func (m ResourceUnits) Sub(rhs ResourceUnits) (ResourceUnits, error) { - if (m.CPU == nil && rhs.CPU != nil) || - (m.Memory == nil && rhs.Memory != nil) || - (m.Storage == nil && rhs.Storage != nil) { - return ResourceUnits{}, errCannotSub - } - - // Make a deep copy - res := m.deepcopy() - - if err := res.CPU.sub(rhs.CPU); err != nil { - return ResourceUnits{}, err - } - - if err := res.Memory.sub(rhs.Memory); err != nil { - return ResourceUnits{}, err - } - - if err := res.Storage.sub(rhs.Storage); err != nil { - return ResourceUnits{}, err - } - - return res, nil -} - -func (m ResourceUnits) Equals(rhs ResourceUnits) bool { - return reflect.DeepEqual(m, rhs) -} - -func (m *CPU) equals(other Unit) bool { - rhs, valid := other.(*CPU) - if !valid { - return false - } - - if !m.Units.equals(rhs.Units) || len(m.Attributes) != len(rhs.Attributes) { - return false - } - - return reflect.DeepEqual(m.Attributes, rhs.Attributes) -} - -func (m *CPU) le(other Unit) bool { - rhs, valid := other.(*CPU) - if !valid { - return false - } - - return m.Units.le(rhs.Units) -} - -func (m *CPU) add(other Unit) error { - rhs, valid := other.(*CPU) - if !valid { - return nil - } - - res, err := m.Units.add(rhs.Units) - if err != nil { - return err - } - - m.Units = res - - return nil -} - -func (m *CPU) sub(other Unit) error { - rhs, valid := other.(*CPU) - if !valid { - return nil - } - - res, err := m.Units.sub(rhs.Units) - if err != nil { - return err - } - - m.Units = res - - return nil -} - -func (m *Memory) equals(other Unit) bool { - rhs, valid := other.(*Memory) - if !valid { - return false - } - - if !m.Quantity.equals(rhs.Quantity) || len(m.Attributes) != len(rhs.Attributes) { - return false - } - - return reflect.DeepEqual(m.Attributes, rhs.Attributes) -} - -func (m *Memory) le(other Unit) bool { - rhs, valid := other.(*Memory) - if !valid { - return false - } - - return m.Quantity.le(rhs.Quantity) -} - -func (m *Memory) add(other Unit) error { - rhs, valid := other.(*Memory) - if !valid { - return nil - } - - res, err := 
m.Quantity.add(rhs.Quantity) - if err != nil { - return err - } - - m.Quantity = res - - return nil -} - -func (m *Memory) sub(other Unit) error { - rhs, valid := other.(*Memory) - if !valid { - return nil - } - - res, err := m.Quantity.sub(rhs.Quantity) - if err != nil { - return err - } - - m.Quantity = res - - return nil -} - -func (m *Storage) equals(other Unit) bool { - rhs, valid := other.(*Storage) - if !valid { - return false - } - - if !m.Quantity.equals(rhs.Quantity) || len(m.Attributes) != len(rhs.Attributes) { - return false - } - - return reflect.DeepEqual(m.Attributes, rhs.Attributes) -} - -func (m *Storage) le(other Unit) bool { - rhs, valid := other.(*Storage) - if !valid { - return false - } - - return m.Quantity.le(rhs.Quantity) -} - -func (m *Storage) add(other Unit) error { - rhs, valid := other.(*Storage) - if !valid { - return nil - } - - res, err := m.Quantity.add(rhs.Quantity) - if err != nil { - return err - } - - m.Quantity = res - - return nil -} - -func (m *Storage) sub(other Unit) error { - rhs, valid := other.(*Storage) - if !valid { - return nil - } - - res, err := m.Quantity.sub(rhs.Quantity) - if err != nil { - return err - } - - m.Quantity = res - - return nil -} diff --git a/go/node/types/v1beta1/resource.pb.go b/go/node/types/v1beta1/resource.pb.go deleted file mode 100644 index 36c995a4..00000000 --- a/go/node/types/v1beta1/resource.pb.go +++ /dev/null @@ -1,1370 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta1/resource.proto - -package v1beta1 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// CPU stores resource units and cpu config attributes -type CPU struct { - Units ResourceValue `protobuf:"bytes,1,opt,name=units,proto3" json:"units"` - Attributes []Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty" yaml:"cpu,omitempty"` -} - -func (m *CPU) Reset() { *m = CPU{} } -func (m *CPU) String() string { return proto.CompactTextString(m) } -func (*CPU) ProtoMessage() {} -func (*CPU) Descriptor() ([]byte, []int) { - return fileDescriptor_c018ae61a1e1b59b, []int{0} -} -func (m *CPU) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CPU) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CPU.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CPU) XXX_Merge(src proto.Message) { - xxx_messageInfo_CPU.Merge(m, src) -} -func (m *CPU) XXX_Size() int { - return m.Size() -} -func (m *CPU) XXX_DiscardUnknown() { - xxx_messageInfo_CPU.DiscardUnknown(m) -} - -var xxx_messageInfo_CPU proto.InternalMessageInfo - -func (m *CPU) GetUnits() ResourceValue { - if m != nil { - return m.Units - } - return ResourceValue{} -} - -func (m *CPU) GetAttributes() []Attribute { - if m != nil { - return m.Attributes - } - return nil -} - -// Memory stores resource quantity and memory attributes -type Memory struct { - Quantity ResourceValue `protobuf:"bytes,1,opt,name=quantity,proto3" json:"size" yaml:"size"` - Attributes []Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty" yaml:"cpu,omitempty"` -} - -func (m *Memory) Reset() { *m = Memory{} } -func (m *Memory) String() string { return proto.CompactTextString(m) } -func (*Memory) ProtoMessage() {} -func (*Memory) Descriptor() ([]byte, []int) { - return fileDescriptor_c018ae61a1e1b59b, []int{1} -} -func (m *Memory) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Memory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Memory.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Memory) XXX_Merge(src proto.Message) { - xxx_messageInfo_Memory.Merge(m, src) -} -func (m *Memory) XXX_Size() int { - return m.Size() -} -func (m *Memory) XXX_DiscardUnknown() { - xxx_messageInfo_Memory.DiscardUnknown(m) -} - -var xxx_messageInfo_Memory proto.InternalMessageInfo - -func (m *Memory) GetQuantity() ResourceValue { - if m != nil { - return m.Quantity - } - return ResourceValue{} -} - -func (m *Memory) GetAttributes() []Attribute { - if m != nil { - return m.Attributes - } - return nil -} - -// Storage stores resource quantity and storage attributes -type Storage struct { - Quantity ResourceValue `protobuf:"bytes,1,opt,name=quantity,proto3" json:"size" yaml:"size"` - Attributes []Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty" yaml:"cpu,omitempty"` -} - -func (m *Storage) Reset() { *m = Storage{} } -func (m *Storage) String() string { return proto.CompactTextString(m) } -func (*Storage) ProtoMessage() {} -func (*Storage) Descriptor() ([]byte, []int) { - return fileDescriptor_c018ae61a1e1b59b, []int{2} -} -func (m *Storage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Storage) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Storage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Storage) XXX_Merge(src proto.Message) { - xxx_messageInfo_Storage.Merge(m, src) -} -func (m *Storage) XXX_Size() int { - return m.Size() -} -func (m *Storage) XXX_DiscardUnknown() { - xxx_messageInfo_Storage.DiscardUnknown(m) -} - -var xxx_messageInfo_Storage proto.InternalMessageInfo - -func (m *Storage) GetQuantity() ResourceValue { - if m != nil { - return m.Quantity - } - return ResourceValue{} -} - -func (m *Storage) GetAttributes() []Attribute { - if m != nil { - return m.Attributes - } - return nil -} - -// ResourceUnits describes all available resources types for deployment/node etc -// if field is nil resource is not present in the given data-structure -type ResourceUnits struct { - CPU *CPU `protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty" yaml:"cpu,omitempty"` - Memory *Memory `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty" yaml:"memory,omitempty"` - Storage *Storage `protobuf:"bytes,3,opt,name=storage,proto3" json:"storage,omitempty" yaml:"storage,omitempty"` - Endpoints []Endpoint `protobuf:"bytes,4,rep,name=endpoints,proto3" json:"endpoints" yaml:"endpoints"` -} - -func (m *ResourceUnits) Reset() { *m = ResourceUnits{} } -func (m *ResourceUnits) String() string { return proto.CompactTextString(m) } -func (*ResourceUnits) ProtoMessage() {} -func (*ResourceUnits) Descriptor() ([]byte, []int) { - return fileDescriptor_c018ae61a1e1b59b, []int{3} -} -func (m *ResourceUnits) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceUnits) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResourceUnits.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResourceUnits) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceUnits.Merge(m, src) -} -func (m *ResourceUnits) XXX_Size() int { - return m.Size() -} -func (m *ResourceUnits) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceUnits.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceUnits proto.InternalMessageInfo - -func (m *ResourceUnits) GetCPU() *CPU { - if m != nil { - return m.CPU - } - return nil -} - -func (m *ResourceUnits) GetMemory() *Memory { - if m != nil { - return m.Memory - } - return nil -} - -func (m *ResourceUnits) GetStorage() *Storage { - if m != nil { - return m.Storage - } - return nil -} - -func (m *ResourceUnits) GetEndpoints() []Endpoint { - if m != nil { - return m.Endpoints - } - return nil -} - -func init() { - proto.RegisterType((*CPU)(nil), "akash.base.v1beta1.CPU") - proto.RegisterType((*Memory)(nil), "akash.base.v1beta1.Memory") - proto.RegisterType((*Storage)(nil), "akash.base.v1beta1.Storage") - proto.RegisterType((*ResourceUnits)(nil), "akash.base.v1beta1.ResourceUnits") -} - -func init() { proto.RegisterFile("akash/base/v1beta1/resource.proto", fileDescriptor_c018ae61a1e1b59b) } - -var fileDescriptor_c018ae61a1e1b59b = []byte{ - // 517 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x94, 0x41, 0x6b, 0xd4, 0x40, - 0x14, 0xc7, 0x77, 0x9a, 0x75, 0xab, 0xb3, 0x14, 0x6a, 0x58, 0x68, 0xd8, 0x6a, 0xa6, 0x1d, 0x50, - 0x7a, 0xd0, 0x84, 
0xb6, 0x0a, 0x52, 0xf0, 0x60, 0x8a, 0x47, 0xa1, 0x44, 0xd6, 0x43, 0x2f, 0x32, - 0xd9, 0x0e, 0x69, 0xe8, 0x26, 0x13, 0x33, 0x93, 0x4a, 0xfc, 0x14, 0x7e, 0x04, 0x3f, 0x82, 0xdf, - 0xc0, 0x6b, 0x8f, 0x3d, 0x7a, 0x8a, 0x65, 0xf7, 0x22, 0x7b, 0xdc, 0x4f, 0x20, 0x99, 0x99, 0xec, - 0xba, 0x6b, 0x14, 0x6f, 0x82, 0xb7, 0xcc, 0xbc, 0xff, 0x7b, 0xbf, 0xf7, 0xf8, 0xbf, 0x0c, 0xdc, - 0x25, 0x17, 0x84, 0x9f, 0xbb, 0x01, 0xe1, 0xd4, 0xbd, 0xdc, 0x0f, 0xa8, 0x20, 0xfb, 0x6e, 0x46, - 0x39, 0xcb, 0xb3, 0x21, 0x75, 0xd2, 0x8c, 0x09, 0x66, 0x9a, 0x52, 0xe2, 0x54, 0x12, 0x47, 0x4b, - 0xfa, 0xbd, 0x90, 0x85, 0x4c, 0x86, 0xdd, 0xea, 0x4b, 0x29, 0xfb, 0xb8, 0xa1, 0x18, 0x11, 0x22, - 0x8b, 0x82, 0x5c, 0xe8, 0x6a, 0xfd, 0x87, 0x7f, 0x00, 0x5e, 0x92, 0x51, 0x5e, 0xeb, 0x9a, 0x1a, - 0xa3, 0xc9, 0x59, 0xca, 0xa2, 0x44, 0x28, 0x09, 0xfe, 0x02, 0xa0, 0x71, 0x7c, 0x32, 0x30, 0x9f, - 0xc3, 0x5b, 0x79, 0x12, 0x09, 0x6e, 0x81, 0x1d, 0xb0, 0xd7, 0x3d, 0xd8, 0x75, 0x7e, 0x6d, 0xd8, - 0xf1, 0x35, 0xe2, 0x4d, 0x85, 0xf0, 0xda, 0x57, 0x25, 0x6a, 0xf9, 0x2a, 0xcb, 0xe4, 0x10, 0xce, - 0x9b, 0xe4, 0xd6, 0xda, 0x8e, 0xb1, 0xd7, 0x3d, 0xb8, 0xdf, 0x54, 0xe3, 0x45, 0xad, 0xf2, 0x9e, - 0x54, 0xf9, 0xd3, 0x12, 0xf5, 0x16, 0x89, 0x8f, 0x58, 0x1c, 0x09, 0x1a, 0xa7, 0xa2, 0x98, 0x95, - 0xa8, 0x57, 0x90, 0x78, 0x74, 0x84, 0x87, 0x69, 0xbe, 0xb8, 0xc6, 0xfe, 0x4f, 0x98, 0xa3, 0xf6, - 0xf7, 0x4f, 0x08, 0xe0, 0x6f, 0x00, 0x76, 0x5e, 0xd1, 0x98, 0x65, 0x85, 0x79, 0x0a, 0x6f, 0xbf, - 0xcb, 0x49, 0x22, 0x22, 0x51, 0xfc, 0xfd, 0x1c, 0xdb, 0xba, 0x8f, 0x36, 0x8f, 0x3e, 0xd0, 0x59, - 0x89, 0xba, 0x8a, 0x5b, 0x9d, 0xb0, 0x3f, 0xaf, 0xf7, 0x2f, 0x27, 0xbc, 0x01, 0x70, 0xfd, 0xb5, - 0x60, 0x19, 0x09, 0xe9, 0xff, 0x3a, 0xe2, 0x67, 0x03, 0x6e, 0xd4, 0x3d, 0x0f, 0xe4, 0x46, 0xbd, - 0x85, 0xc6, 0x30, 0xcd, 0xf5, 0x8c, 0x5b, 0x4d, 0x5d, 0x1c, 0x9f, 0x0c, 0x24, 0x1f, 0x8c, 0x4b, - 0x54, 0xed, 0xf0, 0xb4, 0x44, 0x1b, 0x4b, 0xa0, 0xdf, 0xf2, 0xab, 0xca, 0x66, 0x08, 0x3b, 0xb1, - 0x5c, 0x1b, 0x6b, 0x4d, 0x32, 0xfa, 0x4d, 0x0c, 0xb5, 0x58, 0xde, 0x61, 0x85, 0x99, 0x96, 0x68, - 0x53, 0x65, 0x2c, 0x21, 0xb6, 0x14, 0x62, 0x35, 0x82, 0x7d, 0x5d, 0xde, 0x1c, 0xc1, 0x75, 0xae, - 0xdc, 0xb3, 0x0c, 0x49, 0xda, 0x6e, 0x22, 0x69, 0x83, 0xbd, 0xa7, 0x1a, 0x75, 0x57, 0xe7, 0x2c, - 0xb1, 0x2c, 0x6d, 0xdc, 0x6a, 0x08, 0xfb, 0x35, 0xc2, 0x24, 0xf0, 0x4e, 0xfd, 0x8b, 0x73, 0xab, - 0x2d, 0x3d, 0xbc, 0xd7, 0xc4, 0x7b, 0xa9, 0x45, 0xde, 0x03, 0x6d, 0xe1, 0x22, 0x6d, 0x56, 0xa2, - 0x4d, 0x05, 0x9a, 0x5f, 0x61, 0x7f, 0x11, 0x56, 0x96, 0x79, 0xfe, 0xd5, 0xd8, 0x06, 0xd7, 0x63, - 0x1b, 0xdc, 0x8c, 0x6d, 0xf0, 0x71, 0x62, 0xb7, 0xae, 0x27, 0x76, 0xeb, 0xeb, 0xc4, 0x6e, 0x9d, - 0x3e, 0x0b, 0x23, 0x71, 0x9e, 0x07, 0xce, 0x90, 0xc5, 0xae, 0x24, 0x3f, 0x4e, 0xa8, 0x78, 0xcf, - 0xb2, 0x0b, 0x7d, 0x22, 0x69, 0xe4, 0x86, 0xcc, 0x4d, 0xd8, 0x19, 0x75, 0x45, 0x91, 0x52, 0x5e, - 0x3f, 0x4e, 0x41, 0x47, 0x3e, 0x4a, 0x87, 0x3f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x0d, 0xbf, 0x30, - 0xf9, 0x52, 0x05, 0x00, 0x00, -} - -func (this *CPU) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CPU) - if !ok { - that2, ok := that.(CPU) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Units.Equal(&that1.Units) { - return false - } - if len(this.Attributes) != len(that1.Attributes) { - return false - } - for i := range this.Attributes { - if !this.Attributes[i].Equal(&that1.Attributes[i]) { - return false - } - } - return true -} -func (this *Memory) Equal(that 
interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Memory) - if !ok { - that2, ok := that.(Memory) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Quantity.Equal(&that1.Quantity) { - return false - } - if len(this.Attributes) != len(that1.Attributes) { - return false - } - for i := range this.Attributes { - if !this.Attributes[i].Equal(&that1.Attributes[i]) { - return false - } - } - return true -} -func (this *Storage) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Storage) - if !ok { - that2, ok := that.(Storage) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Quantity.Equal(&that1.Quantity) { - return false - } - if len(this.Attributes) != len(that1.Attributes) { - return false - } - for i := range this.Attributes { - if !this.Attributes[i].Equal(&that1.Attributes[i]) { - return false - } - } - return true -} -func (this *ResourceUnits) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResourceUnits) - if !ok { - that2, ok := that.(ResourceUnits) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.CPU.Equal(that1.CPU) { - return false - } - if !this.Memory.Equal(that1.Memory) { - return false - } - if !this.Storage.Equal(that1.Storage) { - return false - } - if len(this.Endpoints) != len(that1.Endpoints) { - return false - } - for i := range this.Endpoints { - if !this.Endpoints[i].Equal(&that1.Endpoints[i]) { - return false - } - } - return true -} -func (m *CPU) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CPU) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CPU) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Units.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Memory) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Memory) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Memory) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - 
size, err := m.Quantity.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Storage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Storage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Storage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Quantity.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ResourceUnits) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceUnits) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceUnits) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Endpoints) > 0 { - for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if m.Storage != nil { - { - size, err := m.Storage.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Memory != nil { - { - size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.CPU != nil { - { - size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintResource(dAtA []byte, offset int, v uint64) int { - offset -= sovResource(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CPU) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Units.Size() - n += 1 + l + sovResource(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovResource(uint64(l)) - } - } - return n -} - -func (m *Memory) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Quantity.Size() - n += 1 + l + sovResource(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovResource(uint64(l)) - } - } - return n -} - -func (m *Storage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Quantity.Size() - n += 1 + l + 
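The `MarshalToSizedBuffer` methods being deleted all follow the same gogoproto pattern: allocate a buffer of exactly `Size()` bytes, then write fields back to front, each length-delimited field followed (moving toward the start) by its length varint and a one-byte key such as `0xa` (field 1, wire type 2), `0x12` (field 2), `0x1a` (field 3) or `0x22` (field 4). A rough, self-contained sketch of that back-to-front technique with toy helpers, not the generated code itself:

```go
package main

import "fmt"

// encodeVarint mirrors the shape of the generated encodeVarintResource helper:
// it writes v immediately before offset and returns the new start index.
func encodeVarint(buf []byte, offset int, v uint64) int {
	offset -= varintSize(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = byte(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = byte(v)
	return base
}

func varintSize(v uint64) int {
	n := 1
	for v >= 1<<7 {
		v >>= 7
		n++
	}
	return n
}

func main() {
	payload := []byte("hi") // pretend this is an already-marshalled sub-message

	// 1 key byte + length varint + payload, the same arithmetic the Size() methods use.
	buf := make([]byte, 1+varintSize(uint64(len(payload)))+len(payload))

	i := len(buf)
	// Written back to front: payload first, then its length, then the key byte.
	i -= len(payload)
	copy(buf[i:], payload)
	i = encodeVarint(buf, i, uint64(len(payload)))
	i--
	buf[i] = 0xa // (field 1 << 3) | wire type 2 (length-delimited)

	fmt.Printf("% x\n", buf) // 0a 02 68 69
}
```

Writing from the end avoids a second pass to compute nested message lengths, which is why the generated code pre-computes `Size()` and fills the buffer in reverse.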
sovResource(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovResource(uint64(l)) - } - } - return n -} - -func (m *ResourceUnits) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.CPU != nil { - l = m.CPU.Size() - n += 1 + l + sovResource(uint64(l)) - } - if m.Memory != nil { - l = m.Memory.Size() - n += 1 + l + sovResource(uint64(l)) - } - if m.Storage != nil { - l = m.Storage.Size() - n += 1 + l + sovResource(uint64(l)) - } - if len(m.Endpoints) > 0 { - for _, e := range m.Endpoints { - l = e.Size() - n += 1 + l + sovResource(uint64(l)) - } - } - return n -} - -func sovResource(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozResource(x uint64) (n int) { - return sovResource(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CPU) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CPU: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CPU: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Units", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Units.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipResource(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthResource - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Memory) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
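The `sovResource` helper deleted here computes how many bytes a value occupies as an unsigned varint: `(bits.Len64(x|1) + 6) / 7`, i.e. the bit length rounded up to whole 7-bit groups, with `x|1` guarding the zero case. A small worked example of that formula:

```go
package main

import (
	"fmt"
	"math/bits"
)

// Same formula as the generated sovResource helper: bytes needed for a uvarint.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	fmt.Println(sov(0))   // 1: even zero takes one byte (x|1 keeps Len64 from returning 0)
	fmt.Println(sov(127)) // 1: fits in 7 bits
	fmt.Println(sov(128)) // 2: needs a continuation byte
	fmt.Println(sov(300)) // 2: 300 encodes as 0xAC 0x02
}
```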
dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Memory: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Memory: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Quantity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Quantity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipResource(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthResource - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Storage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Storage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Storage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Quantity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Quantity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
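Each generated `Unmarshal` loop above reads a key varint ("wire") and splits it into the field number and wire type with `wire >> 3` and `wire & 0x7`; wire type 2 marks a length-delimited field, and wire type 4 (end-group) is rejected as invalid for these messages. A tiny sketch of that decomposition:

```go
package main

import "fmt"

func main() {
	// The generated Unmarshal methods decode a key varint, then split it exactly like this.
	for _, wire := range []uint64{0xa, 0x12, 0x22} {
		fieldNum := int32(wire >> 3)
		wireType := int(wire & 0x7)
		fmt.Printf("key 0x%02x -> field %d, wire type %d\n", wire, fieldNum, wireType)
	}
	// key 0x0a -> field 1, wire type 2
	// key 0x12 -> field 2, wire type 2
	// key 0x22 -> field 4, wire type 2
}
```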
wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipResource(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthResource - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceUnits) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceUnits: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceUnits: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CPU == nil { - m.CPU = &CPU{} - } - if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Memory == nil { - m.Memory = &Memory{} - } - if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Storage == nil { - m.Storage = &Storage{} - } - if err := m.Storage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Endpoints = append(m.Endpoints, Endpoint{}) - if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipResource(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthResource - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipResource(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResource - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResource - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResource - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthResource - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupResource - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthResource - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthResource = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowResource = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupResource = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/types/v1beta1/resourcevalue.go b/go/node/types/v1beta1/resourcevalue.go deleted file mode 100644 index dcb58fd4..00000000 --- a/go/node/types/v1beta1/resourcevalue.go +++ /dev/null @@ -1,80 +0,0 @@ -package v1beta1 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" -) - -var ( - errOverflow = errors.Errorf("resource value overflow") - errCannotSub = errors.Errorf("cannot subtract resources when lhs does not have same units as rhs") - errNegativeResult = 
errors.Errorf("result of subtraction is negative") -) - -/* -ResourceValue the big point of this small change is to ensure math operations on resources -not resulting with negative value which panic on unsigned types as well as overflow which leads to panic too -instead reasonable error is returned. -Each resource using this type as value can take extra advantage of it to check upper bounds -For example in SDL v1 CPU units were handled as uint32 and operation like math.MaxUint32 + 2 -would cause application to panic. But nowadays - const CPULimit = math.MaxUint32 - - func (c *CPU) add(rhs CPU) error { - res, err := c.Units.add(rhs.Units) - if err != nil { - return err - } - - if res.Units.Value() > CPULimit { - return ErrOverflow - } - - c.Units = res - - return nil - } -*/ - -func NewResourceValue(val uint64) ResourceValue { - res := ResourceValue{ - Val: sdk.NewIntFromUint64(val), - } - - return res -} - -func (m ResourceValue) Value() uint64 { - return m.Val.Uint64() -} - -func (m ResourceValue) equals(rhs ResourceValue) bool { - return m.Val.Equal(rhs.Val) -} - -func (m ResourceValue) le(rhs ResourceValue) bool { - return m.Val.LTE(rhs.Val) -} - -func (m ResourceValue) add(rhs ResourceValue) (ResourceValue, error) { - res := m.Val - res = res.Add(rhs.Val) - - if res.Sign() == -1 { - return ResourceValue{}, errOverflow - } - - return ResourceValue{res}, nil -} - -func (m ResourceValue) sub(rhs ResourceValue) (ResourceValue, error) { - res := m.Val - - res = res.Sub(rhs.Val) - - if res.Sign() == -1 { - return ResourceValue{}, errNegativeResult - } - - return ResourceValue{res}, nil -} diff --git a/go/node/types/v1beta1/resourcevalue.pb.go b/go/node/types/v1beta1/resourcevalue.pb.go deleted file mode 100644 index 0809fce8..00000000 --- a/go/node/types/v1beta1/resourcevalue.pb.go +++ /dev/null @@ -1,343 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta1/resourcevalue.proto - -package v1beta1 - -import ( - fmt "fmt" - github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Unit stores cpu, memory and storage metrics -type ResourceValue struct { - Val github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,1,opt,name=val,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"val"` -} - -func (m *ResourceValue) Reset() { *m = ResourceValue{} } -func (m *ResourceValue) String() string { return proto.CompactTextString(m) } -func (*ResourceValue) ProtoMessage() {} -func (*ResourceValue) Descriptor() ([]byte, []int) { - return fileDescriptor_85efdc01289cdacd, []int{0} -} -func (m *ResourceValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResourceValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResourceValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceValue.Merge(m, src) -} -func (m *ResourceValue) XXX_Size() int { - return m.Size() -} -func (m *ResourceValue) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceValue.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceValue proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ResourceValue)(nil), "akash.base.v1beta1.ResourceValue") -} - -func init() { - proto.RegisterFile("akash/base/v1beta1/resourcevalue.proto", fileDescriptor_85efdc01289cdacd) -} - -var fileDescriptor_85efdc01289cdacd = []byte{ - // 227 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4b, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0x2f, - 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0xd5, 0x2b, 0x28, 0xca, - 0x2f, 0xc9, 0x17, 0x12, 0x02, 0xab, 0xd3, 0x03, 0xa9, 0xd3, 0x83, 0xaa, 0x93, 0x12, 0x49, 0xcf, - 0x4f, 0xcf, 0x07, 0x4b, 0xeb, 0x83, 0x58, 0x10, 0x95, 0x4a, 0xe1, 0x5c, 0xbc, 0x41, 0x50, 0x03, - 0xc2, 0x40, 0x06, 0x08, 0x39, 0x70, 0x31, 0x97, 0x25, 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, - 0x38, 0xe9, 0x9d, 0xb8, 0x27, 0xcf, 0x70, 0xeb, 0x9e, 0xbc, 0x5a, 0x7a, 0x66, 0x49, 0x46, 0x69, - 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x72, 0x7e, 0x71, 0x6e, 0x7e, 0x31, 0x94, 0xd2, 0x2d, 0x4e, - 0xc9, 0xd6, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0xf3, 0xcc, 0x2b, 0x09, 0x02, 0x69, 0xb5, 0x62, - 0x79, 0xb1, 0x40, 0x9e, 0xd1, 0x29, 0xe8, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, - 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, - 0xa2, 0x2c, 0x90, 0x0c, 0x03, 0xbb, 0x53, 0x37, 0x2f, 0xb5, 0xa4, 0x3c, 0xbf, 0x28, 0x1b, 0xca, - 0x4b, 0x2c, 0xc8, 0xd4, 0x4f, 0xcf, 0xd7, 0xcf, 0xcb, 0x4f, 0x49, 0x85, 0x18, 0x0d, 0xf3, 0x6a, - 0x12, 0x1b, 0xd8, 0xcd, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf9, 0xf5, 0x4a, 0x29, 0x07, - 0x01, 0x00, 0x00, -} - -func (this *ResourceValue) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResourceValue) - if !ok { - that2, ok := that.(ResourceValue) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Val.Equal(that1.Val) { - return false - } - return true -} -func (m *ResourceValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err 
:= m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size := m.Val.Size() - i -= size - if _, err := m.Val.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintResourcevalue(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintResourcevalue(dAtA []byte, offset int, v uint64) int { - offset -= sovResourcevalue(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ResourceValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Val.Size() - n += 1 + l + sovResourcevalue(uint64(l)) - return n -} - -func sovResourcevalue(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozResourcevalue(x uint64) (n int) { - return sovResourcevalue(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ResourceValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResourcevalue - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResourcevalue - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthResourcevalue - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthResourcevalue - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Val.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipResourcevalue(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthResourcevalue - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipResourcevalue(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResourcevalue - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResourcevalue - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if 
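The `sozResourcevalue` helper deleted above zigzag-encodes a value before measuring it: `(x << 1) ^ (x >> 63)` (arithmetic shift) maps small negative numbers to small unsigned numbers, so they still encode as short varints. A worked example of the transform:

```go
package main

import "fmt"

// zigzag mirrors the transform inside the generated soz* helpers.
func zigzag(x int64) uint64 {
	return uint64((x << 1) ^ (x >> 63)) // x>>63 is an all-ones mask for negative x
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2} {
		fmt.Printf("%3d -> %d\n", v, zigzag(v))
	}
	// 0->0, -1->1, 1->2, -2->3, 2->4: small magnitudes stay small varints.
}
```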
dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResourcevalue - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthResourcevalue - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupResourcevalue - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthResourcevalue - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthResourcevalue = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowResourcevalue = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupResourcevalue = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/types/v1beta1/resourcevalue_test.go b/go/node/types/v1beta1/resourcevalue_test.go deleted file mode 100644 index 3f5f73ea..00000000 --- a/go/node/types/v1beta1/resourcevalue_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package v1beta1 - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestValidSum(t *testing.T) { - val1 := NewResourceValue(1) - val2 := NewResourceValue(1) - - res, err := val1.add(val2) - require.NoError(t, err) - require.Equal(t, uint64(2), res.Value()) -} - -func TestSubToNegative(t *testing.T) { - val1 := NewResourceValue(1) - val2 := NewResourceValue(2) - - _, err := val1.sub(val2) - require.Error(t, err) -} - -func TestResourceValueSubIsIdempotent(t *testing.T) { - val1 := NewResourceValue(100) - before := val1.String() - val2 := NewResourceValue(1) - - _, err := val1.sub(val2) - require.NoError(t, err) - after := val1.String() - - require.Equal(t, before, after) -} - -func TestCPUSubIsNotIdempotent(t *testing.T) { - val1 := &CPU{ - Units: NewResourceValue(100), - Attributes: nil, - } - - before := val1.String() - val2 := &CPU{ - Units: NewResourceValue(1), - Attributes: nil, - } - - err := val1.sub(val2) - require.NoError(t, err) - after := val1.String() - - require.NotEqual(t, before, after) -} diff --git a/go/node/types/v1beta2/attribute.go b/go/node/types/v1beta2/attribute.go deleted file mode 100644 index a61b2859..00000000 --- a/go/node/types/v1beta2/attribute.go +++ /dev/null @@ -1,285 +0,0 @@ -package v1beta2 - -import ( - "errors" - "reflect" - "regexp" - "strconv" - "strings" - - "gopkg.in/yaml.v3" -) - -const ( - moduleName = "akash" - attributeNameRegexpString = `^([a-zA-Z][\w\/\.\-]{1,62}\w)$` -) - -const ( - errAttributesDuplicateKeys uint32 = iota + 1 - errInvalidAttributeKey -) - -var ( - ErrAttributesDuplicateKeys = errors.New("attributes cannot have duplicate keys") - ErrInvalidAttributeKey = errors.New("attribute key does not match regexp") -) - -var ( - attributeNameRegexp = regexp.MustCompile(attributeNameRegexpString) -) - -/* -Attributes purpose of using this type in favor of Cosmos's sdk.Attribute is -ability to later extend it with operators to support querying on things like -cpu/memory/storage attributes -At this moment type though is same as sdk.Attributes but all akash libraries were -turned to use a new one -*/ -type Attributes []Attribute - -type AttributesGroup []Attributes - -type AttributeValue interface { - AsBool() (bool, bool) - AsString() (string, bool) -} - -type 
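The removed attribute.go validates attribute keys against `^([a-zA-Z][\w\/\.\-]{1,62}\w)$`: a leading letter, then word characters plus `/`, `.` and `-`, ending on a word character, roughly 3–64 characters overall. A short sketch (the sample keys below are made up for illustration) showing which keys pass:

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the removed attributeNameRegexpString.
var attributeNameRegexp = regexp.MustCompile(`^([a-zA-Z][\w\/\.\-]{1,62}\w)$`)

func main() {
	for _, key := range []string{
		"region",                       // ok
		"capabilities/storage/1/class", // ok: slashes allowed in the middle
		"1region",                      // rejected: must start with a letter
		"region-",                      // rejected: must end with a word character
	} {
		fmt.Printf("%-32s %v\n", key, attributeNameRegexp.MatchString(key))
	}
}
```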
attributeValue struct { - value string -} - -func (val attributeValue) AsBool() (bool, bool) { - if val.value == "" { - return false, false - } - - res, err := strconv.ParseBool(val.value) - if err != nil { - return false, false - } - - return res, true -} - -func (val attributeValue) AsString() (string, bool) { - if val.value == "" { - return "", false - } - - return val.value, true -} - -func NewStringAttribute(key, val string) Attribute { - return Attribute{ - Key: key, - Value: val, - } -} - -func (m *Attribute) String() string { - res, _ := yaml.Marshal(m) - return string(res) -} - -func (m *Attribute) Equal(rhs *Attribute) bool { - return reflect.DeepEqual(m, rhs) -} - -func (m Attribute) SubsetOf(rhs Attribute) bool { - if m.Key == rhs.Key && m.Value == rhs.Value { - return true - } - - return false -} - -func (attr Attributes) Validate() error { - store := make(map[string]bool) - - for i := range attr { - if !attributeNameRegexp.MatchString(attr[i].Key) { - return ErrInvalidAttributeKey - } - - if _, ok := store[attr[i].Key]; ok { - return ErrAttributesDuplicateKeys - } - - store[attr[i].Key] = true - } - - return nil -} - -func (attr Attributes) Dup() Attributes { - res := make(Attributes, len(attr)) - - for _, pair := range attr { - res = append(res, Attribute{ - Key: pair.Key, - Value: pair.Value, - }) - } - - return res -} - -// AttributesSubsetOf check if a is subset of b -// nolint: gofmt -// For example there are two yaml files being converted into these attributes -// example 1: a is subset of b -// --- -// // a -// attributes: -// -// region: -// - us-east-1 -// -// --- -// b -// attributes: -// -// region: -// - us-east-1 -// - us-east-2 -// -// example 2: a is not subset of b -// attributes: -// -// region: -// - us-east-1 -// -// --- -// b -// attributes: -// -// region: -// - us-east-2 -// - us-east-3 -// -// example 3: a is subset of b -// attributes: -// -// region: -// - us-east-2 -// - us-east-3 -// -// --- -// b -// attributes: -// -// region: -// - us-east-2 -func AttributesSubsetOf(a, b Attributes) bool { -loop: - for _, req := range a { - for _, attr := range b { - if req.SubsetOf(attr) { - continue loop - } - } - return false - } - - return true -} - -func (attr Attributes) SubsetOf(b Attributes) bool { - return AttributesSubsetOf(attr, b) -} - -func (attr Attributes) Find(glob string) AttributeValue { - // todo wildcard - - var val attributeValue - - for i := range attr { - if glob == attr[i].Key { - val.value = attr[i].Value - break - } - } - - return val -} - -func (attr Attributes) Iterate(prefix string, fn func(group, key, value string)) { - for _, item := range attr { - if strings.HasPrefix(item.Key, prefix) { - tokens := strings.SplitAfter(item.Key, "/") - tokens = tokens[1:] - fn(tokens[1], tokens[2], item.Value) - } - } -} - -// GetCapabilitiesGroup -// -// example -// capabilities/storage/1/persistent: true -// capabilities/storage/1/class: io1 -// capabilities/storage/2/persistent: false -// -// nolint: gofmt -// returns -// - - persistent: true -// class: nvme -// - - persistent: false -func (attr Attributes) GetCapabilitiesGroup(prefix string) AttributesGroup { - var res AttributesGroup // nolint:prealloc - - groups := make(map[string]Attributes) - - for _, item := range attr { - if !strings.HasPrefix(item.Key, "capabilities/"+prefix) { - continue - } - - tokens := strings.SplitAfter(strings.TrimPrefix(item.Key, "capabilities/"), "/") - // skip malformed attributes. really? 
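The `AttributesSubsetOf` comment above spells out the rule with YAML examples: `a` is a subset of `b` when every key/value pair of `a` has an exact match in `b`. A self-contained sketch of the same matching loop, using local stand-in types rather than the removed v1beta2 ones:

```go
package main

import "fmt"

// Local stand-ins mirroring the removed v1beta2 types.
type Attribute struct{ Key, Value string }
type Attributes []Attribute

// attributesSubsetOf mirrors the removed AttributesSubsetOf: every pair in a
// must have an exact key/value match somewhere in b.
func attributesSubsetOf(a, b Attributes) bool {
loop:
	for _, req := range a {
		for _, attr := range b {
			if req == attr {
				continue loop
			}
		}
		return false
	}
	return true
}

func main() {
	a := Attributes{{Key: "region", Value: "us-east-1"}}
	b := Attributes{{Key: "region", Value: "us-east-1"}, {Key: "region", Value: "us-east-2"}}
	c := Attributes{{Key: "region", Value: "us-east-2"}}

	fmt.Println(attributesSubsetOf(a, b)) // true: example 1 from the comment above
	fmt.Println(attributesSubsetOf(a, c)) // false: example 2, c has no us-east-1
}
```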
- if len(tokens) != 3 { - continue - } - - // filter out prefix name - tokens = tokens[1:] - - group := groups[tokens[0]] - if group == nil { - group = Attributes{} - } - - group = append(group, Attribute{ - Key: tokens[1], - Value: item.Value, - }) - - groups[tokens[0]] = group - } - - for _, group := range groups { - res = append(res, group) - } - - return res -} - -// IN check if given attributes are in attributes group -// AttributesGroup for storage -// - persistent: true -// class: beta1 -// - persistent: true -// class: beta2 -// -// that -// - persistent: true -// class: beta1 -func (attr Attributes) IN(group AttributesGroup) bool { - for _, group := range group { - if attr.SubsetOf(group) { - return true - } - } - return false -} diff --git a/go/node/types/v1beta2/attribute.pb.go b/go/node/types/v1beta2/attribute.pb.go deleted file mode 100644 index 5adc36ce..00000000 --- a/go/node/types/v1beta2/attribute.pb.go +++ /dev/null @@ -1,812 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta2/attribute.proto - -package v1beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Attribute represents key value pair -type Attribute struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty" yaml:"key"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty" yaml:"value"` -} - -func (m *Attribute) Reset() { *m = Attribute{} } -func (*Attribute) ProtoMessage() {} -func (*Attribute) Descriptor() ([]byte, []int) { - return fileDescriptor_0a99fe9842d40254, []int{0} -} -func (m *Attribute) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Attribute.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Attribute) XXX_Merge(src proto.Message) { - xxx_messageInfo_Attribute.Merge(m, src) -} -func (m *Attribute) XXX_Size() int { - return m.Size() -} -func (m *Attribute) XXX_DiscardUnknown() { - xxx_messageInfo_Attribute.DiscardUnknown(m) -} - -var xxx_messageInfo_Attribute proto.InternalMessageInfo - -// SignedBy represents validation accounts that tenant expects signatures for provider attributes -// AllOf has precedence i.e. 
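`GetCapabilitiesGroup`, deleted above, buckets keys of the form `capabilities/<prefix>/<index>/<name>` by `<index>`, so storage classes declared under different indices become separate attribute groups. A simplified sketch of that bucketing (it returns a map instead of the original's `AttributesGroup` slice, and uses a local Attribute stand-in):

```go
package main

import (
	"fmt"
	"strings"
)

type Attribute struct{ Key, Value string }

// groupCapabilities is a simplified take on the removed GetCapabilitiesGroup:
// it buckets capabilities/<prefix>/<index>/<name> attributes by <index>.
func groupCapabilities(attrs []Attribute, prefix string) map[string][]Attribute {
	groups := make(map[string][]Attribute)
	for _, item := range attrs {
		if !strings.HasPrefix(item.Key, "capabilities/"+prefix+"/") {
			continue
		}
		parts := strings.Split(strings.TrimPrefix(item.Key, "capabilities/"+prefix+"/"), "/")
		if len(parts) != 2 { // skip malformed keys
			continue
		}
		idx, name := parts[0], parts[1]
		groups[idx] = append(groups[idx], Attribute{Key: name, Value: item.Value})
	}
	return groups
}

func main() {
	attrs := []Attribute{
		{Key: "capabilities/storage/1/persistent", Value: "true"},
		{Key: "capabilities/storage/1/class", Value: "io1"},
		{Key: "capabilities/storage/2/persistent", Value: "false"},
	}

	for idx, group := range groupCapabilities(attrs, "storage") {
		fmt.Println("group", idx, "->", group)
	}
	// group 1 -> [{persistent true} {class io1}]
	// group 2 -> [{persistent false}]   (map iteration order may vary)
}
```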
if there is at least one entry AnyOf is ignored regardless to how many -// entries there -// this behaviour to be discussed -type SignedBy struct { - // all_of all keys in this list must have signed attributes - AllOf []string `protobuf:"bytes,1,rep,name=all_of,json=allOf,proto3" json:"all_of" yaml:"allOf"` - // any_of at least of of the keys from the list must have signed attributes - AnyOf []string `protobuf:"bytes,2,rep,name=any_of,json=anyOf,proto3" json:"any_of" yaml:"anyOf"` -} - -func (m *SignedBy) Reset() { *m = SignedBy{} } -func (*SignedBy) ProtoMessage() {} -func (*SignedBy) Descriptor() ([]byte, []int) { - return fileDescriptor_0a99fe9842d40254, []int{1} -} -func (m *SignedBy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SignedBy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SignedBy.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SignedBy) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignedBy.Merge(m, src) -} -func (m *SignedBy) XXX_Size() int { - return m.Size() -} -func (m *SignedBy) XXX_DiscardUnknown() { - xxx_messageInfo_SignedBy.DiscardUnknown(m) -} - -var xxx_messageInfo_SignedBy proto.InternalMessageInfo - -// PlacementRequirements -type PlacementRequirements struct { - // SignedBy list of keys that tenants expect to have signatures from - SignedBy SignedBy `protobuf:"bytes,1,opt,name=signed_by,json=signedBy,proto3" json:"signed_by" yaml:"signed_by"` - // Attribute list of attributes tenant expects from the provider - Attributes []Attribute `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes" yaml:"attributes"` -} - -func (m *PlacementRequirements) Reset() { *m = PlacementRequirements{} } -func (*PlacementRequirements) ProtoMessage() {} -func (*PlacementRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_0a99fe9842d40254, []int{2} -} -func (m *PlacementRequirements) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PlacementRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PlacementRequirements.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PlacementRequirements) XXX_Merge(src proto.Message) { - xxx_messageInfo_PlacementRequirements.Merge(m, src) -} -func (m *PlacementRequirements) XXX_Size() int { - return m.Size() -} -func (m *PlacementRequirements) XXX_DiscardUnknown() { - xxx_messageInfo_PlacementRequirements.DiscardUnknown(m) -} - -var xxx_messageInfo_PlacementRequirements proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Attribute)(nil), "akash.base.v1beta2.Attribute") - proto.RegisterType((*SignedBy)(nil), "akash.base.v1beta2.SignedBy") - proto.RegisterType((*PlacementRequirements)(nil), "akash.base.v1beta2.PlacementRequirements") -} - -func init() { - proto.RegisterFile("akash/base/v1beta2/attribute.proto", fileDescriptor_0a99fe9842d40254) -} - -var fileDescriptor_0a99fe9842d40254 = []byte{ - // 405 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0xbf, 0xce, 0xd3, 0x30, - 0x14, 0xc5, 0x93, 0xef, 0xa3, 0x55, 0xe3, 0x22, 0x54, 0x22, 0x90, 0xaa, 0x0a, 0xec, 0xca, 0x12, - 0xd0, 0x85, 0x58, 0x2d, 0x0b, 0xea, 0x46, 0x5e, 0x00, 0x14, 0xb6, 
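The `SignedBy` comment above states that `AllOf` takes precedence: when it has at least one entry, every listed account must have signed and `AnyOf` is ignored; otherwise one signature from `AnyOf` suffices. The actual signature checking lives elsewhere in the module, so the following is only an illustrative sketch of that stated precedence, with made-up placeholder account names:

```go
package main

import "fmt"

type SignedBy struct {
	AllOf []string
	AnyOf []string
}

// satisfied illustrates the precedence described above: a non-empty AllOf must be
// fully covered by signers, and in that case AnyOf is ignored entirely.
func satisfied(req SignedBy, signers map[string]bool) bool {
	if len(req.AllOf) > 0 {
		for _, key := range req.AllOf {
			if !signers[key] {
				return false
			}
		}
		return true
	}
	if len(req.AnyOf) == 0 {
		return true // nothing required
	}
	for _, key := range req.AnyOf {
		if signers[key] {
			return true
		}
	}
	return false
}

func main() {
	req := SignedBy{AllOf: []string{"akash1auditorA", "akash1auditorB"}}
	fmt.Println(satisfied(req, map[string]bool{"akash1auditorA": true}))                         // false
	fmt.Println(satisfied(req, map[string]bool{"akash1auditorA": true, "akash1auditorB": true})) // true
}
```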
0x32, 0x14, 0xa7, 0x75, 0xd3, - 0x28, 0x69, 0x5c, 0x12, 0xa7, 0x28, 0x1b, 0x23, 0x23, 0x8f, 0xc0, 0xe3, 0x74, 0xec, 0xd8, 0x29, - 0x82, 0x74, 0x40, 0xea, 0x98, 0x27, 0x40, 0x71, 0xfe, 0x34, 0x12, 0x4c, 0x76, 0xce, 0x3d, 0xbf, - 0xdc, 0x7b, 0x6c, 0x03, 0x4c, 0x3d, 0x1a, 0x6d, 0x89, 0x4d, 0x23, 0x46, 0x0e, 0x53, 0x9b, 0x09, - 0x3a, 0x23, 0x54, 0x88, 0xd0, 0xb5, 0x63, 0xc1, 0x8c, 0x7d, 0xc8, 0x05, 0xd7, 0x75, 0xe9, 0x31, - 0x0a, 0x8f, 0x51, 0x79, 0x46, 0x4f, 0x1c, 0xee, 0x70, 0x59, 0x26, 0xc5, 0xae, 0x74, 0xe2, 0x4f, - 0x40, 0x7b, 0x57, 0xc3, 0xfa, 0x18, 0xdc, 0x7b, 0x2c, 0x19, 0xaa, 0x63, 0x75, 0xa2, 0x99, 0x8f, - 0xf2, 0x14, 0x81, 0x84, 0xee, 0xfc, 0x39, 0xf6, 0x58, 0x82, 0xad, 0xa2, 0xa4, 0xbf, 0x04, 0x9d, - 0x03, 0xf5, 0x63, 0x36, 0xbc, 0x93, 0x9e, 0x41, 0x9e, 0xa2, 0x87, 0xa5, 0x47, 0xca, 0xd8, 0x2a, - 0xcb, 0xf3, 0x07, 0xdf, 0x7f, 0x22, 0x05, 0x1f, 0x40, 0xef, 0xa3, 0xeb, 0x04, 0x6c, 0x6d, 0x26, - 0xfa, 0x14, 0x74, 0xa9, 0xef, 0x2f, 0xf9, 0x66, 0xa8, 0x8e, 0xef, 0x27, 0x9a, 0x39, 0xba, 0xa6, - 0xa8, 0x52, 0x6e, 0x3f, 0xa1, 0xbe, 0xff, 0x7e, 0x83, 0xad, 0x8e, 0x5c, 0x25, 0x12, 0x24, 0x05, - 0x72, 0xd7, 0x42, 0xa4, 0xd2, 0x42, 0x82, 0xa4, 0x44, 0x8a, 0xb5, 0xea, 0xfb, 0x47, 0x05, 0x4f, - 0x3f, 0xf8, 0x74, 0xc5, 0x76, 0x2c, 0x10, 0x16, 0xfb, 0x12, 0xbb, 0xa1, 0xdc, 0x46, 0xfa, 0x67, - 0xa0, 0x45, 0x72, 0xa2, 0xa5, 0x5d, 0xe6, 0xec, 0xcf, 0x9e, 0x19, 0xff, 0x1e, 0x96, 0x51, 0x8f, - 0x6d, 0xbe, 0x38, 0xa6, 0x48, 0xb9, 0xa6, 0xe8, 0x86, 0xe5, 0x29, 0x1a, 0x94, 0xad, 0x1b, 0x09, - 0x5b, 0xbd, 0xa8, 0xce, 0xb9, 0x01, 0xa0, 0xb9, 0x8d, 0x48, 0x0e, 0xde, 0x9f, 0x3d, 0xff, 0x5f, - 0x8b, 0xe6, 0xd8, 0xcd, 0x57, 0x55, 0x8f, 0x16, 0x98, 0xa7, 0xe8, 0x71, 0x95, 0xaf, 0xd1, 0xb0, - 0xd5, 0x32, 0x94, 0x49, 0xcd, 0xc5, 0xf9, 0x37, 0x54, 0xbe, 0x65, 0x50, 0x39, 0x66, 0x50, 0x3d, - 0x65, 0x50, 0xfd, 0x95, 0x41, 0xf5, 0xc7, 0x05, 0x2a, 0xa7, 0x0b, 0x54, 0xce, 0x17, 0xa8, 0x2c, - 0xde, 0x3a, 0xae, 0xd8, 0xc6, 0xb6, 0xb1, 0xe2, 0x3b, 0x22, 0xa7, 0x78, 0x1d, 0x30, 0xf1, 0x95, - 0x87, 0x5e, 0xf5, 0x45, 0xf7, 0x2e, 0x71, 0x38, 0x09, 0xf8, 0x9a, 0x11, 0x91, 0xec, 0x59, 0x54, - 0x3f, 0x2a, 0xbb, 0x2b, 0x5f, 0xc8, 0x9b, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xed, 0xef, 0xae, - 0x43, 0x71, 0x02, 0x00, 0x00, -} - -func (m *Attribute) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Attribute) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Attribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintAttribute(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintAttribute(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SignedBy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SignedBy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SignedBy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.AnyOf) > 0 { - for iNdEx := len(m.AnyOf) - 1; iNdEx >= 0; 
iNdEx-- { - i -= len(m.AnyOf[iNdEx]) - copy(dAtA[i:], m.AnyOf[iNdEx]) - i = encodeVarintAttribute(dAtA, i, uint64(len(m.AnyOf[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.AllOf) > 0 { - for iNdEx := len(m.AllOf) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllOf[iNdEx]) - copy(dAtA[i:], m.AllOf[iNdEx]) - i = encodeVarintAttribute(dAtA, i, uint64(len(m.AllOf[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *PlacementRequirements) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PlacementRequirements) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PlacementRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAttribute(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.SignedBy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAttribute(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintAttribute(dAtA []byte, offset int, v uint64) int { - offset -= sovAttribute(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Attribute) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovAttribute(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovAttribute(uint64(l)) - } - return n -} - -func (m *SignedBy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.AllOf) > 0 { - for _, s := range m.AllOf { - l = len(s) - n += 1 + l + sovAttribute(uint64(l)) - } - } - if len(m.AnyOf) > 0 { - for _, s := range m.AnyOf { - l = len(s) - n += 1 + l + sovAttribute(uint64(l)) - } - } - return n -} - -func (m *PlacementRequirements) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.SignedBy.Size() - n += 1 + l + sovAttribute(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovAttribute(uint64(l)) - } - } - return n -} - -func sovAttribute(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozAttribute(x uint64) (n int) { - return sovAttribute(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Attribute) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Attribute: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Attribute: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - 
var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAttribute(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAttribute - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SignedBy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SignedBy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SignedBy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllOf", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllOf = append(m.AllOf, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AnyOf", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - 
if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AnyOf = append(m.AnyOf, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAttribute(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAttribute - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PlacementRequirements) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PlacementRequirements: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PlacementRequirements: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SignedBy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SignedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAttribute(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAttribute - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAttribute(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAttribute - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAttribute - } - if iNdEx >= l { - return 0, 
io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAttribute - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthAttribute - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupAttribute - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthAttribute - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthAttribute = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAttribute = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupAttribute = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/types/v1beta2/endpoint.go b/go/node/types/v1beta2/endpoint.go deleted file mode 100644 index 5d7a9e6e..00000000 --- a/go/node/types/v1beta2/endpoint.go +++ /dev/null @@ -1,11 +0,0 @@ -package v1beta2 - -type Endpoints []Endpoint - -func (m Endpoints) Dup() Endpoints { - res := make(Endpoints, len(m)) - - copy(res, m) - - return res -} diff --git a/go/node/types/v1beta2/endpoint.pb.go b/go/node/types/v1beta2/endpoint.pb.go deleted file mode 100644 index edd3c9e6..00000000 --- a/go/node/types/v1beta2/endpoint.pb.go +++ /dev/null @@ -1,405 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta2/endpoint.proto - -package v1beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// This describes how the endpoint is implemented when the lease is deployed -type Endpoint_Kind int32 - -const ( - // Describes an endpoint that becomes a Kubernetes Ingress - Endpoint_SHARED_HTTP Endpoint_Kind = 0 - // Describes an endpoint that becomes a Kubernetes NodePort - Endpoint_RANDOM_PORT Endpoint_Kind = 1 - // Describes an endpoint that becomes a leased IP - Endpoint_LEASED_IP Endpoint_Kind = 2 -) - -var Endpoint_Kind_name = map[int32]string{ - 0: "SHARED_HTTP", - 1: "RANDOM_PORT", - 2: "LEASED_IP", -} - -var Endpoint_Kind_value = map[string]int32{ - "SHARED_HTTP": 0, - "RANDOM_PORT": 1, - "LEASED_IP": 2, -} - -func (x Endpoint_Kind) String() string { - return proto.EnumName(Endpoint_Kind_name, int32(x)) -} - -func (Endpoint_Kind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_4b446d601fc1bd00, []int{0, 0} -} - -// Endpoint describes a publicly accessible IP service -type Endpoint struct { - Kind Endpoint_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=akash.base.v1beta2.Endpoint_Kind" json:"kind,omitempty"` - SequenceNumber uint32 `protobuf:"varint,2,opt,name=sequence_number,json=sequenceNumber,proto3" json:"sequence_number" yaml:"sequence_number"` -} - -func (m *Endpoint) Reset() { *m = Endpoint{} } -func (m *Endpoint) String() string { return proto.CompactTextString(m) } -func (*Endpoint) ProtoMessage() {} -func (*Endpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_4b446d601fc1bd00, []int{0} -} -func (m *Endpoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Endpoint.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Endpoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_Endpoint.Merge(m, src) -} -func (m *Endpoint) XXX_Size() int { - return m.Size() -} -func (m *Endpoint) XXX_DiscardUnknown() { - xxx_messageInfo_Endpoint.DiscardUnknown(m) -} - -var xxx_messageInfo_Endpoint proto.InternalMessageInfo - -func (m *Endpoint) GetKind() Endpoint_Kind { - if m != nil { - return m.Kind - } - return Endpoint_SHARED_HTTP -} - -func (m *Endpoint) GetSequenceNumber() uint32 { - if m != nil { - return m.SequenceNumber - } - return 0 -} - -func init() { - proto.RegisterEnum("akash.base.v1beta2.Endpoint_Kind", Endpoint_Kind_name, Endpoint_Kind_value) - proto.RegisterType((*Endpoint)(nil), "akash.base.v1beta2.Endpoint") -} - -func init() { proto.RegisterFile("akash/base/v1beta2/endpoint.proto", fileDescriptor_4b446d601fc1bd00) } - -var fileDescriptor_4b446d601fc1bd00 = []byte{ - // 317 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd2, 0x4f, - 0xcd, 0x4b, 0x29, 0xc8, 0xcf, 0xcc, 0x2b, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x02, - 0x2b, 0xd1, 0x03, 0x29, 0xd1, 0x83, 0x2a, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x4b, 0xeb, - 0x83, 0x58, 0x10, 0x95, 0x4a, 0xaf, 0x18, 0xb9, 0x38, 0x5c, 0xa1, 0x9a, 0x85, 0x4c, 0xb9, 0x58, - 0xb2, 0x33, 0xf3, 0x52, 0x24, 0x18, 0x15, 0x18, 0x35, 0xf8, 0x8c, 0x14, 0xf5, 0x30, 0x4d, 0xd1, - 0x83, 0xa9, 0xd5, 0xf3, 0xce, 0xcc, 0x4b, 0x09, 0x02, 0x2b, 0x17, 0xca, 0xe0, 0xe2, 0x2f, 0x4e, - 0x2d, 0x2c, 0x4d, 0xcd, 
0x4b, 0x4e, 0x8d, 0xcf, 0x2b, 0xcd, 0x4d, 0x4a, 0x2d, 0x92, 0x60, 0x52, - 0x60, 0xd4, 0xe0, 0x75, 0xb2, 0x7f, 0x74, 0x4f, 0x9e, 0x2f, 0x18, 0x2a, 0xe5, 0x07, 0x96, 0x79, - 0x75, 0x4f, 0x1e, 0x5d, 0xf1, 0xa7, 0x7b, 0xf2, 0x62, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, 0x68, - 0x12, 0x4a, 0x41, 0x7c, 0xc5, 0x28, 0x9a, 0x95, 0xcc, 0xb9, 0x58, 0x40, 0xf6, 0x0a, 0xf1, 0x73, - 0x71, 0x07, 0x7b, 0x38, 0x06, 0xb9, 0xba, 0xc4, 0x7b, 0x84, 0x84, 0x04, 0x08, 0x30, 0x80, 0x04, - 0x82, 0x1c, 0xfd, 0x5c, 0xfc, 0x7d, 0xe3, 0x03, 0xfc, 0x83, 0x42, 0x04, 0x18, 0x85, 0x78, 0xb9, - 0x38, 0x7d, 0x5c, 0x1d, 0x83, 0x5d, 0x5d, 0xe2, 0x3d, 0x03, 0x04, 0x98, 0xac, 0x58, 0x5e, 0x2c, - 0x90, 0x67, 0x74, 0x0a, 0x3a, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0x8b, - 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0xaf, 0x75, 0xf3, 0x52, - 0x4b, 0xca, 0xf3, 0x8b, 0xb2, 0xa1, 0xbc, 0xc4, 0x82, 0x4c, 0xfd, 0xf4, 0x7c, 0xfd, 0xbc, 0xfc, - 0x94, 0x54, 0xfd, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x58, 0xc8, 0x27, 0xb1, 0x81, 0xc3, 0xd1, 0x18, - 0x10, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x28, 0xfc, 0xc4, 0x96, 0x01, 0x00, 0x00, -} - -func (this *Endpoint) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Endpoint) - if !ok { - that2, ok := that.(Endpoint) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Kind != that1.Kind { - return false - } - if this.SequenceNumber != that1.SequenceNumber { - return false - } - return true -} -func (m *Endpoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.SequenceNumber != 0 { - i = encodeVarintEndpoint(dAtA, i, uint64(m.SequenceNumber)) - i-- - dAtA[i] = 0x10 - } - if m.Kind != 0 { - i = encodeVarintEndpoint(dAtA, i, uint64(m.Kind)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintEndpoint(dAtA []byte, offset int, v uint64) int { - offset -= sovEndpoint(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Endpoint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Kind != 0 { - n += 1 + sovEndpoint(uint64(m.Kind)) - } - if m.SequenceNumber != 0 { - n += 1 + sovEndpoint(uint64(m.SequenceNumber)) - } - return n -} - -func sovEndpoint(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozEndpoint(x uint64) (n int) { - return sovEndpoint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Endpoint) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEndpoint - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: Endpoint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - m.Kind = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEndpoint - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Kind |= Endpoint_Kind(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SequenceNumber", wireType) - } - m.SequenceNumber = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEndpoint - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SequenceNumber |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipEndpoint(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEndpoint - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipEndpoint(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEndpoint - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEndpoint - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEndpoint - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthEndpoint - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupEndpoint - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthEndpoint - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthEndpoint = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowEndpoint = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupEndpoint = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/types/v1beta2/migrate/v1beta1.go b/go/node/types/v1beta2/migrate/v1beta1.go deleted file mode 100644 index 0ea820a8..00000000 --- a/go/node/types/v1beta2/migrate/v1beta1.go +++ /dev/null @@ -1,96 +0,0 @@ -package migrate - -import ( - "github.com/akash-network/akash-api/go/node/types/v1beta1" - "github.com/akash-network/akash-api/go/node/types/v1beta2" -) - -func ResourceValueFromV1Beta1(from v1beta1.ResourceValue) v1beta2.ResourceValue { - return v1beta2.NewResourceValue(from.Value()) -} - -func AttributesFromV1Beta1(from v1beta1.Attributes) v1beta2.Attributes { - res := make(v1beta2.Attributes, 
0, len(from)) - - for _, attr := range from { - res = append(res, v1beta2.Attribute{ - Key: attr.Key, - Value: attr.Value, - }) - } - - return res -} - -func SignedByFromV1Beta1(from v1beta1.SignedBy) v1beta2.SignedBy { - return v1beta2.SignedBy{ - AllOf: from.AllOf, - AnyOf: from.AnyOf, - } -} - -func PlacementRequirementsFromV1Beta1(from v1beta1.PlacementRequirements) v1beta2.PlacementRequirements { - res := v1beta2.PlacementRequirements{ - SignedBy: SignedByFromV1Beta1(from.SignedBy), - Attributes: AttributesFromV1Beta1(from.Attributes), - } - - return res -} - -func CPUFromV1Beta1(from *v1beta1.CPU) *v1beta2.CPU { - if from == nil { - return nil - } - - return &v1beta2.CPU{ - Units: ResourceValueFromV1Beta1(from.Units), - Attributes: AttributesFromV1Beta1(from.Attributes), - } -} - -func MemoryFromV1Beta1(from *v1beta1.Memory) *v1beta2.Memory { - if from == nil { - return nil - } - - return &v1beta2.Memory{ - Quantity: ResourceValueFromV1Beta1(from.Quantity), - Attributes: AttributesFromV1Beta1(from.Attributes), - } -} - -func VolumesFromV1Beta1(from *v1beta1.Storage) v1beta2.Volumes { - var res v1beta2.Volumes - if from != nil { - res = append(res, v1beta2.Storage{ - Name: "default", - Quantity: ResourceValueFromV1Beta1(from.Quantity), - Attributes: AttributesFromV1Beta1(from.Attributes), - }) - } - - return res -} - -func EndpointsFromV1Beta1(from []v1beta1.Endpoint) []v1beta2.Endpoint { - res := make([]v1beta2.Endpoint, 0, len(from)) - - for _, endpoint := range from { - res = append(res, v1beta2.Endpoint{ - Kind: v1beta2.Endpoint_Kind(endpoint.Kind), - SequenceNumber: 0, // All previous data does not have a use for sequence number - }) - } - - return res -} - -func ResourceUnitsFromV1Beta1(from v1beta1.ResourceUnits) v1beta2.ResourceUnits { - return v1beta2.ResourceUnits{ - CPU: CPUFromV1Beta1(from.CPU), - Memory: MemoryFromV1Beta1(from.Memory), - Storage: VolumesFromV1Beta1(from.Storage), - Endpoints: EndpointsFromV1Beta1(from.Endpoints), - } -} diff --git a/go/node/types/v1beta2/requirements.go b/go/node/types/v1beta2/requirements.go deleted file mode 100644 index 625abcaf..00000000 --- a/go/node/types/v1beta2/requirements.go +++ /dev/null @@ -1,15 +0,0 @@ -package v1beta2 - -import ( - "gopkg.in/yaml.v3" -) - -func (m *SignedBy) String() string { - res, _ := yaml.Marshal(m) - return string(res) -} - -func (m *PlacementRequirements) String() string { - res, _ := yaml.Marshal(m) - return string(res) -} diff --git a/go/node/types/v1beta2/resource.go b/go/node/types/v1beta2/resource.go deleted file mode 100644 index ba5b317a..00000000 --- a/go/node/types/v1beta2/resource.go +++ /dev/null @@ -1,82 +0,0 @@ -package v1beta2 - -type UnitType int - -type Unit interface { - String() string -} - -type ResUnit interface { - Equals(ResUnit) bool - Add(unit ResUnit) bool -} - -// Resources stores Unit details and Count value -type Resources struct { - Resources ResourceUnits `json:"resources"` - Count uint32 `json:"count"` -} - -// ResourceGroup is the interface that wraps GetName and GetResources methods -type ResourceGroup interface { - GetName() string - GetResources() []Resources -} - -type Volumes []Storage - -var _ Unit = (*CPU)(nil) -var _ Unit = (*Memory)(nil) -var _ Unit = (*Storage)(nil) - -func (m ResourceUnits) Dup() ResourceUnits { - res := ResourceUnits{ - CPU: m.CPU.Dup(), - Memory: m.Memory.Dup(), - Storage: m.Storage.Dup(), - Endpoints: m.Endpoints.Dup(), - } - - return res -} - -func (m CPU) Dup() *CPU { - return &CPU{ - Units: m.Units.Dup(), - Attributes: 
m.Attributes.Dup(), - } -} - -func (m Memory) Dup() *Memory { - return &Memory{ - Quantity: m.Quantity.Dup(), - Attributes: m.Attributes.Dup(), - } -} - -func (m Storage) Dup() *Storage { - return &Storage{ - Quantity: m.Quantity.Dup(), - Attributes: m.Attributes.Dup(), - } -} - -func (m Volumes) Equal(rhs Volumes) bool { - for i := range m { - if !m[i].Equal(rhs[i]) { - return false - } - } - - return true -} - -func (m Volumes) Dup() Volumes { - res := make(Volumes, len(m)) - - for _, storage := range m { - res = append(res, *storage.Dup()) - } - - return res -} diff --git a/go/node/types/v1beta2/resource.pb.go b/go/node/types/v1beta2/resource.pb.go deleted file mode 100644 index 1badbcd0..00000000 --- a/go/node/types/v1beta2/resource.pb.go +++ /dev/null @@ -1,1015 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta2/resource.proto - -package v1beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// CPU stores resource units and cpu config attributes -type CPU struct { - Units ResourceValue `protobuf:"bytes,1,opt,name=units,proto3" json:"units"` - Attributes Attributes `protobuf:"bytes,2,rep,name=attributes,proto3,castrepeated=Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` -} - -func (m *CPU) Reset() { *m = CPU{} } -func (m *CPU) String() string { return proto.CompactTextString(m) } -func (*CPU) ProtoMessage() {} -func (*CPU) Descriptor() ([]byte, []int) { - return fileDescriptor_d2022fd0bb546ad1, []int{0} -} -func (m *CPU) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CPU) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CPU.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CPU) XXX_Merge(src proto.Message) { - xxx_messageInfo_CPU.Merge(m, src) -} -func (m *CPU) XXX_Size() int { - return m.Size() -} -func (m *CPU) XXX_DiscardUnknown() { - xxx_messageInfo_CPU.DiscardUnknown(m) -} - -var xxx_messageInfo_CPU proto.InternalMessageInfo - -func (m *CPU) GetUnits() ResourceValue { - if m != nil { - return m.Units - } - return ResourceValue{} -} - -func (m *CPU) GetAttributes() Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// Memory stores resource quantity and memory attributes -type Memory struct { - Quantity ResourceValue `protobuf:"bytes,1,opt,name=quantity,proto3" json:"size" yaml:"size"` - Attributes Attributes `protobuf:"bytes,2,rep,name=attributes,proto3,castrepeated=Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` -} - -func (m *Memory) Reset() { *m = Memory{} } -func (m *Memory) String() string { return proto.CompactTextString(m) } -func (*Memory) ProtoMessage() {} -func (*Memory) Descriptor() ([]byte, []int) { - return fileDescriptor_d2022fd0bb546ad1, []int{1} -} -func (m *Memory) XXX_Unmarshal(b []byte) error 
{ - return m.Unmarshal(b) -} -func (m *Memory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Memory.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Memory) XXX_Merge(src proto.Message) { - xxx_messageInfo_Memory.Merge(m, src) -} -func (m *Memory) XXX_Size() int { - return m.Size() -} -func (m *Memory) XXX_DiscardUnknown() { - xxx_messageInfo_Memory.DiscardUnknown(m) -} - -var xxx_messageInfo_Memory proto.InternalMessageInfo - -func (m *Memory) GetQuantity() ResourceValue { - if m != nil { - return m.Quantity - } - return ResourceValue{} -} - -func (m *Memory) GetAttributes() Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -// Storage stores resource quantity and storage attributes -type Storage struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` - Quantity ResourceValue `protobuf:"bytes,2,opt,name=quantity,proto3" json:"size" yaml:"size"` - Attributes Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` -} - -func (m *Storage) Reset() { *m = Storage{} } -func (m *Storage) String() string { return proto.CompactTextString(m) } -func (*Storage) ProtoMessage() {} -func (*Storage) Descriptor() ([]byte, []int) { - return fileDescriptor_d2022fd0bb546ad1, []int{2} -} -func (m *Storage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Storage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Storage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Storage) XXX_Merge(src proto.Message) { - xxx_messageInfo_Storage.Merge(m, src) -} -func (m *Storage) XXX_Size() int { - return m.Size() -} -func (m *Storage) XXX_DiscardUnknown() { - xxx_messageInfo_Storage.DiscardUnknown(m) -} - -var xxx_messageInfo_Storage proto.InternalMessageInfo - -func (m *Storage) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Storage) GetQuantity() ResourceValue { - if m != nil { - return m.Quantity - } - return ResourceValue{} -} - -func (m *Storage) GetAttributes() Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -func init() { - proto.RegisterType((*CPU)(nil), "akash.base.v1beta2.CPU") - proto.RegisterType((*Memory)(nil), "akash.base.v1beta2.Memory") - proto.RegisterType((*Storage)(nil), "akash.base.v1beta2.Storage") -} - -func init() { proto.RegisterFile("akash/base/v1beta2/resource.proto", fileDescriptor_d2022fd0bb546ad1) } - -var fileDescriptor_d2022fd0bb546ad1 = []byte{ - // 397 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd2, 0x2f, - 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x02, - 0x2b, 0xd1, 0x03, 0x29, 0xd1, 0x83, 0x2a, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x4b, 0xeb, - 0x83, 0x58, 0x10, 0x95, 0x52, 0x4a, 0x58, 0x0c, 0x4b, 0x2c, 0x29, 0x29, 0xca, 0x4c, 0x2a, 0x2d, - 0x81, 0x9a, 0x26, 0xa5, 0x86, 0xc7, 0xc2, 0xb2, 0xc4, 0x9c, 0x52, 0xa8, 0x3a, 0xa5, 0xab, 0x8c, - 0x5c, 0xcc, 0xce, 0x01, 0xa1, 0x42, 0xb6, 0x5c, 
0xac, 0xa5, 0x79, 0x99, 0x25, 0xc5, 0x12, 0x8c, - 0x0a, 0x8c, 0x1a, 0xdc, 0x46, 0x8a, 0x7a, 0x98, 0xae, 0xd1, 0x0b, 0x82, 0xea, 0x0f, 0x03, 0xe9, - 0x77, 0x62, 0x39, 0x71, 0x4f, 0x9e, 0x21, 0x08, 0xa2, 0x4b, 0xa8, 0x83, 0x91, 0x8b, 0x0b, 0xee, - 0x84, 0x62, 0x09, 0x26, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x59, 0x6c, 0x86, 0x38, 0xc2, 0x54, 0x39, - 0x79, 0x82, 0x0c, 0x78, 0x75, 0x4f, 0x5e, 0x04, 0xa1, 0x51, 0x27, 0x3f, 0x37, 0xb3, 0x24, 0x35, - 0xb7, 0xa0, 0xa4, 0xf2, 0xd3, 0x3d, 0x79, 0xe9, 0xca, 0xc4, 0xdc, 0x1c, 0x2b, 0x25, 0x6c, 0xb2, - 0x4a, 0xab, 0xee, 0xcb, 0x73, 0xc1, 0x4d, 0x2a, 0x0e, 0x42, 0xb2, 0xdb, 0x8a, 0xe5, 0xc5, 0x02, - 0x79, 0x46, 0xa5, 0xaf, 0x8c, 0x5c, 0x6c, 0xbe, 0xa9, 0xb9, 0xf9, 0x45, 0x95, 0x42, 0x51, 0x5c, - 0x1c, 0x85, 0xa5, 0x89, 0x79, 0x25, 0x99, 0x25, 0x95, 0xc4, 0xfb, 0x4e, 0x1a, 0xea, 0x38, 0x96, - 0xe2, 0xcc, 0xaa, 0xd4, 0x4f, 0xf7, 0xe4, 0xb9, 0x21, 0x8e, 0x01, 0xf1, 0x94, 0x82, 0xe0, 0xe6, - 0x0d, 0x3e, 0x7f, 0x2f, 0x66, 0xe2, 0x62, 0x0f, 0x2e, 0xc9, 0x2f, 0x4a, 0x4c, 0x4f, 0x15, 0xd2, - 0xe6, 0x62, 0xc9, 0x4b, 0xcc, 0x4d, 0x05, 0x7b, 0x9a, 0xd3, 0x49, 0x1c, 0xe4, 0x1b, 0x10, 0x1f, - 0xe1, 0x1b, 0x10, 0x4f, 0x29, 0x08, 0x2c, 0x88, 0x12, 0x4a, 0x4c, 0xb4, 0x0d, 0x25, 0xe6, 0x81, - 0x0e, 0x25, 0xa7, 0xa0, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, - 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xb2, 0x48, - 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x07, 0xbb, 0x4f, 0x37, 0x2f, 0xb5, - 0xa4, 0x3c, 0xbf, 0x28, 0x1b, 0xca, 0x4b, 0x2c, 0xc8, 0xd4, 0x4f, 0xcf, 0xd7, 0xcf, 0xcb, 0x4f, - 0x49, 0xd5, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0x86, 0xe5, 0xae, 0x24, 0x36, 0x70, 0x86, 0x32, 0x06, - 0x04, 0x00, 0x00, 0xff, 0xff, 0x9f, 0x02, 0x09, 0x18, 0xeb, 0x03, 0x00, 0x00, -} - -func (this *CPU) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CPU) - if !ok { - that2, ok := that.(CPU) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Units.Equal(&that1.Units) { - return false - } - if len(this.Attributes) != len(that1.Attributes) { - return false - } - for i := range this.Attributes { - if !this.Attributes[i].Equal(&that1.Attributes[i]) { - return false - } - } - return true -} -func (this *Memory) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Memory) - if !ok { - that2, ok := that.(Memory) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Quantity.Equal(&that1.Quantity) { - return false - } - if len(this.Attributes) != len(that1.Attributes) { - return false - } - for i := range this.Attributes { - if !this.Attributes[i].Equal(&that1.Attributes[i]) { - return false - } - } - return true -} -func (this *Storage) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Storage) - if !ok { - that2, ok := that.(Storage) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if !this.Quantity.Equal(&that1.Quantity) { - return false - } - if len(this.Attributes) != len(that1.Attributes) { - return false - } - for i := range this.Attributes { - if !this.Attributes[i].Equal(&that1.Attributes[i]) { - return false 
- } - } - return true -} -func (m *CPU) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CPU) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CPU) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Units.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Memory) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Memory) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Memory) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Quantity.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Storage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Storage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Storage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - { - size, err := m.Quantity.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResource(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintResource(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintResource(dAtA []byte, offset int, v uint64) int { - offset -= sovResource(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CPU) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Units.Size() - n += 1 + l + sovResource(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + 
sovResource(uint64(l)) - } - } - return n -} - -func (m *Memory) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Quantity.Size() - n += 1 + l + sovResource(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovResource(uint64(l)) - } - } - return n -} - -func (m *Storage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovResource(uint64(l)) - } - l = m.Quantity.Size() - n += 1 + l + sovResource(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovResource(uint64(l)) - } - } - return n -} - -func sovResource(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozResource(x uint64) (n int) { - return sovResource(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CPU) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CPU: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CPU: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Units", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Units.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipResource(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthResource - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Memory) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Memory: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Memory: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Quantity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Quantity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipResource(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthResource - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Storage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Storage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Storage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Quantity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Quantity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResource - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResource - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResource - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipResource(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthResource - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipResource(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResource - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResource - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResource - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthResource - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupResource - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthResource - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthResource = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowResource = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupResource = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/types/v1beta2/resourceunits.pb.go b/go/node/types/v1beta2/resourceunits.pb.go deleted file mode 100644 index 7e8ffab5..00000000 --- 
a/go/node/types/v1beta2/resourceunits.pb.go +++ /dev/null @@ -1,571 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta2/resourceunits.proto - -package v1beta2 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// ResourceUnits describes all available resources types for deployment/node etc -// if field is nil resource is not present in the given data-structure -type ResourceUnits struct { - CPU *CPU `protobuf:"bytes,1,opt,name=cpu,proto3" json:"cpu,omitempty" yaml:"cpu,omitempty"` - Memory *Memory `protobuf:"bytes,2,opt,name=memory,proto3" json:"memory,omitempty" yaml:"memory,omitempty"` - Storage Volumes `protobuf:"bytes,3,rep,name=storage,proto3,castrepeated=Volumes" json:"storage,omitempty" yaml:"storage,omitempty"` - Endpoints Endpoints `protobuf:"bytes,4,rep,name=endpoints,proto3,castrepeated=Endpoints" json:"endpoints" yaml:"endpoints"` -} - -func (m *ResourceUnits) Reset() { *m = ResourceUnits{} } -func (m *ResourceUnits) String() string { return proto.CompactTextString(m) } -func (*ResourceUnits) ProtoMessage() {} -func (*ResourceUnits) Descriptor() ([]byte, []int) { - return fileDescriptor_3a653b54b68ae16d, []int{0} -} -func (m *ResourceUnits) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceUnits) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResourceUnits.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResourceUnits) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceUnits.Merge(m, src) -} -func (m *ResourceUnits) XXX_Size() int { - return m.Size() -} -func (m *ResourceUnits) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceUnits.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceUnits proto.InternalMessageInfo - -func (m *ResourceUnits) GetCPU() *CPU { - if m != nil { - return m.CPU - } - return nil -} - -func (m *ResourceUnits) GetMemory() *Memory { - if m != nil { - return m.Memory - } - return nil -} - -func (m *ResourceUnits) GetStorage() Volumes { - if m != nil { - return m.Storage - } - return nil -} - -func (m *ResourceUnits) GetEndpoints() Endpoints { - if m != nil { - return m.Endpoints - } - return nil -} - -func init() { - proto.RegisterType((*ResourceUnits)(nil), "akash.base.v1beta2.ResourceUnits") -} - -func init() { - proto.RegisterFile("akash/base/v1beta2/resourceunits.proto", fileDescriptor_3a653b54b68ae16d) -} - -var fileDescriptor_3a653b54b68ae16d = []byte{ - // 403 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xcf, 0xaa, 0xda, 0x40, - 0x14, 0x87, 0x93, 0x46, 0x14, 0x23, 0x82, 0x0d, 0x82, 0xc1, 0x96, 0x8c, 0xcd, 0xa2, 0xb8, 0x68, - 0x33, 0x34, 0x76, 0x51, 0x5c, 0x95, 0x48, 0x97, 0x05, 0x49, 0xb1, 0x8b, 0x6e, 0x4a, 0x12, 0x87, - 0x18, 0x34, 0x99, 0x90, 0x99, 0x54, 0xf2, 0x16, 
0x7d, 0x84, 0xae, 0xfb, 0x24, 0x59, 0xba, 0xbc, - 0xab, 0xb9, 0x97, 0xb8, 0xb9, 0xb8, 0xf4, 0x09, 0x2e, 0xf9, 0x77, 0xc5, 0xab, 0xb8, 0x4b, 0xce, - 0xf9, 0x7e, 0xe7, 0x3b, 0x0c, 0x47, 0x7c, 0x6f, 0xad, 0x2d, 0xb2, 0x82, 0xb6, 0x45, 0x10, 0xfc, - 0xf3, 0xc9, 0x46, 0xd4, 0xd2, 0x61, 0x84, 0x08, 0x8e, 0x23, 0x07, 0xc5, 0x81, 0x47, 0x89, 0x16, - 0x46, 0x98, 0x62, 0x49, 0x2a, 0x38, 0x2d, 0xe7, 0xb4, 0x8a, 0x1b, 0xf6, 0x5d, 0xec, 0xe2, 0xa2, - 0x0d, 0xf3, 0xaf, 0x92, 0x1c, 0xbe, 0xbb, 0x31, 0xf1, 0x06, 0x82, 0x82, 0x65, 0x88, 0xbd, 0x80, - 0x96, 0x88, 0x9a, 0x0a, 0x62, 0xd7, 0xac, 0x52, 0x8b, 0x7c, 0x0f, 0xe9, 0xb7, 0x28, 0x38, 0x61, - 0x2c, 0xf3, 0x23, 0x7e, 0xdc, 0xd1, 0x07, 0xda, 0xe5, 0x3e, 0xda, 0x6c, 0xbe, 0x30, 0x3e, 0xa7, - 0x0c, 0xf0, 0x19, 0x03, 0xc2, 0x6c, 0xbe, 0x38, 0x30, 0xd0, 0x75, 0xc2, 0xf8, 0x03, 0xf6, 0x3d, - 0x8a, 0xfc, 0x90, 0x26, 0x47, 0x06, 0xfa, 0x89, 0xe5, 0x6f, 0xa6, 0xea, 0x59, 0x59, 0x35, 0xf3, - 0xc9, 0x92, 0x2b, 0x36, 0x7d, 0xe4, 0xe3, 0x28, 0x91, 0x5f, 0x15, 0x8e, 0xe1, 0x35, 0xc7, 0xf7, - 0x82, 0x30, 0x26, 0xb9, 0xe6, 0xc0, 0x40, 0xaf, 0x4c, 0x9c, 0x29, 0x06, 0xa5, 0xe2, 0x65, 0x47, - 0x35, 0xab, 0xf1, 0xd2, 0x56, 0x6c, 0x11, 0x8a, 0x23, 0xcb, 0x45, 0xb2, 0x30, 0x12, 0xc6, 0x1d, - 0xfd, 0xcd, 0x35, 0xd3, 0x8f, 0x12, 0x31, 0xbe, 0xa6, 0x0c, 0x70, 0x07, 0x06, 0x5e, 0x57, 0x99, - 0x33, 0x97, 0x5c, 0xba, 0x2e, 0x5a, 0xea, 0xff, 0x7b, 0xd0, 0xfa, 0x89, 0x37, 0xb1, 0x8f, 0x88, - 0x59, 0xdb, 0xa4, 0x40, 0x6c, 0xd7, 0xcf, 0x4c, 0xe4, 0x46, 0xa1, 0x7e, 0x7b, 0x4d, 0xfd, 0xad, - 0x82, 0x0c, 0xbd, 0x72, 0x9f, 0x62, 0x47, 0x06, 0x7a, 0xa5, 0xf3, 0xb9, 0x94, 0xbb, 0xda, 0x75, - 0x84, 0x98, 0x27, 0x76, 0xda, 0x78, 0xfc, 0x07, 0x78, 0xc3, 0x4c, 0x33, 0x85, 0xdf, 0x65, 0x0a, - 0xff, 0x90, 0x29, 0xfc, 0xdf, 0xbd, 0xc2, 0xed, 0xf6, 0x0a, 0x77, 0xb7, 0x57, 0xb8, 0x5f, 0x5f, - 0x5c, 0x8f, 0xae, 0x62, 0x5b, 0x73, 0xb0, 0x0f, 0x8b, 0x35, 0x3e, 0x06, 0x88, 0x6e, 0x71, 0xb4, - 0xae, 0xfe, 0xac, 0xd0, 0x83, 0x2e, 0x86, 0x01, 0x5e, 0x22, 0x48, 0x93, 0x10, 0x91, 0xfa, 0x5a, - 0xec, 0x66, 0x71, 0x25, 0x93, 0xa7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb9, 0x73, 0xe2, 0x78, 0xbf, - 0x02, 0x00, 0x00, -} - -func (this *ResourceUnits) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResourceUnits) - if !ok { - that2, ok := that.(ResourceUnits) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.CPU.Equal(that1.CPU) { - return false - } - if !this.Memory.Equal(that1.Memory) { - return false - } - if len(this.Storage) != len(that1.Storage) { - return false - } - for i := range this.Storage { - if !this.Storage[i].Equal(&that1.Storage[i]) { - return false - } - } - if len(this.Endpoints) != len(that1.Endpoints) { - return false - } - for i := range this.Endpoints { - if !this.Endpoints[i].Equal(&that1.Endpoints[i]) { - return false - } - } - return true -} -func (m *ResourceUnits) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceUnits) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceUnits) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Endpoints) > 0 { - for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResourceunits(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - if len(m.Storage) > 0 { - for iNdEx := len(m.Storage) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Storage[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResourceunits(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if m.Memory != nil { - { - size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResourceunits(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.CPU != nil { - { - size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintResourceunits(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintResourceunits(dAtA []byte, offset int, v uint64) int { - offset -= sovResourceunits(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ResourceUnits) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.CPU != nil { - l = m.CPU.Size() - n += 1 + l + sovResourceunits(uint64(l)) - } - if m.Memory != nil { - l = m.Memory.Size() - n += 1 + l + sovResourceunits(uint64(l)) - } - if len(m.Storage) > 0 { - for _, e := range m.Storage { - l = e.Size() - n += 1 + l + sovResourceunits(uint64(l)) - } - } - if len(m.Endpoints) > 0 { - for _, e := range m.Endpoints { - l = e.Size() - n += 1 + l + sovResourceunits(uint64(l)) - } - } - return n -} - -func sovResourceunits(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozResourceunits(x uint64) (n int) { - return sovResourceunits(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ResourceUnits) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResourceunits - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceUnits: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceUnits: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResourceunits - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResourceunits - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResourceunits - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CPU == nil { - m.CPU = &CPU{} - } - if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowResourceunits - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResourceunits - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResourceunits - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Memory == nil { - m.Memory = &Memory{} - } - if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResourceunits - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResourceunits - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResourceunits - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Storage = append(m.Storage, Storage{}) - if err := m.Storage[len(m.Storage)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResourceunits - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthResourceunits - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthResourceunits - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Endpoints = append(m.Endpoints, Endpoint{}) - if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipResourceunits(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthResourceunits - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipResourceunits(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResourceunits - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResourceunits - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResourceunits - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthResourceunits - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupResourceunits - } 
- depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthResourceunits - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthResourceunits = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowResourceunits = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupResourceunits = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/types/v1beta2/resourcevalue.go b/go/node/types/v1beta2/resourcevalue.go deleted file mode 100644 index c9eb9d94..00000000 --- a/go/node/types/v1beta2/resourcevalue.go +++ /dev/null @@ -1,57 +0,0 @@ -package v1beta2 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" -) - -var ( - ErrOverflow = errors.Errorf("resource value overflow") - ErrCannotSub = errors.Errorf("cannot subtract resources when lhs does not have same units as rhs") - ErrNegativeResult = errors.Errorf("result of subtraction is negative") -) - -/* -ResourceValue the big point of this small change is to ensure math operations on resources -not resulting with negative value which panic on unsigned types as well as overflow which leads to panic too -instead reasonable error is returned. -Each resource using this type as value can take extra advantage of it to check upper bounds -For example in SDL v1 CPU units were handled as uint32 and operation like math.MaxUint32 + 2 -would cause application to panic. But nowadays - const CPULimit = math.MaxUint32 - - func (c *CPU) add(rhs CPU) error { - res, err := c.Units.add(rhs.Units) - if err != nil { - return err - } - - if res.Units.Value() > CPULimit { - return ErrOverflow - } - - c.Units = res - - return nil - } -*/ - -func NewResourceValue(val uint64) ResourceValue { - res := ResourceValue{ - Val: sdk.NewIntFromUint64(val), - } - - return res -} - -func (m ResourceValue) Value() uint64 { - return m.Val.Uint64() -} - -func (m ResourceValue) Dup() ResourceValue { - res := ResourceValue{ - Val: sdk.NewIntFromBigInt(m.Val.BigInt()), - } - - return res -} diff --git a/go/node/types/v1beta2/resourcevalue.pb.go b/go/node/types/v1beta2/resourcevalue.pb.go deleted file mode 100644 index 9a0f0b10..00000000 --- a/go/node/types/v1beta2/resourcevalue.pb.go +++ /dev/null @@ -1,343 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta2/resourcevalue.proto - -package v1beta2 - -import ( - fmt "fmt" - github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Unit stores cpu, memory and storage metrics -type ResourceValue struct { - Val github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,1,opt,name=val,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"val"` -} - -func (m *ResourceValue) Reset() { *m = ResourceValue{} } -func (m *ResourceValue) String() string { return proto.CompactTextString(m) } -func (*ResourceValue) ProtoMessage() {} -func (*ResourceValue) Descriptor() ([]byte, []int) { - return fileDescriptor_7fcaa1b8d61f9bc3, []int{0} -} -func (m *ResourceValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResourceValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResourceValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceValue.Merge(m, src) -} -func (m *ResourceValue) XXX_Size() int { - return m.Size() -} -func (m *ResourceValue) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceValue.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceValue proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ResourceValue)(nil), "akash.base.v1beta2.ResourceValue") -} - -func init() { - proto.RegisterFile("akash/base/v1beta2/resourcevalue.proto", fileDescriptor_7fcaa1b8d61f9bc3) -} - -var fileDescriptor_7fcaa1b8d61f9bc3 = []byte{ - // 227 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4b, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd2, 0x2f, - 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0xd5, 0x2b, 0x28, 0xca, - 0x2f, 0xc9, 0x17, 0x12, 0x02, 0xab, 0xd3, 0x03, 0xa9, 0xd3, 0x83, 0xaa, 0x93, 0x12, 0x49, 0xcf, - 0x4f, 0xcf, 0x07, 0x4b, 0xeb, 0x83, 0x58, 0x10, 0x95, 0x4a, 0xe1, 0x5c, 0xbc, 0x41, 0x50, 0x03, - 0xc2, 0x40, 0x06, 0x08, 0x39, 0x70, 0x31, 0x97, 0x25, 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, - 0x38, 0xe9, 0x9d, 0xb8, 0x27, 0xcf, 0x70, 0xeb, 0x9e, 0xbc, 0x5a, 0x7a, 0x66, 0x49, 0x46, 0x69, - 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x72, 0x7e, 0x71, 0x6e, 0x7e, 0x31, 0x94, 0xd2, 0x2d, 0x4e, - 0xc9, 0xd6, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0xf3, 0xcc, 0x2b, 0x09, 0x02, 0x69, 0xb5, 0x62, - 0x79, 0xb1, 0x40, 0x9e, 0xd1, 0x29, 0xe8, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, - 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, - 0xa2, 0x2c, 0x90, 0x0c, 0x03, 0xbb, 0x53, 0x37, 0x2f, 0xb5, 0xa4, 0x3c, 0xbf, 0x28, 0x1b, 0xca, - 0x4b, 0x2c, 0xc8, 0xd4, 0x4f, 0xcf, 0xd7, 0xcf, 0xcb, 0x4f, 0x49, 0x85, 0x18, 0x0d, 0xf3, 0x6a, - 0x12, 0x1b, 0xd8, 0xcd, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x34, 0xdc, 0xf0, 0x8e, 0x07, - 0x01, 0x00, 0x00, -} - -func (this *ResourceValue) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResourceValue) - if !ok { - that2, ok := that.(ResourceValue) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Val.Equal(that1.Val) { - return false - } - return true -} -func (m *ResourceValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err 
:= m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size := m.Val.Size() - i -= size - if _, err := m.Val.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintResourcevalue(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintResourcevalue(dAtA []byte, offset int, v uint64) int { - offset -= sovResourcevalue(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ResourceValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Val.Size() - n += 1 + l + sovResourcevalue(uint64(l)) - return n -} - -func sovResourcevalue(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozResourcevalue(x uint64) (n int) { - return sovResourcevalue(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ResourceValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResourcevalue - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResourcevalue - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthResourcevalue - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthResourcevalue - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Val.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipResourcevalue(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthResourcevalue - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipResourcevalue(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResourcevalue - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResourcevalue - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if 
dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResourcevalue - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthResourcevalue - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupResourcevalue - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthResourcevalue - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthResourcevalue = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowResourcevalue = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupResourcevalue = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/types/v1beta3/attribute.go b/go/node/types/v1beta3/attribute.go deleted file mode 100644 index a2bb42b6..00000000 --- a/go/node/types/v1beta3/attribute.go +++ /dev/null @@ -1,380 +0,0 @@ -package v1beta3 - -import ( - "path/filepath" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - "gopkg.in/yaml.v3" -) - -const ( - moduleName = "akash" - AttributeNameRegexpStringWildcard = `^([a-zA-Z][\w\/\.\-]{1,126}[\w\*]?)$` - AttributeNameRegexpString = `^([a-zA-Z][\w\/\.\-]{1,126})$` -) - -const ( - errAttributesDuplicateKeys uint32 = iota + 1 - errInvalidAttributeKey -) - -var ( - ErrAttributesDuplicateKeys = sdkerrors.Register(moduleName, errAttributesDuplicateKeys, "attributes cannot have duplicate keys") - ErrInvalidAttributeKey = sdkerrors.Register(moduleName, errInvalidAttributeKey, "attribute key does not match regexp") -) - -var ( - attributeNameRegexpWildcard = regexp.MustCompile(AttributeNameRegexpStringWildcard) -) - -/* -Attributes purpose of using this type in favor of Cosmos's sdk.Attribute is -ability to later extend it with operators to support querying on things like -cpu/memory/storage attributes -At this moment type though is same as sdk.Attributes but all akash libraries were -turned to use a new one -*/ -type Attributes []Attribute - -var _ sort.Interface = (*Attributes)(nil) - -type AttributesGroup []Attributes - -type AttributeValue interface { - AsBool() (bool, bool) - AsString() (string, bool) -} - -type attributeValue struct { - value string -} - -func (val attributeValue) AsBool() (bool, bool) { - if val.value == "" { - return false, false - } - - res, err := strconv.ParseBool(val.value) - if err != nil { - return false, false - } - - return res, true -} - -func (val attributeValue) AsString() (string, bool) { - if val.value == "" { - return "", false - } - - return val.value, true -} - -func (m PlacementRequirements) Dup() PlacementRequirements { - return PlacementRequirements{ - SignedBy: m.SignedBy, - Attributes: m.Attributes.Dup(), - } -} - -func NewStringAttribute(key, val string) Attribute { - return Attribute{ - Key: key, - Value: val, - } -} - -func (m *Attribute) String() string { - res, _ := yaml.Marshal(m) - return string(res) -} - -func (m *Attribute) Equal(rhs *Attribute) bool { - return reflect.DeepEqual(m, rhs) -} - -func (m Attribute) SubsetOf(rhs Attribute) bool { - if match, _ := filepath.Match(m.Key, rhs.Key); match && (m.Value == rhs.Value) { 
- return true - } - - return false -} - -func (attr Attributes) Len() int { - return len(attr) -} - -func (attr Attributes) Swap(i, j int) { - attr[i], attr[j] = attr[j], attr[i] -} - -func (attr Attributes) Less(i, j int) bool { - return attr[i].Key < attr[j].Key -} - -func (attr Attributes) Validate() error { - return attr.ValidateWithRegex(attributeNameRegexpWildcard) -} - -func (attr Attributes) ValidateWithRegex(r *regexp.Regexp) error { - store := make(map[string]bool) - - for i := range attr { - if !r.MatchString(attr[i].Key) { - return ErrInvalidAttributeKey - } - - if _, ok := store[attr[i].Key]; ok { - return ErrAttributesDuplicateKeys - } - - store[attr[i].Key] = true - } - - return nil -} - -func (attr Attributes) Dup() Attributes { - if attr == nil { - return nil - } - - res := make(Attributes, 0, len(attr)) - - for _, pair := range attr { - res = append(res, Attribute{ - Key: pair.Key, - Value: pair.Value, - }) - } - - return res -} - -// AttributesSubsetOf check if a is subset of b -// nolint: gofmt -// For example there are two yaml files being converted into these attributes -// example 1: a is subset of b -// --- -// // a -// attributes: -// -// region: -// - us-east-1 -// -// --- -// b -// attributes: -// -// region: -// - us-east-1 -// - us-east-2 -// -// example 2: a is not subset of b -// attributes: -// -// region: -// - us-east-1 -// -// --- -// b -// attributes: -// -// region: -// - us-east-2 -// - us-east-3 -// -// example 3: a is subset of b -// attributes: -// -// region: -// - us-east-2 -// - us-east-3 -// -// --- -// b -// attributes: -// -// region: -// - us-east-2 -func AttributesSubsetOf(a, b Attributes) bool { -loop: - for _, req := range a { - for _, attr := range b { - if req.SubsetOf(attr) { - continue loop - } - } - return false - } - - return true -} - -func AttributesAnyOf(a, b Attributes) bool { - for _, req := range a { - for _, attr := range b { - if req.SubsetOf(attr) { - return true - } - } - } - - return false -} - -func (attr Attributes) SubsetOf(b Attributes) bool { - return AttributesSubsetOf(attr, b) -} - -func (attr Attributes) AnyOf(b Attributes) bool { - return AttributesAnyOf(attr, b) -} - -func (attr Attributes) Find(glob string) AttributeValue { - // todo wildcard - - var val attributeValue - - for i := range attr { - if glob == attr[i].Key { - val.value = attr[i].Value - break - } - } - - return val -} - -func (attr Attributes) Iterate(prefix string, fn func(group, key, value string)) { - for _, item := range attr { - if strings.HasPrefix(item.Key, prefix) { - tokens := strings.SplitAfter(item.Key, "/") - tokens = tokens[1:] - fn(tokens[1], tokens[2], item.Value) - } - } -} - -// GetCapabilitiesGroup -// -// example -// capabilities/storage/1/persistent: true -// capabilities/storage/1/class: io1 -// capabilities/storage/2/persistent: false -// -// nolint: gofmt -// returns -// - - persistent: true -// class: nvme -// - - persistent: false -func (attr Attributes) GetCapabilitiesGroup(prefix string) AttributesGroup { - var res AttributesGroup // nolint:prealloc - - groups := make(map[string]Attributes) - - for _, item := range attr { - if !strings.HasPrefix(item.Key, "capabilities/"+prefix) { - continue - } - - tokens := strings.SplitAfter(strings.TrimPrefix(item.Key, "capabilities/"), "/") - // skip malformed attributes. really? 
- if len(tokens) != 3 { - continue - } - - // filter out prefix name - tokens = tokens[1:] - - group := groups[tokens[0]] - if group == nil { - group = Attributes{} - } - - group = append(group, Attribute{ - Key: tokens[1], - Value: item.Value, - }) - - groups[tokens[0]] = group - } - - for _, group := range groups { - res = append(res, group) - } - - return res -} - -func (attr Attributes) GetCapabilitiesMap(prefix string) AttributesGroup { - res := make(AttributesGroup, 0, 1) - groups := make(Attributes, 0, len(attr)) - - for _, item := range attr { - if !strings.HasPrefix(item.Key, "capabilities/"+prefix) { - continue - } - - tokens := strings.Split(strings.TrimPrefix(item.Key, "capabilities/"), "/") - // skip malformed attributes - if len(tokens) < 3 { - continue - } - - // filter out prefix name - tokens = tokens[1:] - - var key string - for i, token := range tokens { - if i == 0 { - key = token - } else { - key += "/" + token - } - } - - groups = append(groups, Attribute{ - Key: key, - Value: item.Value, - }) - } - - res = append(res, groups) - - return res -} - -// IN check if given attributes are in attributes group -// AttributesGroup for storage -// - persistent: true -// class: beta1 -// - persistent: true -// class: beta2 -// -// that -// - persistent: true -// class: beta1 -func (attr Attributes) IN(group AttributesGroup) bool { - for _, group := range group { - if attr.SubsetOf(group) { - return true - } - } - return false -} - -func (attr Attributes) AnyIN(group AttributesGroup) bool { - for _, group := range group { - if attr.AnyOf(group) { - return true - } - } - return false -} diff --git a/go/node/types/v1beta3/attribute.pb.go b/go/node/types/v1beta3/attribute.pb.go deleted file mode 100644 index 7c418f4e..00000000 --- a/go/node/types/v1beta3/attribute.pb.go +++ /dev/null @@ -1,812 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta3/attribute.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Attribute represents key value pair -type Attribute struct { - Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty" yaml:"key"` - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty" yaml:"value"` -} - -func (m *Attribute) Reset() { *m = Attribute{} } -func (*Attribute) ProtoMessage() {} -func (*Attribute) Descriptor() ([]byte, []int) { - return fileDescriptor_616f00266f4f7c96, []int{0} -} -func (m *Attribute) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Attribute.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Attribute) XXX_Merge(src proto.Message) { - xxx_messageInfo_Attribute.Merge(m, src) -} -func (m *Attribute) XXX_Size() int { - return m.Size() -} -func (m *Attribute) XXX_DiscardUnknown() { - xxx_messageInfo_Attribute.DiscardUnknown(m) -} - -var xxx_messageInfo_Attribute proto.InternalMessageInfo - -// SignedBy represents validation accounts that tenant expects signatures for provider attributes -// AllOf has precedence i.e. if there is at least one entry AnyOf is ignored regardless to how many -// entries there -// this behaviour to be discussed -type SignedBy struct { - // all_of all keys in this list must have signed attributes - AllOf []string `protobuf:"bytes,1,rep,name=all_of,json=allOf,proto3" json:"all_of" yaml:"allOf"` - // any_of at least of of the keys from the list must have signed attributes - AnyOf []string `protobuf:"bytes,2,rep,name=any_of,json=anyOf,proto3" json:"any_of" yaml:"anyOf"` -} - -func (m *SignedBy) Reset() { *m = SignedBy{} } -func (*SignedBy) ProtoMessage() {} -func (*SignedBy) Descriptor() ([]byte, []int) { - return fileDescriptor_616f00266f4f7c96, []int{1} -} -func (m *SignedBy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SignedBy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_SignedBy.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *SignedBy) XXX_Merge(src proto.Message) { - xxx_messageInfo_SignedBy.Merge(m, src) -} -func (m *SignedBy) XXX_Size() int { - return m.Size() -} -func (m *SignedBy) XXX_DiscardUnknown() { - xxx_messageInfo_SignedBy.DiscardUnknown(m) -} - -var xxx_messageInfo_SignedBy proto.InternalMessageInfo - -// PlacementRequirements -type PlacementRequirements struct { - // SignedBy list of keys that tenants expect to have signatures from - SignedBy SignedBy `protobuf:"bytes,1,opt,name=signed_by,json=signedBy,proto3" json:"signed_by" yaml:"signed_by"` - // Attribute list of attributes tenant expects from the provider - Attributes Attributes `protobuf:"bytes,2,rep,name=attributes,proto3,castrepeated=Attributes" json:"attributes" yaml:"attributes"` -} - -func (m *PlacementRequirements) Reset() { *m = PlacementRequirements{} } -func (*PlacementRequirements) ProtoMessage() {} -func (*PlacementRequirements) Descriptor() ([]byte, []int) { - return fileDescriptor_616f00266f4f7c96, []int{2} -} -func (m *PlacementRequirements) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PlacementRequirements) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_PlacementRequirements.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *PlacementRequirements) XXX_Merge(src proto.Message) { - xxx_messageInfo_PlacementRequirements.Merge(m, src) -} -func (m *PlacementRequirements) XXX_Size() int { - return m.Size() -} -func (m *PlacementRequirements) XXX_DiscardUnknown() { - xxx_messageInfo_PlacementRequirements.DiscardUnknown(m) -} - -var xxx_messageInfo_PlacementRequirements proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Attribute)(nil), "akash.base.v1beta3.Attribute") - proto.RegisterType((*SignedBy)(nil), "akash.base.v1beta3.SignedBy") - proto.RegisterType((*PlacementRequirements)(nil), "akash.base.v1beta3.PlacementRequirements") -} - -func init() { - proto.RegisterFile("akash/base/v1beta3/attribute.proto", fileDescriptor_616f00266f4f7c96) -} - -var fileDescriptor_616f00266f4f7c96 = []byte{ - // 413 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x92, 0x3f, 0x8f, 0xd3, 0x30, - 0x18, 0xc6, 0x93, 0x3b, 0xee, 0x74, 0xf1, 0x21, 0x74, 0x44, 0x20, 0x55, 0x27, 0xb0, 0x2b, 0x4b, - 0xa0, 0x5b, 0x88, 0xd5, 0x56, 0x48, 0xa8, 0x1b, 0xf9, 0x02, 0xa0, 0xb0, 0x95, 0xa1, 0x38, 0xad, - 0x9b, 0x46, 0x49, 0xe3, 0x12, 0x3b, 0x45, 0xd9, 0x18, 0x19, 0xf9, 0x08, 0xcc, 0x7c, 0x92, 0x8e, - 0x1d, 0x3b, 0x05, 0x48, 0xb7, 0x8e, 0x91, 0xd8, 0x51, 0x9c, 0x3f, 0x8d, 0xc4, 0x4d, 0x76, 0x9e, - 0xf7, 0xf9, 0xe5, 0x7d, 0x1f, 0xdb, 0x00, 0xd3, 0x80, 0x8a, 0x25, 0x71, 0xa9, 0x60, 0x64, 0x33, - 0x70, 0x99, 0xa4, 0x23, 0x42, 0xa5, 0x8c, 0x7d, 0x37, 0x91, 0xcc, 0x5a, 0xc7, 0x5c, 0x72, 0xd3, - 0x54, 0x1e, 0xab, 0xf4, 0x58, 0xb5, 0xe7, 0xf6, 0x89, 0xc7, 0x3d, 0xae, 0xca, 0xa4, 0xdc, 0x55, - 0x4e, 0xfc, 0x11, 0x18, 0x6f, 0x1b, 0xd8, 0xec, 0x83, 0xf3, 0x80, 0xa5, 0x3d, 0xbd, 0xaf, 0xdf, - 0x19, 0xf6, 0xa3, 0x22, 0x43, 0x20, 0xa5, 0xab, 0x70, 0x8c, 0x03, 0x96, 0x62, 0xa7, 0x2c, 0x99, - 0x2f, 0xc1, 0xc5, 0x86, 0x86, 0x09, 0xeb, 0x9d, 0x29, 0xcf, 0x4d, 0x91, 0xa1, 0x87, 0x95, 0x47, - 0xc9, 0xd8, 0xa9, 0xca, 0xe3, 0x07, 0xdf, 0x7e, 0x20, 0x0d, 0x6f, 0xc0, 0xd5, 0x07, 0xdf, 0x8b, - 0xd8, 0xdc, 0x4e, 0xcd, 0x01, 0xb8, 0xa4, 0x61, 0x38, 0xe5, 0x8b, 0x9e, 0xde, 0x3f, 0xbf, 0x33, - 0xec, 0xdb, 0x63, 0x86, 0x6a, 0xe5, 0xf4, 0x13, 0x1a, 0x86, 0xef, 0x16, 0xd8, 0xb9, 0x50, 0xab, - 0x42, 0xa2, 0xb4, 0x44, 0xce, 0x3a, 0x88, 0x52, 0x3a, 0x48, 0x94, 0x56, 0x48, 0xb9, 0xd6, 0x7d, - 0xff, 0xea, 0xe0, 0xe9, 0xfb, 0x90, 0xce, 0xd8, 0x8a, 0x45, 0xd2, 0x61, 0x9f, 0x13, 0x3f, 0x56, - 0x5b, 0x61, 0x7e, 0x02, 0x86, 0x50, 0x13, 0x4d, 0xdd, 0x2a, 0xe7, 0xf5, 0xf0, 0x99, 0xf5, 0xff, - 0x61, 0x59, 0xcd, 0xd8, 0xf6, 0x8b, 0x6d, 0x86, 0xb4, 0x63, 0x86, 0x4e, 0x58, 0x91, 0xa1, 0x9b, - 0xaa, 0x75, 0x2b, 0x61, 0xe7, 0x4a, 0x34, 0x39, 0x25, 0x00, 0xed, 0x6d, 0x08, 0x35, 0xf8, 0xf5, - 0xf0, 0xf9, 0x7d, 0x2d, 0xda, 0x63, 0xb7, 0x5f, 0xd7, 0x3d, 0x3a, 0x60, 0x91, 0xa1, 0xc7, 0x75, - 0xbe, 0x56, 0xc3, 0x3f, 0x7f, 0x21, 0xd0, 0x52, 0xc2, 0xe9, 0xd8, 0xab, 0xdc, 0xf6, 0x64, 0xff, - 0x07, 0x6a, 0x5f, 0x73, 0xa8, 0x6d, 0x73, 0xa8, 0xef, 0x72, 0xa8, 0xff, 0xce, 0xa1, 0xfe, 0xfd, - 0x00, 0xb5, 0xdd, 0x01, 0x6a, 0xfb, 0x03, 0xd4, 0x26, 0x6f, 0x3c, 0x5f, 0x2e, 0x13, 0xd7, 0x9a, - 0xf1, 0x15, 0x51, 0x33, 0xbd, 0x8a, 0x98, 0xfc, 0xc2, 0xe3, 0xa0, 0xfe, 0xa2, 0x6b, 0x9f, 0x78, - 0x9c, 0x44, 0x7c, 0xce, 0x88, 0x4c, 0xd7, 0x4c, 0x34, 0x4f, 0xcc, 0xbd, 0x54, 0xef, 0x65, 0xf4, - 
0x2f, 0x00, 0x00, 0xff, 0xff, 0xce, 0xbe, 0x99, 0x37, 0x7f, 0x02, 0x00, 0x00, -} - -func (m *Attribute) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Attribute) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Attribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Value) > 0 { - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintAttribute(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - } - if len(m.Key) > 0 { - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintAttribute(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SignedBy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SignedBy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SignedBy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.AnyOf) > 0 { - for iNdEx := len(m.AnyOf) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AnyOf[iNdEx]) - copy(dAtA[i:], m.AnyOf[iNdEx]) - i = encodeVarintAttribute(dAtA, i, uint64(len(m.AnyOf[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.AllOf) > 0 { - for iNdEx := len(m.AllOf) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllOf[iNdEx]) - copy(dAtA[i:], m.AllOf[iNdEx]) - i = encodeVarintAttribute(dAtA, i, uint64(len(m.AllOf[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *PlacementRequirements) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PlacementRequirements) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PlacementRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAttribute(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.SignedBy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintAttribute(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintAttribute(dAtA []byte, offset int, v uint64) int { - offset -= sovAttribute(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Attribute) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - if l > 0 { - n += 1 + l + sovAttribute(uint64(l)) - } - l = len(m.Value) - if l > 0 { - n += 1 + l + sovAttribute(uint64(l)) - } - return n -} - -func (m *SignedBy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.AllOf) > 0 { - for _, s := range m.AllOf { - l = len(s) - n += 1 + l + sovAttribute(uint64(l)) - } - 
} - if len(m.AnyOf) > 0 { - for _, s := range m.AnyOf { - l = len(s) - n += 1 + l + sovAttribute(uint64(l)) - } - } - return n -} - -func (m *PlacementRequirements) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.SignedBy.Size() - n += 1 + l + sovAttribute(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovAttribute(uint64(l)) - } - } - return n -} - -func sovAttribute(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozAttribute(x uint64) (n int) { - return sovAttribute(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Attribute) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Attribute: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Attribute: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAttribute(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAttribute - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SignedBy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SignedBy: wiretype end group for non-group") - } 
- if fieldNum <= 0 { - return fmt.Errorf("proto: SignedBy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllOf", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllOf = append(m.AllOf, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AnyOf", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AnyOf = append(m.AnyOf, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAttribute(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAttribute - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PlacementRequirements) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PlacementRequirements: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PlacementRequirements: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SignedBy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SignedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowAttribute - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - 
b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthAttribute - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthAttribute - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipAttribute(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthAttribute - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipAttribute(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAttribute - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAttribute - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowAttribute - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthAttribute - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupAttribute - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthAttribute - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthAttribute = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowAttribute = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupAttribute = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/types/v1beta3/attribute_test.go b/go/node/types/v1beta3/attribute_test.go deleted file mode 100644 index 17894962..00000000 --- a/go/node/types/v1beta3/attribute_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package v1beta3 - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -type regexTest struct { - runName string - key string - shouldPass bool -} - -func TestAttributes_Validate(t *testing.T) { - attr := Attributes{ - {Key: "key"}, - {Key: "key"}, - } - - require.EqualError(t, attr.Validate(), ErrAttributesDuplicateKeys.Error()) - - // unsupported key symbol - attr = Attributes{ - {Key: "$"}, - } - - require.EqualError(t, attr.Validate(), ErrInvalidAttributeKey.Error()) - - // empty key - attr = Attributes{ - {Key: ""}, - } - - require.EqualError(t, attr.Validate(), ErrInvalidAttributeKey.Error()) - // to long key - attr = Attributes{ - {Key: "sdgkhaeirugaeroigheirghseiargfs3ssdgkhaeirugaeroigheirghseiargfs3sdgkhaeirugaeroigheirghseiargfs3ssdgkhaeirugaeroigheirghseiargfs3"}, - } - - require.EqualError(t, attr.Validate(), ErrInvalidAttributeKey.Error()) 
-} - -func TestAttribute_Equal(t *testing.T) { - attr1 := &Attribute{Key: "key1", Value: "val1"} - attr2 := &Attribute{Key: "key1", Value: "val1"} - attr3 := &Attribute{Key: "key1", Value: "val2"} - - require.True(t, attr1.Equal(attr2)) - require.False(t, attr1.Equal(attr3)) -} - -func TestAttribute_SubsetOf(t *testing.T) { - attr1 := Attribute{Key: "key1", Value: "val1"} - attr2 := Attribute{Key: "key1", Value: "val1"} - attr3 := Attribute{Key: "key1", Value: "val2"} - - require.True(t, attr1.SubsetOf(attr2)) - require.False(t, attr1.SubsetOf(attr3)) -} - -func TestAttribute_AnyOf(t *testing.T) { - attr1 := Attribute{Key: "key1", Value: "val1"} - attr2 := Attribute{Key: "key1", Value: "val1"} - attr3 := Attribute{Key: "key1", Value: "val2"} - - require.True(t, attr1.SubsetOf(attr2)) - require.False(t, attr1.SubsetOf(attr3)) -} - -func TestAttributes_SubsetOf(t *testing.T) { - attr1 := Attributes{ - {Key: "key1", Value: "val1"}, - } - - attr2 := Attributes{ - {Key: "key1", Value: "val1"}, - {Key: "key2", Value: "val2"}, - } - - attr3 := Attributes{ - {Key: "key1", Value: "val1"}, - {Key: "key2", Value: "val2"}, - {Key: "key3", Value: "val3"}, - {Key: "key4", Value: "val4"}, - } - - attr4 := Attributes{ - {Key: "key3", Value: "val3"}, - {Key: "key4", Value: "val4"}, - } - - require.True(t, attr1.SubsetOf(attr2)) - require.True(t, attr2.SubsetOf(attr3)) - require.False(t, attr1.SubsetOf(attr4)) -} - -func TestAttributes_AnyOf(t *testing.T) { - attr1 := Attributes{ - {Key: "key1", Value: "val1"}, - } - - attr2 := Attributes{ - {Key: "key1", Value: "val1"}, - {Key: "key2", Value: "val2"}, - } - - attr3 := Attributes{ - {Key: "key1", Value: "val1"}, - {Key: "key2", Value: "val2"}, - {Key: "key3", Value: "val3"}, - {Key: "key4", Value: "val4"}, - } - - attr4 := Attributes{ - {Key: "key3", Value: "val3"}, - {Key: "key4", Value: "val4"}, - } - - require.True(t, attr1.AnyOf(attr2)) - require.True(t, attr2.AnyOf(attr1)) - require.True(t, attr2.AnyOf(attr3)) - require.False(t, attr1.AnyOf(attr4)) -} - -func TestAttributeRegex(t *testing.T) { - tests := []regexTest{ - { - "arbitrary key", - "key1", - true, - }, - { - "allow trailing wildcard", - "key1*", - true, - }, - { - "allow trailing wildcard", - "key1/*", - true, - }, - { - "leading wildcard is not allowed", - "*key1", - false, - }, - { - "multiple wildcards are not allowed", - "key1**", - false, - }, - { - "wildcards in the middle of key are not allowed", - "key1*/", - false, - }, - { - "wildcards in the middle of key are not allowed", - "key1/*/", - false, - }, - } - - for _, test := range tests { - t.Run(test.runName, func(t *testing.T) { - require.Equal(t, test.shouldPass, attributeNameRegexpWildcard.MatchString(test.key)) - }) - } -} - -func TestAttributes_Dup(t *testing.T) { - attrs := Attributes{ - Attribute{ - Key: "key", - Value: "val", - }, - } - - dAttrs := attrs.Dup() - require.Equal(t, attrs, dAttrs) -} diff --git a/go/node/types/v1beta3/cpu.pb.go b/go/node/types/v1beta3/cpu.pb.go deleted file mode 100644 index da52dc21..00000000 --- a/go/node/types/v1beta3/cpu.pb.go +++ /dev/null @@ -1,421 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta3/cpu.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// CPU stores resource units and cpu config attributes -type CPU struct { - Units ResourceValue `protobuf:"bytes,1,opt,name=units,proto3" json:"units"` - Attributes Attributes `protobuf:"bytes,2,rep,name=attributes,proto3,castrepeated=Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` -} - -func (m *CPU) Reset() { *m = CPU{} } -func (m *CPU) String() string { return proto.CompactTextString(m) } -func (*CPU) ProtoMessage() {} -func (*CPU) Descriptor() ([]byte, []int) { - return fileDescriptor_80ab47749737c9d3, []int{0} -} -func (m *CPU) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CPU) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_CPU.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *CPU) XXX_Merge(src proto.Message) { - xxx_messageInfo_CPU.Merge(m, src) -} -func (m *CPU) XXX_Size() int { - return m.Size() -} -func (m *CPU) XXX_DiscardUnknown() { - xxx_messageInfo_CPU.DiscardUnknown(m) -} - -var xxx_messageInfo_CPU proto.InternalMessageInfo - -func (m *CPU) GetUnits() ResourceValue { - if m != nil { - return m.Units - } - return ResourceValue{} -} - -func (m *CPU) GetAttributes() Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -func init() { - proto.RegisterType((*CPU)(nil), "akash.base.v1beta3.CPU") -} - -func init() { proto.RegisterFile("akash/base/v1beta3/cpu.proto", fileDescriptor_80ab47749737c9d3) } - -var fileDescriptor_80ab47749737c9d3 = []byte{ - // 303 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd6, 0x4f, - 0x2e, 0x28, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x02, 0xcb, 0xea, 0x81, 0x64, 0xf5, - 0xa0, 0xb2, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0x69, 0x7d, 0x10, 0x0b, 0xa2, 0x52, 0x4a, - 0x09, 0x8b, 0x39, 0x89, 0x25, 0x25, 0x45, 0x99, 0x49, 0xa5, 0x25, 0xa9, 0x50, 0x35, 0x6a, 0x58, - 0xd4, 0x14, 0xa5, 0x16, 0xe7, 0x97, 0x16, 0x25, 0xa7, 0x96, 0x25, 0xe6, 0x94, 0x42, 0xd5, 0x29, - 0x5d, 0x65, 0xe4, 0x62, 0x76, 0x0e, 0x08, 0x15, 0xb2, 0xe5, 0x62, 0x2d, 0xcd, 0xcb, 0x2c, 0x29, - 0x96, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x36, 0x52, 0xd4, 0xc3, 0x74, 0x8d, 0x5e, 0x10, 0x54, 0x7f, - 0x18, 0x48, 0xbf, 0x13, 0xcb, 0x89, 0x7b, 0xf2, 0x0c, 0x41, 0x10, 0x5d, 0x42, 0x1d, 0x8c, 0x5c, - 0x5c, 0x70, 0x27, 0x14, 0x4b, 0x30, 0x29, 0x30, 0x6b, 0x70, 0x1b, 0xc9, 0x62, 0x33, 0xc4, 0x11, - 0xa6, 0xca, 0xc9, 0x13, 0x64, 0xc0, 0xab, 0x7b, 0xf2, 0x22, 0x08, 0x8d, 0x3a, 0xf9, 0xb9, 0x99, - 0x25, 0xa9, 0xb9, 0x05, 0x25, 0x95, 0x9f, 0xee, 0xc9, 0x4b, 0x57, 0x26, 0xe6, 0xe6, 0x58, 0x29, - 0x61, 0x93, 0x55, 0x5a, 0x75, 0x5f, 0x9e, 0x0b, 0x6e, 0x52, 0x71, 0x10, 0x92, 0xdd, 0x56, 0x2c, - 0x2f, 0x16, 0xc8, 0x33, 0x3a, 0x05, 0x9d, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, - 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, - 0x94, 0x45, 
0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0xd8, 0x7d, 0xba, - 0x79, 0xa9, 0x25, 0xe5, 0xf9, 0x45, 0xd9, 0x50, 0x5e, 0x62, 0x41, 0xa6, 0x7e, 0x7a, 0xbe, 0x7e, - 0x5e, 0x7e, 0x4a, 0xaa, 0x7e, 0x49, 0x65, 0x41, 0x6a, 0x31, 0x2c, 0xfc, 0x92, 0xd8, 0xc0, 0x41, - 0x66, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x9a, 0x56, 0xd0, 0xc8, 0x01, 0x00, 0x00, -} - -func (this *CPU) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*CPU) - if !ok { - that2, ok := that.(CPU) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Units.Equal(&that1.Units) { - return false - } - if len(this.Attributes) != len(that1.Attributes) { - return false - } - for i := range this.Attributes { - if !this.Attributes[i].Equal(&that1.Attributes[i]) { - return false - } - } - return true -} -func (m *CPU) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CPU) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CPU) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCpu(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Units.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintCpu(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintCpu(dAtA []byte, offset int, v uint64) int { - offset -= sovCpu(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CPU) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Units.Size() - n += 1 + l + sovCpu(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovCpu(uint64(l)) - } - } - return n -} - -func sovCpu(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozCpu(x uint64) (n int) { - return sovCpu(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *CPU) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCpu - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CPU: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CPU: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Units", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCpu - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { 
- break - } - } - if msglen < 0 { - return ErrInvalidLengthCpu - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCpu - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Units.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowCpu - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthCpu - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthCpu - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipCpu(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthCpu - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipCpu(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCpu - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCpu - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowCpu - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthCpu - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupCpu - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthCpu - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthCpu = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowCpu = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupCpu = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/types/v1beta3/endpoint.go b/go/node/types/v1beta3/endpoint.go deleted file mode 100644 index fcb63a29..00000000 --- a/go/node/types/v1beta3/endpoint.go +++ /dev/null @@ -1,29 +0,0 @@ -package v1beta3 - -import ( - "sort" -) - -type Endpoints []Endpoint - -var _ sort.Interface = (*Endpoints)(nil) - -func (u Endpoints) Dup() Endpoints { - res := make(Endpoints, len(u)) - - copy(res, u) - - return res -} - -func (u Endpoints) Len() int { - return len(u) -} - -func (u Endpoints) Swap(i, j int) { - u[i], u[j] = u[j], u[i] -} - -func (u Endpoints) Less(i, j int) bool { - return 
u[i].SequenceNumber < u[j].SequenceNumber -} diff --git a/go/node/types/v1beta3/endpoint.pb.go b/go/node/types/v1beta3/endpoint.pb.go deleted file mode 100644 index 7e307ee0..00000000 --- a/go/node/types/v1beta3/endpoint.pb.go +++ /dev/null @@ -1,405 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta3/endpoint.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// This describes how the endpoint is implemented when the lease is deployed -type Endpoint_Kind int32 - -const ( - // Describes an endpoint that becomes a Kubernetes Ingress - Endpoint_SHARED_HTTP Endpoint_Kind = 0 - // Describes an endpoint that becomes a Kubernetes NodePort - Endpoint_RANDOM_PORT Endpoint_Kind = 1 - // Describes an endpoint that becomes a leased IP - Endpoint_LEASED_IP Endpoint_Kind = 2 -) - -var Endpoint_Kind_name = map[int32]string{ - 0: "SHARED_HTTP", - 1: "RANDOM_PORT", - 2: "LEASED_IP", -} - -var Endpoint_Kind_value = map[string]int32{ - "SHARED_HTTP": 0, - "RANDOM_PORT": 1, - "LEASED_IP": 2, -} - -func (x Endpoint_Kind) String() string { - return proto.EnumName(Endpoint_Kind_name, int32(x)) -} - -func (Endpoint_Kind) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_f12485ed8bcdc6e7, []int{0, 0} -} - -// Endpoint describes a publicly accessible IP service -type Endpoint struct { - Kind Endpoint_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=akash.base.v1beta3.Endpoint_Kind" json:"kind,omitempty"` - SequenceNumber uint32 `protobuf:"varint,2,opt,name=sequence_number,json=sequenceNumber,proto3" json:"sequence_number" yaml:"sequence_number"` -} - -func (m *Endpoint) Reset() { *m = Endpoint{} } -func (m *Endpoint) String() string { return proto.CompactTextString(m) } -func (*Endpoint) ProtoMessage() {} -func (*Endpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_f12485ed8bcdc6e7, []int{0} -} -func (m *Endpoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Endpoint.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Endpoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_Endpoint.Merge(m, src) -} -func (m *Endpoint) XXX_Size() int { - return m.Size() -} -func (m *Endpoint) XXX_DiscardUnknown() { - xxx_messageInfo_Endpoint.DiscardUnknown(m) -} - -var xxx_messageInfo_Endpoint proto.InternalMessageInfo - -func (m *Endpoint) GetKind() Endpoint_Kind { - if m != nil { - return m.Kind - } - return Endpoint_SHARED_HTTP -} - -func (m *Endpoint) GetSequenceNumber() uint32 { - if m != nil { - return m.SequenceNumber - } - return 0 -} - -func init() { - proto.RegisterEnum("akash.base.v1beta3.Endpoint_Kind", Endpoint_Kind_name, Endpoint_Kind_value) - proto.RegisterType((*Endpoint)(nil), "akash.base.v1beta3.Endpoint") 
-} - -func init() { proto.RegisterFile("akash/base/v1beta3/endpoint.proto", fileDescriptor_f12485ed8bcdc6e7) } - -var fileDescriptor_f12485ed8bcdc6e7 = []byte{ - // 317 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd6, 0x4f, - 0xcd, 0x4b, 0x29, 0xc8, 0xcf, 0xcc, 0x2b, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x02, - 0x2b, 0xd1, 0x03, 0x29, 0xd1, 0x83, 0x2a, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x4b, 0xeb, - 0x83, 0x58, 0x10, 0x95, 0x4a, 0xaf, 0x18, 0xb9, 0x38, 0x5c, 0xa1, 0x9a, 0x85, 0x4c, 0xb9, 0x58, - 0xb2, 0x33, 0xf3, 0x52, 0x24, 0x18, 0x15, 0x18, 0x35, 0xf8, 0x8c, 0x14, 0xf5, 0x30, 0x4d, 0xd1, - 0x83, 0xa9, 0xd5, 0xf3, 0xce, 0xcc, 0x4b, 0x09, 0x02, 0x2b, 0x17, 0xca, 0xe0, 0xe2, 0x2f, 0x4e, - 0x2d, 0x2c, 0x4d, 0xcd, 0x4b, 0x4e, 0x8d, 0xcf, 0x2b, 0xcd, 0x4d, 0x4a, 0x2d, 0x92, 0x60, 0x52, - 0x60, 0xd4, 0xe0, 0x75, 0xb2, 0x7f, 0x74, 0x4f, 0x9e, 0x2f, 0x18, 0x2a, 0xe5, 0x07, 0x96, 0x79, - 0x75, 0x4f, 0x1e, 0x5d, 0xf1, 0xa7, 0x7b, 0xf2, 0x62, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, 0x68, - 0x12, 0x4a, 0x41, 0x7c, 0xc5, 0x28, 0x9a, 0x95, 0xcc, 0xb9, 0x58, 0x40, 0xf6, 0x0a, 0xf1, 0x73, - 0x71, 0x07, 0x7b, 0x38, 0x06, 0xb9, 0xba, 0xc4, 0x7b, 0x84, 0x84, 0x04, 0x08, 0x30, 0x80, 0x04, - 0x82, 0x1c, 0xfd, 0x5c, 0xfc, 0x7d, 0xe3, 0x03, 0xfc, 0x83, 0x42, 0x04, 0x18, 0x85, 0x78, 0xb9, - 0x38, 0x7d, 0x5c, 0x1d, 0x83, 0x5d, 0x5d, 0xe2, 0x3d, 0x03, 0x04, 0x98, 0xac, 0x58, 0x5e, 0x2c, - 0x90, 0x67, 0x74, 0x0a, 0x3a, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, - 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0x8b, - 0xf4, 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0xaf, 0x75, 0xf3, 0x52, - 0x4b, 0xca, 0xf3, 0x8b, 0xb2, 0xa1, 0xbc, 0xc4, 0x82, 0x4c, 0xfd, 0xf4, 0x7c, 0xfd, 0xbc, 0xfc, - 0x94, 0x54, 0xfd, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x58, 0xc8, 0x27, 0xb1, 0x81, 0xc3, 0xd1, 0x18, - 0x10, 0x00, 0x00, 0xff, 0xff, 0xa7, 0xd8, 0x24, 0x46, 0x96, 0x01, 0x00, 0x00, -} - -func (this *Endpoint) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Endpoint) - if !ok { - that2, ok := that.(Endpoint) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Kind != that1.Kind { - return false - } - if this.SequenceNumber != that1.SequenceNumber { - return false - } - return true -} -func (m *Endpoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.SequenceNumber != 0 { - i = encodeVarintEndpoint(dAtA, i, uint64(m.SequenceNumber)) - i-- - dAtA[i] = 0x10 - } - if m.Kind != 0 { - i = encodeVarintEndpoint(dAtA, i, uint64(m.Kind)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintEndpoint(dAtA []byte, offset int, v uint64) int { - offset -= sovEndpoint(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base 
-} -func (m *Endpoint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Kind != 0 { - n += 1 + sovEndpoint(uint64(m.Kind)) - } - if m.SequenceNumber != 0 { - n += 1 + sovEndpoint(uint64(m.SequenceNumber)) - } - return n -} - -func sovEndpoint(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozEndpoint(x uint64) (n int) { - return sovEndpoint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Endpoint) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEndpoint - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - m.Kind = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEndpoint - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Kind |= Endpoint_Kind(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SequenceNumber", wireType) - } - m.SequenceNumber = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEndpoint - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SequenceNumber |= uint32(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipEndpoint(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthEndpoint - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipEndpoint(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEndpoint - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEndpoint - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowEndpoint - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthEndpoint - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupEndpoint - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthEndpoint - } - if depth == 0 { - return iNdEx, nil - } - } - 
return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthEndpoint = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowEndpoint = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupEndpoint = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/types/v1beta3/gpu.pb.go b/go/node/types/v1beta3/gpu.pb.go deleted file mode 100644 index 56d77d01..00000000 --- a/go/node/types/v1beta3/gpu.pb.go +++ /dev/null @@ -1,421 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta3/gpu.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// GPU stores resource units and cpu config attributes -type GPU struct { - Units ResourceValue `protobuf:"bytes,1,opt,name=units,proto3" json:"units"` - Attributes Attributes `protobuf:"bytes,2,rep,name=attributes,proto3,castrepeated=Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` -} - -func (m *GPU) Reset() { *m = GPU{} } -func (m *GPU) String() string { return proto.CompactTextString(m) } -func (*GPU) ProtoMessage() {} -func (*GPU) Descriptor() ([]byte, []int) { - return fileDescriptor_629c4d148689a448, []int{0} -} -func (m *GPU) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GPU) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_GPU.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *GPU) XXX_Merge(src proto.Message) { - xxx_messageInfo_GPU.Merge(m, src) -} -func (m *GPU) XXX_Size() int { - return m.Size() -} -func (m *GPU) XXX_DiscardUnknown() { - xxx_messageInfo_GPU.DiscardUnknown(m) -} - -var xxx_messageInfo_GPU proto.InternalMessageInfo - -func (m *GPU) GetUnits() ResourceValue { - if m != nil { - return m.Units - } - return ResourceValue{} -} - -func (m *GPU) GetAttributes() Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -func init() { - proto.RegisterType((*GPU)(nil), "akash.base.v1beta3.GPU") -} - -func init() { proto.RegisterFile("akash/base/v1beta3/gpu.proto", fileDescriptor_629c4d148689a448) } - -var fileDescriptor_629c4d148689a448 = []byte{ - // 303 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x49, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd6, 0x4f, - 0x2f, 0x28, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x02, 0xcb, 0xea, 0x81, 0x64, 0xf5, - 0xa0, 0xb2, 0x52, 0x22, 0xe9, 0xf9, 0xe9, 0xf9, 0x60, 0x69, 0x7d, 0x10, 0x0b, 0xa2, 0x52, 0x4a, - 0x09, 0x8b, 0x39, 0x89, 0x25, 0x25, 0x45, 0x99, 0x49, 0xa5, 0x25, 0xa9, 0x50, 0x35, 0x6a, 0x58, - 0xd4, 0x14, 0xa5, 0x16, 0xe7, 0x97, 0x16, 0x25, 0xa7, 0x96, 0x25, 0xe6, 0x94, 0x42, 0xd5, 0x29, - 0x5d, 0x65, 0xe4, 0x62, 0x76, 0x0f, 0x08, 0x15, 0xb2, 0xe5, 0x62, 0x2d, 
0xcd, 0xcb, 0x2c, 0x29, - 0x96, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x36, 0x52, 0xd4, 0xc3, 0x74, 0x8d, 0x5e, 0x10, 0x54, 0x7f, - 0x18, 0x48, 0xbf, 0x13, 0xcb, 0x89, 0x7b, 0xf2, 0x0c, 0x41, 0x10, 0x5d, 0x42, 0x1d, 0x8c, 0x5c, - 0x5c, 0x70, 0x27, 0x14, 0x4b, 0x30, 0x29, 0x30, 0x6b, 0x70, 0x1b, 0xc9, 0x62, 0x33, 0xc4, 0x11, - 0xa6, 0xca, 0xc9, 0x13, 0x64, 0xc0, 0xab, 0x7b, 0xf2, 0x22, 0x08, 0x8d, 0x3a, 0xf9, 0xb9, 0x99, - 0x25, 0xa9, 0xb9, 0x05, 0x25, 0x95, 0x9f, 0xee, 0xc9, 0x4b, 0x57, 0x26, 0xe6, 0xe6, 0x58, 0x29, - 0x61, 0x93, 0x55, 0x5a, 0x75, 0x5f, 0x9e, 0x0b, 0x6e, 0x52, 0x71, 0x10, 0x92, 0xdd, 0x56, 0x2c, - 0x2f, 0x16, 0xc8, 0x33, 0x3a, 0x05, 0x9d, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, - 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, - 0x94, 0x45, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0xd8, 0x7d, 0xba, - 0x79, 0xa9, 0x25, 0xe5, 0xf9, 0x45, 0xd9, 0x50, 0x5e, 0x62, 0x41, 0xa6, 0x7e, 0x7a, 0xbe, 0x7e, - 0x5e, 0x7e, 0x4a, 0xaa, 0x7e, 0x49, 0x65, 0x41, 0x6a, 0x31, 0x2c, 0xfc, 0x92, 0xd8, 0xc0, 0x41, - 0x66, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x21, 0x7f, 0x4a, 0x18, 0xc8, 0x01, 0x00, 0x00, -} - -func (this *GPU) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*GPU) - if !ok { - that2, ok := that.(GPU) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Units.Equal(&that1.Units) { - return false - } - if len(this.Attributes) != len(that1.Attributes) { - return false - } - for i := range this.Attributes { - if !this.Attributes[i].Equal(&that1.Attributes[i]) { - return false - } - } - return true -} -func (m *GPU) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GPU) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GPU) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGpu(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Units.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGpu(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGpu(dAtA []byte, offset int, v uint64) int { - offset -= sovGpu(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *GPU) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Units.Size() - n += 1 + l + sovGpu(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovGpu(uint64(l)) - } - } - return n -} - -func sovGpu(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGpu(x uint64) (n int) { - return sovGpu(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *GPU) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { 
- if shift >= 64 { - return ErrIntOverflowGpu - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GPU: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GPU: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Units", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGpu - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGpu - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGpu - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Units.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGpu - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGpu - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGpu - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGpu(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGpu - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGpu(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGpu - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGpu - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGpu - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGpu - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGpu - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGpu - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - 
ErrInvalidLengthGpu = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGpu = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGpu = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/types/v1beta3/memory.pb.go b/go/node/types/v1beta3/memory.pb.go deleted file mode 100644 index ab8c16a9..00000000 --- a/go/node/types/v1beta3/memory.pb.go +++ /dev/null @@ -1,423 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta3/memory.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Memory stores resource quantity and memory attributes -type Memory struct { - Quantity ResourceValue `protobuf:"bytes,1,opt,name=quantity,proto3" json:"size" yaml:"size"` - Attributes Attributes `protobuf:"bytes,2,rep,name=attributes,proto3,castrepeated=Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` -} - -func (m *Memory) Reset() { *m = Memory{} } -func (m *Memory) String() string { return proto.CompactTextString(m) } -func (*Memory) ProtoMessage() {} -func (*Memory) Descriptor() ([]byte, []int) { - return fileDescriptor_5f47c13a8ed1b91a, []int{0} -} -func (m *Memory) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Memory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Memory.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Memory) XXX_Merge(src proto.Message) { - xxx_messageInfo_Memory.Merge(m, src) -} -func (m *Memory) XXX_Size() int { - return m.Size() -} -func (m *Memory) XXX_DiscardUnknown() { - xxx_messageInfo_Memory.DiscardUnknown(m) -} - -var xxx_messageInfo_Memory proto.InternalMessageInfo - -func (m *Memory) GetQuantity() ResourceValue { - if m != nil { - return m.Quantity - } - return ResourceValue{} -} - -func (m *Memory) GetAttributes() Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -func init() { - proto.RegisterType((*Memory)(nil), "akash.base.v1beta3.Memory") -} - -func init() { proto.RegisterFile("akash/base/v1beta3/memory.proto", fileDescriptor_5f47c13a8ed1b91a) } - -var fileDescriptor_5f47c13a8ed1b91a = []byte{ - // 322 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x31, 0x4f, 0xc2, 0x40, - 0x14, 0xc7, 0x7b, 0x4a, 0x88, 0x29, 0x5b, 0xc3, 0x40, 0x20, 0xf6, 0xb0, 0x83, 0x61, 0xd0, 0x5e, - 0x84, 0xc5, 0xb0, 0xc9, 0xe6, 0xe0, 0xd2, 0xc1, 0x81, 0xed, 0x8a, 0x97, 0x72, 0x81, 0xe3, 0xea, - 0xdd, 0x2b, 0xa6, 0x7e, 0x02, 0x47, 0x3f, 0x82, 0xb3, 0x9f, 0x84, 0x91, 0xd1, 0xa9, 0x1a, 0x58, - 0x8c, 0x23, 0x89, 0xbb, 0x69, 0x39, 0xd0, 0xc4, 0x6e, 0xf7, 0xf2, 0x7e, 0xf7, 0x7b, 0xff, 0xfc, - 0x6d, 0x4c, 0x27, 0x54, 0x8f, 0x49, 0x48, 0x35, 0x23, 0xf3, 0x8b, 0x90, 0x01, 0xed, 0x11, 0xc1, - 0x84, 0x54, 0xa9, 0x1f, 0x2b, 
0x09, 0xd2, 0x71, 0x0a, 0xc0, 0xcf, 0x01, 0xdf, 0x00, 0xcd, 0x7a, - 0x24, 0x23, 0x59, 0xac, 0x49, 0xfe, 0xda, 0x92, 0x4d, 0xaf, 0x44, 0x45, 0x01, 0x14, 0x0f, 0x13, - 0x60, 0x86, 0x39, 0x2d, 0x61, 0x14, 0xd3, 0x32, 0x51, 0x23, 0x36, 0xa7, 0xd3, 0xc4, 0x70, 0xde, - 0x37, 0xb2, 0xab, 0x37, 0x45, 0x0c, 0x67, 0x68, 0x1f, 0xdd, 0x27, 0x74, 0x06, 0x1c, 0xd2, 0x06, - 0x6a, 0xa3, 0x4e, 0xad, 0x7b, 0xe2, 0xff, 0xcf, 0xe4, 0x07, 0xc6, 0x72, 0x9b, 0x5b, 0x06, 0xad, - 0x45, 0x86, 0xad, 0xaf, 0x0c, 0x57, 0x34, 0x7f, 0x64, 0x9b, 0x0c, 0xd7, 0x52, 0x2a, 0xa6, 0x7d, - 0x2f, 0x9f, 0xbc, 0x60, 0xef, 0x73, 0x9e, 0x90, 0x6d, 0xef, 0x23, 0xea, 0xc6, 0x41, 0xfb, 0xb0, - 0x53, 0xeb, 0x1e, 0x97, 0xe9, 0xaf, 0x76, 0xd4, 0xe0, 0xda, 0xa8, 0xeb, 0xbf, 0x1f, 0xcf, 0xa4, - 0xe0, 0xc0, 0x44, 0x0c, 0xe9, 0x26, 0xc3, 0xad, 0xed, 0xa9, 0xb2, 0xad, 0xf7, 0xfa, 0x8e, 0xed, - 0xbd, 0x49, 0x07, 0x7f, 0x6e, 0xf7, 0x2b, 0x9f, 0x2f, 0x18, 0x0d, 0x82, 0xc5, 0xca, 0x45, 0xcb, - 0x95, 0x8b, 0x3e, 0x56, 0x2e, 0x7a, 0x5e, 0xbb, 0xd6, 0x72, 0xed, 0x5a, 0x6f, 0x6b, 0xd7, 0x1a, - 0x5e, 0x46, 0x1c, 0xc6, 0x49, 0xe8, 0x8f, 0xa4, 0x20, 0x45, 0xbe, 0xf3, 0x19, 0x83, 0x07, 0xa9, - 0x26, 0x66, 0xa2, 0x31, 0x27, 0x91, 0x24, 0x33, 0x79, 0xc7, 0x08, 0xa4, 0x31, 0xd3, 0xbb, 0x7e, - 0xc3, 0x6a, 0x51, 0x69, 0xef, 0x27, 0x00, 0x00, 0xff, 0xff, 0xd9, 0xab, 0x65, 0x28, 0xeb, 0x01, - 0x00, 0x00, -} - -func (this *Memory) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Memory) - if !ok { - that2, ok := that.(Memory) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Quantity.Equal(&that1.Quantity) { - return false - } - if len(this.Attributes) != len(that1.Attributes) { - return false - } - for i := range this.Attributes { - if !this.Attributes[i].Equal(&that1.Attributes[i]) { - return false - } - } - return true -} -func (m *Memory) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Memory) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Memory) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMemory(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.Quantity.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintMemory(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintMemory(dAtA []byte, offset int, v uint64) int { - offset -= sovMemory(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Memory) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Quantity.Size() - n += 1 + l + sovMemory(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovMemory(uint64(l)) - } - } - return n -} - -func sovMemory(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozMemory(x uint64) (n int) { - return 
sovMemory(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Memory) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMemory - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Memory: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Memory: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Quantity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMemory - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMemory - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMemory - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Quantity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowMemory - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthMemory - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthMemory - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipMemory(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthMemory - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipMemory(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMemory - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMemory - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowMemory - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthMemory - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, 
ErrUnexpectedEndOfGroupMemory - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthMemory - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthMemory = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowMemory = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupMemory = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/types/v1beta3/migrate/v1beta2.go b/go/node/types/v1beta3/migrate/v1beta2.go deleted file mode 100644 index 65da1e11..00000000 --- a/go/node/types/v1beta3/migrate/v1beta2.go +++ /dev/null @@ -1,107 +0,0 @@ -package migrate - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/akash-network/akash-api/go/node/types/v1beta2" - "github.com/akash-network/akash-api/go/node/types/v1beta3" -) - -func ResourceValueFromV1Beta2(from v1beta2.ResourceValue) v1beta3.ResourceValue { - return v1beta3.NewResourceValue(from.Value()) -} - -func AttributesFromV1Beta2(from v1beta2.Attributes) v1beta3.Attributes { - res := make(v1beta3.Attributes, 0, len(from)) - - for _, attr := range from { - res = append(res, v1beta3.Attribute{ - Key: attr.Key, - Value: attr.Value, - }) - } - - return res -} - -func SignedByFromV1Beta2(from v1beta2.SignedBy) v1beta3.SignedBy { - return v1beta3.SignedBy{ - AllOf: from.AllOf, - AnyOf: from.AnyOf, - } -} - -func PlacementRequirementsFromV1Beta2(from v1beta2.PlacementRequirements) v1beta3.PlacementRequirements { - res := v1beta3.PlacementRequirements{ - SignedBy: SignedByFromV1Beta2(from.SignedBy), - Attributes: AttributesFromV1Beta2(from.Attributes), - } - - return res -} - -func CPUFromV1Beta2(from *v1beta2.CPU) *v1beta3.CPU { - if from == nil { - return nil - } - - return &v1beta3.CPU{ - Units: ResourceValueFromV1Beta2(from.Units), - Attributes: AttributesFromV1Beta2(from.Attributes), - } -} - -func MemoryFromV1Beta2(from *v1beta2.Memory) *v1beta3.Memory { - if from == nil { - return nil - } - - return &v1beta3.Memory{ - Quantity: ResourceValueFromV1Beta2(from.Quantity), - Attributes: AttributesFromV1Beta2(from.Attributes), - } -} - -func VolumesFromV1Beta2(from v1beta2.Volumes) v1beta3.Volumes { - res := make(v1beta3.Volumes, 0, len(from)) - - for _, storage := range from { - res = append(res, v1beta3.Storage{ - Name: "default", - Quantity: ResourceValueFromV1Beta2(storage.Quantity), - Attributes: AttributesFromV1Beta2(storage.Attributes), - }) - } - - return res -} - -func EndpointsFromV1Beta2(from []v1beta2.Endpoint) []v1beta3.Endpoint { - res := make([]v1beta3.Endpoint, 0, len(from)) - - for _, endpoint := range from { - res = append(res, v1beta3.Endpoint{ - Kind: v1beta3.Endpoint_Kind(endpoint.Kind), - SequenceNumber: 0, // All previous data does not have a use for sequence number - }) - } - - return res -} - -func ResourcesFromV1Beta2(id uint32, from v1beta2.ResourceUnits) v1beta3.Resources { - return v1beta3.Resources{ - ID: id, - CPU: CPUFromV1Beta2(from.CPU), - Memory: MemoryFromV1Beta2(from.Memory), - Storage: VolumesFromV1Beta2(from.Storage), - Endpoints: EndpointsFromV1Beta2(from.Endpoints), - // v1beta2 version does not have GPU, so setting default value to 0 - GPU: &v1beta3.GPU{ - Units: v1beta3.ResourceValue{ - Val: sdk.NewInt(0), - }, - Attributes: nil, - }, - } -} diff --git a/go/node/types/v1beta3/requirements.go b/go/node/types/v1beta3/requirements.go deleted file mode 100644 index bc2ebeaf..00000000 --- 
a/go/node/types/v1beta3/requirements.go +++ /dev/null @@ -1,15 +0,0 @@ -package v1beta3 - -import ( - "gopkg.in/yaml.v3" -) - -func (m *SignedBy) String() string { - res, _ := yaml.Marshal(m) - return string(res) -} - -func (m *PlacementRequirements) String() string { - res, _ := yaml.Marshal(m) - return string(res) -} diff --git a/go/node/types/v1beta3/resources.go b/go/node/types/v1beta3/resources.go deleted file mode 100644 index c782c2e2..00000000 --- a/go/node/types/v1beta3/resources.go +++ /dev/null @@ -1,194 +0,0 @@ -package v1beta3 - -import ( - "fmt" -) - -type UnitType int - -type Unit interface { - String() string -} - -type ResUnit interface { - Equals(ResUnit) bool - Add(unit ResUnit) bool -} - -type Volumes []Storage - -var _ Unit = (*CPU)(nil) -var _ Unit = (*Memory)(nil) -var _ Unit = (*Storage)(nil) -var _ Unit = (*GPU)(nil) - -func (m Resources) Validate() error { - if m.ID == 0 { - return fmt.Errorf("resources ID must be > 0") - } - - if m.CPU == nil { - return fmt.Errorf("CPU must not be nil") - } - - if m.GPU == nil { - return fmt.Errorf("GPU must not be nil") - } - - if m.Memory == nil { - return fmt.Errorf("memory must not be nil") - } - - if m.Storage == nil { - return fmt.Errorf("storage must not be nil") - } - - if m.Endpoints == nil { - return fmt.Errorf("endpoints must not be nil") - } - - return nil -} - -func (m Resources) Dup() Resources { - res := Resources{ - ID: m.ID, - CPU: m.CPU.Dup(), - GPU: m.GPU.Dup(), - Memory: m.Memory.Dup(), - Storage: m.Storage.Dup(), - Endpoints: m.Endpoints.Dup(), - } - - return res -} - -func (m Resources) In(rhs Resources) bool { - if !m.CPU.Equal(rhs.CPU) || !m.GPU.Equal(rhs.GPU) || - !m.Memory.Equal(rhs.Memory) || !m.Storage.Equal(rhs.Storage) { - return false - } - -loop: - for _, ep := range m.Endpoints { - for _, rep := range rhs.Endpoints { - if ep.Equal(rep) { - continue loop - } - } - - return false - } - - return true -} - -func (m CPU) Dup() *CPU { - return &CPU{ - Units: m.Units.Dup(), - Attributes: m.Attributes.Dup(), - } -} - -func (m Memory) Dup() *Memory { - return &Memory{ - Quantity: m.Quantity.Dup(), - Attributes: m.Attributes.Dup(), - } -} - -func (m Storage) Dup() *Storage { - return &Storage{ - Name: m.Name, - Quantity: m.Quantity.Dup(), - Attributes: m.Attributes.Dup(), - } -} - -func (m GPU) Dup() *GPU { - return &GPU{ - Units: m.Units.Dup(), - Attributes: m.Attributes.Dup(), - } -} - -func (m Volumes) Equal(rhs Volumes) bool { - for i := range m { - if !m[i].Equal(rhs[i]) { - return false - } - } - - return true -} - -func (m Volumes) Dup() Volumes { - res := make(Volumes, 0, len(m)) - - for _, storage := range m { - res = append(res, *storage.Dup()) - } - - return res -} - -func (m *CPU) EqualUnits(that *CPU) bool { - if that == nil { - return m == nil - } else if m == nil { - return false - } - - if !m.Units.Equal(&that.Units) { - return false - } - - return true -} - -func (m *GPU) EqualUnits(that *GPU) bool { - if that == nil { - return m == nil - } else if m == nil { - return false - } - - if !m.Units.Equal(&that.Units) { - return false - } - - return true -} - -func (m *Memory) EqualUnits(that *Memory) bool { - if that == nil { - return m == nil - } else if m == nil { - return false - } - - if !m.Quantity.Equal(&that.Quantity) { - return false - } - - return true -} - -func (m Volumes) EqualUnits(that Volumes) bool { - if len(m) != len(that) { - return false - } - - for idx, vol := range m { - if vol.Name != that[idx].Name { - return false - } - - if !vol.Quantity.Equal(&that[idx].Quantity) { - 
return false - } - - } - - return true -} diff --git a/go/node/types/v1beta3/resourcevalue.go b/go/node/types/v1beta3/resourcevalue.go deleted file mode 100644 index 0d96f8c3..00000000 --- a/go/node/types/v1beta3/resourcevalue.go +++ /dev/null @@ -1,57 +0,0 @@ -package v1beta3 - -import ( - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/pkg/errors" -) - -var ( - ErrOverflow = errors.Errorf("resource value overflow") - ErrCannotSub = errors.Errorf("cannot subtract resources when lhs does not have same units as rhs") - ErrNegativeResult = errors.Errorf("result of subtraction is negative") -) - -/* -ResourceValue the big point of this small change is to ensure math operations on resources -not resulting with negative value which panic on unsigned types as well as overflow which leads to panic too -instead reasonable error is returned. -Each resource using this type as value can take extra advantage of it to check upper bounds -For example in SDL v1 CPU units were handled as uint32 and operation like math.MaxUint32 + 2 -would cause application to panic. But nowadays - const CPULimit = math.MaxUint32 - - func (c *CPU) add(rhs CPU) error { - res, err := c.Units.add(rhs.Units) - if err != nil { - return err - } - - if res.Units.Value() > CPULimit { - return ErrOverflow - } - - c.Units = res - - return nil - } -*/ - -func NewResourceValue(val uint64) ResourceValue { - res := ResourceValue{ - Val: sdk.NewIntFromUint64(val), - } - - return res -} - -func (m ResourceValue) Value() uint64 { - return m.Val.Uint64() -} - -func (m ResourceValue) Dup() ResourceValue { - res := ResourceValue{ - Val: sdk.NewIntFromBigInt(m.Val.BigInt()), - } - - return res -} diff --git a/go/node/types/v1beta3/resourcevalue.pb.go b/go/node/types/v1beta3/resourcevalue.pb.go deleted file mode 100644 index 1a6b63f0..00000000 --- a/go/node/types/v1beta3/resourcevalue.pb.go +++ /dev/null @@ -1,343 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta3/resourcevalue.proto - -package v1beta3 - -import ( - fmt "fmt" - github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Unit stores cpu, memory and storage metrics -type ResourceValue struct { - Val github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,1,opt,name=val,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"val"` -} - -func (m *ResourceValue) Reset() { *m = ResourceValue{} } -func (m *ResourceValue) String() string { return proto.CompactTextString(m) } -func (*ResourceValue) ProtoMessage() {} -func (*ResourceValue) Descriptor() ([]byte, []int) { - return fileDescriptor_a8f01ca02b3f00f6, []int{0} -} -func (m *ResourceValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResourceValue.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResourceValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceValue.Merge(m, src) -} -func (m *ResourceValue) XXX_Size() int { - return m.Size() -} -func (m *ResourceValue) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceValue.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceValue proto.InternalMessageInfo - -func init() { - proto.RegisterType((*ResourceValue)(nil), "akash.base.v1beta3.ResourceValue") -} - -func init() { - proto.RegisterFile("akash/base/v1beta3/resourcevalue.proto", fileDescriptor_a8f01ca02b3f00f6) -} - -var fileDescriptor_a8f01ca02b3f00f6 = []byte{ - // 227 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4b, 0xcc, 0x4e, 0x2c, - 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd6, 0x2f, - 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x2d, 0x4b, 0xcc, 0x29, 0x4d, 0xd5, 0x2b, 0x28, 0xca, - 0x2f, 0xc9, 0x17, 0x12, 0x02, 0xab, 0xd3, 0x03, 0xa9, 0xd3, 0x83, 0xaa, 0x93, 0x12, 0x49, 0xcf, - 0x4f, 0xcf, 0x07, 0x4b, 0xeb, 0x83, 0x58, 0x10, 0x95, 0x4a, 0xe1, 0x5c, 0xbc, 0x41, 0x50, 0x03, - 0xc2, 0x40, 0x06, 0x08, 0x39, 0x70, 0x31, 0x97, 0x25, 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, - 0x38, 0xe9, 0x9d, 0xb8, 0x27, 0xcf, 0x70, 0xeb, 0x9e, 0xbc, 0x5a, 0x7a, 0x66, 0x49, 0x46, 0x69, - 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x72, 0x7e, 0x71, 0x6e, 0x7e, 0x31, 0x94, 0xd2, 0x2d, 0x4e, - 0xc9, 0xd6, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0xf3, 0xcc, 0x2b, 0x09, 0x02, 0x69, 0xb5, 0x62, - 0x79, 0xb1, 0x40, 0x9e, 0xd1, 0x29, 0xe8, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, - 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, - 0xa2, 0x2c, 0x90, 0x0c, 0x03, 0xbb, 0x53, 0x37, 0x2f, 0xb5, 0xa4, 0x3c, 0xbf, 0x28, 0x1b, 0xca, - 0x4b, 0x2c, 0xc8, 0xd4, 0x4f, 0xcf, 0xd7, 0xcf, 0xcb, 0x4f, 0x49, 0x85, 0x18, 0x0d, 0xf3, 0x6a, - 0x12, 0x1b, 0xd8, 0xcd, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x8f, 0xc4, 0x66, 0xec, 0x07, - 0x01, 0x00, 0x00, -} - -func (this *ResourceValue) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*ResourceValue) - if !ok { - that2, ok := that.(ResourceValue) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if !this.Val.Equal(that1.Val) { - return false - } - return true -} -func (m *ResourceValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err 
:= m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size := m.Val.Size() - i -= size - if _, err := m.Val.MarshalTo(dAtA[i:]); err != nil { - return 0, err - } - i = encodeVarintResourcevalue(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintResourcevalue(dAtA []byte, offset int, v uint64) int { - offset -= sovResourcevalue(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *ResourceValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Val.Size() - n += 1 + l + sovResourcevalue(uint64(l)) - return n -} - -func sovResourcevalue(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozResourcevalue(x uint64) (n int) { - return sovResourcevalue(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *ResourceValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResourcevalue - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowResourcevalue - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthResourcevalue - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthResourcevalue - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Val.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipResourcevalue(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthResourcevalue - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipResourcevalue(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResourcevalue - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResourcevalue - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if 
dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowResourcevalue - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthResourcevalue - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupResourcevalue - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthResourcevalue - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthResourcevalue = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowResourcevalue = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupResourcevalue = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/types/v1beta3/storage.pb.go b/go/node/types/v1beta3/storage.pb.go deleted file mode 100644 index 9202a1e0..00000000 --- a/go/node/types/v1beta3/storage.pb.go +++ /dev/null @@ -1,478 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: akash/base/v1beta3/storage.proto - -package v1beta3 - -import ( - fmt "fmt" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -// Storage stores resource quantity and storage attributes -type Storage struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` - Quantity ResourceValue `protobuf:"bytes,2,opt,name=quantity,proto3" json:"size" yaml:"size"` - Attributes Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` -} - -func (m *Storage) Reset() { *m = Storage{} } -func (m *Storage) String() string { return proto.CompactTextString(m) } -func (*Storage) ProtoMessage() {} -func (*Storage) Descriptor() ([]byte, []int) { - return fileDescriptor_293fa891b98ded01, []int{0} -} -func (m *Storage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Storage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_Storage.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *Storage) XXX_Merge(src proto.Message) { - xxx_messageInfo_Storage.Merge(m, src) -} -func (m *Storage) XXX_Size() int { - return m.Size() -} -func (m *Storage) XXX_DiscardUnknown() { - xxx_messageInfo_Storage.DiscardUnknown(m) -} - -var xxx_messageInfo_Storage proto.InternalMessageInfo - -func (m *Storage) GetName() string { - if m != nil { - return m.Name - } - return "" -} - -func (m *Storage) GetQuantity() ResourceValue { - if m != nil { - return m.Quantity - } - return ResourceValue{} -} - -func (m *Storage) GetAttributes() Attributes { - if m != nil { - return m.Attributes - } - return nil -} - -func init() { - proto.RegisterType((*Storage)(nil), "akash.base.v1beta3.Storage") -} - -func init() { proto.RegisterFile("akash/base/v1beta3/storage.proto", fileDescriptor_293fa891b98ded01) } - -var fileDescriptor_293fa891b98ded01 = []byte{ - // 348 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x3f, 0x4f, 0xc2, 0x40, - 0x18, 0xc6, 0x7b, 0x40, 0xfc, 0x73, 0x6c, 0x0d, 0x89, 0x04, 0x62, 0x5b, 0x3b, 0x18, 0x12, 0xb5, - 0x17, 0x61, 0x31, 0x6c, 0x76, 0x73, 0xad, 0x89, 0x03, 0xdb, 0x15, 0x2f, 0xa5, 0x81, 0xf6, 0x6a, - 0xef, 0x2d, 0xa6, 0x7e, 0x02, 0x47, 0x3f, 0x82, 0x89, 0x9b, 0x9f, 0x84, 0x91, 0xd1, 0xa9, 0x1a, - 0x58, 0x8c, 0x23, 0x9f, 0xc0, 0xb4, 0x1c, 0x48, 0x62, 0xb7, 0x3e, 0xbd, 0xdf, 0xfd, 0x9e, 0x7b, - 0xf3, 0x62, 0x83, 0x8e, 0xa9, 0x18, 0x11, 0x97, 0x0a, 0x46, 0xa6, 0x97, 0x2e, 0x03, 0xda, 0x23, - 0x02, 0x78, 0x4c, 0x3d, 0x66, 0x45, 0x31, 0x07, 0xae, 0xaa, 0x05, 0x61, 0xe5, 0x84, 0x25, 0x89, - 0x56, 0xc3, 0xe3, 0x1e, 0x2f, 0x8e, 0x49, 0xfe, 0xb5, 0x26, 0x5b, 0x66, 0x89, 0x8b, 0x02, 0xc4, - 0xbe, 0x9b, 0x80, 0xb4, 0xb5, 0x4e, 0x4b, 0x98, 0x98, 0x09, 0x9e, 0xc4, 0x43, 0x36, 0xa5, 0x93, - 0x44, 0x72, 0xe6, 0x5b, 0x05, 0xef, 0xdf, 0xae, 0xdf, 0xa1, 0x9e, 0xe1, 0x5a, 0x48, 0x03, 0xd6, - 0x44, 0x06, 0xea, 0x1c, 0xda, 0x47, 0x3f, 0x99, 0x5e, 0xe4, 0x55, 0xa6, 0xd7, 0x53, 0x1a, 0x4c, - 0xfa, 0x66, 0x9e, 0x4c, 0xa7, 0xf8, 0xa9, 0x0e, 0xf0, 0xc1, 0x43, 0x42, 0x43, 0xf0, 0x21, 0x6d, - 0x56, 0x0c, 0xd4, 0xa9, 0x77, 0x4f, 0xac, 0xff, 0x13, 0x58, 0x8e, 0xec, 0xbc, 0xcb, 0x3b, 0xed, - 0xf6, 0x2c, 0xd3, 0x95, 0xdc, 0x2b, 0xfc, 0xa7, 0x1d, 0x6f, 0x9e, 0x4c, 0x67, 0xeb, 0x53, 0x9f, - 0x11, 0xc6, 0xdb, 0x81, 0x44, 0xb3, 0x6a, 0x54, 0x3b, 0xf5, 0xee, 0x71, 0x99, 0xfe, 0x7a, 0x43, - 0xd9, 0x37, 0x52, 0xdd, 0xf8, 0xbb, 0x78, 0xce, 
0x03, 0x1f, 0x58, 0x10, 0x41, 0xba, 0xca, 0xf4, - 0xf6, 0xba, 0xaa, 0xec, 0xd4, 0x7c, 0xff, 0xd4, 0xf1, 0xd6, 0x24, 0x9c, 0x9d, 0xee, 0x7e, 0xed, - 0xfb, 0x55, 0x47, 0xb6, 0x33, 0x5b, 0x68, 0x68, 0xbe, 0xd0, 0xd0, 0xd7, 0x42, 0x43, 0x2f, 0x4b, - 0x4d, 0x99, 0x2f, 0x35, 0xe5, 0x63, 0xa9, 0x29, 0x83, 0x2b, 0xcf, 0x87, 0x51, 0xe2, 0x5a, 0x43, - 0x1e, 0x90, 0xe2, 0x7d, 0x17, 0x21, 0x83, 0x47, 0x1e, 0x8f, 0x65, 0xa2, 0x91, 0x4f, 0x3c, 0x4e, - 0x42, 0x7e, 0xcf, 0x08, 0xa4, 0x11, 0x13, 0x9b, 0x6d, 0xb8, 0x7b, 0xc5, 0x02, 0x7a, 0xbf, 0x01, - 0x00, 0x00, 0xff, 0xff, 0xda, 0x96, 0xa5, 0xba, 0x1a, 0x02, 0x00, 0x00, -} - -func (this *Storage) Equal(that interface{}) bool { - if that == nil { - return this == nil - } - - that1, ok := that.(*Storage) - if !ok { - that2, ok := that.(Storage) - if ok { - that1 = &that2 - } else { - return false - } - } - if that1 == nil { - return this == nil - } else if this == nil { - return false - } - if this.Name != that1.Name { - return false - } - if !this.Quantity.Equal(&that1.Quantity) { - return false - } - if len(this.Attributes) != len(that1.Attributes) { - return false - } - for i := range this.Attributes { - if !this.Attributes[i].Equal(&that1.Attributes[i]) { - return false - } - } - return true -} -func (m *Storage) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Storage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Storage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Attributes) > 0 { - for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStorage(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - { - size, err := m.Quantity.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintStorage(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintStorage(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintStorage(dAtA []byte, offset int, v uint64) int { - offset -= sovStorage(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Storage) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - if l > 0 { - n += 1 + l + sovStorage(uint64(l)) - } - l = m.Quantity.Size() - n += 1 + l + sovStorage(uint64(l)) - if len(m.Attributes) > 0 { - for _, e := range m.Attributes { - l = e.Size() - n += 1 + l + sovStorage(uint64(l)) - } - } - return n -} - -func sovStorage(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozStorage(x uint64) (n int) { - return sovStorage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Storage) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStorage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 
3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Storage: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Storage: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStorage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthStorage - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthStorage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Quantity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStorage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStorage - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStorage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Quantity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStorage - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStorage - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStorage - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attributes = append(m.Attributes, Attribute{}) - if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipStorage(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthStorage - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipStorage(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStorage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStorage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowStorage - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthStorage - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupStorage - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthStorage - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthStorage = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowStorage = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupStorage = fmt.Errorf("proto: unexpected end of group") -) diff --git a/go/node/utils/auth_query.go b/go/node/utils/auth_query.go new file mode 100644 index 00000000..b9bf3ed8 --- /dev/null +++ b/go/node/utils/auth_query.go @@ -0,0 +1,150 @@ +package utils + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + coretypes "github.com/cometbft/cometbft/rpc/core/types" + + "github.com/cosmos/cosmos-sdk/client" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// QueryTxsByEvents performs a search for transactions for a given set of events +// via the Tendermint RPC. An event takes the form of: +// "{eventAttribute}.{attributeKey} = '{attributeValue}'". Each event is +// concatenated with an 'AND' operand. It returns a slice of Info object +// containing txs and metadata. An error is returned if the query fails. +// If an empty string is provided it will order txs by asc +func QueryTxsByEvents(ctx context.Context, cctx client.Context, events []string, page, limit int, orderBy string) (*sdk.SearchTxsResult, error) { + if len(events) == 0 { + return nil, errors.New("must declare at least one event to search") + } + + if page <= 0 { + return nil, errors.New("page must be greater than 0") + } + + if limit <= 0 { + return nil, errors.New("limit must be greater than 0") + } + + // XXX: implement ANY + query := strings.Join(events, " AND ") + + node, err := cctx.GetNode() + if err != nil { + return nil, err + } + + // TODO: this may not always need to be proven + // https://github.com/cosmos/cosmos-sdk/issues/6807 + resTxs, err := node.TxSearch(ctx, query, true, &page, &limit, orderBy) + if err != nil { + return nil, err + } + + resBlocks, err := getBlocksForTxResults(ctx, cctx, resTxs.Txs) + if err != nil { + return nil, err + } + + txs, err := formatTxResults(cctx.TxConfig, resTxs.Txs, resBlocks) + if err != nil { + return nil, err + } + + result := sdk.NewSearchTxsResult(uint64(resTxs.TotalCount), uint64(len(txs)), uint64(page), uint64(limit), txs) // nolint: gosec + + return result, nil +} + +// QueryTx queries for a single transaction by a hash string in hex format. An +// error is returned if the transaction does not exist or cannot be queried. 
+func QueryTx(ctx context.Context, cctx client.Context, hash []byte) (*sdk.TxResponse, error) { + node, err := cctx.GetNode() + if err != nil { + return nil, err + } + + // TODO: this may not always need to be proven + // https://github.com/cosmos/cosmos-sdk/issues/6807 + resTx, err := node.Tx(ctx, hash, true) + if err != nil { + return nil, err + } + + resBlocks, err := getBlocksForTxResults(ctx, cctx, []*coretypes.ResultTx{resTx}) + if err != nil { + return nil, err + } + + out, err := mkTxResult(cctx.TxConfig, resTx, resBlocks[resTx.Height]) + if err != nil { + return out, err + } + + return out, nil +} + +// formatTxResults parses the indexed txs into a slice of TxResponse objects. +func formatTxResults(txConfig client.TxConfig, resTxs []*coretypes.ResultTx, resBlocks map[int64]*coretypes.ResultBlock) ([]*sdk.TxResponse, error) { + var err error + out := make([]*sdk.TxResponse, len(resTxs)) + for i := range resTxs { + out[i], err = mkTxResult(txConfig, resTxs[i], resBlocks[resTxs[i].Height]) + if err != nil { + return nil, err + } + } + + return out, nil +} + +func getBlocksForTxResults(ctx context.Context, cctx client.Context, resTxs []*coretypes.ResultTx) (map[int64]*coretypes.ResultBlock, error) { + node, err := cctx.GetNode() + if err != nil { + return nil, err + } + + resBlocks := make(map[int64]*coretypes.ResultBlock) + + for _, resTx := range resTxs { + if _, ok := resBlocks[resTx.Height]; !ok { + resBlock, err := node.Block(ctx, &resTx.Height) + if err != nil { + return nil, err + } + + resBlocks[resTx.Height] = resBlock + } + } + + return resBlocks, nil +} + +func mkTxResult(txConfig client.TxConfig, resTx *coretypes.ResultTx, resBlock *coretypes.ResultBlock) (*sdk.TxResponse, error) { + txb, err := txConfig.TxDecoder()(resTx.Tx) + if err != nil { + return nil, err + } + + p, ok := txb.(intoAny) + if !ok { + return nil, fmt.Errorf("expecting a type implementing intoAny, got: %T", txb) + } + + asAny := p.AsAny() + + return sdk.NewResponseResultTx(resTx, asAny, resBlock.Block.Time.Format(time.RFC3339)), nil +} + +// Deprecated: this interface is used only internally for scenario we are +// deprecating (StdTxConfig support) +type intoAny interface { + AsAny() *codectypes.Any +} diff --git a/go/node/utils/gov_query.go b/go/node/utils/gov_query.go new file mode 100644 index 00000000..7370430e --- /dev/null +++ b/go/node/utils/gov_query.go @@ -0,0 +1,200 @@ +package utils + +import ( + "context" + "fmt" + + "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/x/gov/types" + v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" +) + +const ( + defaultPage = 1 + defaultLimit = 30 +) + +// Proposer contains metadata of a governance proposal used for querying a +// proposer. +type Proposer struct { + ProposalID uint64 `json:"proposal_id" yaml:"proposal_id"` + Proposer string `json:"proposer" yaml:"proposer"` +} + +// NewProposer returns a new Proposer given id and proposer +func NewProposer(proposalID uint64, proposer string) Proposer { + return Proposer{proposalID, proposer} +} + +// String implements the fmt.Stringer interface. +func (p Proposer) String() string { + return fmt.Sprintf("Proposal with ID %d was proposed by %s", p.ProposalID, p.Proposer) +} + +// QueryVotesByTxQuery will query for votes via a direct txs tags query. It +// will fetch and build votes directly from the returned txs and returns a JSON +// marshalled result or any error that occurred. 
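For orientation, a minimal sketch of how the tx-query helpers added above might be consumed. It is not part of this change; the `pkg.akt.dev/go/node/utils` import path, the event strings, and the pagination values are assumptions for illustration only.

package examples

import (
	"context"
	"fmt"

	"github.com/cosmos/cosmos-sdk/client"

	"pkg.akt.dev/go/node/utils" // assumed module path, matching the rest of this change
)

func printDeploymentTxs(ctx context.Context, cctx client.Context) error {
	// Each event takes the "{eventAttribute}.{attributeKey}='{attributeValue}'"
	// form described above; QueryTxsByEvents joins them with AND.
	events := []string{
		"message.module='deployment'", // hypothetical module filter
		"message.sender='akash1...'",  // hypothetical sender placeholder
	}

	// Page 1, at most 30 results, default (ascending) ordering.
	res, err := utils.QueryTxsByEvents(ctx, cctx, events, 1, 30, "")
	if err != nil {
		return err
	}

	for _, tx := range res.Txs {
		fmt.Printf("height=%d hash=%s\n", tx.Height, tx.TxHash)
	}

	return nil
}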
+func QueryVotesByTxQuery(ctx context.Context, cctx client.Context, params v1.QueryProposalVotesParams) ([]byte, error) { + var ( + votes []*v1.Vote + nextTxPage = defaultPage + totalLimit = params.Limit * params.Page + ) + + // query interrupted either if we collected enough votes or tx indexer run out of relevant txs + for len(votes) < totalLimit { + // Search for both (legacy) votes and weighted votes. + q := fmt.Sprintf("%s.%s='%d'", types.EventTypeProposalVote, types.AttributeKeyProposalID, params.ProposalID) + searchResult, err := QueryTxsByEvents(ctx, cctx, []string{q}, nextTxPage, defaultLimit, "") + if err != nil { + return nil, err + } + + for _, info := range searchResult.Txs { + for _, msg := range info.GetTx().GetMsgs() { + if voteMsg, ok := msg.(*v1beta1.MsgVote); ok { + votes = append(votes, &v1.Vote{ + Voter: voteMsg.Voter, + ProposalId: params.ProposalID, + Options: v1.NewNonSplitVoteOption(v1.VoteOption(voteMsg.Option)), + }) + } + + if voteMsg, ok := msg.(*v1.MsgVote); ok { + votes = append(votes, &v1.Vote{ + Voter: voteMsg.Voter, + ProposalId: params.ProposalID, + Options: v1.NewNonSplitVoteOption(voteMsg.Option), + }) + } + + if voteWeightedMsg, ok := msg.(*v1beta1.MsgVoteWeighted); ok { + votes = append(votes, convertVote(voteWeightedMsg)) + } + + if voteWeightedMsg, ok := msg.(*v1.MsgVoteWeighted); ok { + votes = append(votes, &v1.Vote{ + Voter: voteWeightedMsg.Voter, + ProposalId: params.ProposalID, + Options: voteWeightedMsg.Options, + }) + } + } + } + if len(searchResult.Txs) != defaultLimit { + break + } + + nextTxPage++ + } + start, end := client.Paginate(len(votes), params.Page, params.Limit, 100) + if start < 0 || end < 0 { + votes = []*v1.Vote{} + } else { + votes = votes[start:end] + } + + bz, err := cctx.LegacyAmino.MarshalJSON(votes) + if err != nil { + return nil, err + } + + return bz, nil +} + +// QueryVoteByTxQuery will query for a single vote via a direct txs tags query. 
+func QueryVoteByTxQuery(ctx context.Context, cctx client.Context, params v1.QueryVoteParams) ([]byte, error) { + q1 := fmt.Sprintf("%s.%s='%d'", types.EventTypeProposalVote, types.AttributeKeyProposalID, params.ProposalID) + q2 := fmt.Sprintf("%s.%s='%s'", sdk.EventTypeMessage, sdk.AttributeKeySender, params.Voter.String()) + q3 := fmt.Sprintf("%s.%s='%s'", sdk.EventTypeMessage, sdk.AttributeKeySender, params.Voter) + searchResult, err := QueryTxsByEvents(ctx, cctx, []string{fmt.Sprintf("%s AND (%s OR %s)", q1, q2, q3)}, 1, 30, "") + if err != nil { + return nil, err + } + + for _, info := range searchResult.Txs { + for _, msg := range info.GetTx().GetMsgs() { + // there should only be a single vote under the given conditions + var vote *v1.Vote + if voteMsg, ok := msg.(*v1beta1.MsgVote); ok { + vote = &v1.Vote{ + Voter: voteMsg.Voter, + ProposalId: params.ProposalID, + Options: v1.NewNonSplitVoteOption(v1.VoteOption(voteMsg.Option)), + } + } + + if voteMsg, ok := msg.(*v1.MsgVote); ok { + vote = &v1.Vote{ + Voter: voteMsg.Voter, + ProposalId: params.ProposalID, + Options: v1.NewNonSplitVoteOption(voteMsg.Option), + } + } + + if voteWeightedMsg, ok := msg.(*v1beta1.MsgVoteWeighted); ok { + vote = convertVote(voteWeightedMsg) + } + + if voteWeightedMsg, ok := msg.(*v1.MsgVoteWeighted); ok { + vote = &v1.Vote{ + Voter: voteWeightedMsg.Voter, + ProposalId: params.ProposalID, + Options: voteWeightedMsg.Options, + } + } + + if vote != nil { + bz, err := cctx.Codec.MarshalJSON(vote) + if err != nil { + return nil, err + } + + return bz, nil + } + } + } + + return nil, fmt.Errorf("address '%s' did not vote on proposalID %d", params.Voter, params.ProposalID) +} + +// QueryProposerByTxQuery will query for a proposer of a governance proposal by ID. +func QueryProposerByTxQuery(ctx context.Context, cctx client.Context, proposalID uint64) (Proposer, error) { + q := fmt.Sprintf("%s.%s='%d'", types.EventTypeSubmitProposal, types.AttributeKeyProposalID, proposalID) + searchResult, err := QueryTxsByEvents(ctx, cctx, []string{q}, defaultPage, defaultLimit, "") + if err != nil { + return Proposer{}, err + } + + for _, info := range searchResult.Txs { + for _, msg := range info.GetTx().GetMsgs() { + // there should only be a single proposal under the given conditions + if subMsg, ok := msg.(*v1beta1.MsgSubmitProposal); ok { + return NewProposer(proposalID, subMsg.Proposer), nil + } + if subMsg, ok := msg.(*v1.MsgSubmitProposal); ok { + return NewProposer(proposalID, subMsg.Proposer), nil + } + } + } + + return Proposer{}, fmt.Errorf("failed to find the proposer for proposalID %d", proposalID) +} + +// convertVote converts a MsgVoteWeighted into a *v1.Vote. 
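Similarly, a hedged sketch of using the gov helpers above (QueryProposerByTxQuery and QueryVotesByTxQuery). The proposal ID, pagination values, and the `pkg.akt.dev/go/node/utils` import path are illustrative assumptions.

package examples

import (
	"context"
	"fmt"

	"github.com/cosmos/cosmos-sdk/client"
	v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1"

	"pkg.akt.dev/go/node/utils" // assumed module path
)

func dumpProposal(ctx context.Context, cctx client.Context) error {
	const proposalID uint64 = 1 // hypothetical proposal ID

	// Recover the proposer from the indexed MsgSubmitProposal tx.
	proposer, err := utils.QueryProposerByTxQuery(ctx, cctx, proposalID)
	if err != nil {
		return err
	}
	fmt.Println(proposer.String())

	// Votes are collected from indexed txs until page*limit candidates are
	// found or the indexer runs out of matching txs, then paginated.
	bz, err := utils.QueryVotesByTxQuery(ctx, cctx, v1.QueryProposalVotesParams{
		ProposalID: proposalID,
		Page:       1,
		Limit:      30,
	})
	if err != nil {
		return err
	}
	fmt.Println(string(bz)) // legacy-amino JSON encoded []*v1.Vote

	return nil
}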
+func convertVote(v *v1beta1.MsgVoteWeighted) *v1.Vote { + opts := make([]*v1.WeightedVoteOption, len(v.Options)) + for i, o := range v.Options { + opts[i] = &v1.WeightedVoteOption{ + Option: v1.VoteOption(o.Option), + Weight: o.Weight.String(), + } + } + return &v1.Vote{ + Voter: v.Voter, + ProposalId: v.ProposalId, + Options: opts, + } +} diff --git a/go/provider/lease/v1/service.pb.go b/go/provider/lease/v1/service.pb.go index 9d5dbc28..3c13d14a 100644 --- a/go/provider/lease/v1/service.pb.go +++ b/go/provider/lease/v1/service.pb.go @@ -6,18 +6,18 @@ package v1 import ( context "context" fmt "fmt" - github_com_akash_network_akash_api_go_manifest_v2beta2 "github.com/akash-network/akash-api/go/manifest/v2beta2" - v2beta2 "github.com/akash-network/akash-api/go/manifest/v2beta2" - v1beta4 "github.com/akash-network/akash-api/go/node/market/v1beta4" - _ "github.com/gogo/protobuf/gogoproto" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" io "io" math "math" math_bits "math/bits" + pkg_akt_dev_go_manifest_v2beta3 "pkg.akt.dev/go/manifest/v2beta3" + v2beta3 "pkg.akt.dev/go/manifest/v2beta3" + v1 "pkg.akt.dev/go/node/market/v1" ) // Reference imports to suppress errors if they are not otherwise used. @@ -349,8 +349,8 @@ func (m *ServiceStatus) GetIps() []LeaseIPStatus { // SendManifestRequest is request type for the SendManifest Providers RPC method type SendManifestRequest struct { - LeaseId v1beta4.LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"LeaseID" yaml:"LeaseID"` - Manifest github_com_akash_network_akash_api_go_manifest_v2beta2.Manifest `protobuf:"bytes,2,rep,name=manifest,proto3,castrepeated=github.com/akash-network/akash-api/go/manifest/v2beta2.Manifest" json:"manifest" yaml:"manifest"` + LeaseId v1.LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"LeaseID" yaml:"LeaseID"` + Manifest pkg_akt_dev_go_manifest_v2beta3.Manifest `protobuf:"bytes,2,rep,name=manifest,proto3,castrepeated=pkg.akt.dev/go/manifest/v2beta3.Manifest" json:"manifest" yaml:"manifest"` } func (m *SendManifestRequest) Reset() { *m = SendManifestRequest{} } @@ -386,14 +386,14 @@ func (m *SendManifestRequest) XXX_DiscardUnknown() { var xxx_messageInfo_SendManifestRequest proto.InternalMessageInfo -func (m *SendManifestRequest) GetLeaseId() v1beta4.LeaseID { +func (m *SendManifestRequest) GetLeaseId() v1.LeaseID { if m != nil { return m.LeaseId } - return v1beta4.LeaseID{} + return v1.LeaseID{} } -func (m *SendManifestRequest) GetManifest() github_com_akash_network_akash_api_go_manifest_v2beta2.Manifest { +func (m *SendManifestRequest) GetManifest() pkg_akt_dev_go_manifest_v2beta3.Manifest { if m != nil { return m.Manifest } @@ -439,8 +439,8 @@ var xxx_messageInfo_SendManifestResponse proto.InternalMessageInfo // ServiceLogsRequest type ServiceLogsRequest struct { - LeaseId v1beta4.LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"LeaseID" yaml:"LeaseID"` - Services []string `protobuf:"bytes,2,rep,name=services,proto3" json:"services" yaml:"services"` + LeaseId v1.LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"LeaseID" yaml:"LeaseID"` + Services []string `protobuf:"bytes,2,rep,name=services,proto3" json:"services" yaml:"services"` } func (m *ServiceLogsRequest) 
Reset() { *m = ServiceLogsRequest{} } @@ -476,11 +476,11 @@ func (m *ServiceLogsRequest) XXX_DiscardUnknown() { var xxx_messageInfo_ServiceLogsRequest proto.InternalMessageInfo -func (m *ServiceLogsRequest) GetLeaseId() v1beta4.LeaseID { +func (m *ServiceLogsRequest) GetLeaseId() v1.LeaseID { if m != nil { return m.LeaseId } - return v1beta4.LeaseID{} + return v1.LeaseID{} } func (m *ServiceLogsRequest) GetServices() []string { @@ -590,7 +590,7 @@ func (m *ServiceLogsResponse) GetServices() []*ServiceLogs { // ShellRequest type ShellRequest struct { - LeaseId v1beta4.LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"LeaseID" yaml:"LeaseID"` + LeaseId v1.LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"LeaseID" yaml:"LeaseID"` } func (m *ShellRequest) Reset() { *m = ShellRequest{} } @@ -626,17 +626,17 @@ func (m *ShellRequest) XXX_DiscardUnknown() { var xxx_messageInfo_ShellRequest proto.InternalMessageInfo -func (m *ShellRequest) GetLeaseId() v1beta4.LeaseID { +func (m *ShellRequest) GetLeaseId() v1.LeaseID { if m != nil { return m.LeaseId } - return v1beta4.LeaseID{} + return v1.LeaseID{} } // ServiceStatusRequest type ServiceStatusRequest struct { - LeaseId v1beta4.LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"LeaseID" yaml:"LeaseID"` - Services []string `protobuf:"bytes,2,rep,name=services,proto3" json:"services" yaml:"services"` + LeaseId v1.LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"LeaseID" yaml:"LeaseID"` + Services []string `protobuf:"bytes,2,rep,name=services,proto3" json:"services" yaml:"services"` } func (m *ServiceStatusRequest) Reset() { *m = ServiceStatusRequest{} } @@ -672,11 +672,11 @@ func (m *ServiceStatusRequest) XXX_DiscardUnknown() { var xxx_messageInfo_ServiceStatusRequest proto.InternalMessageInfo -func (m *ServiceStatusRequest) GetLeaseId() v1beta4.LeaseID { +func (m *ServiceStatusRequest) GetLeaseId() v1.LeaseID { if m != nil { return m.LeaseId } - return v1beta4.LeaseID{} + return v1.LeaseID{} } func (m *ServiceStatusRequest) GetServices() []string { @@ -751,71 +751,70 @@ func init() { } var fileDescriptor_3a2116fc700fbddb = []byte{ - // 1017 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x57, 0x4d, 0x6f, 0x1b, 0x45, - 0x18, 0xce, 0xae, 0xed, 0xc4, 0x1e, 0xc7, 0x2d, 0x9d, 0x04, 0x62, 0x22, 0xe2, 0x31, 0x53, 0x5a, - 0xa5, 0x4a, 0xb2, 0xdb, 0x18, 0x24, 0x24, 0x7a, 0x40, 0x5a, 0x10, 0x55, 0xa5, 0x82, 0xd2, 0xcd, - 0x0d, 0x0e, 0xd6, 0x38, 0x3b, 0xac, 0x57, 0x59, 0x7b, 0x96, 0xd9, 0xb5, 0x4b, 0x6f, 0xfc, 0x04, - 0xfe, 0x00, 0x17, 0x8e, 0x95, 0x40, 0xdc, 0xf9, 0x03, 0x3d, 0xf6, 0xd8, 0xd3, 0x82, 0x92, 0x9b, - 0x0f, 0x1c, 0xfc, 0x0b, 0xd0, 0x7c, 0xec, 0xae, 0x1d, 0x93, 0x60, 0x40, 0x44, 0x3d, 0x75, 0xdf, - 0x67, 0x9e, 0xf7, 0xeb, 0x99, 0x79, 0x5f, 0x37, 0xe0, 0x0e, 0x39, 0x25, 0x71, 0xdf, 0x8e, 0x38, - 0x1b, 0x07, 0x1e, 0xe5, 0x76, 0x48, 0x49, 0x4c, 0xed, 0xf1, 0xa1, 0x1d, 0x53, 0x3e, 0x0e, 0x4e, - 0xa8, 0x15, 0x71, 0x96, 0x30, 0xb8, 0x25, 0x69, 0x56, 0x46, 0xb3, 0x24, 0xcd, 0x1a, 0x1f, 0x6e, - 0x63, 0xe5, 0x3f, 0x20, 0xc3, 0xe0, 0x6b, 0x1a, 0x27, 0xf6, 0xb8, 0xd3, 0xa3, 0x09, 0xe9, 0xd8, - 0x3e, 0x67, 0xa3, 0x48, 0x39, 0x6f, 0xb7, 0x33, 0x0e, 0x3f, 0xa5, 0x89, 0x3d, 0x3e, 0x14, 0x8c, - 0x0f, 0x54, 0x26, 0xcd, 0xd8, 0xf4, 0x99, 0xcf, 0xe4, 0xa7, 0x2d, 0xbe, 0x14, 0x8a, 0x9f, 0x9b, - 0x00, 0x3e, 0x16, 0xac, 0x63, 0x55, 0xcb, 0x71, 0x42, 0x92, 0x51, 0x0c, 0xdf, 0x01, 0x35, 0x32, - 0x26, 0x41, 0x48, 0x7a, 0x21, 
0x6d, 0x1a, 0x6d, 0x63, 0xb7, 0xe2, 0x16, 0x00, 0xdc, 0x04, 0x95, - 0x84, 0x25, 0x24, 0x6c, 0x9a, 0xf2, 0x44, 0x19, 0x10, 0x82, 0xf2, 0x88, 0x07, 0x71, 0xb3, 0xd4, - 0x2e, 0xed, 0xd6, 0x5c, 0xf9, 0x0d, 0x6d, 0xb0, 0xc1, 0x7a, 0xa2, 0x4d, 0xea, 0x75, 0x7d, 0x3a, - 0xa4, 0x9c, 0x24, 0x01, 0x1b, 0x36, 0xcb, 0x6d, 0x63, 0xb7, 0xe4, 0xc2, 0xec, 0xe8, 0x61, 0x7e, - 0x02, 0xb7, 0x41, 0x95, 0xd3, 0x28, 0x0c, 0x4e, 0x48, 0xdc, 0xac, 0xc8, 0xe8, 0xb9, 0x0d, 0xef, - 0x81, 0x37, 0x46, 0x91, 0x47, 0x12, 0xea, 0x75, 0x73, 0xce, 0xaa, 0xe4, 0xdc, 0xd4, 0xb8, 0x9b, - 0x51, 0xef, 0x80, 0x1b, 0x9c, 0x12, 0xef, 0x59, 0x41, 0x5c, 0x93, 0xc4, 0x86, 0x44, 0x73, 0xda, - 0x01, 0x80, 0x79, 0x57, 0x05, 0xb5, 0x2a, 0xa9, 0xb7, 0xf2, 0x93, 0x8c, 0x8e, 0xff, 0x30, 0x40, - 0x43, 0x8a, 0xf5, 0xe8, 0x48, 0xeb, 0xb4, 0x07, 0xca, 0x11, 0xe3, 0x89, 0x94, 0xa8, 0xe1, 0x6c, - 0x4d, 0x52, 0x24, 0xed, 0x69, 0x8a, 0xea, 0xcf, 0xc8, 0x20, 0xfc, 0x08, 0x0b, 0x0b, 0xbb, 0x12, - 0x84, 0x5f, 0x80, 0x06, 0xfd, 0x36, 0xa1, 0x7c, 0x48, 0xc2, 0xae, 0xf4, 0x32, 0xa5, 0xd7, 0xbd, - 0x49, 0x8a, 0xe6, 0x0f, 0xa6, 0x29, 0xda, 0x54, 0xee, 0x73, 0x30, 0x76, 0xd7, 0x33, 0xfb, 0x48, - 0xc4, 0x7b, 0x00, 0xaa, 0xf2, 0x12, 0x4f, 0x58, 0xd8, 0x2c, 0xb5, 0x8d, 0xdd, 0x9a, 0x83, 0x26, - 0x29, 0xca, 0xb1, 0x69, 0x8a, 0x6e, 0xea, 0x22, 0x34, 0x82, 0xdd, 0xfc, 0x10, 0xde, 0x06, 0x66, - 0x10, 0xc9, 0x8b, 0xa8, 0x39, 0x1b, 0x93, 0x14, 0x99, 0x41, 0x34, 0x4d, 0x51, 0x4d, 0x39, 0x04, - 0x11, 0x76, 0xcd, 0x20, 0xc2, 0x3f, 0x98, 0x60, 0xe3, 0x33, 0xc6, 0x9f, 0x12, 0xee, 0x51, 0x2e, - 0x72, 0x16, 0x6d, 0xf7, 0x59, 0xac, 0xda, 0xae, 0xa9, 0xb6, 0x85, 0x5d, 0xb4, 0x2d, 0x2c, 0xec, - 0x4a, 0x30, 0xd7, 0xc8, 0xfc, 0x57, 0x1a, 0x95, 0xfe, 0x9b, 0x46, 0x36, 0xa8, 0xc8, 0x96, 0x75, - 0xa7, 0x6f, 0x4f, 0x52, 0xa4, 0x80, 0x69, 0x8a, 0xd6, 0x67, 0xd4, 0xc1, 0xae, 0x82, 0x45, 0xb5, - 0x43, 0x32, 0xa0, 0xf2, 0xf1, 0xe9, 0xd6, 0x84, 0x5d, 0x54, 0x2b, 0x2c, 0xec, 0x4a, 0x10, 0xbf, - 0x32, 0x41, 0x63, 0x7e, 0x70, 0x32, 0x77, 0x63, 0x09, 0x77, 0xe8, 0x81, 0xd5, 0x58, 0xba, 0x49, - 0x6d, 0xea, 0x9d, 0x3d, 0xeb, 0x92, 0x15, 0x60, 0x2d, 0x8e, 0xa8, 0x83, 0x5e, 0xa4, 0x68, 0x65, - 0x92, 0x22, 0x1d, 0x62, 0x9a, 0xa2, 0x86, 0xca, 0xa0, 0x6c, 0xec, 0xea, 0x03, 0x48, 0x40, 0x45, - 0x28, 0xa3, 0x06, 0xb3, 0xde, 0xd9, 0xbf, 0x34, 0xc9, 0x5f, 0xdc, 0xb4, 0xb3, 0xa3, 0xb3, 0xa8, - 0x10, 0x33, 0xa2, 0x09, 0x53, 0x88, 0x26, 0xfe, 0x85, 0x4f, 0x40, 0x29, 0x88, 0xe2, 0x66, 0x59, - 0x26, 0xb8, 0x7b, 0x75, 0x17, 0xd9, 0xec, 0x38, 0x5b, 0x3a, 0xf4, 0xc2, 0xcb, 0x13, 0xb1, 0xf0, - 0x8f, 0x26, 0xd8, 0x38, 0xa6, 0x43, 0xef, 0x73, 0xbd, 0xf5, 0x5c, 0xfa, 0xcd, 0x88, 0xc6, 0x09, - 0xfc, 0x0a, 0x54, 0x65, 0xbc, 0x6e, 0xe0, 0x49, 0x91, 0xeb, 0x9d, 0x1d, 0x9d, 0x4f, 0xed, 0x3e, - 0x4b, 0xef, 0x3e, 0x9d, 0xec, 0x53, 0xe7, 0x5d, 0x9d, 0x66, 0x4d, 0x03, 0xd3, 0x14, 0xdd, 0x50, - 0xb9, 0x34, 0x80, 0xdd, 0x35, 0x19, 0xf1, 0x91, 0x07, 0x7f, 0x32, 0x40, 0x35, 0x5b, 0xb3, 0x4d, - 0x53, 0x76, 0x53, 0x44, 0x57, 0xb0, 0xa5, 0xb7, 0xaf, 0xf5, 0x50, 0x6c, 0x5f, 0x87, 0x8b, 0xe8, - 0x67, 0x29, 0xaa, 0x66, 0x75, 0x8a, 0x09, 0xcc, 0xb8, 0xc5, 0x04, 0x66, 0x08, 0x7e, 0xfe, 0x1b, - 0xfa, 0xd8, 0x0f, 0x92, 0xfe, 0xa8, 0x67, 0x9d, 0xb0, 0x81, 0x2d, 0x63, 0x1f, 0x0c, 0x69, 0xf2, - 0x94, 0xf1, 0x53, 0x6d, 0x91, 0x28, 0xb0, 0x7d, 0xb6, 0xb0, 0xee, 0xad, 0x5c, 0x89, 0x3c, 0x3e, - 0x7e, 0x0b, 0x6c, 0xce, 0x6b, 0x14, 0x47, 0x6c, 0x18, 0x53, 0xfc, 0xb3, 0x01, 0xa0, 0x7e, 0x2d, - 0x8f, 0x99, 0x1f, 0x5f, 0x8b, 0x76, 0x0f, 0x40, 0x55, 0xff, 0x9e, 0xc5, 0x52, 0x3a, 0xbd, 0x8d, - 0x32, 0xac, 0xd0, 0x22, 0x43, 0xb0, 0x9b, 0x1f, 0x62, 
0x1f, 0xd4, 0x67, 0xea, 0xfd, 0x67, 0x53, - 0xb4, 0x07, 0xca, 0x21, 0xf3, 0xd5, 0x0c, 0xad, 0x2b, 0xb2, 0xb0, 0x0b, 0xb2, 0xb0, 0xb0, 0x2b, - 0x41, 0x3c, 0x16, 0xaf, 0x6a, 0x46, 0x18, 0x25, 0x18, 0xec, 0xce, 0x14, 0x6f, 0xc8, 0x7b, 0x7f, - 0xef, 0xd2, 0x57, 0x3c, 0xe3, 0xef, 0xec, 0x5c, 0x2d, 0x4e, 0xd1, 0xe0, 0x29, 0x58, 0x3f, 0xee, - 0xd3, 0x30, 0xbc, 0x8e, 0xab, 0xc0, 0xbf, 0x18, 0xe2, 0x5d, 0xcc, 0x2c, 0x8b, 0xd7, 0xff, 0x01, - 0x7c, 0x67, 0x80, 0x37, 0x2f, 0x94, 0xac, 0xaf, 0xc6, 0x5f, 0xb8, 0x9a, 0xbb, 0x7f, 0x77, 0x35, - 0x7a, 0xc1, 0xdc, 0xd6, 0xc5, 0x2f, 0x57, 0x42, 0xe7, 0xd7, 0x32, 0xa8, 0xca, 0xae, 0xdc, 0xa3, - 0x4f, 0xa0, 0xb8, 0xaf, 0x99, 0xc9, 0x82, 0xfb, 0x57, 0xe4, 0x5c, 0x58, 0x52, 0xdb, 0x07, 0x4b, - 0xb2, 0x75, 0x8b, 0xc3, 0x8b, 0xbf, 0x22, 0x07, 0xcb, 0x75, 0x98, 0xa5, 0xb3, 0x96, 0xa5, 0xeb, - 0x7c, 0x62, 0x08, 0x12, 0x4e, 0xc9, 0xe0, 0x3a, 0xb3, 0xde, 0x37, 0x60, 0xff, 0xc2, 0x94, 0x2f, - 0x33, 0x62, 0x59, 0xb6, 0xfd, 0xe5, 0xc8, 0xba, 0xc3, 0x08, 0xdc, 0x9a, 0xeb, 0xf0, 0x7f, 0xce, - 0x77, 0xdf, 0x70, 0x9e, 0xbc, 0x38, 0x6b, 0x19, 0x2f, 0xcf, 0x5a, 0xc6, 0xef, 0x67, 0x2d, 0xe3, - 0xfb, 0xf3, 0xd6, 0xca, 0xcb, 0xf3, 0xd6, 0xca, 0xab, 0xf3, 0xd6, 0xca, 0x97, 0x1f, 0x2e, 0xb7, - 0xef, 0x17, 0xfe, 0x3c, 0xe8, 0xad, 0xca, 0xff, 0x91, 0xbc, 0xff, 0x67, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x3d, 0x85, 0x37, 0x23, 0x40, 0x0c, 0x00, 0x00, + // 998 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x56, 0x41, 0x73, 0xdb, 0x44, + 0x14, 0x8e, 0x64, 0x3b, 0xb1, 0xd7, 0x76, 0x4b, 0x37, 0x81, 0x98, 0x40, 0xbc, 0x66, 0x4b, 0x3b, + 0xee, 0x24, 0x91, 0x5a, 0xf7, 0xd6, 0xde, 0x04, 0x43, 0xa7, 0x33, 0x85, 0x09, 0xca, 0x0c, 0x07, + 0x38, 0x78, 0x36, 0xd1, 0x56, 0xd1, 0x58, 0xb1, 0x84, 0x24, 0x1b, 0x7a, 0xe3, 0xc4, 0x99, 0x13, + 0x37, 0x7e, 0x00, 0xe5, 0x27, 0xf0, 0x07, 0x7a, 0xec, 0x31, 0x27, 0xc1, 0x24, 0x37, 0x1f, 0x38, + 0xf8, 0x17, 0x30, 0xfb, 0x76, 0x25, 0xd9, 0x31, 0x09, 0x06, 0x26, 0x4c, 0x4f, 0xf6, 0x7b, 0xfb, + 0xed, 0x7b, 0xef, 0xfb, 0xde, 0xbe, 0x67, 0xa3, 0x3b, 0x6c, 0xc0, 0xe2, 0x63, 0x33, 0x8c, 0x82, + 0xb1, 0xe7, 0xf0, 0xc8, 0xf4, 0x39, 0x8b, 0xb9, 0x39, 0x7e, 0x60, 0xc6, 0x3c, 0x1a, 0x7b, 0x47, + 0xdc, 0x08, 0xa3, 0x20, 0x09, 0xf0, 0x26, 0xc0, 0x8c, 0x0c, 0x66, 0x00, 0xcc, 0x18, 0x3f, 0xd8, + 0xda, 0x70, 0x03, 0x37, 0x00, 0x8c, 0x29, 0xbe, 0x49, 0xf8, 0x16, 0x95, 0x51, 0x4f, 0xd8, 0xd0, + 0x7b, 0xce, 0xe3, 0xc4, 0x1c, 0xf7, 0x0e, 0x79, 0xc2, 0x1e, 0x9a, 0x6e, 0x14, 0x8c, 0x42, 0x85, + 0x79, 0x2f, 0xc3, 0x44, 0x03, 0x9e, 0x88, 0x8c, 0x32, 0x26, 0x1c, 0xd2, 0x97, 0x3a, 0xc2, 0xcf, + 0x84, 0x7d, 0x20, 0xcb, 0x38, 0x48, 0x58, 0x32, 0x8a, 0xf1, 0xfb, 0xa8, 0xc6, 0xc6, 0xcc, 0xf3, + 0xd9, 0xa1, 0xcf, 0x5b, 0x5a, 0x47, 0xeb, 0x56, 0xec, 0xc2, 0x81, 0x37, 0x50, 0x25, 0x09, 0x12, + 0xe6, 0xb7, 0x74, 0x38, 0x91, 0x06, 0xc6, 0xa8, 0x3c, 0x8a, 0xbc, 0xb8, 0x55, 0xea, 0x94, 0xba, + 0x35, 0x1b, 0xbe, 0x63, 0x13, 0xad, 0x07, 0x87, 0x82, 0x21, 0x77, 0xfa, 0x2e, 0x1f, 0xf2, 0x88, + 0x25, 0x5e, 0x30, 0x6c, 0x95, 0x3b, 0x5a, 0xb7, 0x64, 0xe3, 0xec, 0xe8, 0x49, 0x7e, 0x82, 0xb7, + 0x50, 0x35, 0xe2, 0xa1, 0xef, 0x1d, 0xb1, 0xb8, 0x55, 0x81, 0xe8, 0xb9, 0x8d, 0xef, 0xa1, 0xb7, + 0x46, 0xa1, 0xc3, 0x12, 0xee, 0xf4, 0x73, 0xcc, 0x2a, 0x60, 0x6e, 0x2a, 0xbf, 0x9d, 0x41, 0xef, + 0xa0, 0x1b, 0x11, 0x67, 0xce, 0x8b, 0x02, 0xb8, 0x06, 0xc0, 0x26, 0x78, 0x73, 0xd8, 0x1e, 0xc2, + 0x39, 0xab, 0x02, 0x5a, 0x05, 0xe8, 0xad, 0xfc, 0x24, 0x83, 0xd3, 0x3f, 0x34, 0xd4, 0x04, 0xb1, + 0x9e, 0xee, 0x2b, 0x9d, 0x76, 0x50, 0x39, 0x0c, 0xa2, 0x04, 0x24, 0x6a, 
0x5a, 0x9b, 0x93, 0x94, + 0x80, 0x3d, 0x4d, 0x49, 0xfd, 0x05, 0x3b, 0xf1, 0x1f, 0x51, 0x61, 0x51, 0x1b, 0x9c, 0xf8, 0x33, + 0xd4, 0xe4, 0xdf, 0x26, 0x3c, 0x1a, 0x32, 0xbf, 0x0f, 0xb7, 0x74, 0xb8, 0x75, 0x6f, 0x92, 0x92, + 0xf9, 0x83, 0x69, 0x4a, 0x36, 0xe4, 0xf5, 0x39, 0x37, 0xb5, 0x1b, 0x99, 0xbd, 0x2f, 0xe2, 0x3d, + 0x46, 0x55, 0x68, 0xe2, 0x51, 0xe0, 0xb7, 0x4a, 0x1d, 0xad, 0x5b, 0xb3, 0xc8, 0x24, 0x25, 0xb9, + 0x6f, 0x9a, 0x92, 0x9b, 0xaa, 0x08, 0xe5, 0xa1, 0x76, 0x7e, 0x88, 0x6f, 0x23, 0xdd, 0x0b, 0xa1, + 0x11, 0x35, 0x6b, 0x7d, 0x92, 0x12, 0xdd, 0x0b, 0xa7, 0x29, 0xa9, 0xc9, 0x0b, 0x5e, 0x48, 0x6d, + 0xdd, 0x0b, 0xe9, 0x4f, 0x3a, 0x5a, 0xff, 0x24, 0x88, 0xbe, 0x61, 0x91, 0xc3, 0x23, 0x91, 0xb3, + 0xa0, 0x7d, 0x1c, 0xc4, 0x92, 0x76, 0x4d, 0xd2, 0x16, 0x76, 0x41, 0x5b, 0x58, 0xd4, 0x06, 0x67, + 0xae, 0x91, 0xfe, 0xaf, 0x34, 0x2a, 0xfd, 0x37, 0x8d, 0x4c, 0x54, 0x01, 0xca, 0x8a, 0xe9, 0xbb, + 0x93, 0x94, 0x48, 0xc7, 0x34, 0x25, 0x8d, 0x19, 0x75, 0xa8, 0x2d, 0xdd, 0xa2, 0xda, 0x21, 0x3b, + 0xe1, 0xf0, 0xf8, 0x14, 0x35, 0x61, 0x17, 0xd5, 0x0a, 0x8b, 0xda, 0xe0, 0xa4, 0xa7, 0x3a, 0x6a, + 0xce, 0x0f, 0x4e, 0x76, 0x5d, 0x5b, 0xe2, 0x3a, 0x76, 0xd0, 0x6a, 0x0c, 0xd7, 0x40, 0x9b, 0x7a, + 0x6f, 0xc7, 0xb8, 0x64, 0xfa, 0x8d, 0xc5, 0x11, 0xb5, 0xc8, 0xab, 0x94, 0xac, 0x4c, 0x52, 0xa2, + 0x42, 0x4c, 0x53, 0xd2, 0x94, 0x19, 0xa4, 0x4d, 0x6d, 0x75, 0x80, 0x19, 0xaa, 0x08, 0x65, 0xe4, + 0x60, 0xd6, 0x7b, 0xbb, 0x97, 0x26, 0xf9, 0x8b, 0x4e, 0x5b, 0xdb, 0x2a, 0x8b, 0x0c, 0x31, 0x23, + 0x9a, 0x30, 0x85, 0x68, 0xe2, 0x13, 0x7f, 0x8e, 0x4a, 0x5e, 0x18, 0xb7, 0xca, 0x90, 0xe0, 0xee, + 0xd5, 0x2c, 0xb2, 0xd9, 0xb1, 0x36, 0x55, 0xe8, 0x85, 0x97, 0x27, 0x62, 0xd1, 0xef, 0x75, 0xb4, + 0x7e, 0xc0, 0x87, 0xce, 0xa7, 0x6a, 0xb5, 0xd9, 0xfc, 0xeb, 0x11, 0x8f, 0x13, 0xfc, 0x05, 0xaa, + 0x42, 0xbc, 0xbe, 0xe7, 0x80, 0xc8, 0xf5, 0x5e, 0x4b, 0xe5, 0x93, 0x0b, 0xae, 0xc8, 0xf3, 0xb1, + 0xf5, 0x81, 0xca, 0xb0, 0xa6, 0x1c, 0xd3, 0x94, 0xdc, 0x90, 0x69, 0x94, 0x83, 0xda, 0x6b, 0x10, + 0xec, 0xa9, 0x83, 0x7f, 0xd4, 0x50, 0x35, 0x5b, 0xa3, 0x2d, 0x1d, 0x88, 0x6c, 0xe7, 0x81, 0xa5, + 0xdb, 0x50, 0xdb, 0xd5, 0x78, 0x22, 0xb6, 0xab, 0xf5, 0x95, 0x88, 0x7e, 0x96, 0x92, 0x6a, 0x56, + 0xa2, 0x18, 0xbe, 0x0c, 0x5b, 0x0c, 0x5f, 0xe6, 0xa1, 0x2f, 0x7f, 0x23, 0xdd, 0x70, 0xe0, 0x1a, + 0x6c, 0x90, 0x18, 0x0e, 0x1f, 0x9b, 0x6e, 0xb0, 0xb0, 0xb7, 0x8d, 0x9c, 0x6d, 0x1e, 0x88, 0xbe, + 0x83, 0x36, 0xe6, 0x75, 0x88, 0xc3, 0x60, 0x18, 0x73, 0xfa, 0xb3, 0x86, 0xb0, 0x7a, 0x11, 0xcf, + 0x02, 0x37, 0xbe, 0x6e, 0x7d, 0x1e, 0xa3, 0xaa, 0xfa, 0xa5, 0x8a, 0x41, 0x1e, 0xb5, 0x6c, 0x32, + 0x5f, 0xc1, 0x37, 0xf3, 0x50, 0x3b, 0x3f, 0xa4, 0x2e, 0xaa, 0xcf, 0x94, 0xfa, 0xcf, 0x86, 0x64, + 0x07, 0x95, 0xfd, 0xc0, 0x95, 0x23, 0xd2, 0x90, 0x60, 0x61, 0x17, 0x60, 0x61, 0x51, 0x1b, 0x9c, + 0x74, 0x2c, 0x1e, 0xcd, 0x8c, 0x26, 0x52, 0x2b, 0xdc, 0x9f, 0x29, 0x5e, 0x83, 0xde, 0x7e, 0x78, + 0xe9, 0x23, 0x9d, 0xb9, 0x6f, 0x6d, 0x5f, 0x2d, 0x4e, 0x41, 0xf0, 0x39, 0x6a, 0x1c, 0x1c, 0x73, + 0xdf, 0xbf, 0xe6, 0x2e, 0xd0, 0x5f, 0x34, 0xf1, 0x1a, 0x66, 0xd6, 0xc0, 0x1b, 0xdd, 0xf6, 0xef, + 0x34, 0xf4, 0xf6, 0x85, 0x6a, 0x55, 0x43, 0xdc, 0x85, 0x86, 0xdc, 0xfd, 0xbb, 0x86, 0xa8, 0xad, + 0x71, 0x5b, 0x15, 0xbf, 0x5c, 0x09, 0xbd, 0x5f, 0xcb, 0xa8, 0x0a, 0xac, 0xec, 0xfd, 0x8f, 0xf0, + 0x00, 0x35, 0x66, 0x47, 0x09, 0xef, 0x5e, 0x91, 0x73, 0x61, 0xf3, 0x6c, 0xed, 0x2d, 0x89, 0x56, + 0x14, 0x87, 0x17, 0x7f, 0x1a, 0xf6, 0x96, 0x63, 0x98, 0xa5, 0x33, 0x96, 0x85, 0xab, 0x7c, 0xe2, + 0xe9, 0x27, 0x11, 0x67, 0x27, 0xff, 0x67, 0xd6, 0xfb, 0x1a, 0x3e, 0xbe, 0x30, 0xdb, 0xcb, 0x0c, + 
0x56, 0x96, 0x6d, 0x77, 0x39, 0xb0, 0x62, 0x18, 0xa2, 0x5b, 0x73, 0x0c, 0xaf, 0x39, 0xdf, 0x7d, + 0xcd, 0x7a, 0xf4, 0xea, 0xac, 0xad, 0xbd, 0x3e, 0x6b, 0x6b, 0xbf, 0x9f, 0xb5, 0xb5, 0x1f, 0xce, + 0xdb, 0x2b, 0xaf, 0xcf, 0xdb, 0x2b, 0xa7, 0xe7, 0xed, 0x95, 0x2f, 0x3b, 0x17, 0x36, 0xf9, 0xc2, + 0xff, 0xfa, 0xc3, 0x55, 0xf8, 0x3f, 0xf1, 0xf0, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x63, 0xc2, + 0x9a, 0xd6, 0xf9, 0x0b, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -2730,7 +2729,7 @@ func (m *SendManifestRequest) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Manifest = append(m.Manifest, v2beta2.Group{}) + m.Manifest = append(m.Manifest, v2beta3.Group{}) if err := m.Manifest[len(m.Manifest)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } diff --git a/go/provider/operator/v1/service.pb.go b/go/provider/operator/v1/service.pb.go new file mode 100644 index 00000000..307ff18e --- /dev/null +++ b/go/provider/operator/v1/service.pb.go @@ -0,0 +1,426 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/provider/operator/v1/service.proto + +package v1 + +import ( + context "context" + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + emptypb "google.golang.org/protobuf/types/known/emptypb" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type Operator struct { +} + +func (m *Operator) Reset() { *m = Operator{} } +func (m *Operator) String() string { return proto.CompactTextString(m) } +func (*Operator) ProtoMessage() {} +func (*Operator) Descriptor() ([]byte, []int) { + return fileDescriptor_73bbded1b1ef080b, []int{0} +} +func (m *Operator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Operator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Operator.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Operator) XXX_Merge(src proto.Message) { + xxx_messageInfo_Operator.Merge(m, src) +} +func (m *Operator) XXX_Size() int { + return m.Size() +} +func (m *Operator) XXX_DiscardUnknown() { + xxx_messageInfo_Operator.DiscardUnknown(m) +} + +var xxx_messageInfo_Operator proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Operator)(nil), "akash.provider.operator.v1.Operator") +} + +func init() { + proto.RegisterFile("akash/provider/operator/v1/service.proto", fileDescriptor_73bbded1b1ef080b) +} + +var fileDescriptor_73bbded1b1ef080b = []byte{ + // 241 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x48, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x2f, 0x28, 0xca, 0x2f, 0xcb, 0x4c, 0x49, 0x2d, 0xd2, 0xcf, 0x2f, 0x48, 0x2d, 0x4a, + 0x2c, 0xc9, 0x2f, 0xd2, 0x2f, 0x33, 0xd4, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, + 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x02, 0xab, 0xd4, 0x83, 0xa9, 0xd4, 0x83, 0xa9, 0xd4, 0x2b, + 0x33, 0x94, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd3, 0x07, 0xb1, 0x20, 0x3a, 0xa4, 0xa4, + 0xd3, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0xc1, 0xbc, 0xa4, 0xd2, 0x34, 0xfd, 0xd4, 0xdc, 0x82, + 0x92, 0x4a, 0x88, 0xa4, 0x12, 0x17, 0x17, 0x87, 0x3f, 0xd4, 0x04, 0xa3, 0xc5, 0x8c, 0x5c, 0xdc, + 0x30, 0x4e, 0x50, 0x80, 0xb3, 0x90, 0x2b, 0x17, 0xab, 0x73, 0x46, 0x6a, 0x72, 0xb6, 0x90, 0x98, + 0x1e, 0xc4, 0x08, 0x3d, 0x98, 0x11, 0x7a, 0xae, 0x20, 0x23, 0xa4, 0x54, 0xf4, 0x70, 0x3b, 0x46, + 0x0f, 0x66, 0x92, 0x90, 0x2f, 0x17, 0x77, 0x70, 0x49, 0x51, 0x6a, 0x62, 0x2e, 0x15, 0x0c, 0x33, + 0x60, 0x74, 0x0a, 0x39, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, + 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xab, 0xf4, + 0xcc, 0x92, 0x8c, 0xd2, 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0x59, 0xba, 0x79, 0xa9, 0x25, + 0xe5, 0xf9, 0x45, 0xd9, 0x50, 0x5e, 0x62, 0x41, 0xa6, 0x7e, 0x7a, 0x3e, 0xd6, 0x40, 0x4e, 0x62, + 0x03, 0xbb, 0xc6, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x1c, 0xcd, 0x55, 0xe6, 0x89, 0x01, 0x00, + 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// OperatorRPCClient is the client API for OperatorRPC service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type OperatorRPCClient interface { + // Check status of the operator + Check(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*Operator, error) + // StreamServiceLogs + StreamCheck(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (OperatorRPC_StreamCheckClient, error) +} + +type operatorRPCClient struct { + cc grpc1.ClientConn +} + +func NewOperatorRPCClient(cc grpc1.ClientConn) OperatorRPCClient { + return &operatorRPCClient{cc} +} + +func (c *operatorRPCClient) Check(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (*Operator, error) { + out := new(Operator) + err := c.cc.Invoke(ctx, "/akash.provider.operator.v1.OperatorRPC/Check", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *operatorRPCClient) StreamCheck(ctx context.Context, in *emptypb.Empty, opts ...grpc.CallOption) (OperatorRPC_StreamCheckClient, error) { + stream, err := c.cc.NewStream(ctx, &_OperatorRPC_serviceDesc.Streams[0], "/akash.provider.operator.v1.OperatorRPC/StreamCheck", opts...) + if err != nil { + return nil, err + } + x := &operatorRPCStreamCheckClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type OperatorRPC_StreamCheckClient interface { + Recv() (*Operator, error) + grpc.ClientStream +} + +type operatorRPCStreamCheckClient struct { + grpc.ClientStream +} + +func (x *operatorRPCStreamCheckClient) Recv() (*Operator, error) { + m := new(Operator) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// OperatorRPCServer is the server API for OperatorRPC service. +type OperatorRPCServer interface { + // Check status of the operator + Check(context.Context, *emptypb.Empty) (*Operator, error) + // StreamServiceLogs + StreamCheck(*emptypb.Empty, OperatorRPC_StreamCheckServer) error +} + +// UnimplementedOperatorRPCServer can be embedded to have forward compatible implementations. 
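As an illustrative aside, a minimal sketch of driving the generated OperatorRPC client above. The endpoint address, the insecure transport, and the `pkg.akt.dev/go/provider/operator/v1` import path are assumptions, not part of this change.

package examples

import (
	"context"
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/protobuf/types/known/emptypb"

	operatorv1 "pkg.akt.dev/go/provider/operator/v1" // assumed module path
)

func watchOperator(ctx context.Context) error {
	// Hypothetical operator endpoint; real deployments would normally use TLS.
	conn, err := grpc.Dial("localhost:8081", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return err
	}
	defer conn.Close()

	client := operatorv1.NewOperatorRPCClient(conn)

	// One-shot readiness probe.
	if _, err := client.Check(ctx, &emptypb.Empty{}); err != nil {
		return err
	}

	// StreamCheck yields one Operator message per server-side check until the
	// stream ends or ctx is cancelled.
	stream, err := client.StreamCheck(ctx, &emptypb.Empty{})
	if err != nil {
		return err
	}
	for {
		if _, err := stream.Recv(); err != nil {
			return err // io.EOF once the server closes the stream
		}
		fmt.Println("operator check ok")
	}
}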
+type UnimplementedOperatorRPCServer struct { +} + +func (*UnimplementedOperatorRPCServer) Check(ctx context.Context, req *emptypb.Empty) (*Operator, error) { + return nil, status.Errorf(codes.Unimplemented, "method Check not implemented") +} +func (*UnimplementedOperatorRPCServer) StreamCheck(req *emptypb.Empty, srv OperatorRPC_StreamCheckServer) error { + return status.Errorf(codes.Unimplemented, "method StreamCheck not implemented") +} + +func RegisterOperatorRPCServer(s grpc1.Server, srv OperatorRPCServer) { + s.RegisterService(&_OperatorRPC_serviceDesc, srv) +} + +func _OperatorRPC_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(emptypb.Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(OperatorRPCServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.provider.operator.v1.OperatorRPC/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(OperatorRPCServer).Check(ctx, req.(*emptypb.Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _OperatorRPC_StreamCheck_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(emptypb.Empty) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(OperatorRPCServer).StreamCheck(m, &operatorRPCStreamCheckServer{stream}) +} + +type OperatorRPC_StreamCheckServer interface { + Send(*Operator) error + grpc.ServerStream +} + +type operatorRPCStreamCheckServer struct { + grpc.ServerStream +} + +func (x *operatorRPCStreamCheckServer) Send(m *Operator) error { + return x.ServerStream.SendMsg(m) +} + +var _OperatorRPC_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.provider.operator.v1.OperatorRPC", + HandlerType: (*OperatorRPCServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _OperatorRPC_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "StreamCheck", + Handler: _OperatorRPC_StreamCheck_Handler, + ServerStreams: true, + }, + }, + Metadata: "akash/provider/operator/v1/service.proto", +} + +func (m *Operator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Operator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Operator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintService(dAtA []byte, offset int, v uint64) int { + offset -= sovService(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Operator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovService(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozService(x uint64) (n int) { + return sovService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Operator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Operator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Operator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipService(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthService + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupService + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthService + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthService = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowService = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupService = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/provider/v1/metrics.go b/go/provider/v1/metrics.go index 7764e5c0..647c3e22 100644 --- a/go/provider/v1/metrics.go +++ b/go/provider/v1/metrics.go @@ -3,8 +3,8 @@ package v1 import ( "k8s.io/apimachinery/pkg/api/resource" - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - types "github.com/akash-network/akash-api/go/node/types/v1beta3" + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + rtypes "pkg.akt.dev/go/node/types/resources/v1beta4" ) func NewResourcesMetric() ResourcesMetric { @@ -16,7 +16,7 @@ func NewResourcesMetric() ResourcesMetric { Storage: make(Storage), } } -func (inv *ResourcesMetric) AddResources(res types.Resources) { +func (inv *ResourcesMetric) AddResources(res rtypes.Resources) { if res.CPU != nil { qcpu := *resource.NewMilliQuantity(res.CPU.Units.Val.Int64(), resource.DecimalSI) inv.CPU.Add(qcpu) diff --git a/go/provider/v1/service.pb.go b/go/provider/v1/service.pb.go index af0766e8..56b76dd2 100644 --- a/go/provider/v1/service.pb.go +++ b/go/provider/v1/service.pb.go @@ -6,8 +6,8 @@ package v1 import ( context "context" fmt "fmt" - grpc1 "github.com/gogo/protobuf/grpc" - proto "github.com/gogo/protobuf/proto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto 
"github.com/cosmos/gogoproto/proto" _ "google.golang.org/genproto/googleapis/api/annotations" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" @@ -30,7 +30,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package func init() { proto.RegisterFile("akash/provider/v1/service.proto", fileDescriptor_518d1307e7e58072) } var fileDescriptor_518d1307e7e58072 = []byte{ - // 255 bytes of a gzipped FileDescriptorProto + // 242 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcc, 0x4e, 0x2c, 0xce, 0xd0, 0x2f, 0x28, 0xca, 0x2f, 0xcb, 0x4c, 0x49, 0x2d, 0xd2, 0x2f, 0x33, 0xd4, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x04, 0x2b, 0xd0, @@ -40,13 +40,13 @@ var fileDescriptor_518d1307e7e58072 = []byte{ 0xe5, 0xb0, 0x58, 0x57, 0x92, 0x58, 0x52, 0x0a, 0x95, 0x37, 0x5a, 0xc9, 0xc8, 0xc5, 0x1d, 0x00, 0x95, 0x0c, 0x0a, 0x70, 0x16, 0x0a, 0xe5, 0xe2, 0x74, 0x4f, 0x2d, 0x09, 0x06, 0x2b, 0x11, 0x12, 0xd3, 0x83, 0x98, 0xad, 0x07, 0xb3, 0x58, 0xcf, 0x15, 0x64, 0xb1, 0x94, 0xa4, 0x1e, 0x86, 0x1b, - 0xf5, 0x20, 0x5a, 0x94, 0x44, 0x9b, 0x2e, 0x3f, 0x99, 0xcc, 0xc4, 0x2f, 0xc4, 0x85, 0xb0, 0x29, - 0x89, 0x51, 0x4b, 0xc8, 0x99, 0x8b, 0x27, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x97, 0x6c, 0x93, 0x0d, - 0x18, 0x9d, 0xbc, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, - 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0xca, 0x30, 0x3d, - 0xb3, 0x24, 0xa3, 0x34, 0x49, 0x2f, 0x39, 0x3f, 0x57, 0x1f, 0x6c, 0x80, 0x6e, 0x5e, 0x6a, 0x49, - 0x79, 0x7e, 0x51, 0x36, 0x94, 0x07, 0x0a, 0x9b, 0xf4, 0x7c, 0xe4, 0x50, 0x48, 0x62, 0x03, 0xdb, - 0x6c, 0x0c, 0x08, 0x00, 0x00, 0xff, 0xff, 0x6a, 0xc2, 0xd5, 0x1b, 0x90, 0x01, 0x00, 0x00, + 0xf5, 0x20, 0x5a, 0x94, 0x44, 0x9b, 0x2e, 0x3f, 0x99, 0xcc, 0xc4, 0x9f, 0xc4, 0xa8, 0x25, 0xc4, + 0x85, 0xb0, 0x4c, 0xc8, 0x99, 0x8b, 0x27, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x97, 0x6c, 0x93, 0x0d, + 0x18, 0x9d, 0x4c, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, + 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, 0xf1, 0x58, 0x8e, 0x21, 0x4a, 0xaa, 0x20, + 0x3b, 0x5d, 0x2f, 0x31, 0xbb, 0x44, 0x2f, 0x25, 0xb5, 0x4c, 0x3f, 0x3d, 0x1f, 0xd9, 0xbb, 0x49, + 0x6c, 0x60, 0x2b, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc0, 0xdd, 0x33, 0x26, 0x79, 0x01, + 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
diff --git a/go/provider/v1/service.pb.gw.go b/go/provider/v1/service.pb.gw.go index 3a891120..90914aa5 100644 --- a/go/provider/v1/service.pb.gw.go +++ b/go/provider/v1/service.pb.gw.go @@ -146,7 +146,7 @@ func RegisterProviderRPCHandlerClient(ctx context.Context, mux *runtime.ServeMux } var ( - pattern_ProviderRPC_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "status"}, "", runtime.AssumeColonVerbOpt(true))) + pattern_ProviderRPC_GetStatus_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "status"}, "", runtime.AssumeColonVerbOpt(false))) ) var ( diff --git a/go/provider/v1/status.pb.go b/go/provider/v1/status.pb.go index 58c7929b..c80484be 100644 --- a/go/provider/v1/status.pb.go +++ b/go/provider/v1/status.pb.go @@ -5,23 +5,19 @@ package v1 import ( fmt "fmt" - v1 "github.com/akash-network/akash-api/go/inventory/v1" - _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - _ "google.golang.org/protobuf/types/known/timestamppb" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" io "io" resource "k8s.io/apimachinery/pkg/api/resource" math "math" math_bits "math/bits" - time "time" + v1 "pkg.akt.dev/go/inventory/v1" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf -var _ = time.Kitchen // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. @@ -460,7 +456,6 @@ type Status struct { BidEngine *BidEngineStatus `protobuf:"bytes,3,opt,name=bid_engine,json=bidEngine,proto3" json:"bid_engine,omitempty" yaml:"bid_engine,omitempty"` Manifest *ManifestStatus `protobuf:"bytes,4,opt,name=manifest,proto3" json:"manifest,omitempty" yaml:"manifest,omitempty"` PublicHostnames []string `protobuf:"bytes,5,rep,name=public_hostnames,json=publicHostnames,proto3" json:"public_hostnames" yaml:"public_hostnames"` - Timestamp time.Time `protobuf:"bytes,6,opt,name=timestamp,proto3,stdtime" json:"timestamp" yaml:"timestamp"` } func (m *Status) Reset() { *m = Status{} } @@ -531,13 +526,6 @@ func (m *Status) GetPublicHostnames() []string { return nil } -func (m *Status) GetTimestamp() time.Time { - if m != nil { - return m.Timestamp - } - return time.Time{} -} - func init() { proto.RegisterType((*ResourcesMetric)(nil), "akash.provider.v1.ResourcesMetric") proto.RegisterMapType((Storage)(nil), "akash.provider.v1.ResourcesMetric.StorageEntry") @@ -554,80 +542,75 @@ func init() { func init() { proto.RegisterFile("akash/provider/v1/status.proto", fileDescriptor_1ba712d181b7fc9b) } var fileDescriptor_1ba712d181b7fc9b = []byte{ - // 1160 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xce, 0xd6, 0xb5, 0x5d, 0x4f, 0xd2, 0xc6, 0x19, 0x2a, 0xe1, 0xa6, 0xc8, 0x13, 0x46, 0xaa, - 0x94, 0x4a, 0x65, 0x57, 0x49, 0x2b, 0x14, 0x8a, 0x2a, 0xc1, 0xa6, 0x51, 0x5a, 0x41, 0x44, 0x3a, - 0xa5, 0x1c, 0x10, 0x52, 0xb4, 0xb6, 0xa7, 0xeb, 0xad, 0xbd, 0x3b, 0xcb, 0xee, 0xac, 0x91, 0x25, - 0x84, 0x10, 0x57, 0x2e, 0xfd, 0x2b, 0x38, 0xf0, 0x77, 0x70, 0xc8, 0x05, 0xa9, 0x47, 0x2e, 0x4c, - 0x91, 0x73, 0x40, 0xf8, 0x68, 0x6e, 0x9c, 0xd0, 0xec, 0xcc, 0xfe, 0xb0, 0x63, 0xa9, 0x6a, 0x6e, - 0x3b, 0xdf, 0xf7, 0xe6, 0xfb, 0xde, 0xdb, 0xf7, 0x66, 0x77, 0x40, 0xdb, 0x19, 0x38, 0x71, 0xdf, - 0x0a, 
0x23, 0x36, 0xf2, 0x7a, 0x34, 0xb2, 0x46, 0x3b, 0x56, 0xcc, 0x1d, 0x9e, 0xc4, 0x66, 0x18, - 0x31, 0xce, 0xe0, 0x46, 0xca, 0x9b, 0x19, 0x6f, 0x8e, 0x76, 0x36, 0x91, 0xcb, 0x98, 0x3b, 0xa4, - 0x56, 0x1a, 0xd0, 0x49, 0x9e, 0x5b, 0xdc, 0xf3, 0x69, 0xcc, 0x1d, 0x3f, 0x54, 0x7b, 0x36, 0xaf, - 0xbb, 0xcc, 0x65, 0xe9, 0xa3, 0x25, 0x9f, 0x34, 0xba, 0xa5, 0x9c, 0xbc, 0x60, 0x44, 0x03, 0xce, - 0xa2, 0xb1, 0xb4, 0xea, 0x0e, 0x93, 0x98, 0xd3, 0x48, 0x47, 0xdc, 0x1b, 0xec, 0xc5, 0xa6, 0xc7, - 0x2c, 0x27, 0xf4, 0x7c, 0xa7, 0xdb, 0xf7, 0x02, 0x1a, 0x8d, 0xad, 0x70, 0xe0, 0x4a, 0xc0, 0x8a, - 0x68, 0xcc, 0x92, 0xa8, 0x4b, 0x2d, 0x97, 0x06, 0x34, 0x72, 0x38, 0xed, 0xa9, 0x5d, 0xf8, 0x9f, - 0x2a, 0x58, 0x27, 0x9a, 0x8c, 0x8f, 0x28, 0x8f, 0xbc, 0x2e, 0xec, 0x80, 0x4a, 0x37, 0x4c, 0x5a, - 0xc6, 0x96, 0xb1, 0xbd, 0xba, 0x6b, 0x9a, 0x4a, 0xd7, 0x2c, 0xeb, 0x9a, 0xe1, 0xc0, 0x95, 0x80, - 0x99, 0xe9, 0x9a, 0x4f, 0x12, 0x27, 0xe0, 0x1e, 0x1f, 0xdb, 0x5b, 0xa7, 0x02, 0x19, 0x13, 0x81, - 0x2a, 0xfb, 0xc7, 0xcf, 0xa6, 0x02, 0x49, 0xa5, 0x99, 0x40, 0x60, 0xec, 0xf8, 0xc3, 0xfb, 0xb8, - 0x1b, 0x26, 0x98, 0x48, 0x08, 0x7e, 0x0b, 0x6a, 0x3e, 0xf5, 0x59, 0x34, 0x6e, 0x5d, 0xba, 0x90, - 0xcd, 0x6d, 0x6d, 0x53, 0x3b, 0x4a, 0x55, 0xa6, 0x02, 0x69, 0xbd, 0x99, 0x40, 0x57, 0x95, 0x99, - 0x5a, 0x63, 0xa2, 0x09, 0xf8, 0x02, 0x54, 0xdc, 0x30, 0x69, 0x55, 0x2e, 0xe4, 0xb7, 0x9d, 0x95, - 0x75, 0xa8, 0xca, 0x72, 0xcb, 0x65, 0xb9, 0x61, 0x82, 0x7f, 0x7d, 0x8d, 0x2e, 0x1f, 0x1e, 0x3f, - 0x8b, 0x89, 0xa4, 0xe0, 0x2f, 0x06, 0xd8, 0xa0, 0x61, 0x9f, 0xfa, 0x34, 0x72, 0x86, 0x27, 0x31, - 0x67, 0x91, 0xe3, 0xd2, 0xd6, 0xe5, 0x0b, 0x59, 0x3f, 0xd6, 0xd6, 0xcd, 0x83, 0x4c, 0xf0, 0xa9, - 0xd2, 0x9b, 0x0a, 0x74, 0xde, 0x64, 0x26, 0x50, 0x4b, 0x65, 0x75, 0x8e, 0xc2, 0xa4, 0x49, 0x17, - 0x24, 0xe0, 0x0f, 0xa0, 0x9e, 0x65, 0x57, 0xdd, 0xaa, 0x6c, 0xaf, 0xee, 0x5a, 0xe6, 0xb9, 0x99, - 0x35, 0x17, 0x06, 0xc4, 0xd4, 0x9b, 0x0f, 0x02, 0x1e, 0x8d, 0xed, 0xbb, 0x13, 0x81, 0xea, 0x45, - 0x46, 0xf5, 0x22, 0x8f, 0x6b, 0x2a, 0x8f, 0xcc, 0xfd, 0xbf, 0x22, 0x8e, 0x64, 0x51, 0x9b, 0x2f, - 0xc0, 0x5a, 0x59, 0x0d, 0x36, 0x41, 0x65, 0x40, 0xc7, 0xe9, 0xec, 0x35, 0x88, 0x7c, 0x84, 0x0f, - 0x41, 0x75, 0xe4, 0x0c, 0x13, 0x7a, 0xb1, 0x41, 0x21, 0x6a, 0xf3, 0xfd, 0x4b, 0x7b, 0x06, 0x3e, - 0x04, 0xb5, 0xcf, 0xa9, 0x13, 0xd3, 0x18, 0x3e, 0x00, 0x35, 0xa7, 0xcb, 0xbd, 0x11, 0x4d, 0x8d, - 0xae, 0xda, 0xb7, 0xe4, 0x24, 0x7d, 0x9a, 0x22, 0x72, 0x92, 0x14, 0x57, 0x4c, 0x92, 0x5a, 0x63, - 0xa2, 0x09, 0xfc, 0x9b, 0x01, 0x20, 0xa1, 0x31, 0x8d, 0x46, 0x0e, 0xf7, 0x58, 0x90, 0x9d, 0x9b, - 0x3d, 0x50, 0xed, 0xb2, 0x24, 0xe0, 0x5a, 0x14, 0x4f, 0x04, 0xaa, 0xee, 0x4b, 0x60, 0x2a, 0x90, - 0x62, 0x66, 0x02, 0xad, 0xe9, 0x93, 0x20, 0x97, 0x98, 0x28, 0x18, 0x72, 0xd0, 0xc8, 0x32, 0x8f, - 0x75, 0x9d, 0xf8, 0xcd, 0x7d, 0xb0, 0x77, 0x4f, 0x05, 0x5a, 0x99, 0x08, 0xd4, 0xc8, 0x89, 0xa9, - 0x40, 0x85, 0xd2, 0x4c, 0xa0, 0xa6, 0x72, 0xcb, 0x21, 0x4c, 0x0a, 0x1a, 0xff, 0x6d, 0x80, 0xb5, - 0x72, 0x19, 0xd0, 0x07, 0xf5, 0x90, 0x06, 0x3d, 0x2f, 0x70, 0xf5, 0xe1, 0xbf, 0xb5, 0x3c, 0x89, - 0x85, 0xc2, 0xed, 0x3b, 0x3a, 0x8f, 0xfa, 0xb1, 0xda, 0x2d, 0xc7, 0x40, 0x0b, 0x15, 0x63, 0xa0, - 0x01, 0x4c, 0x32, 0x0a, 0xf6, 0xf3, 0x2e, 0x5c, 0x7a, 0x1b, 0xb7, 0xdb, 0xda, 0xed, 0x2d, 0x1a, - 0xf6, 0xaf, 0x01, 0x1a, 0x8f, 0xb3, 0x4f, 0x27, 0xec, 0x82, 0xba, 0xfe, 0x74, 0xea, 0x32, 0x6f, - 0x6a, 0xe3, 0xfc, 0xeb, 0x2a, 0x9d, 0xf7, 0x55, 0x48, 0x51, 0x9c, 0x06, 0x64, 0x71, 0x7a, 0x7b, - 0x51, 0x9c, 0x06, 0x30, 0xc9, 0x28, 0xf8, 0xa3, 0x01, 0xd6, 0xa2, 0x52, 0xf2, 0xba, 0x46, 0xf4, - 0x86, 0x1a, 0xed, 0x8f, 0xb5, 
0xdd, 0x5c, 0x67, 0xa6, 0x02, 0xcd, 0x89, 0xcd, 0x04, 0x7a, 0x27, - 0xef, 0x6c, 0x8e, 0x62, 0x32, 0x17, 0x84, 0xff, 0x34, 0xc0, 0x55, 0x9d, 0xf4, 0xd3, 0xf4, 0xaf, - 0x04, 0xbf, 0x01, 0xb5, 0x61, 0x7a, 0x02, 0x74, 0xe1, 0x37, 0x96, 0x64, 0xa3, 0x8e, 0x48, 0xf1, - 0x96, 0xd5, 0x5a, 0xbe, 0x65, 0xb5, 0xb5, 0x78, 0xcb, 0x6a, 0x8d, 0x89, 0x26, 0x60, 0x00, 0x1a, - 0xf9, 0x1b, 0xd4, 0xe5, 0xbe, 0xb7, 0xc4, 0x20, 0x6f, 0x44, 0x31, 0xbf, 0x39, 0x24, 0xe7, 0x37, - 0xd7, 0x28, 0xe6, 0x37, 0x87, 0x30, 0x29, 0x68, 0x7c, 0x0c, 0xd6, 0x6d, 0xaf, 0x77, 0x10, 0xb8, - 0x5e, 0x40, 0x75, 0x81, 0x0f, 0x40, 0x8d, 0x45, 0x3d, 0x1a, 0xc5, 0xe5, 0x83, 0xfd, 0x45, 0x8a, - 0xc8, 0x0a, 0x14, 0x57, 0x54, 0xa0, 0xd6, 0x98, 0x68, 0x02, 0xf7, 0xc1, 0xb5, 0x23, 0x27, 0xf0, - 0x9e, 0xd3, 0x98, 0x6b, 0xc1, 0xaf, 0xc0, 0x6a, 0x8f, 0x86, 0x43, 0x36, 0xf6, 0x69, 0xc0, 0x33, - 0xd5, 0x7b, 0x13, 0x81, 0x56, 0x1f, 0x16, 0xf0, 0x54, 0xa0, 0x72, 0xd4, 0x4c, 0x20, 0xa8, 0xf4, - 0x4b, 0x20, 0x26, 0xe5, 0x10, 0xfc, 0x7b, 0x15, 0xd4, 0xb4, 0xc5, 0x13, 0x50, 0xa3, 0x51, 0xc4, - 0xd2, 0x9c, 0x2b, 0xdb, 0x0d, 0xfb, 0x23, 0x99, 0xf3, 0x41, 0x8a, 0x4c, 0x05, 0x6a, 0x2a, 0xee, - 0x0e, 0xf3, 0x3d, 0x4e, 0xfd, 0x90, 0xcb, 0xb7, 0xf2, 0xae, 0xfe, 0xc0, 0x2f, 0x30, 0x98, 0x68, - 0x21, 0xf8, 0x7d, 0x31, 0xe1, 0xaa, 0x0f, 0x5b, 0x4b, 0xfa, 0x30, 0x37, 0x1a, 0xf6, 0x27, 0xfa, - 0x2f, 0x53, 0x1a, 0xf3, 0x0d, 0xad, 0x31, 0xe7, 0xdd, 0x9a, 0x1b, 0xf8, 0xb2, 0x79, 0x3e, 0xfa, - 0x3f, 0x1b, 0x00, 0x74, 0xbc, 0xde, 0x09, 0x4d, 0x3b, 0xa3, 0x7f, 0xb8, 0xcb, 0xbe, 0x67, 0x0b, - 0xdd, 0xb3, 0x1f, 0xe9, 0x1c, 0x1a, 0x39, 0x31, 0x15, 0xe8, 0x7a, 0x21, 0x35, 0x97, 0xc8, 0x4d, - 0x95, 0xc8, 0x32, 0x16, 0x93, 0x46, 0x27, 0x53, 0x80, 0x3f, 0x19, 0xe0, 0x8a, 0xaf, 0x9b, 0xaa, - 0xff, 0xc0, 0xef, 0x2f, 0xc9, 0x65, 0xbe, 0xef, 0xf6, 0xbe, 0x4e, 0xe5, 0x4a, 0x86, 0x4f, 0x05, - 0x82, 0x99, 0xcc, 0x5c, 0x1e, 0x37, 0xf4, 0x6d, 0xe3, 0x1c, 0x87, 0x49, 0xee, 0x0b, 0x7d, 0xd0, - 0x0c, 0x93, 0xce, 0xd0, 0xeb, 0x9e, 0xf4, 0x59, 0xcc, 0x03, 0xc7, 0xa7, 0x71, 0xfa, 0xbf, 0x6d, - 0xd8, 0xf6, 0x44, 0xa0, 0xf5, 0xe3, 0x94, 0x7b, 0x94, 0x51, 0xb2, 0xed, 0x8b, 0xe1, 0x45, 0xdb, - 0x17, 0x19, 0x4c, 0xd6, 0xc3, 0xf9, 0xfd, 0x30, 0x04, 0x8d, 0xfc, 0x5a, 0xd9, 0xaa, 0xa5, 0x35, - 0x6f, 0x9a, 0xea, 0xe2, 0x69, 0x66, 0x17, 0x4f, 0xf3, 0xcb, 0x2c, 0xc2, 0xfe, 0x30, 0x3b, 0x87, - 0x39, 0x24, 0xcf, 0x61, 0xae, 0x50, 0x9c, 0xc3, 0x1c, 0xc2, 0x2f, 0x5f, 0x23, 0x83, 0x14, 0x21, - 0xf6, 0x67, 0xa7, 0x93, 0xb6, 0xf1, 0x6a, 0xd2, 0x36, 0xfe, 0x9a, 0xb4, 0x8d, 0x97, 0x67, 0xed, - 0x95, 0x57, 0x67, 0xed, 0x95, 0x3f, 0xce, 0xda, 0x2b, 0x5f, 0xef, 0xb8, 0x1e, 0xef, 0x27, 0x1d, - 0xb3, 0xcb, 0x7c, 0x2b, 0x7d, 0xed, 0x1f, 0x04, 0x94, 0x7f, 0xc7, 0xa2, 0x81, 0x5e, 0xc9, 0xeb, - 0xa9, 0xcb, 0xca, 0x77, 0xe8, 0x4e, 0x2d, 0xcd, 0xf1, 0xee, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, - 0xa8, 0x57, 0xf0, 0x94, 0x5f, 0x0b, 0x00, 0x00, + // 1087 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x56, 0xcf, 0x6b, 0x1b, 0x47, + 0x14, 0xf6, 0x46, 0xb1, 0x14, 0x8d, 0x9d, 0x58, 0x9e, 0x06, 0xaa, 0x38, 0x45, 0xe3, 0x0e, 0x04, + 0x1c, 0x08, 0xbb, 0xc4, 0xf1, 0xc1, 0x4d, 0x09, 0xb4, 0xeb, 0x18, 0x27, 0x50, 0x53, 0x67, 0x42, + 0x7a, 0x28, 0x05, 0xb3, 0x96, 0xa6, 0xab, 0x8d, 0xb4, 0x3f, 0xba, 0x3b, 0x2b, 0x10, 0x94, 0x52, + 0x7a, 0xed, 0xa5, 0x7f, 0x45, 0x0f, 0xfd, 0x3b, 0x7a, 0xf0, 0x31, 0xc7, 0x42, 0xe9, 0xb4, 0xc8, + 0x87, 0x52, 0x1d, 0xd5, 0x5b, 0x4f, 0x65, 0x76, 0xde, 0xfe, 0x90, 0x2c, 0x08, 0xf1, 0x6d, 0xe7, + 0x7d, 0x33, 0xdf, 0xf7, 0xbd, 0x79, 0x6f, 0x66, 0x16, 
0x75, 0x9c, 0x81, 0x93, 0xf4, 0xad, 0x28, + 0x0e, 0x47, 0x5e, 0x8f, 0xc7, 0xd6, 0xe8, 0xa1, 0x95, 0x08, 0x47, 0xa4, 0x89, 0x19, 0xc5, 0xa1, + 0x08, 0xf1, 0x66, 0x86, 0x9b, 0x39, 0x6e, 0x8e, 0x1e, 0x6e, 0xdd, 0x76, 0x43, 0x37, 0xcc, 0x50, + 0x4b, 0x7d, 0xe9, 0x89, 0x5b, 0xdb, 0x9a, 0xc8, 0x0b, 0x46, 0x3c, 0x10, 0x61, 0x3c, 0x56, 0x4c, + 0xdd, 0x61, 0x9a, 0x08, 0x1e, 0xc3, 0x8c, 0xbd, 0xc1, 0x7e, 0x62, 0x79, 0xa1, 0xe5, 0x44, 0x9e, + 0xef, 0x74, 0xfb, 0x5e, 0xc0, 0xe3, 0xb1, 0x15, 0x0d, 0x5c, 0x15, 0xb0, 0x62, 0x9e, 0x84, 0x69, + 0xdc, 0xe5, 0x96, 0xcb, 0x03, 0x1e, 0x3b, 0x82, 0xf7, 0xf4, 0x2a, 0xfa, 0xcf, 0x2a, 0xda, 0x60, + 0x00, 0x26, 0xc7, 0x5c, 0xc4, 0x5e, 0x17, 0x9f, 0xa1, 0x5a, 0x37, 0x4a, 0xdb, 0xc6, 0xb6, 0xb1, + 0xb3, 0xb6, 0x6b, 0x9a, 0x83, 0xfd, 0xc4, 0xf4, 0x42, 0xb3, 0xca, 0x6b, 0x46, 0x03, 0x57, 0x05, + 0xcc, 0x9c, 0xd7, 0x7c, 0x91, 0x3a, 0x81, 0xf0, 0xc4, 0xd8, 0xde, 0x3e, 0x97, 0xc4, 0x98, 0x48, + 0x52, 0x3b, 0x38, 0x79, 0x35, 0x95, 0x44, 0x31, 0xcd, 0x24, 0x41, 0x63, 0xc7, 0x1f, 0x3e, 0xa6, + 0xdd, 0x28, 0xa5, 0x4c, 0x85, 0xf0, 0x37, 0xa8, 0xee, 0x73, 0x3f, 0x8c, 0xc7, 0xed, 0x6b, 0x57, + 0x92, 0xb9, 0x0f, 0x32, 0xf5, 0xe3, 0x8c, 0x65, 0x2a, 0x09, 0xf0, 0xcd, 0x24, 0xb9, 0xa9, 0xc5, + 0xf4, 0x98, 0x32, 0x00, 0xf0, 0x6b, 0x54, 0x73, 0xa3, 0xb4, 0x5d, 0xbb, 0x92, 0xde, 0x4e, 0x9e, + 0xd6, 0x91, 0x4e, 0xcb, 0xad, 0xa6, 0xe5, 0x46, 0x29, 0xfd, 0xe5, 0x4f, 0x72, 0xfd, 0xe8, 0xe4, + 0x55, 0xc2, 0x14, 0x84, 0x7f, 0x36, 0xd0, 0x26, 0x8f, 0xfa, 0xdc, 0xe7, 0xb1, 0x33, 0x3c, 0x4d, + 0x44, 0x18, 0x3b, 0x2e, 0x6f, 0x5f, 0xbf, 0x92, 0xf4, 0x73, 0x90, 0x6e, 0x1d, 0xe6, 0x84, 0x2f, + 0x35, 0xdf, 0x54, 0x92, 0xcb, 0x22, 0x33, 0x49, 0xda, 0xda, 0xd5, 0x25, 0x88, 0xb2, 0x16, 0x5f, + 0xa0, 0xc0, 0xdf, 0xa1, 0x46, 0xee, 0x6e, 0x75, 0xbb, 0xb6, 0xb3, 0xb6, 0x6b, 0x99, 0x97, 0x5a, + 0xd2, 0x5c, 0x68, 0x10, 0x13, 0x16, 0x1f, 0x06, 0x22, 0x1e, 0xdb, 0x8f, 0x26, 0x92, 0x34, 0x4a, + 0x47, 0x8d, 0xd2, 0xc7, 0x2d, 0xed, 0x23, 0x57, 0xff, 0xaf, 0x9c, 0xc7, 0xf2, 0x59, 0x5b, 0xaf, + 0xd1, 0x7a, 0x95, 0x0d, 0xb7, 0x50, 0x6d, 0xc0, 0xc7, 0x59, 0xef, 0x35, 0x99, 0xfa, 0xc4, 0x4f, + 0xd1, 0xea, 0xc8, 0x19, 0xa6, 0xfc, 0x6a, 0x8d, 0xc2, 0xf4, 0xe2, 0xc7, 0xd7, 0xf6, 0x0d, 0x7a, + 0x84, 0xea, 0x9f, 0x71, 0x27, 0xe1, 0x09, 0x7e, 0x82, 0xea, 0x4e, 0x57, 0x78, 0x23, 0x9e, 0x09, + 0xdd, 0xb4, 0xef, 0xa9, 0x4e, 0xfa, 0x34, 0x8b, 0xa8, 0x4e, 0xd2, 0x58, 0xd9, 0x49, 0x7a, 0x4c, + 0x19, 0x00, 0xf4, 0x57, 0x03, 0x61, 0xc6, 0x13, 0x1e, 0x8f, 0x1c, 0xe1, 0x85, 0x41, 0x7e, 0x6e, + 0xf6, 0xd1, 0x6a, 0x37, 0x4c, 0x03, 0x01, 0xa4, 0x74, 0x22, 0xc9, 0xea, 0x81, 0x0a, 0x4c, 0x25, + 0xd1, 0xc8, 0x4c, 0x92, 0x75, 0x38, 0x09, 0x6a, 0x48, 0x99, 0x0e, 0x63, 0x81, 0x9a, 0xb9, 0xf3, + 0x04, 0xf2, 0xa4, 0x6f, 0xaf, 0x83, 0xbd, 0x7b, 0x2e, 0xc9, 0xca, 0x44, 0x92, 0x66, 0x01, 0x4c, + 0x25, 0x29, 0x99, 0x66, 0x92, 0xb4, 0xb4, 0x5a, 0x11, 0xa2, 0xac, 0x84, 0xe9, 0xdf, 0x06, 0x5a, + 0xaf, 0xa6, 0x81, 0x7d, 0xd4, 0x88, 0x78, 0xd0, 0xf3, 0x02, 0x17, 0x0e, 0xff, 0xbd, 0xe5, 0x26, + 0x16, 0x12, 0xb7, 0x1f, 0x80, 0x8f, 0xc6, 0x89, 0x5e, 0xad, 0xda, 0x00, 0x88, 0xca, 0x36, 0x80, + 0x00, 0x65, 0x39, 0x84, 0xfb, 0x45, 0x15, 0xae, 0xbd, 0x8b, 0xda, 0x7d, 0x50, 0x7b, 0x87, 0x82, + 0xfd, 0x6b, 0xa0, 0xe6, 0xf3, 0xfc, 0xea, 0xc4, 0x5d, 0xd4, 0x80, 0xab, 0x13, 0xd2, 0xbc, 0x0b, + 0xc2, 0xc5, 0xed, 0xaa, 0x94, 0x0f, 0xf4, 0x94, 0x32, 0x39, 0x08, 0xa8, 0xe4, 0x60, 0x79, 0x99, + 0x1c, 0x04, 0x28, 0xcb, 0x21, 0xfc, 0xbd, 0x81, 0xd6, 0xe3, 0x8a, 0x79, 0xc8, 0x91, 0xbc, 0x25, + 0x47, 0xfb, 0x63, 0x90, 0x9b, 0xab, 0xcc, 0x54, 0x92, 0x39, 0xb2, 0x99, 0x24, 
0xef, 0x15, 0x95, + 0x2d, 0xa2, 0x94, 0xcd, 0x4d, 0xa2, 0x7f, 0x18, 0xe8, 0x26, 0x98, 0x7e, 0x99, 0x3d, 0x3a, 0xf8, + 0x2b, 0x54, 0x1f, 0x66, 0x27, 0x00, 0x12, 0xbf, 0xb3, 0xc4, 0x8d, 0x3e, 0x22, 0xe5, 0x2e, 0xeb, + 0xb1, 0xda, 0x65, 0xbd, 0xb4, 0xdc, 0x65, 0x3d, 0xa6, 0x0c, 0x00, 0x1c, 0xa0, 0x66, 0xb1, 0x83, + 0x90, 0xee, 0x07, 0x4b, 0x04, 0x8a, 0x42, 0x94, 0xfd, 0x5b, 0x84, 0x54, 0xff, 0x16, 0x1c, 0x65, + 0xff, 0x16, 0x21, 0xca, 0x4a, 0x98, 0x9e, 0xa0, 0x0d, 0xdb, 0xeb, 0x1d, 0x06, 0xae, 0x17, 0x70, + 0x48, 0xf0, 0x09, 0xaa, 0x87, 0x71, 0x8f, 0xc7, 0x49, 0xf5, 0x60, 0x7f, 0x9e, 0x45, 0x54, 0x06, + 0x1a, 0x2b, 0x33, 0xd0, 0x63, 0xca, 0x00, 0xa0, 0x7d, 0x74, 0xeb, 0xd8, 0x09, 0xbc, 0xaf, 0x79, + 0x22, 0x80, 0xf0, 0x0b, 0xb4, 0xd6, 0xe3, 0xd1, 0x30, 0x1c, 0xfb, 0x3c, 0x10, 0x39, 0xeb, 0xde, + 0x44, 0x92, 0xb5, 0xa7, 0x65, 0x78, 0x2a, 0x49, 0x75, 0xd6, 0x4c, 0x12, 0xac, 0xf9, 0x2b, 0x41, + 0xca, 0xaa, 0x53, 0xe8, 0xef, 0xd7, 0x51, 0x1d, 0x24, 0x5e, 0xa0, 0x3a, 0x8f, 0xe3, 0x30, 0xf3, + 0x5c, 0xdb, 0x69, 0xda, 0x1f, 0x29, 0xcf, 0x87, 0x59, 0x64, 0x2a, 0x49, 0x4b, 0x63, 0x0f, 0x42, + 0xdf, 0x13, 0xdc, 0x8f, 0x84, 0xda, 0x95, 0xf7, 0xe1, 0x82, 0x5f, 0x40, 0x28, 0x03, 0x22, 0xfc, + 0x6d, 0xd9, 0xe1, 0xba, 0x0e, 0xdb, 0x4b, 0xea, 0x30, 0xd7, 0x1a, 0xf6, 0x27, 0xf0, 0xca, 0x54, + 0xda, 0x7c, 0x13, 0x38, 0xe6, 0xb4, 0xdb, 0x73, 0x0d, 0x5f, 0x15, 0x2f, 0x5a, 0xff, 0x47, 0x03, + 0xa1, 0x33, 0xaf, 0x77, 0xca, 0xb3, 0xca, 0xc0, 0x83, 0xbb, 0xec, 0x3e, 0x5b, 0xa8, 0x9e, 0xfd, + 0x0c, 0x3c, 0x34, 0x0b, 0x60, 0x2a, 0xc9, 0xed, 0x92, 0x6a, 0xce, 0xc8, 0x5d, 0x6d, 0x64, 0x19, + 0x4a, 0x59, 0xf3, 0x2c, 0x67, 0xc0, 0x3f, 0x18, 0xe8, 0x86, 0x0f, 0x45, 0x85, 0x17, 0xf8, 0xc3, + 0x25, 0x5e, 0xe6, 0xeb, 0x6e, 0x1f, 0x80, 0x95, 0x1b, 0x79, 0x7c, 0x2a, 0x09, 0xce, 0x69, 0xe6, + 0x7c, 0xdc, 0x81, 0xbf, 0x8d, 0x4b, 0x18, 0x65, 0x85, 0x2e, 0xf6, 0x51, 0x2b, 0x4a, 0xcf, 0x86, + 0x5e, 0xf7, 0xb4, 0x1f, 0x26, 0x22, 0x70, 0x7c, 0x9e, 0x64, 0xef, 0x6d, 0xd3, 0xb6, 0x27, 0x92, + 0x6c, 0x9c, 0x64, 0xd8, 0xb3, 0x1c, 0x52, 0x65, 0x5f, 0x9c, 0x5e, 0x96, 0x7d, 0x11, 0xa1, 0x6c, + 0x23, 0x9a, 0x5f, 0x6f, 0xef, 0x9d, 0x4f, 0x3a, 0xc6, 0x9b, 0x49, 0xc7, 0xf8, 0x6b, 0xd2, 0x31, + 0x7e, 0xba, 0xe8, 0xac, 0xbc, 0xb9, 0xe8, 0xac, 0xfc, 0x76, 0xd1, 0x59, 0xf9, 0x72, 0x2b, 0x7b, + 0x29, 0x07, 0xc2, 0xec, 0xf1, 0x91, 0xe5, 0x86, 0xd5, 0x3f, 0xd3, 0xb3, 0x7a, 0xf6, 0x4b, 0xf8, + 0xe8, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5c, 0x93, 0x81, 0xc7, 0xb5, 0x0a, 0x00, 0x00, } func (m *ResourcesMetric) Marshal() (dAtA []byte, err error) { @@ -998,14 +981,6 @@ func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - n13, err13 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err13 != nil { - return 0, err13 - } - i -= n13 - i = encodeVarintStatus(dAtA, i, uint64(n13)) - i-- - dAtA[i] = 0x32 if len(m.PublicHostnames) > 0 { for iNdEx := len(m.PublicHostnames) - 1; iNdEx >= 0; iNdEx-- { i -= len(m.PublicHostnames[iNdEx]) @@ -1231,8 +1206,6 @@ func (m *Status) Size() (n int) { n += 1 + l + sovStatus(uint64(l)) } } - l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp) - n += 1 + l + sovStatus(uint64(l)) return n } @@ -2423,39 +2396,6 @@ func (m *Status) Unmarshal(dAtA []byte) error { } m.PublicHostnames = append(m.PublicHostnames, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowStatus - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthStatus - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthStatus - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := github_com_gogo_protobuf_types.StdTimeUnmarshal(&m.Timestamp, dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipStatus(dAtA[iNdEx:]) diff --git a/go/sdkutil/config.go b/go/sdkutil/config.go index d3be973e..22a5abc4 100644 --- a/go/sdkutil/config.go +++ b/go/sdkutil/config.go @@ -8,6 +8,8 @@ import ( // it helps for all tests as well as packages relying on this api to always have the same config // as soon as sdkutil is imported func init() { + sdk.DefaultBondDenom = BondDenom + config := sdk.GetConfig() config.SetBech32PrefixForAccount(Bech32PrefixAccAddr, Bech32PrefixAccPub) config.SetBech32PrefixForValidator(Bech32PrefixValAddr, Bech32PrefixValPub) diff --git a/go/sdkutil/init.go b/go/sdkutil/init.go index 836a8c8b..a9d4b2a6 100644 --- a/go/sdkutil/init.go +++ b/go/sdkutil/init.go @@ -1,6 +1,8 @@ package sdkutil const ( + BondDenom = "uakt" + Bech32PrefixAccAddr = "akash" Bech32PrefixAccPub = "akashpub" diff --git a/go/sdl/.golangci.yaml b/go/sdl/.golangci.yaml new file mode 100644 index 00000000..5a34e6d9 --- /dev/null +++ b/go/sdl/.golangci.yaml @@ -0,0 +1,33 @@ +--- +issues: + exclude: + - comment on exported (method|function|type|const|var) + exclude-use-default: true + +# Skip generated k8s code +run: + exclude-dirs: [] + exclude-files: [] + # Skip vendor/ etc + skip-dirs-use-default: true +linters: + disable-all: true + enable: + - unused + - misspell + - gofmt + - gocritic + - goconst + - govet + - ineffassign + - unparam + - staticcheck + - revive + - gosec + - copyloopvar + - prealloc +linters-settings: + gocritic: + disabled-checks: + - ifElseChain + - singleCaseSwitch diff --git a/go/sdl/_testdata/deployment-svc-mismatch.yaml b/go/sdl/_testdata/deployment-svc-mismatch.yaml new file mode 100644 index 00000000..6bb33b41 --- /dev/null +++ b/go/sdl/_testdata/deployment-svc-mismatch.yaml @@ -0,0 +1,45 @@ +--- +version: "2.0" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 +deployment: + webapp: + westcoast: + profile: web + count: 2 diff --git a/go/sdl/_testdata/legacy/deployment-v2-c2c.yaml b/go/sdl/_testdata/legacy/deployment-v2-c2c.yaml new file mode 100644 index 00000000..4c07ca62 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2-c2c.yaml @@ -0,0 +1,58 @@ +--- +version: "2.0" + +services: + web: + image: anapsix/webdis + env: + - REDIS_HOST=redis-server + expose: + - port: 7379 + as: 80 + to: + - global: true + accept: + - webdistest.localhost + + redis-server: + image: redis:rc-alpine3.12 + expose: + - port: 6379 + +profiles: + compute: + web: + resources: + cpu: + units: 0.1 + memory: + size: 16Mi + storage: + size: 128Mi + redis-server: + resources: + cpu: + units: 0.1 + memory: + size: 64Mi + storage: 
+ size: 128Mi + placement: + global: + pricing: + web: + denom: uakt + amount: 9000 + redis-server: + denom: uakt + amount: 9000 + +deployment: + web: + global: + profile: web + count: 1 + redis-server: + global: + profile: redis-server + count: 1 diff --git a/go/sdl/_testdata/legacy/deployment-v2-escrow.yaml b/go/sdl/_testdata/legacy/deployment-v2-escrow.yaml new file mode 100644 index 00000000..cd3fa439 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2-escrow.yaml @@ -0,0 +1,58 @@ +--- +version: "2.0" + +services: + web: + image: anapsix/webdis + env: + - REDIS_HOST=redis-server + expose: + - port: 7379 + as: 80 + to: + - global: true + accept: + - webdistest.localhost + + redis-server: + image: redis:rc-alpine3.12 + expose: + - port: 6379 + +profiles: + compute: + web: + resources: + cpu: + units: 0.1 + memory: + size: 16Mi + storage: + size: 128Mi + redis-server: + resources: + cpu: + units: 0.1 + memory: + size: 64Mi + storage: + size: 128Mi + placement: + global: + pricing: + web: + denom: uakt + amount: 10000000 + redis-server: + denom: uakt + amount: 10000000 + +deployment: + web: + global: + profile: web + count: 1 + redis-server: + global: + profile: redis-server + count: 1 diff --git a/go/sdl/_testdata/legacy/deployment-v2-ip-endpoint.yaml b/go/sdl/_testdata/legacy/deployment-v2-ip-endpoint.yaml new file mode 100644 index 00000000..0727d61d --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2-ip-endpoint.yaml @@ -0,0 +1,41 @@ +--- +version: "2.0" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + ip: "meow" + accept: + - test.localhost + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 10 + +deployment: + web: + global: + profile: web + count: 1 + +endpoints: + meow: + kind: "ip" diff --git a/go/sdl/_testdata/legacy/deployment-v2-migrate.yaml b/go/sdl/_testdata/legacy/deployment-v2-migrate.yaml new file mode 100644 index 00000000..3b9492b3 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2-migrate.yaml @@ -0,0 +1,37 @@ +--- +version: "2.0" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + accept: + - leaveme.com + - migrateme.com + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 10 + +deployment: + web: + global: + profile: web + count: 1 diff --git a/go/sdl/_testdata/legacy/deployment-v2-multi-groups-ip-endpoint.yaml b/go/sdl/_testdata/legacy/deployment-v2-multi-groups-ip-endpoint.yaml new file mode 100644 index 00000000..8cf888c2 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2-multi-groups-ip-endpoint.yaml @@ -0,0 +1,69 @@ +--- +version: "2.0" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + ip: "meow" + accept: + - test.localhost + anotherweb: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + ip: "kittens" + accept: + - extratest.localhost + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + bob: + resources: + cpu: + units: "0.13" + memory: + size: "256Mi" + storage: + size: "99Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 10 + bob: + pricing: + bob: + denom: uakt + amount: 99 
+ +deployment: + web: + global: + profile: web + count: 1 + anotherweb: + bob: + profile: bob + count: 3 + +endpoints: + meow: + kind: "ip" + kittens: + kind: "ip" diff --git a/go/sdl/_testdata/legacy/deployment-v2-multi-ip-endpoint.yaml b/go/sdl/_testdata/legacy/deployment-v2-multi-ip-endpoint.yaml new file mode 100644 index 00000000..33ca42e0 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2-multi-ip-endpoint.yaml @@ -0,0 +1,56 @@ +--- +version: "2.0" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + ip: "meow" + accept: + - test.localhost + anotherweb: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + ip: "kittens" + accept: + - extratest.localhost + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 10 + +deployment: + web: + global: + profile: web + count: 1 + anotherweb: + global: + profile: web + count: 3 + +endpoints: + meow: + kind: "ip" + kittens: + kind: "ip" diff --git a/go/sdl/_testdata/legacy/deployment-v2-newcontainer.yaml b/go/sdl/_testdata/legacy/deployment-v2-newcontainer.yaml new file mode 100644 index 00000000..2d3911c3 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2-newcontainer.yaml @@ -0,0 +1,36 @@ +--- +version: "2.0" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + accept: + - test.localhost + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 10 + +deployment: + web: + global: + profile: web + count: 1 diff --git a/go/sdl/_testdata/legacy/deployment-v2-nodeport.yaml b/go/sdl/_testdata/legacy/deployment-v2-nodeport.yaml new file mode 100644 index 00000000..ac5ad0b0 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2-nodeport.yaml @@ -0,0 +1,39 @@ +--- +version: "2.0" + +services: + web: + image: hydrogen18/hello_world:20201209 + expose: + - port: 10000 + as: 10000 + to: + - global: true + - port: 10000 + as: 10000 + proto: UDP + to: + - global: true + +profiles: + compute: + web: + resources: + cpu: + units: 0.1 + memory: + size: 16Mi + storage: + size: 128Mi + placement: + global: + pricing: + web: + denom: uakt + amount: 9000 + +deployment: + web: + global: + profile: web + count: 1 diff --git a/go/sdl/_testdata/legacy/deployment-v2-nohost.yaml b/go/sdl/_testdata/legacy/deployment-v2-nohost.yaml new file mode 100644 index 00000000..195e7fa7 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2-nohost.yaml @@ -0,0 +1,34 @@ +--- +version: "2.0" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 10 + +deployment: + web: + global: + profile: web + count: 1 diff --git a/go/sdl/_testdata/legacy/deployment-v2-shared-ip-endpoint.yaml b/go/sdl/_testdata/legacy/deployment-v2-shared-ip-endpoint.yaml new file mode 100644 index 00000000..a9214b27 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2-shared-ip-endpoint.yaml @@ -0,0 +1,55 @@ +--- +version: "2.0" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + ip: "meow" + accept: + - 
test.localhost + anotherweb: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + as: 81 + to: + - global: true + ip: "meow" + accept: + - extratest.localhost + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 10 + +deployment: + web: + global: + profile: web + count: 1 + anotherweb: + global: + profile: web + count: 3 + +endpoints: + meow: + kind: "ip" diff --git a/go/sdl/_testdata/legacy/deployment-v2-storage-beta2.yaml b/go/sdl/_testdata/legacy/deployment-v2-storage-beta2.yaml new file mode 100644 index 00000000..5607d798 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2-storage-beta2.yaml @@ -0,0 +1,42 @@ +--- +version: "2.0" +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 8080 + as: 80 + to: + - global: true + accept: + - webdistest.localhost + params: + storage: + data: + mount: /var/lib/demo-app +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + - size: "512Mi" + - name: data + size: "128Mi" + attributes: + persistent: "true" + class: beta2 + placement: + global: + pricing: + web: + denom: uakt + amount: 10 +deployment: + web: + global: + profile: web + count: 1 diff --git a/go/sdl/_testdata/legacy/deployment-v2-storage-default.yaml b/go/sdl/_testdata/legacy/deployment-v2-storage-default.yaml new file mode 100644 index 00000000..569b60f3 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2-storage-default.yaml @@ -0,0 +1,40 @@ +version: "2.0" +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 8080 + as: 80 + to: + - global: true + accept: + - webdistest.localhost + params: + storage: + data: + mount: "/var/lib/demo-app/data" +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + - size: "512Mi" + - name: data + size: "128Mi" + attributes: + persistent: "true" + placement: + global: + pricing: + web: + denom: uakt + amount: 10 +deployment: + web: + global: + profile: web + count: 1 diff --git a/go/sdl/_testdata/legacy/deployment-v2-storage-updateC.yaml b/go/sdl/_testdata/legacy/deployment-v2-storage-updateC.yaml new file mode 100644 index 00000000..4abdca3e --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2-storage-updateC.yaml @@ -0,0 +1,43 @@ +--- +version: "2.0" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + accept: + - testupdatea.localhost + - testupdateb.localhost + params: + storage: + data: + mount: /var/lib/e2e-test +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + - size: "512Mi" + - name: data + size: "256Mi" + attributes: + persistent: "true" + placement: + global: + pricing: + web: + denom: uakt + amount: 10 + +deployment: + web: + global: + profile: web + count: 1 diff --git a/go/sdl/_testdata/legacy/deployment-v2-updateA.yaml b/go/sdl/_testdata/legacy/deployment-v2-updateA.yaml new file mode 100644 index 00000000..cd47a4be --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2-updateA.yaml @@ -0,0 +1,36 @@ +--- +version: "2.0" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + accept: + - testupdatea.localhost + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + + placement: + global: + pricing: + web: + 
denom: uakt + amount: 10 + +deployment: + web: + global: + profile: web + count: 1 diff --git a/go/sdl/_testdata/legacy/deployment-v2-updateB.yaml b/go/sdl/_testdata/legacy/deployment-v2-updateB.yaml new file mode 100644 index 00000000..6e480e66 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2-updateB.yaml @@ -0,0 +1,37 @@ +--- +version: "2.0" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + accept: + - testupdatea.localhost + - testupdateb.localhost + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 10 + +deployment: + web: + global: + profile: web + count: 1 diff --git a/go/sdl/_testdata/legacy/deployment-v2.1-ip-endpoint.yaml b/go/sdl/_testdata/legacy/deployment-v2.1-ip-endpoint.yaml new file mode 100644 index 00000000..df653d52 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2.1-ip-endpoint.yaml @@ -0,0 +1,41 @@ +--- +version: "2.1" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + ip: "meow" + accept: + - test.localhost + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 10 + +deployment: + web: + global: + profile: web + count: 1 + +endpoints: + meow: + kind: "ip" diff --git a/go/sdl/_testdata/legacy/deployment-v2.1-multi-groups-ip-endpoint.yaml b/go/sdl/_testdata/legacy/deployment-v2.1-multi-groups-ip-endpoint.yaml new file mode 100644 index 00000000..8cf888c2 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2.1-multi-groups-ip-endpoint.yaml @@ -0,0 +1,69 @@ +--- +version: "2.0" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + ip: "meow" + accept: + - test.localhost + anotherweb: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + ip: "kittens" + accept: + - extratest.localhost + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + bob: + resources: + cpu: + units: "0.13" + memory: + size: "256Mi" + storage: + size: "99Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 10 + bob: + pricing: + bob: + denom: uakt + amount: 99 + +deployment: + web: + global: + profile: web + count: 1 + anotherweb: + bob: + profile: bob + count: 3 + +endpoints: + meow: + kind: "ip" + kittens: + kind: "ip" diff --git a/go/sdl/_testdata/legacy/deployment-v2.1-multi-ip-endpoint.yaml b/go/sdl/_testdata/legacy/deployment-v2.1-multi-ip-endpoint.yaml new file mode 100644 index 00000000..9fe15b17 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2.1-multi-ip-endpoint.yaml @@ -0,0 +1,56 @@ +--- +version: "2.1" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + ip: "meow" + accept: + - test.localhost + anotherweb: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + ip: "kittens" + accept: + - extratest.localhost + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 10 + +deployment: + web: + global: + profile: web + count: 1 + anotherweb: + global: + profile: web + count: 3 + +endpoints: + meow: + kind: 
"ip" + kittens: + kind: "ip" diff --git a/go/sdl/_testdata/legacy/deployment-v2.1-shared-ip-endpoint.yaml b/go/sdl/_testdata/legacy/deployment-v2.1-shared-ip-endpoint.yaml new file mode 100644 index 00000000..09555f76 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2.1-shared-ip-endpoint.yaml @@ -0,0 +1,55 @@ +--- +version: "2.1" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + ip: "meow" + accept: + - test.localhost + anotherweb: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + as: 81 + to: + - global: true + ip: "meow" + accept: + - extratest.localhost + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 10 + +deployment: + web: + global: + profile: web + count: 1 + anotherweb: + global: + profile: web + count: 3 + +endpoints: + meow: + kind: "ip" diff --git a/go/sdl/_testdata/legacy/deployment-v2.1.yaml b/go/sdl/_testdata/legacy/deployment-v2.1.yaml new file mode 100644 index 00000000..aaf4d294 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2.1.yaml @@ -0,0 +1,36 @@ +--- +version: "2.1" + +services: + web: + image: bubuntux/riot-web + expose: + - port: 80 + to: + - global: true + accept: + - test.localhost + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 30 + +deployment: + web: + global: + profile: web + count: 1 diff --git a/go/sdl/_testdata/legacy/deployment-v2.yaml b/go/sdl/_testdata/legacy/deployment-v2.yaml new file mode 100644 index 00000000..2d3911c3 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment-v2.yaml @@ -0,0 +1,36 @@ +--- +version: "2.0" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + accept: + - test.localhost + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 10 + +deployment: + web: + global: + profile: web + count: 1 diff --git a/go/sdl/_testdata/legacy/deployment.yaml b/go/sdl/_testdata/legacy/deployment.yaml new file mode 100644 index 00000000..f653f4c6 --- /dev/null +++ b/go/sdl/_testdata/legacy/deployment.yaml @@ -0,0 +1,36 @@ +--- +version: "2.0" + +services: + web: + image: bubuntux/riot-web + expose: + - port: 80 + to: + - global: true + accept: + - test.localhost + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 30 + +deployment: + web: + global: + profile: web + count: 1 diff --git a/go/sdl/_testdata/private_service.yaml b/go/sdl/_testdata/private_service.yaml new file mode 100644 index 00000000..73db7d96 --- /dev/null +++ b/go/sdl/_testdata/private_service.yaml @@ -0,0 +1,64 @@ +--- +version: "2.0" +services: + bind: + image: bind9 + expose: + - port: 53 + proto: udp + to: + - global: true + + pg: + image: postgresql + expose: + - port: 5463 + to: + - service: bind + +profiles: + compute: + bind: + resources: + cpu: + units: "50m" + memory: + size: "64Mi" + storage: + size: "16Mi" + pg: + resources: + cpu: + units: "500m" + memory: + size: "512Mi" + storage: + size: "1000Mi" + + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + 
allOf: + - 3 + - 4 + pricing: + pg: + denom: uakt + amount: 1000 + bind: + denom: uakt + amount: 333 +deployment: + pg: + westcoast: + profile: pg + count: 1 + bind: + westcoast: + profile: bind + count: 8 diff --git a/go/sdl/_testdata/profile-svc-name-mismatch.yaml b/go/sdl/_testdata/profile-svc-name-mismatch.yaml new file mode 100644 index 00000000..ea88ff49 --- /dev/null +++ b/go/sdl/_testdata/profile-svc-name-mismatch.yaml @@ -0,0 +1,38 @@ +--- +version: "2.0" + +services: + webapp: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + as: 80 + accept: + - thehostname.com + to: + - global: true + +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "512Mi" + storage: + size: "512Mi" + placement: + san-jose: + attributes: + region: sjc + pricing: + web: + denom: uakt + amount: 25 + +deployment: + webapp: + san-jose: + profile: web + count: 1 diff --git a/go/sdl/_testdata/service-mix.yaml b/go/sdl/_testdata/service-mix.yaml new file mode 100644 index 00000000..51a21a01 --- /dev/null +++ b/go/sdl/_testdata/service-mix.yaml @@ -0,0 +1,80 @@ +--- +version: "2.0" +services: + svca: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp + svcb: + image: nginx + expose: + - port: 80 + accept: + - bhostname.com + to: + - global: true + - port: 12346 + to: + - global: true + proto: udp + +profiles: + compute: + profilea: + resources: + cpu: + units: "100m" + gpu: + units: "1" + attributes: + vendor: + nvidia: + memory: + size: "128Mi" + storage: + - size: "1Gi" + profileb: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + - size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + blalbla: foo + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + profilea: + denom: uakt + amount: 50 + profileb: + denom: uakt + amount: 50 + +deployment: + svca: + westcoast: + profile: profilea + count: 1 + svcb: + westcoast: + profile: profileb + count: 1 diff --git a/go/sdl/_testdata/service-mix2.yaml b/go/sdl/_testdata/service-mix2.yaml new file mode 100644 index 00000000..3eb556d9 --- /dev/null +++ b/go/sdl/_testdata/service-mix2.yaml @@ -0,0 +1,69 @@ +--- +version: "2.0" +services: + svca: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp + svcb: + image: nginx + expose: + - port: 80 + accept: + - bhostname.com + to: + - global: true + - port: 12346 + to: + - global: true + proto: udp + +profiles: + compute: + profilea: + resources: + cpu: + units: "100m" + gpu: + units: "1" + attributes: + vendor: + nvidia: + memory: + size: "128Mi" + storage: + - size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + blalbla: foo + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + profilea: + denom: uakt + amount: 50 + +deployment: + svca: + westcoast: + profile: profilea + count: 1 + svcb: + westcoast: + profile: profilea + count: 1 diff --git a/go/sdl/_testdata/simple-double-ram.yaml b/go/sdl/_testdata/simple-double-ram.yaml new file mode 100644 index 00000000..6e1bdb5c --- /dev/null +++ b/go/sdl/_testdata/simple-double-ram.yaml @@ -0,0 +1,45 @@ +--- +version: "2.0" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "256Mi" + storage: + 
size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: web + count: 2 diff --git a/go/sdl/_testdata/simple-gpu.yaml b/go/sdl/_testdata/simple-gpu.yaml new file mode 100644 index 00000000..84048e6f --- /dev/null +++ b/go/sdl/_testdata/simple-gpu.yaml @@ -0,0 +1,52 @@ +--- +version: "2.0" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp +profiles: + compute: + web: + resources: + cpu: + units: "100m" + gpu: + units: 1 + attributes: + vendor: + nvidia: + - model: a100 + memory: + size: "128Mi" + storage: + - size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + blalbla: foo + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: web + count: 2 diff --git a/go/sdl/_testdata/simple-with-ip.yaml b/go/sdl/_testdata/simple-with-ip.yaml new file mode 100644 index 00000000..4308f095 --- /dev/null +++ b/go/sdl/_testdata/simple-with-ip.yaml @@ -0,0 +1,50 @@ +--- +version: "2.0" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + ip: "meow" + proto: udp +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: web + count: 2 + +endpoints: + meow: + kind: "ip" diff --git a/go/sdl/_testdata/simple.yaml b/go/sdl/_testdata/simple.yaml new file mode 100644 index 00000000..d164e60e --- /dev/null +++ b/go/sdl/_testdata/simple.yaml @@ -0,0 +1,45 @@ +--- +version: "2.0" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: web + count: 2 diff --git a/go/sdl/_testdata/simple2.yaml b/go/sdl/_testdata/simple2.yaml new file mode 100644 index 00000000..08dfa81c --- /dev/null +++ b/go/sdl/_testdata/simple2.yaml @@ -0,0 +1,64 @@ +--- +version: "2.0" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp + bew: + image: nginx + expose: + - port: 8080 + accept: + - bhostname.com + to: + - global: true + - port: 12346 + to: + - global: true + proto: udp + - port: 12347 + to: + - global: true + proto: udp +profiles: + compute: + bew: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 + bew: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: bew + count: 2 diff --git a/go/sdl/_testdata/simple3.yaml b/go/sdl/_testdata/simple3.yaml new file mode 100644 index 00000000..9e016208 --- 
/dev/null +++ b/go/sdl/_testdata/simple3.yaml @@ -0,0 +1,45 @@ +--- +version: "2.0" + +services: + app: + image: ubuntu:22.04 + command: + - "sh" + - "-c" + args: + - 'sleep infinity' + expose: + - port: 80 + as: 80 + to: + - global: true + - port: 22 + as: 22 + to: + - global: true + +profiles: + compute: + app: + resources: + cpu: + units: 1 + memory: + size: 2Gi + storage: + size: 10Gi + placement: + akash: + attributes: + host: akash + pricing: + app: + denom: uakt + amount: 1000000 + +deployment: + app: + akash: + profile: app + count: 1 diff --git a/go/sdl/_testdata/simple4.yaml b/go/sdl/_testdata/simple4.yaml new file mode 100644 index 00000000..1f555485 --- /dev/null +++ b/go/sdl/_testdata/simple4.yaml @@ -0,0 +1,90 @@ +--- +version: '2.0' +services: + wordpress: + image: wordpress + depends_on: + - db + expose: + - port: 80 + http_options: + max_body_size: 104857600 + # accept: + # - "example.com" + to: + - global: true + env: + - WORDPRESS_DB_HOST=db + - WORDPRESS_DB_USER=wordpress + - WORDPRESS_DB_PASSWORD=testpass4you + - WORDPRESS_DB_NAME=wordpress + params: + storage: + wordpress-data: + mount: /var/www/html + readOnly: false + db: + # We use a mariadb image which supports both amd64 & arm64 architecture + image: mariadb:10.6.4 + # If you really want to use MySQL, uncomment the following line + #image: mysql:8.0.27 + expose: + - port: 3306 + to: + - service: wordpress + env: + - MYSQL_RANDOM_ROOT_PASSWORD=1 + - MYSQL_DATABASE=wordpress + - MYSQL_USER=wordpress + - MYSQL_PASSWORD=testpass4you + params: + storage: + wordpress-db: + mount: /var/lib/mysql + readOnly: false +profiles: + compute: + wordpress: + resources: + cpu: + units: 1 + memory: + size: 1Gi + storage: + - size: 1Gi + - name: wordpress-data + size: 1Gi + attributes: + persistent: true + class: beta3 + db: + resources: + cpu: + units: 1 + memory: + size: 1Gi + storage: + - size: 1Gi + - name: wordpress-db + size: 1Gi + attributes: + persistent: true + class: beta3 + placement: + akash: + pricing: + wordpress: + denom: uakt + amount: 10000 + db: + denom: uakt + amount: 10000 +deployment: + wordpress: + akash: + profile: wordpress + count: 1 + db: + akash: + profile: db + count: 1 diff --git a/go/sdl/_testdata/storageClass1.yaml b/go/sdl/_testdata/storageClass1.yaml new file mode 100644 index 00000000..695e0b64 --- /dev/null +++ b/go/sdl/_testdata/storageClass1.yaml @@ -0,0 +1,52 @@ +--- +version: "2.0" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp + params: + storage: + configs: +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + - size: "1Gi" + - size: 1Gi + name: configs + attributes: + persistent: true + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: web + count: 1 diff --git a/go/sdl/_testdata/storageClass2.yaml b/go/sdl/_testdata/storageClass2.yaml new file mode 100644 index 00000000..5f907cf0 --- /dev/null +++ b/go/sdl/_testdata/storageClass2.yaml @@ -0,0 +1,53 @@ +--- +version: "2.0" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp + params: + storage: + configs: + mount: etc/nginx +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + - 
size: 1Gi + - size: 1Gi + name: configs + attributes: + persistent: true + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: web + count: 1 diff --git a/go/sdl/_testdata/storageClass3.yaml b/go/sdl/_testdata/storageClass3.yaml new file mode 100644 index 00000000..1e0bae8e --- /dev/null +++ b/go/sdl/_testdata/storageClass3.yaml @@ -0,0 +1,52 @@ +--- +version: "2.0" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp + params: + storage: + data: +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + - size: 1Gi + - size: 1Gi + name: configs + attributes: + persistent: true + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: web + count: 1 diff --git a/go/sdl/_testdata/storageClass4.yaml b/go/sdl/_testdata/storageClass4.yaml new file mode 100644 index 00000000..5ca8af53 --- /dev/null +++ b/go/sdl/_testdata/storageClass4.yaml @@ -0,0 +1,59 @@ +--- +version: "2.0" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp + params: + storage: + config: + mount: /etc/nginx + data: + mount: /etc/nginx +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + - size: 1Gi + - size: 1Gi + name: config + attributes: + persistent: true + - size: 1Gi + name: data + attributes: + persistent: true + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: web + count: 1 diff --git a/go/sdl/_testdata/storageClass5.yaml b/go/sdl/_testdata/storageClass5.yaml new file mode 100644 index 00000000..5c280bb1 --- /dev/null +++ b/go/sdl/_testdata/storageClass5.yaml @@ -0,0 +1,51 @@ +--- +version: "2.0" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp + params: + storage: +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + - size: "1Gi" + - size: 1Gi + name: configs + attributes: + persistent: true + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: web + count: 1 diff --git a/go/sdl/_testdata/storageClass6.yaml b/go/sdl/_testdata/storageClass6.yaml new file mode 100644 index 00000000..fccfb944 --- /dev/null +++ b/go/sdl/_testdata/storageClass6.yaml @@ -0,0 +1,53 @@ +--- +version: "2.0" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp + params: + storage: + configs: + mount: /test +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + - size: "1Gi" + - size: 1Gi + name: configs + attributes: + persistent: true + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + 
pricing: + web: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: web + count: 1 diff --git a/go/sdl/_testdata/v2.1-credentials-error.yaml b/go/sdl/_testdata/v2.1-credentials-error.yaml new file mode 100644 index 00000000..83e3fc24 --- /dev/null +++ b/go/sdl/_testdata/v2.1-credentials-error.yaml @@ -0,0 +1,48 @@ +--- +version: "2.1" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp + credentials: + username: "foo" + password: "foo" +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: web + count: 2 diff --git a/go/sdl/_testdata/v2.1-credentials.yaml b/go/sdl/_testdata/v2.1-credentials.yaml new file mode 100644 index 00000000..63622b0a --- /dev/null +++ b/go/sdl/_testdata/v2.1-credentials.yaml @@ -0,0 +1,49 @@ +--- +version: "2.1" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp + credentials: + host: "https://test.com/v1" + username: "foo" + password: "foo" +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: web + count: 2 diff --git a/go/sdl/_testdata/v2.1-deployment-svc-mismatch.yaml b/go/sdl/_testdata/v2.1-deployment-svc-mismatch.yaml new file mode 100644 index 00000000..59e98e00 --- /dev/null +++ b/go/sdl/_testdata/v2.1-deployment-svc-mismatch.yaml @@ -0,0 +1,45 @@ +--- +version: "2.1" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 +deployment: + webapp: + westcoast: + profile: web + count: 2 diff --git a/go/sdl/_testdata/v2.1-private_service.yaml b/go/sdl/_testdata/v2.1-private_service.yaml new file mode 100644 index 00000000..e4945869 --- /dev/null +++ b/go/sdl/_testdata/v2.1-private_service.yaml @@ -0,0 +1,64 @@ +--- +version: "2.1" +services: + bind: + image: bind9 + expose: + - port: 53 + proto: udp + to: + - global: true + + pg: + image: postgresql + expose: + - port: 5463 + to: + - service: bind + +profiles: + compute: + bind: + resources: + cpu: + units: "50m" + memory: + size: "64Mi" + storage: + size: "16Mi" + pg: + resources: + cpu: + units: "500m" + memory: + size: "512Mi" + storage: + size: "1000Mi" + + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + pg: + denom: uakt + amount: 1000 + bind: + denom: uakt + amount: 333 +deployment: + pg: + westcoast: + profile: pg + count: 1 + bind: + westcoast: + profile: bind + count: 8 diff --git a/go/sdl/_testdata/v2.1-profile-svc-name-mismatch.yaml b/go/sdl/_testdata/v2.1-profile-svc-name-mismatch.yaml new file mode 100644 index 
00000000..3128102f --- /dev/null +++ b/go/sdl/_testdata/v2.1-profile-svc-name-mismatch.yaml @@ -0,0 +1,38 @@ +--- +version: "2.1" + +services: + webapp: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + as: 80 + accept: + - thehostname.com + to: + - global: true + +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "512Mi" + storage: + size: "512Mi" + placement: + san-jose: + attributes: + region: sjc + pricing: + web: + denom: uakt + amount: 25 + +deployment: + webapp: + san-jose: + profile: web + count: 1 diff --git a/go/sdl/_testdata/v2.1-service-mix.yaml b/go/sdl/_testdata/v2.1-service-mix.yaml new file mode 100644 index 00000000..82991aff --- /dev/null +++ b/go/sdl/_testdata/v2.1-service-mix.yaml @@ -0,0 +1,80 @@ +--- +version: "2.1" +services: + svca: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp + svcb: + image: nginx + expose: + - port: 80 + accept: + - bhostname.com + to: + - global: true + - port: 12346 + to: + - global: true + proto: udp + +profiles: + compute: + profilea: + resources: + cpu: + units: "100m" + gpu: + units: "1" + attributes: + vendor: + nvidia: + memory: + size: "128Mi" + storage: + - size: "1Gi" + profileb: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + - size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + blalbla: foo + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + profilea: + denom: uakt + amount: 50 + profileb: + denom: uakt + amount: 50 + +deployment: + svca: + westcoast: + profile: profilea + count: 1 + svcb: + westcoast: + profile: profileb + count: 1 diff --git a/go/sdl/_testdata/v2.1-service-mix2.yaml b/go/sdl/_testdata/v2.1-service-mix2.yaml new file mode 100644 index 00000000..7109fd46 --- /dev/null +++ b/go/sdl/_testdata/v2.1-service-mix2.yaml @@ -0,0 +1,69 @@ +--- +version: "2.1" +services: + svca: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp + svcb: + image: nginx + expose: + - port: 80 + accept: + - bhostname.com + to: + - global: true + - port: 12346 + to: + - global: true + proto: udp + +profiles: + compute: + profilea: + resources: + cpu: + units: "100m" + gpu: + units: "1" + attributes: + vendor: + nvidia: + memory: + size: "128Mi" + storage: + - size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + blalbla: foo + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + profilea: + denom: uakt + amount: 50 + +deployment: + svca: + westcoast: + profile: profilea + count: 1 + svcb: + westcoast: + profile: profilea + count: 1 diff --git a/go/sdl/_testdata/v2.1-simple-gpu.yaml b/go/sdl/_testdata/v2.1-simple-gpu.yaml new file mode 100644 index 00000000..cc50fd82 --- /dev/null +++ b/go/sdl/_testdata/v2.1-simple-gpu.yaml @@ -0,0 +1,52 @@ +--- +version: "2.1" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp +profiles: + compute: + web: + resources: + cpu: + units: "100m" + gpu: + units: 1 + attributes: + vendor: + nvidia: + - model: a100 + memory: + size: "128Mi" + storage: + - size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + blalbla: foo + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: web + count: 2 
diff --git a/go/sdl/_testdata/v2.1-simple-with-ip.yaml b/go/sdl/_testdata/v2.1-simple-with-ip.yaml new file mode 100644 index 00000000..4121c0e5 --- /dev/null +++ b/go/sdl/_testdata/v2.1-simple-with-ip.yaml @@ -0,0 +1,50 @@ +--- +version: "2.1" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + ip: "meow" + proto: udp +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: web + count: 2 + +endpoints: + meow: + kind: "ip" diff --git a/go/sdl/_testdata/v2.1-simple.yaml b/go/sdl/_testdata/v2.1-simple.yaml new file mode 100644 index 00000000..dc1ac315 --- /dev/null +++ b/go/sdl/_testdata/v2.1-simple.yaml @@ -0,0 +1,45 @@ +--- +version: "2.1" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp +profiles: + compute: + web: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: web + count: 2 diff --git a/go/sdl/_testdata/v2.1-simple2.yaml b/go/sdl/_testdata/v2.1-simple2.yaml new file mode 100644 index 00000000..08dfa81c --- /dev/null +++ b/go/sdl/_testdata/v2.1-simple2.yaml @@ -0,0 +1,64 @@ +--- +version: "2.0" +services: + web: + image: nginx + expose: + - port: 80 + accept: + - ahostname.com + to: + - global: true + - port: 12345 + to: + - global: true + proto: udp + bew: + image: nginx + expose: + - port: 8080 + accept: + - bhostname.com + to: + - global: true + - port: 12346 + to: + - global: true + proto: udp + - port: 12347 + to: + - global: true + proto: udp +profiles: + compute: + bew: + resources: + cpu: + units: "100m" + memory: + size: "128Mi" + storage: + size: "1Gi" + placement: + westcoast: + attributes: + region: us-west + signedBy: + anyOf: + - 1 + - 2 + allOf: + - 3 + - 4 + pricing: + web: + denom: uakt + amount: 50 + bew: + denom: uakt + amount: 50 +deployment: + web: + westcoast: + profile: bew + count: 2 diff --git a/go/sdl/_testdata/v2.1-simple3.yaml b/go/sdl/_testdata/v2.1-simple3.yaml new file mode 100644 index 00000000..b4d8713a --- /dev/null +++ b/go/sdl/_testdata/v2.1-simple3.yaml @@ -0,0 +1,45 @@ +--- +version: "2.1" + +services: + app: + image: ubuntu:22.04 + command: + - "sh" + - "-c" + args: + - 'sleep infinity' + expose: + - port: 80 + as: 80 + to: + - global: true + - port: 22 + as: 22 + to: + - global: true + +profiles: + compute: + app: + resources: + cpu: + units: 1 + memory: + size: 2Gi + storage: + size: 10Gi + placement: + akash: + attributes: + host: akash + pricing: + app: + denom: uakt + amount: 1000000 + +deployment: + app: + akash: + profile: app + count: 1 diff --git a/go/sdl/_testdata/v2.1-simple4.yaml b/go/sdl/_testdata/v2.1-simple4.yaml new file mode 100644 index 00000000..6a899a90 --- /dev/null +++ b/go/sdl/_testdata/v2.1-simple4.yaml @@ -0,0 +1,90 @@ +--- +version: '2.1' +services: + wordpress: + image: wordpress + depends_on: + - db + expose: + - port: 80 + http_options: + max_body_size: 104857600 + # accept: + # - "example.com" + to: + - global: true + env: + 
- WORDPRESS_DB_HOST=db + - WORDPRESS_DB_USER=wordpress + - WORDPRESS_DB_PASSWORD=testpass4you + - WORDPRESS_DB_NAME=wordpress + params: + storage: + wordpress-data: + mount: /var/www/html + readOnly: false + db: + # We use a mariadb image which supports both amd64 & arm64 architecture + image: mariadb:10.6.4 + # If you really want to use MySQL, uncomment the following line + #image: mysql:8.0.27 + expose: + - port: 3306 + to: + - service: wordpress + env: + - MYSQL_RANDOM_ROOT_PASSWORD=1 + - MYSQL_DATABASE=wordpress + - MYSQL_USER=wordpress + - MYSQL_PASSWORD=testpass4you + params: + storage: + wordpress-db: + mount: /var/lib/mysql + readOnly: false +profiles: + compute: + wordpress: + resources: + cpu: + units: 1 + memory: + size: 1Gi + storage: + - size: 1Gi + - name: wordpress-data + size: 1Gi + attributes: + persistent: true + class: beta3 + db: + resources: + cpu: + units: 1 + memory: + size: 1Gi + storage: + - size: 1Gi + - name: wordpress-db + size: 1Gi + attributes: + persistent: true + class: beta3 + placement: + akash: + pricing: + wordpress: + denom: uakt + amount: 10000 + db: + denom: uakt + amount: 10000 +deployment: + wordpress: + akash: + profile: wordpress + count: 1 + db: + akash: + profile: db + count: 1 diff --git a/go/sdl/coin.go b/go/sdl/coin.go new file mode 100644 index 00000000..20fb7a3f --- /dev/null +++ b/go/sdl/coin.go @@ -0,0 +1,55 @@ +package sdl + +import ( + "errors" + "fmt" + + "cosmossdk.io/math" + + sdk "github.com/cosmos/cosmos-sdk/types" + "gopkg.in/yaml.v3" +) + +// v2Coin is an alias sdk.Coin to allow our custom UnmarshalYAML +// for now it supports PoC when actual pricing is specified as two fields +// aka amount and denom. we let UnmarshalYAML to deal with that and put result +// into Value field. +// discussion https://github.com/akash-network/node/issues/771 +type v2Coin struct { + Value sdk.DecCoin `yaml:"-"` +} + +var errInvalidCoinAmount = errors.New("invalid coin amount") + +func (sdl *v2Coin) UnmarshalYAML(node *yaml.Node) error { + parsedCoin := struct { + Amount string `yaml:"amount"` + Denom string `yaml:"denom"` + }{} + + if err := node.Decode(&parsedCoin); err != nil { + return err + } + + amount, err := math.LegacyNewDecFromStr(parsedCoin.Amount) + if err != nil { + return err + } + + if amount.IsZero() { + return fmt.Errorf("%w: amount is zero", errInvalidCoinAmount) + } + + // Never pass negative amounts to cosmos SDK DecCoin + if amount.IsNegative() { + return fmt.Errorf("%w: amount %q is negative", errNegativeValue, amount.String()) + } + + coin := sdk.NewDecCoinFromDec(parsedCoin.Denom, amount) + + *sdl = v2Coin{ + Value: coin, + } + + return nil +} diff --git a/go/sdl/coin_test.go b/go/sdl/coin_test.go new file mode 100644 index 00000000..45cc00a3 --- /dev/null +++ b/go/sdl/coin_test.go @@ -0,0 +1,46 @@ +package sdl + +import ( + "testing" + + "gopkg.in/yaml.v3" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func TestPricing(t *testing.T) { + lessThanOne, err := math.LegacyNewDecFromStr("0.7") + require.NoError(t, err) + tests := []struct { + text string + value sdk.DecCoin + err bool + }{ + {"amount: 1\ndenom: uakt", sdk.NewDecCoin("uakt", math.NewInt(1)), false}, + {"amount: -1\ndenom: uakt", sdk.NewDecCoin("uakt", math.NewInt(1)), true}, + {"amount: 0.7\ndenom: uakt", sdk.NewDecCoinFromDec("uakt", lessThanOne), false}, + {"amount: -0.7\ndenom: uakt", sdk.NewDecCoin("uakt", math.NewInt(0)), true}, + } + + for idx, test := 
range tests { + buf := []byte(test.text) + obj := &v2Coin{} + + err := yaml.Unmarshal(buf, obj) + + if test.err { + assert.Error(t, err, "idx:%v text:`%v`", idx, test.text) + continue + } + + if !assert.NoError(t, err, "idx:%v text:`%v`", idx, test.text) { + continue + } + + assert.Equal(t, test.value, obj.Value, "idx:%v text:`%v`", idx, test.text) + } +} diff --git a/go/sdl/cpu.go b/go/sdl/cpu.go new file mode 100644 index 00000000..344636d1 --- /dev/null +++ b/go/sdl/cpu.go @@ -0,0 +1,44 @@ +package sdl + +import ( + "fmt" + "sort" + + "gopkg.in/yaml.v3" + + types "pkg.akt.dev/go/node/types/attributes/v1" +) + +type v2CPUAttributes types.Attributes + +type v2ResourceCPU struct { + Units cpuQuantity `yaml:"units"` + Attributes v2CPUAttributes `yaml:"attributes,omitempty"` +} + +func (sdl *v2CPUAttributes) UnmarshalYAML(node *yaml.Node) error { + var attr v2CPUAttributes + + for i := 0; i+1 < len(node.Content); i += 2 { + var value string + switch node.Content[i].Value { + case "arch": + if err := node.Content[i+1].Decode(&value); err != nil { + return err + } + default: + return fmt.Errorf("unsupported cpu attribute \"%s\"", node.Content[i].Value) + } + + attr = append(attr, types.Attribute{ + Key: node.Content[i].Value, + Value: value, + }) + } + + sort.Sort(types.Attributes(attr)) + + *sdl = attr + + return nil +} diff --git a/go/sdl/cpu_test.go b/go/sdl/cpu_test.go new file mode 100644 index 00000000..2e896f1a --- /dev/null +++ b/go/sdl/cpu_test.go @@ -0,0 +1,24 @@ +package sdl + +import ( + "testing" + + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v3" +) + +func TestV2ResourceCPU_Valid(t *testing.T) { + var stream = ` +units: 0.1 +attributes: + arch: amd64 +` + var p v2ResourceCPU + + err := yaml.Unmarshal([]byte(stream), &p) + require.NoError(t, err) + require.Equal(t, cpuQuantity(100), p.Units) + require.Equal(t, 1, len(p.Attributes)) + require.Equal(t, "arch", p.Attributes[0].Key) + require.Equal(t, "amd64", p.Attributes[0].Value) +} diff --git a/go/sdl/expose.go b/go/sdl/expose.go new file mode 100644 index 00000000..865fd32f --- /dev/null +++ b/go/sdl/expose.go @@ -0,0 +1,115 @@ +package sdl + +import ( + "net/url" + "sort" + + "gopkg.in/yaml.v3" + + manifest "pkg.akt.dev/go/manifest/v2beta3" +) + +type v2Accept struct { + Items []string `yaml:"items,omitempty"` +} + +func (p *v2Accept) UnmarshalYAML(node *yaml.Node) error { + var accept []string + if err := node.Decode(&accept); err != nil { + return err + } + + for _, item := range accept { + if _, err := url.ParseRequestURI("http://" + item); err != nil { + return err + } + } + + p.Items = accept + + return nil +} + +func (sdl v2Exposes) toManifestExpose(endpointNames map[string]uint32) (manifest.ServiceExposes, error) { + exposeCount := 0 + for _, expose := range sdl { + if len(expose.To) > 0 { + exposeCount += len(expose.To) + } else { + exposeCount++ + } + } + + res := make(manifest.ServiceExposes, 0, exposeCount) + + for _, expose := range sdl { + exp, err := expose.toManifestExposes(endpointNames) + if err != nil { + return nil, err + } + + res = append(res, exp...) 
+ } + + sort.Sort(res) + + return res, nil +} + +func (sdl v2Expose) toManifestExposes(endpointNames map[string]uint32) (manifest.ServiceExposes, error) { + exposeCount := len(sdl.To) + if exposeCount == 0 { + exposeCount = 1 + } + + res := make(manifest.ServiceExposes, 0, exposeCount) + + proto, err := manifest.ParseServiceProtocol(sdl.Proto) + if err != nil { + return nil, err + } + + httpOptions, err := sdl.HTTPOptions.asManifest() + if err != nil { + return nil, err + } + + if len(sdl.To) > 0 { + for _, to := range sdl.To { + // This value is created just so it can be passed to the utility function + expose := manifest.ServiceExpose{ + Service: to.Service, + Port: sdl.Port, + ExternalPort: sdl.As, + Proto: proto, + Global: to.Global, + Hosts: sdl.Accept.Items, + HTTPOptions: httpOptions, + IP: to.IP, + } + + // Check to see if an IP endpoint is also specified + if expose.Global && len(expose.IP) != 0 { + seqNo := endpointNames[expose.IP] + expose.EndpointSequenceNumber = seqNo + } + + res = append(res, expose) + } + } else { + expose := manifest.ServiceExpose{ + Service: "", + Port: sdl.Port, + ExternalPort: sdl.As, + Proto: proto, + Global: false, + Hosts: sdl.Accept.Items, + HTTPOptions: httpOptions, + IP: "", + } + + res = append(res, expose) + } + + return res, nil +} diff --git a/go/sdl/full_test.go b/go/sdl/full_test.go new file mode 100644 index 00000000..108710f3 --- /dev/null +++ b/go/sdl/full_test.go @@ -0,0 +1,66 @@ +package sdl + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFull(t *testing.T) { + stream := ` +version: "2.0" +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + as: 80 + accept: + - hello.localhost + to: + - global: true + params: + storage: + data: + mount: "/var/lib/demo-app/data" +profiles: + compute: + web: + resources: + cpu: + units: 0.1 + attributes: + arch: amd64 + gpu: + units: 1 + attributes: + vendor: + nvidia: + - model: a100 + memory: + size: 16Mi + storage: + - size: 128Mi + - name: data + size: 1Gi + attributes: + persistent: true + class: default + placement: + westcoast: + attributes: + region: us-west + pricing: + web: + amount: 1 + denom: uakt +deployment: + web: + westcoast: + profile: web + count: 1 +` + + _, err := Read([]byte(stream)) + require.NoError(t, err) +} diff --git a/go/sdl/go.mod b/go/sdl/go.mod new file mode 100644 index 00000000..543a4cbe --- /dev/null +++ b/go/sdl/go.mod @@ -0,0 +1,154 @@ +module pkg.akt.dev/go/sdl + +go 1.22.2 + +toolchain go1.22.5 + +require ( + cosmossdk.io/math v1.3.0 + github.com/blang/semver/v4 v4.0.0 + github.com/cosmos/cosmos-sdk v0.47.16-ics-lsm + github.com/stretchr/testify v1.9.0 + gopkg.in/yaml.v3 v3.0.1 + pkg.akt.dev/go v0.0.1-rc12 +) + +replace ( + // Use regen gogoproto tag + // To be replaced by cosmos/gogoproto in future versions + github.com/gogo/protobuf => github.com/cosmos/gogoproto v1.3.3-alpha.regen.1 + + //// replace broken goleveldb + github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 + + // stick with compatible version of rapid in v0.47.x line + pgregory.net/rapid => pgregory.net/rapid v0.5.5 +) + +require ( + cosmossdk.io/api v0.3.1 // indirect + cosmossdk.io/core v0.5.1 // indirect + cosmossdk.io/depinject v1.0.0-alpha.4 // indirect + cosmossdk.io/errors v1.0.1 // indirect + filippo.io/edwards25519 v1.1.0 // indirect + github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect + github.com/99designs/keyring v1.2.2 // indirect + 
github.com/ChainSafe/go-schnorrkel v1.0.0 // indirect + github.com/armon/go-metrics v0.4.1 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 // indirect + github.com/btcsuite/btcd/btcec/v2 v2.3.2 // indirect + github.com/cespare/xxhash v1.1.0 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cockroachdb/errors v1.10.0 // indirect + github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect + github.com/cockroachdb/redact v1.1.5 // indirect + github.com/cometbft/cometbft v0.37.6 // indirect + github.com/cometbft/cometbft-db v0.7.0 // indirect + github.com/confio/ics23/go v0.9.1 // indirect + github.com/cosmos/btcutil v1.0.5 // indirect + github.com/cosmos/cosmos-proto v1.0.0-beta.5 // indirect + github.com/cosmos/go-bip39 v1.0.0 // indirect + github.com/cosmos/gogoproto v1.4.12 // indirect + github.com/cosmos/iavl v0.20.1 // indirect + github.com/cosmos/ledger-cosmos-go v0.12.4 // indirect + github.com/danieljoos/wincred v1.1.2 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 // indirect + github.com/dgraph-io/badger/v2 v2.2007.4 // indirect + github.com/dgraph-io/ristretto v0.1.1 // indirect + github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/dvsekhvalnov/jose2go v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/getsentry/sentry-go v0.23.0 // indirect + github.com/go-kit/kit v0.12.0 // indirect + github.com/go-kit/log v0.2.1 // indirect + github.com/go-logfmt/logfmt v0.5.1 // indirect + github.com/go-logr/logr v1.4.1 // indirect + github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 // indirect + github.com/gogo/protobuf v1.3.3 // indirect + github.com/golang/glog v1.2.0 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/snappy v0.0.4 // indirect + github.com/google/btree v1.1.2 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/gorilla/websocket v1.5.1 // indirect + github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect + github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect + github.com/gtank/merlin v0.1.1 // indirect + github.com/gtank/ristretto255 v0.1.2 // indirect + github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect + github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c // indirect + github.com/hdevalence/ed25519consensus v0.1.0 // indirect + github.com/huandu/skiplist v1.2.0 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/jmhodges/levigo v1.0.1-0.20191019112844-b572e7f4cdac // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.17.0 // indirect + github.com/kr/pretty v0.3.1 // indirect + github.com/kr/text v0.2.0 // indirect + github.com/libp2p/go-buffer-pool v0.1.0 // indirect + github.com/magiconair/properties v1.8.7 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/mtibben/percent v0.2.1 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.16.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect + github.com/rogpeppe/go-internal v1.11.0 // indirect + github.com/sagikazarmark/locafero v0.4.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sasha-s/go-deadlock v0.3.1 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect + github.com/spf13/afero v1.11.0 // indirect + github.com/spf13/cast v1.6.0 // indirect + github.com/spf13/cobra v1.8.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + github.com/spf13/viper v1.18.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect + github.com/syndtr/goleveldb v1.0.1-0.20220721030215-126854af5e6d // indirect + github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c // indirect + github.com/tendermint/go-amino v0.16.0 // indirect + github.com/tidwall/btree v1.6.0 // indirect + github.com/zondax/hid v0.9.2 // indirect + github.com/zondax/ledger-go v0.14.3 // indirect + go.etcd.io/bbolt v1.3.7 // indirect + go.uber.org/atomic v1.10.0 // indirect + go.uber.org/multierr v1.9.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/exp v0.0.0-20231006140011-7918f672742d // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/term v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect + google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 // indirect + google.golang.org/grpc v1.64.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/ini.v1 v1.67.0 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + k8s.io/api v0.30.1 // indirect + k8s.io/apimachinery v0.30.1 // indirect + k8s.io/klog/v2 v2.120.1 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + pgregory.net/rapid v1.1.0 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/go/sdl/go.sum b/go/sdl/go.sum new file mode 100644 index 00000000..968609fe --- /dev/null +++ b/go/sdl/go.sum @@ -0,0 +1,628 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cosmossdk.io/api v0.3.1 h1:NNiOclKRR0AOlO4KIqeaG6PS6kswOMhHD0ir0SscNXE= +cosmossdk.io/api v0.3.1/go.mod h1:DfHfMkiNA2Uhy8fj0JJlOCYOBp4eWUUJ1te5zBGNyIw= +cosmossdk.io/core v0.5.1 h1:vQVtFrIYOQJDV3f7rw4pjjVqc1id4+mE0L9hHP66pyI= +cosmossdk.io/core v0.5.1/go.mod h1:KZtwHCLjcFuo0nmDc24Xy6CRNEL9Vl/MeimQ2aC7NLE= +cosmossdk.io/depinject v1.0.0-alpha.4 h1:PLNp8ZYAMPTUKyG9IK2hsbciDWqna2z1Wsl98okJopc= 
+cosmossdk.io/depinject v1.0.0-alpha.4/go.mod h1:HeDk7IkR5ckZ3lMGs/o91AVUc7E596vMaOmslGFM3yU= +cosmossdk.io/errors v1.0.1 h1:bzu+Kcr0kS/1DuPBtUFdWjzLqyUuCiyHjyJB6srBV/0= +cosmossdk.io/errors v1.0.1/go.mod h1:MeelVSZThMi4bEakzhhhE/CKqVv3nOJDA25bIqRDu/U= +cosmossdk.io/log v1.3.1 h1:UZx8nWIkfbbNEWusZqzAx3ZGvu54TZacWib3EzUYmGI= +cosmossdk.io/log v1.3.1/go.mod h1:2/dIomt8mKdk6vl3OWJcPk2be3pGOS8OQaLUM/3/tCM= +cosmossdk.io/math v1.3.0 h1:RC+jryuKeytIiictDslBP9i1fhkVm6ZDmZEoNP316zE= +cosmossdk.io/math v1.3.0/go.mod h1:vnRTxewy+M7BtXBNFybkuhSH4WfedVAAnERHgVFhp3k= +cosmossdk.io/tools/rosetta v0.2.1 h1:ddOMatOH+pbxWbrGJKRAawdBkPYLfKXutK9IETnjYxw= +cosmossdk.io/tools/rosetta v0.2.1/go.mod h1:Pqdc1FdvkNV3LcNIkYWt2RQY6IP1ge6YWZk8MhhO9Hw= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 h1:/vQbFIOMbk2FiG/kXiLl8BRyzTWDw7gX/Hz7Dd5eDMs= +github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4/go.mod h1:hN7oaIRCjzsZ2dE+yG5k+rsdt3qcwykqK6HVGcKwsw4= +github.com/99designs/keyring v1.2.2 h1:pZd3neh/EmUzWONb35LxQfvuY7kiSXAq3HQd97+XBn0= +github.com/99designs/keyring v1.2.2/go.mod h1:wes/FrByc8j7lFOAGLGSNEg8f/PaI3cgTBqhFkHUrPk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/ChainSafe/go-schnorrkel v1.0.0 h1:3aDA67lAykLaG1y3AOjs88dMxC88PgUuHRrLeDnvGIM= +github.com/ChainSafe/go-schnorrkel v1.0.0/go.mod h1:dpzHYVxLZcp8pjlV+O+UR8K0Hp/z7vcchBSbMBEhCw4= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= +github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA= +github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816 h1:41iFGWnSlI2gVpmOtVTJZNodLdLQLn/KsJqFvXwnd/s= +github.com/bgentry/speakeasy v0.1.1-0.20220910012023-760eaf8b6816/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= 
+github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/boz/go-lifecycle v0.1.1 h1:tG/wff7Zxbkf19g4D4I0G8Y4sq83iT5QjD4rzEf/zrI= +github.com/boz/go-lifecycle v0.1.1/go.mod h1:zdagAUMcC2C0OmQkBlJZFV77uF4GCVaGphAexGi7oho= +github.com/btcsuite/btcd/btcec/v2 v2.3.2 h1:5n0X6hX0Zk+6omWcihdYvdAlGf2DfasC0GMf7DClJ3U= +github.com/btcsuite/btcd/btcec/v2 v2.3.2/go.mod h1:zYzJ8etWJQIv1Ogk7OzpWjowwOdXY1W/17j2MW85J04= +github.com/btcsuite/btcd/btcutil v1.1.2 h1:XLMbX8JQEiwMcYft2EGi8zPUkoa0abKIU6/BJSRsjzQ= +github.com/btcsuite/btcd/btcutil v1.1.2/go.mod h1:UR7dsSJzJUfMmFiiLlIrMq1lS9jh9EdCV7FStZSnpi0= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1 h1:q0rUy8C/TYNBQS1+CGKw68tLOFYSNEs0TFnxxnS9+4U= +github.com/btcsuite/btcd/chaincfg/chainhash v1.0.1/go.mod h1:7SFka0XMvUgj3hfZtydOrQY2mwhPclbT2snogU7SQQc= +github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= +github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/readline v1.5.1 h1:upd/6fQk4src78LMRzh5vItIt361/o4uq553V8B5sGI= +github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/errors v1.10.0 h1:lfxS8zZz1+OjtV4MtNWgboi/W5tyLEB6VQZBXN+0VUU= +github.com/cockroachdb/errors v1.10.0/go.mod h1:lknhIsEVQ9Ss/qKDBQS/UqFSvPQjOwNq2qyKAxtHRqE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= +github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= +github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= +github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/coinbase/rosetta-sdk-go/types v1.0.0 h1:jpVIwLcPoOeCR6o1tU+Xv7r5bMONNbHU7MuEHboiFuA= +github.com/coinbase/rosetta-sdk-go/types v1.0.0/go.mod h1:eq7W2TMRH22GTW0N0beDnN931DW0/WOI1R2sdHNHG4c= +github.com/cometbft/cometbft v0.37.6 h1:2BSD0lGPbcIyRd99Pf1zH0Sa8o0pbfqVWEDbZ4Ec2Uc= +github.com/cometbft/cometbft v0.37.6/go.mod h1:5FRkFil9uagHZogIX9x8z51c3GIPpQmdIN8Mq46HfzY= +github.com/cometbft/cometbft-db v0.7.0 h1:uBjbrBx4QzU0zOEnU8KxoDl18dMNgDh+zZRUE0ucsbo= +github.com/cometbft/cometbft-db v0.7.0/go.mod h1:yiKJIm2WKrt6x8Cyxtq9YTEcIMPcEe4XPxhgX59Fzf0= +github.com/confio/ics23/go 
v0.9.1 h1:3MV46eeWwO3xCauKyAtuAdJYMyPnnchW4iLr2bTw6/U= +github.com/confio/ics23/go v0.9.1/go.mod h1:4LPZ2NYqnYIVRklaozjNR1FScgDJ2s5Xrp+e/mYVRak= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/cosmos/btcutil v1.0.5 h1:t+ZFcX77LpKtDBhjucvnOH8C2l2ioGsBNEQ3jef8xFk= +github.com/cosmos/btcutil v1.0.5/go.mod h1:IyB7iuqZMJlthe2tkIFL33xPyzbFYP0XVdS8P5lUPis= +github.com/cosmos/cosmos-proto v1.0.0-beta.5 h1:eNcayDLpip+zVLRLYafhzLvQlSmyab+RC5W7ZfmxJLA= +github.com/cosmos/cosmos-proto v1.0.0-beta.5/go.mod h1:hQGLpiIUloJBMdQMMWb/4wRApmI9hjHH05nefC0Ojec= +github.com/cosmos/cosmos-sdk v0.47.16-ics-lsm h1:+mlfnZ4Cs8HMw9xy7Epjv56avptYSTsX3TVlUDX3Qcs= +github.com/cosmos/cosmos-sdk v0.47.16-ics-lsm/go.mod h1:uzvMwHXmuRDSOaF8ec9HickjLHJcItWBREdkaDHcPiE= +github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= +github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= +github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= +github.com/cosmos/gogogateway v1.2.0 h1:Ae/OivNhp8DqBi/sh2A8a1D0y638GpL3tkmLQAiKxTE= +github.com/cosmos/gogogateway v1.2.0/go.mod h1:iQpLkGWxYcnCdz5iAdLcRBSw3h7NXeOkZ4GUkT+tbFI= +github.com/cosmos/gogoproto v1.3.3-alpha.regen.1 h1:Qmv/wAw4xHnjN5iZ9qHergfk1O7nnYl7ZsIY5lF+E9k= +github.com/cosmos/gogoproto v1.3.3-alpha.regen.1/go.mod h1:2DjTFR1HhMQhiWC5sZ4OhQ3+NtdbZ6oBDKQwq5Ou+FI= +github.com/cosmos/gogoproto v1.4.12 h1:vB6Lbe/rtnYGjQuFxkPiPYiCybqFT8QvLipDZP8JpFE= +github.com/cosmos/gogoproto v1.4.12/go.mod h1:LnZob1bXRdUoqMMtwYlcR3wjiElmlC+FkjaZRv1/eLY= +github.com/cosmos/iavl v0.20.1 h1:rM1kqeG3/HBT85vsZdoSNsehciqUQPWrR4BYmqE2+zg= +github.com/cosmos/iavl v0.20.1/go.mod h1:WO7FyvaZJoH65+HFOsDir7xU9FWk2w9cHXNW1XHcl7A= +github.com/cosmos/ledger-cosmos-go v0.12.4 h1:drvWt+GJP7Aiw550yeb3ON/zsrgW0jgh5saFCr7pDnw= +github.com/cosmos/ledger-cosmos-go v0.12.4/go.mod h1:fjfVWRf++Xkygt9wzCsjEBdjcf7wiiY35fv3ctT+k4M= +github.com/cosmos/rosetta-sdk-go v0.10.0 h1:E5RhTruuoA7KTIXUcMicL76cffyeoyvNybzUGSKFTcM= +github.com/cosmos/rosetta-sdk-go v0.10.0/go.mod h1:SImAZkb96YbwvoRkzSMQB6noNJXFgWl/ENIznEoYQI4= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/creachadair/taskgroup v0.3.2 h1:zlfutDS+5XG40AOxcHDSThxKzns8Tnr9jnr6VqkYlkM= +github.com/creachadair/taskgroup v0.3.2/go.mod h1:wieWwecHVzsidg2CsUnFinW1faVN4+kq+TDlRJQ0Wbk= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= +github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0= 
+github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0 h1:HbphB4TFFXpv7MNrT52FGrrgVXF1owhMVTHFZIlnvd4= +github.com/decred/dcrd/dcrec/secp256k1/v4 v4.1.0/go.mod h1:DZGJHZMqrU4JJqFAWUS2UO1+lbSKsdiOoYi9Zzey7Fc= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f h1:U5y3Y5UE0w7amNe7Z5G/twsBW0KEalRQXZzf8ufSh9I= +github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFMt8koVQZ6WFms69WAsDWr2XsYL3Hkl7jkoLE= +github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= +github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= +github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= +github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= +github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= +github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= +github.com/edwingeng/deque/v2 v2.1.1 h1:+xjC3TnaeMPLZMi7QQf9jN2K00MZmTwruApqplbL9IY= +github.com/edwingeng/deque/v2 v2.1.1/go.mod h1:HukI8CQe9KDmZCcURPZRYVYjH79Zy2tIjTF9sN3Bgb0= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.14.6 
h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= +github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/getsentry/sentry-go v0.23.0 h1:dn+QRCeJv4pPt9OjVXiMcGIBIefaTJPw/h0bZWO05nE= +github.com/getsentry/sentry-go v0.23.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= +github.com/go-kit/log v0.2.1 h1:MRVx0/zhvdseW+Gza6N9rVzU/IVzaeE1SFI4raAhmBU= +github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= +github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= +github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68= +github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= +github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf 
v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= +github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4= +github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU= +github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod 
h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0= +github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= +github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= +github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= +github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= +github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= +github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= +github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= +github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c h1:PdZEHcpa3117kJ1Wa5EYupzCzn9QlBby8Fx2YpZPYvo= +github.com/hashicorp/hcl v1.0.1-0.20191016231534-914dc3f8dd7c/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hdevalence/ed25519consensus v0.1.0 h1:jtBwzzcHuTmFrQN6xQZn6CQEO/V9f7HsjsjeEZ6auqU= +github.com/hdevalence/ed25519consensus v0.1.0/go.mod h1:w3BHWjwJbFU29IRHL1Iqkw3sus+7FctEyM4RqDxYNzo= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/huandu/go-assert v1.1.5 h1:fjemmA7sSfYHJD7CUqs9qTwwfdNAx7/j2/ZlHXzNB3c= +github.com/huandu/go-assert v1.1.5/go.mod h1:yOLvuqZwmcHIC5rIzrBhT7D3Q9c3GFnd0JrPVhn/06U= +github.com/huandu/skiplist v1.2.0 h1:gox56QD77HzSC0w+Ws3MH3iie755GBJU1OER3h5VsYw= +github.com/huandu/skiplist v1.2.0/go.mod h1:7v3iFjLcSAzO4fN5B8dvebvo/qsfumiLiDXMrPiHF9w= +github.com/improbable-eng/grpc-web v0.15.0 h1:BN+7z6uNXZ1tQGcNAuaU1YjsLTApzkjt2tzCixLaUPQ= +github.com/improbable-eng/grpc-web v0.15.0/go.mod h1:1sy9HKV4Jt9aEs9JSnkWlRJPuPtwNr0l57L4f878wP8= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/jmhodges/levigo v1.0.1-0.20191019112844-b572e7f4cdac h1:GcJkaxD5Wy/Ucn+L0USlpbGJy9O6+7r0nBI7ftJ7Uu0= +github.com/jmhodges/levigo v1.0.1-0.20191019112844-b572e7f4cdac/go.mod h1:dM7ihgFM8Do6WGIfOXWPgpJ+4bKGR/4ZkYh8HKDdFy4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= 
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lib/pq v1.10.7 h1:p7ZhMD+KsSRozJr34udlUrhboJwWAgCg34+/ZZNvZZw= +github.com/lib/pq v1.10.7/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= +github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= +github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= +github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mimoo/StrobeGo v0.0.0-20181016162300-f8f6d4d2b643/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0 h1:QRUSJEgZn2Snx0EmT/QLXibWjSUDjKWvXIT19NBVp94= +github.com/mimoo/StrobeGo v0.0.0-20210601165009-122bf33a46e0/go.mod h1:43+3pMjjKimDBf5Kr4ZFNGbLql1zKkbImw+fZbw3geM= +github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g= +github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= 
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= +github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= +github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= +github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08 h1:hDSdbBuw3Lefr6R18ax0tZ2BJeNB3NehB3trOwYBsdU= +github.com/petermattis/goid v0.0.0-20230317030725-371a4b8eda08/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= 
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/rakyll/statik v0.1.7 h1:OF3QCZUuyPxuGEP7B4ypUa7sB/iHtqOTDYZXGM8KOdQ= +github.com/rakyll/statik v0.1.7/go.mod h1:AlZONWzMtEnMs7W4e/1LURLiI49pIMmp6V9Unghqrcc= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= +github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= 
+github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= +github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/rs/zerolog v1.32.0 h1:keLypqrlIjaFsbmJOBdB/qvyF8KEtCWHwobLp5l/mQ0= +github.com/rs/zerolog v1.32.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= +github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sasha-s/go-deadlock v0.3.1 h1:sqv7fDNShgjcaxkO0JNcOAlr8B9+cV5Ey/OB71efZx0= +github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= +github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= +github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= +github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= +github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod 
h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= +github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzHWCjJB1zZfXPIAaDpzXIEJ0eS6B5Ok= +github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= +github.com/tendermint/go-amino v0.16.0 h1:GyhmgQKvqF82e2oZeuMSp9JTN0N09emoSZlb2lyGa2E= +github.com/tendermint/go-amino v0.16.0/go.mod h1:TQU0M1i/ImAo+tYpZi73AU3V/dKeCoMC9Sphe2ZwGME= +github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= +github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= +github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/zondax/hid v0.9.2 h1:WCJFnEDMiqGF64nlZz28E9qLVZ0KSJ7xpc5DLEyma2U= +github.com/zondax/hid v0.9.2/go.mod h1:l5wttcP0jwtdLjqjMMWFVEE7d1zO0jvSPA9OPZxWpEM= +github.com/zondax/ledger-go v0.14.3 h1:wEpJt2CEcBJ428md/5MgSLsXLBos98sBOyxNmCjfUCw= +github.com/zondax/ledger-go v0.14.3/go.mod h1:IKKaoxupuB43g4NxeQmbLXv7T9AlQyie1UpHb342ycI= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.step.sm/crypto v0.45.1 h1:Xb8XldsbqT6pDYsg46BVPP1euASNbeNAhzrlvUP3QWo= +go.step.sm/crypto v0.45.1/go.mod h1:XtJBuMuZb11YeJpG8uP3fyBl2MerXWJ/pWQX/Au+Kt8= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= +go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= +go.uber.org/multierr 
v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= +golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200813134508-3edf25e44fcc/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net 
v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210819135213-f52c844e1c1c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.20.0 
h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200324203455-a04cca1dde73/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= +google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= +google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8 h1:W5Xj/70xIA4x60O/IFyXivR5MGqblAb8R3w26pnD6No= +google.golang.org/genproto/googleapis/api v0.0.0-20240513163218-0867130af1f8/go.mod h1:vPrPUTsDCYxXWjP7clS81mZ6/803D8K4iM9Ma27VKas= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8 h1:mxSlqyb8ZAHsYDCfiXN1EDdNTdvjUJSLY+OnAUtYNYA= 
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240513163218-0867130af1f8/go.mod h1:I7Y+G38R2bu5j1aLzfFmQfTcU/WnFuqDwLZAbvKTKpM= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= +gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 
h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= +gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.30.1 h1:kCm/6mADMdbAxmIh0LBjS54nQBE+U4KmbCfIkF5CpJY= +k8s.io/api v0.30.1/go.mod h1:ddbN2C0+0DIiPntan/bye3SW3PdwLa11/0yqwvuRrJM= +k8s.io/apimachinery v0.30.1 h1:ZQStsEfo4n65yAdlGTfP/uSHMQSoYzU/oeEbkmF7P2U= +k8s.io/apimachinery v0.30.1/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= +k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= +k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= +nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +pgregory.net/rapid v0.5.5 h1:jkgx1TjbQPD/feRoK+S/mXw9e1uj6WilpHrXJowi6oA= +pgregory.net/rapid v0.5.5/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= +pkg.akt.dev/go v0.0.1-rc12 h1:rAQrWdFxg9wvTg3bYNn40ALazu5PASrn3wK4gjE66F0= +pkg.akt.dev/go v0.0.1-rc12/go.mod h1:nuijviJzKCbydXYxtdVCE2vvlmFCzgkxcuHV88wAJFE= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/go/sdl/gpu.go b/go/sdl/gpu.go new file mode 100644 index 00000000..17a730cd --- /dev/null +++ b/go/sdl/gpu.go @@ -0,0 +1,131 @@ +package sdl + +import ( + "fmt" + "sort" + + "gopkg.in/yaml.v3" + + types "pkg.akt.dev/go/node/types/attributes/v1" +) + +type gpuInterface string + +type v2GPUNvidia struct { + Model string `yaml:"model"` + RAM *memoryQuantity `yaml:"ram,omitempty"` + Interface *gpuInterface `yaml:"interface,omitempty"` +} + +func (sdl *v2GPUNvidia) String() string { + key := sdl.Model + if sdl.RAM != nil { + key = fmt.Sprintf("%s/ram/%s", key, sdl.RAM.StringWithSuffix("Gi")) + } + + if sdl.Interface != nil { + key = fmt.Sprintf("%s/interface/%s", key, *sdl.Interface) + } + + return key +} + +type v2GPUsNvidia []v2GPUNvidia + +type gpuVendor struct { + Nvidia v2GPUsNvidia `yaml:"nvidia,omitempty"` +} + +type v2GPUAttributes types.Attributes + +type v2ResourceGPU struct { + Units gpuQuantity `yaml:"units" json:"units"` + Attributes v2GPUAttributes `yaml:"attributes,omitempty" 
json:"attributes,omitempty"` +} + +func (sdl *v2ResourceGPU) UnmarshalYAML(node *yaml.Node) error { + res := v2ResourceGPU{} + + for i := 0; i < len(node.Content); i += 2 { + switch node.Content[i].Value { + case "units": + if err := node.Content[i+1].Decode(&res.Units); err != nil { + return err + } + case "attributes": + if err := node.Content[i+1].Decode(&res.Attributes); err != nil { + return err + } + default: + return fmt.Errorf("sdl: unsupported field (%s) for GPU resource", node.Content[i].Value) + } + } + + if res.Units > 0 && len(res.Attributes) == 0 { + return fmt.Errorf("sdl: GPU attributes must be present if units > 0") + } + + *sdl = res + + return nil +} + +func (sdl *v2GPUAttributes) UnmarshalYAML(node *yaml.Node) error { + var res types.Attributes + + var vendor *gpuVendor + + for i := 0; i < len(node.Content); i += 2 { + switch node.Content[i].Value { + case "vendor": + if err := node.Content[i+1].Decode(&vendor); err != nil { + return err + } + default: + return fmt.Errorf("sdl: unsupported attribute (%s) for GPU resource", node.Content[i].Value) + } + } + + if vendor == nil { + return fmt.Errorf("sdl: invalid GPU attributes. at least one vendor must be set") + } + + res = make(types.Attributes, 0, len(vendor.Nvidia)) + + for _, model := range vendor.Nvidia { + res = append(res, types.Attribute{ + Key: fmt.Sprintf("vendor/nvidia/model/%s", model.String()), + Value: "true", + }) + } + + if len(res) == 0 { + res = append(res, types.Attribute{ + Key: "vendor/nvidia/model/*", + Value: "true", + }) + } + + sort.Sort(res) + + if err := res.Validate(); err != nil { + return fmt.Errorf("sdl: invalid GPU attributes: %w", err) + } + + *sdl = v2GPUAttributes(res) + + return nil +} + +func (sdl *gpuInterface) UnmarshalYAML(node *yaml.Node) error { + switch node.Value { + case "pcie": + case "sxm": + default: + return fmt.Errorf("sdl: invalid GPU interface %s. 
expected \"pcie|sxm\"", node.Value) + } + + *sdl = gpuInterface(node.Value) + + return nil +} diff --git a/go/sdl/gpu_test.go b/go/sdl/gpu_test.go new file mode 100644 index 00000000..8277fbc7 --- /dev/null +++ b/go/sdl/gpu_test.go @@ -0,0 +1,190 @@ +package sdl + +import ( + "testing" + + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v3" +) + +func TestV2ResourceGPU_EmptyVendor(t *testing.T) { + var stream = ` +units: 1 +attributes: + vendor: +` + var p v2ResourceGPU + + err := yaml.Unmarshal([]byte(stream), &p) + require.Error(t, err) +} + +func TestV2ResourceGPU_Wildcard(t *testing.T) { + var stream = ` +units: 1 +attributes: + vendor: + nvidia: +` + var p v2ResourceGPU + + err := yaml.Unmarshal([]byte(stream), &p) + require.NoError(t, err) + require.Equal(t, gpuQuantity(1), p.Units) + require.Equal(t, 1, len(p.Attributes)) + require.Equal(t, "vendor/nvidia/model/*", p.Attributes[0].Key) + require.Equal(t, "true", p.Attributes[0].Value) +} + +func TestV2ResourceGPU_SingleModel(t *testing.T) { + var stream = ` +units: 1 +attributes: + vendor: + nvidia: + - model: a100 +` + var p v2ResourceGPU + + err := yaml.Unmarshal([]byte(stream), &p) + require.NoError(t, err) + require.Equal(t, gpuQuantity(1), p.Units) + require.Equal(t, 1, len(p.Attributes)) + require.Equal(t, "vendor/nvidia/model/a100", p.Attributes[0].Key) + require.Equal(t, "true", p.Attributes[0].Value) +} + +func TestV2ResourceGPU_SingleModelWithRAM(t *testing.T) { + var stream = ` +units: 1 +attributes: + vendor: + nvidia: + - model: a100 + ram: 80Gi +` + var p v2ResourceGPU + + err := yaml.Unmarshal([]byte(stream), &p) + require.NoError(t, err) + require.Equal(t, gpuQuantity(1), p.Units) + require.Equal(t, 1, len(p.Attributes)) + require.Equal(t, "vendor/nvidia/model/a100/ram/80Gi", p.Attributes[0].Key) + require.Equal(t, "true", p.Attributes[0].Value) +} + +func TestV2ResourceGPU_InvalidRAMUnit(t *testing.T) { + var stream = ` +units: 1 +attributes: + vendor: + nvidia: + - model: a100 + ram: 80G +` + var p v2ResourceGPU + + err := yaml.Unmarshal([]byte(stream), &p) + require.Error(t, err) +} + +func TestV2ResourceGPU_InterfaceInvalid(t *testing.T) { + var stream = ` +units: 1 +attributes: + vendor: + nvidia: + - model: a100 + interface: pciex +` + var p v2ResourceGPU + + err := yaml.Unmarshal([]byte(stream), &p) + require.Error(t, err) +} + +func TestV2ResourceGPU_RamWithInterface(t *testing.T) { + var stream = ` +units: 1 +attributes: + vendor: + nvidia: + - model: a100 + ram: 80Gi + interface: pcie +` + var p v2ResourceGPU + + err := yaml.Unmarshal([]byte(stream), &p) + require.NoError(t, err) + require.Equal(t, gpuQuantity(1), p.Units) + require.Equal(t, 1, len(p.Attributes)) + require.Equal(t, "vendor/nvidia/model/a100/ram/80Gi/interface/pcie", p.Attributes[0].Key) + require.Equal(t, "true", p.Attributes[0].Value) +} + +func TestV2ResourceGPU_MultipleModels(t *testing.T) { + var stream = ` +units: 1 +attributes: + vendor: + nvidia: + - model: a100 + ram: 80Gi + - model: a100 + ram: 40Gi +` + var p v2ResourceGPU + + err := yaml.Unmarshal([]byte(stream), &p) + require.NoError(t, err) + require.Equal(t, gpuQuantity(1), p.Units) + require.Equal(t, 2, len(p.Attributes)) + require.Equal(t, "vendor/nvidia/model/a100/ram/40Gi", p.Attributes[0].Key) + require.Equal(t, "true", p.Attributes[0].Value) + require.Equal(t, "vendor/nvidia/model/a100/ram/80Gi", p.Attributes[1].Key) + require.Equal(t, "true", p.Attributes[1].Value) +} + +func TestV2ResourceGPU_MultipleModels2(t *testing.T) { + var stream = ` +units: 1 
+attributes: + vendor: + nvidia: + - model: a100 + ram: 80Gi + - model: a100 +` + var p v2ResourceGPU + + err := yaml.Unmarshal([]byte(stream), &p) + require.NoError(t, err) + require.Equal(t, gpuQuantity(1), p.Units) + require.Equal(t, 2, len(p.Attributes)) + require.Equal(t, "vendor/nvidia/model/a100", p.Attributes[0].Key) + require.Equal(t, "true", p.Attributes[0].Value) + require.Equal(t, "vendor/nvidia/model/a100/ram/80Gi", p.Attributes[1].Key) + require.Equal(t, "true", p.Attributes[1].Value) +} + +func TestV2ResourceGPU_MultipleModels3(t *testing.T) { + var stream = ` +units: 1 +attributes: + vendor: + nvidia: + - model: a6000 + - model: a40 +` + var p v2ResourceGPU + + err := yaml.Unmarshal([]byte(stream), &p) + require.NoError(t, err) + require.Equal(t, gpuQuantity(1), p.Units) + require.Equal(t, 2, len(p.Attributes)) + require.Equal(t, "vendor/nvidia/model/a40", p.Attributes[0].Key) + require.Equal(t, "true", p.Attributes[0].Value) + require.Equal(t, "vendor/nvidia/model/a6000", p.Attributes[1].Key) + require.Equal(t, "true", p.Attributes[1].Value) +} diff --git a/go/sdl/groupBuilder_v2.go b/go/sdl/groupBuilder_v2.go new file mode 100644 index 00000000..50a59103 --- /dev/null +++ b/go/sdl/groupBuilder_v2.go @@ -0,0 +1,149 @@ +package sdl + +import ( + "sort" + + manifest "pkg.akt.dev/go/manifest/v2beta3" + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + types "pkg.akt.dev/go/node/types/attributes/v1" +) + +type groupsBuilderV2 struct { + dgroup *dtypes.GroupSpec + mgroup *manifest.Group + boundComputes map[string]map[string]int +} + +// buildGroups +func (sdl *v2) buildGroups() error { + endpointsNames := sdl.computeEndpointSequenceNumbers() + + groups := make(map[string]*groupsBuilderV2) + + for _, svcName := range sdl.Deployments.svcNames() { + depl := sdl.Deployments[svcName] + + for _, placementName := range depl.placementNames() { + // objects below have been ensured to exist + svcdepl := depl[placementName] + compute := sdl.Profiles.Compute[svcdepl.Profile] + svc := sdl.Services[svcName] + infra := sdl.Profiles.Placement[placementName] + price := infra.Pricing[svcdepl.Profile] + + group := groups[placementName] + + if group == nil { + group = &groupsBuilderV2{ + dgroup: &dtypes.GroupSpec{ + Name: placementName, + }, + mgroup: &manifest.Group{ + Name: placementName, + }, + boundComputes: make(map[string]map[string]int), + } + + group.dgroup.Requirements.Attributes = types.Attributes(infra.Attributes) + group.dgroup.Requirements.SignedBy = infra.SignedBy + + // keep ordering stable + sort.Sort(group.dgroup.Requirements.Attributes) + + groups[placementName] = group + } + + if _, exists := group.boundComputes[placementName]; !exists { + group.boundComputes[placementName] = make(map[string]int) + } + + expose, err := sdl.Services[svcName].Expose.toManifestExpose(endpointsNames) + if err != nil { + return err + } + + resources := compute.Resources.toResources() + resources.Endpoints = expose.GetEndpoints() + + res := compute.Resources.toResources() + res.Endpoints = expose.GetEndpoints() + + var resID uint32 + if ln := uint32(len(group.dgroup.Resources)); ln > 0 { // nolint: gosec + resID = ln + 1 + } else { + resID = 1 + } + + res.ID = resID + resources.ID = res.ID + + group.dgroup.Resources = append(group.dgroup.Resources, dtypes.ResourceUnit{ + Resources: res, + Price: price.Value, + Count: svcdepl.Count, + }) + + group.boundComputes[placementName][svcdepl.Profile] = len(group.dgroup.Resources) - 1 + + msvc := manifest.Service{ + Name: svcName, + Image: svc.Image, + Args: 
svc.Args, + Env: svc.Env, + Resources: resources, + Count: svcdepl.Count, + Command: svc.Command, + Expose: expose, + } + + if svc.Params != nil { + params := &manifest.ServiceParams{} + + if len(svc.Params.Storage) > 0 { + params.Storage = make([]manifest.StorageParams, 0, len(svc.Params.Storage)) + for volName, volParams := range svc.Params.Storage { + params.Storage = append(params.Storage, manifest.StorageParams{ + Name: volName, + Mount: volParams.Mount, + ReadOnly: volParams.ReadOnly, + }) + } + } + + msvc.Params = params + } + + if svc.Credentials != nil { + msvc.Credentials = &manifest.ImageCredentials{ + Host: svc.Credentials.Host, + Username: svc.Credentials.Username, + Password: svc.Credentials.Password, + } + } + + group.mgroup.Services = append(group.mgroup.Services, msvc) + } + } + + // keep ordering stable + names := make([]string, 0, len(groups)) + for name := range groups { + names = append(names, name) + } + sort.Strings(names) + + sdl.result.dgroups = make(dtypes.GroupSpecs, 0, len(names)) + sdl.result.mgroups = make(manifest.Groups, 0, len(names)) + + for _, name := range names { + mgroup := *groups[name].mgroup + // stable ordering services by name + sort.Sort(mgroup.Services) + + sdl.result.dgroups = append(sdl.result.dgroups, *groups[name].dgroup) + sdl.result.mgroups = append(sdl.result.mgroups, mgroup) + } + + return nil +} diff --git a/go/sdl/groupBuilder_v2_1.go b/go/sdl/groupBuilder_v2_1.go new file mode 100644 index 00000000..34a3ca11 --- /dev/null +++ b/go/sdl/groupBuilder_v2_1.go @@ -0,0 +1,160 @@ +package sdl + +import ( + "sort" + "strings" + + manifest "pkg.akt.dev/go/manifest/v2beta3" + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + types "pkg.akt.dev/go/node/types/attributes/v1" +) + +type groupsBuilderV2_1 struct { + dgroup *dtypes.GroupSpec + mgroup *manifest.Group + boundComputes map[string]map[string]int +} + +// buildGroups +func (sdl *v2_1) buildGroups() error { + endpointsNames := sdl.computeEndpointSequenceNumbers() + + groups := make(map[string]*groupsBuilderV2_1) + + for _, svcName := range sdl.Deployments.svcNames() { + depl := sdl.Deployments[svcName] + + for _, placementName := range depl.placementNames() { + // objects below have been ensured to exist + svcdepl := depl[placementName] + compute := sdl.Profiles.Compute[svcdepl.Profile] + svc := sdl.Services[svcName] + infra := sdl.Profiles.Placement[placementName] + price := infra.Pricing[svcdepl.Profile] + + group := groups[placementName] + + if group == nil { + group = &groupsBuilderV2_1{ + dgroup: &dtypes.GroupSpec{ + Name: placementName, + }, + mgroup: &manifest.Group{ + Name: placementName, + }, + boundComputes: make(map[string]map[string]int), + } + + group.dgroup.Requirements.Attributes = types.Attributes(infra.Attributes) + group.dgroup.Requirements.SignedBy = infra.SignedBy + + // keep ordering stable + sort.Sort(group.dgroup.Requirements.Attributes) + + groups[placementName] = group + } + + if _, exists := group.boundComputes[placementName]; !exists { + group.boundComputes[placementName] = make(map[string]int) + } + + expose, err := sdl.Services[svcName].Expose.toManifestExpose(endpointsNames) + if err != nil { + return err + } + + resources := compute.Resources.toResources() + resources.Endpoints = expose.GetEndpoints() + + if location, bound := group.boundComputes[placementName][svcdepl.Profile]; !bound { + res := compute.Resources.toResources() + res.Endpoints = expose.GetEndpoints() + + var resID uint32 + if ln := uint32(len(group.dgroup.Resources)); ln > 0 { // nolint: gosec 
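+				// Resource IDs are 1-based and assigned sequentially: the next ID is the current number of resources already in the group plus one.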
+ resID = ln + 1 + } else { + resID = 1 + } + + res.ID = resID + resources.ID = res.ID + + group.dgroup.Resources = append(group.dgroup.Resources, dtypes.ResourceUnit{ + Resources: res, + Price: price.Value, + Count: svcdepl.Count, + }) + + group.boundComputes[placementName][svcdepl.Profile] = len(group.dgroup.Resources) - 1 + } else { + resources.ID = group.dgroup.Resources[location].ID + + group.dgroup.Resources[location].Count += svcdepl.Count + group.dgroup.Resources[location].Endpoints = append(group.dgroup.Resources[location].Endpoints, expose.GetEndpoints()...) + + sort.Sort(group.dgroup.Resources[location].Endpoints) + } + + msvc := manifest.Service{ + Name: svcName, + Image: svc.Image, + Args: svc.Args, + Env: svc.Env, + Resources: resources, + Count: svcdepl.Count, + Command: svc.Command, + Expose: expose, + } + + if svc.Params != nil { + params := &manifest.ServiceParams{} + + if len(svc.Params.Storage) > 0 { + params.Storage = make([]manifest.StorageParams, 0, len(svc.Params.Storage)) + for volName, volParams := range svc.Params.Storage { + params.Storage = append(params.Storage, manifest.StorageParams{ + Name: volName, + Mount: volParams.Mount, + ReadOnly: volParams.ReadOnly, + }) + } + } + + msvc.Params = params + } + + if svc.Credentials != nil { + msvc.Credentials = &manifest.ImageCredentials{ + Host: strings.TrimSpace(svc.Credentials.Host), + Email: strings.TrimSpace(svc.Credentials.Email), + Username: strings.TrimSpace(svc.Credentials.Username), + Password: strings.TrimSpace(svc.Credentials.Password), + } + } + + group.mgroup.Services = append(group.mgroup.Services, msvc) + } + } + + // keep ordering stable + names := make([]string, 0, len(groups)) + for name := range groups { + names = append(names, name) + } + sort.Strings(names) + + sdl.result.dgroups = make(dtypes.GroupSpecs, 0, len(names)) + sdl.result.mgroups = make(manifest.Groups, 0, len(names)) + + for _, name := range names { + mgroup := *groups[name].mgroup + // stable ordering services by name + sort.Sort(mgroup.Services) + + sdl.result.dgroups = append(sdl.result.dgroups, *groups[name].dgroup) + sdl.result.mgroups = append(sdl.result.mgroups, mgroup) + } + + return nil +} diff --git a/go/sdl/memory.go b/go/sdl/memory.go new file mode 100644 index 00000000..6aac0153 --- /dev/null +++ b/go/sdl/memory.go @@ -0,0 +1,42 @@ +package sdl + +import ( + "sort" + + "gopkg.in/yaml.v3" + + types "pkg.akt.dev/go/node/types/attributes/v1" +) + +type v2MemoryAttributes types.Attributes + +type v2ResourceMemory struct { + Quantity byteQuantity `yaml:"size"` + Attributes v2MemoryAttributes `yaml:"-"` +} + +func (sdl *v2MemoryAttributes) UnmarshalYAML(node *yaml.Node) error { + var attr v2MemoryAttributes + + var res map[string]string + + if err := node.Decode(&res); err != nil { + return err + } + + for k, v := range res { + attr = append(attr, types.Attribute{ + Key: k, + Value: v, + }) + } + + // keys are unique in attributes parsed from sdl so don't need to use sort.SliceStable + sort.Slice(attr, func(i, j int) bool { + return attr[i].Key < attr[j].Key + }) + + *sdl = attr + + return nil +} diff --git a/go/sdl/placement.go b/go/sdl/placement.go new file mode 100644 index 00000000..397afd37 --- /dev/null +++ b/go/sdl/placement.go @@ -0,0 +1,37 @@ +package sdl + +import ( + "sort" + + "gopkg.in/yaml.v3" + + types "pkg.akt.dev/go/node/types/attributes/v1" +) + +type v2PlacementAttributes types.Attributes + +func (sdl *v2PlacementAttributes) UnmarshalYAML(node *yaml.Node) error { + var attr v2PlacementAttributes + + var res 
map[string]string + + if err := node.Decode(&res); err != nil { + return err + } + + for k, v := range res { + attr = append(attr, types.Attribute{ + Key: k, + Value: v, + }) + } + + // keys are unique in attributes parsed from sdl so don't need to use sort.SliceStable + sort.Slice(attr, func(i, j int) bool { + return attr[i].Key < attr[j].Key + }) + + *sdl = attr + + return nil +} diff --git a/go/sdl/pricing.go b/go/sdl/pricing.go new file mode 100644 index 00000000..db2f1978 --- /dev/null +++ b/go/sdl/pricing.go @@ -0,0 +1,4 @@ +package sdl + +// todo should pricing values be in form of range? +type v2PlacementPricing map[string]v2Coin diff --git a/go/sdl/resources.go b/go/sdl/resources.go new file mode 100644 index 00000000..58056a0a --- /dev/null +++ b/go/sdl/resources.go @@ -0,0 +1,60 @@ +package sdl + +import ( + types "pkg.akt.dev/go/node/types/attributes/v1" + rtypes "pkg.akt.dev/go/node/types/resources/v1beta4" +) + +type v2ComputeResources struct { + CPU *v2ResourceCPU `yaml:"cpu"` + GPU *v2ResourceGPU `yaml:"gpu"` + Memory *v2ResourceMemory `yaml:"memory"` + Storage v2ResourceStorageArray `yaml:"storage"` +} + +func (sdl *v2ComputeResources) toResources() rtypes.Resources { + if sdl == nil { + return rtypes.Resources{} + } + + units := rtypes.Resources{ + Endpoints: rtypes.Endpoints{}, + } + + if sdl.CPU != nil { + units.CPU = &rtypes.CPU{ + Units: rtypes.NewResourceValue(uint64(sdl.CPU.Units)), + Attributes: types.Attributes(sdl.CPU.Attributes), + } + } + + if sdl.GPU != nil { + units.GPU = &rtypes.GPU{ + Units: rtypes.NewResourceValue(uint64(sdl.GPU.Units)), + Attributes: types.Attributes(sdl.GPU.Attributes), + } + } else { + units.GPU = &rtypes.GPU{ + Units: rtypes.NewResourceValue(0), + } + } + + if sdl.Memory != nil { + units.Memory = &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(uint64(sdl.Memory.Quantity)), + Attributes: types.Attributes(sdl.Memory.Attributes), + } + } + + for _, storage := range sdl.Storage { + storageEntry := rtypes.Storage{ + Name: storage.Name, + Quantity: rtypes.NewResourceValue(uint64(storage.Quantity)), + Attributes: types.Attributes(storage.Attributes), + } + + units.Storage = append(units.Storage, storageEntry) + } + + return units +} diff --git a/go/sdl/sdl.go b/go/sdl/sdl.go new file mode 100644 index 00000000..741bb97a --- /dev/null +++ b/go/sdl/sdl.go @@ -0,0 +1,159 @@ +package sdl + +import ( + "errors" + "fmt" + "os" + + "github.com/blang/semver/v4" + "gopkg.in/yaml.v3" + + manifest "pkg.akt.dev/go/manifest/v2beta3" + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" +) + +const ( + sdlVersionField = "version" +) + +var ( + errUninitializedConfig = errors.New("sdl: uninitialized") + errSDLInvalidNoVersion = fmt.Errorf("%w: no version found", errSDLInvalid) +) + +// SDL is the interface which wraps Validate, Deployment and Manifest methods +type SDL interface { + DeploymentGroups() (dtypes.GroupSpecs, error) + Manifest() (manifest.Manifest, error) + Version() ([]byte, error) + validate() error +} + +var _ SDL = (*sdl)(nil) + +type sdl struct { + Ver semver.Version `yaml:"version,-"` + data SDL `yaml:"-"` +} + +func (s *sdl) UnmarshalYAML(node *yaml.Node) error { + var result sdl + + foundVersion := false + for idx := range node.Content { + if node.Content[idx].Value == sdlVersionField { + var err error + if result.Ver, err = semver.ParseTolerant(node.Content[idx+1].Value); err != nil { + return err + } + foundVersion = true + break + } + } + + if !foundVersion { + return errSDLInvalidNoVersion + } + + // nolint: gocritic + if 
result.Ver.EQ(semver.MustParse("2.0.0")) { + var decoded v2 + if err := node.Decode(&decoded); err != nil { + return err + } + + result.data = &decoded + } else if result.Ver.GE(semver.MustParse("2.1.0")) { + var decoded v2_1 + if err := node.Decode(&decoded); err != nil { + return err + } + + result.data = &decoded + } else { + return fmt.Errorf("%w: config: unsupported version %q", errSDLInvalid, result.Ver) + } + + *s = result + + return nil +} + +// ReadFile read from given path and returns SDL instance +func ReadFile(path string) (SDL, error) { + buf, err := os.ReadFile(path) + if err != nil { + return nil, err + } + return Read(buf) +} + +// Read reads buffer data and returns SDL instance +func Read(buf []byte) (SDL, error) { + obj := &sdl{} + if err := yaml.Unmarshal(buf, obj); err != nil { + return nil, err + } + + if err := obj.validate(); err != nil { + return nil, err + } + + dgroups, err := obj.DeploymentGroups() + if err != nil { + return nil, err + } + + vgroups := make([]dtypes.GroupSpec, 0, len(dgroups)) + for _, dgroup := range dgroups { + vgroups = append(vgroups, dgroup) + } + + if err := dtypes.ValidateDeploymentGroups(vgroups); err != nil { + return nil, err + } + + m, err := obj.Manifest() + if err != nil { + return nil, err + } + + if err := m.Validate(); err != nil { + return nil, err + } + + return obj, nil +} + +// Version creates the deterministic Deployment Version hash from the SDL. +func (s *sdl) Version() ([]byte, error) { + if s.data == nil { + return nil, errUninitializedConfig + } + + return s.data.Version() +} + +func (s *sdl) DeploymentGroups() (dtypes.GroupSpecs, error) { + if s.data == nil { + return dtypes.GroupSpecs{}, errUninitializedConfig + } + + return s.data.DeploymentGroups() +} + +func (s *sdl) Manifest() (manifest.Manifest, error) { + if s.data == nil { + return manifest.Manifest{}, errUninitializedConfig + } + + return s.data.Manifest() +} + +func (s *sdl) validate() error { + if s.data == nil { + return errUninitializedConfig + } + + return s.data.validate() +} diff --git a/go/sdl/sdl_test.go b/go/sdl/sdl_test.go new file mode 100644 index 00000000..2cc5821b --- /dev/null +++ b/go/sdl/sdl_test.go @@ -0,0 +1,59 @@ +package sdl + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSDLManifestVersion(t *testing.T) { + obj, err := ReadFile("_testdata/simple.yaml") + require.NoError(t, err) + + m, err := obj.Manifest() + require.NoError(t, err) + + version, err := m.Version() + require.NoError(t, err) + // Should return a value + require.NotEmpty(t, version) + + obj, err = ReadFile("_testdata/private_service.yaml") + require.NoError(t, err) + + m, err = obj.Manifest() + require.NoError(t, err) + + secondVersion, err := m.Version() + require.NoError(t, err) + // Should return a value + require.NotEmpty(t, secondVersion) + // Should be different from the first + require.NotEqual(t, secondVersion, version) +} + +func TestSDLManifestVersionChangesWithVersion(t *testing.T) { + obj, err := ReadFile("_testdata/simple.yaml") + require.NoError(t, err) + + m, err := obj.Manifest() + require.NoError(t, err) + + version, err := m.Version() + require.NoError(t, err) + // Should return a value + require.NotEmpty(t, version) + + obj, err = ReadFile("_testdata/simple-double-ram.yaml") + require.NoError(t, err) + + m, err = obj.Manifest() + require.NoError(t, err) + + secondVersion, err := m.Version() + require.NoError(t, err) + // Should return a value + require.NotEmpty(t, secondVersion) + // Should be different from the first + 
require.NotEqual(t, secondVersion, version) +} diff --git a/go/sdl/storage.go b/go/sdl/storage.go new file mode 100644 index 00000000..091cce19 --- /dev/null +++ b/go/sdl/storage.go @@ -0,0 +1,251 @@ +package sdl + +import ( + "errors" + "fmt" + "sort" + + "gopkg.in/yaml.v3" + + types "pkg.akt.dev/go/node/types/attributes/v1" +) + +const ( + StorageEphemeral = "ephemeral" + StorageAttributePersistent = "persistent" + StorageAttributeClass = "class" + StorageAttributeMount = "mount" + StorageAttributeReadOnly = "readOnly" // we might not need it at this point of time + StorageClassDefault = "default" + StorageClassRAM = "ram" +) + +var ( + errUnsupportedStorageAttribute = errors.New("sdl: unsupported storage attribute") + errStorageDupMountPoint = errors.New("sdl: duplicated mount point") + errStorageMultipleRootEphemeral = errors.New("sdl: multiple root ephemeral storages are not allowed") + errStorageDuplicatedVolumeName = errors.New("sdl: duplicated volume name") + errStorageEphemeralClass = errors.New("sdl: ephemeral storage should not set attribute class") + errStorageRAMClass = errors.New("sdl: ram storage class cannot be persistent") +) + +type v2StorageAttributes types.Attributes + +type v2ServiceStorageParams struct { + Mount string `yaml:"mount"` + ReadOnly bool `yaml:"readOnly"` +} + +type v2ResourceStorage struct { + Name string `yaml:"name"` + Quantity byteQuantity `yaml:"size"` + Attributes v2StorageAttributes `yaml:"attributes,omitempty"` +} + +type v2ResourceStorageArray []v2ResourceStorage + +type validateAttrFn func(string, *string) error + +var allowedStorageClasses = map[string]bool{ + "default": true, + "beta1": true, + "beta2": true, + "beta3": true, + StorageClassRAM: true, +} + +var validateStorageAttributes = map[string]validateAttrFn{ + StorageAttributePersistent: validateAttributeBool, + StorageAttributeClass: validateAttributeStorageClass, +} + +func validateAttributeBool(key string, val *string) error { + if res, valid := unifyStringAsBool(*val); valid { + *val = res + + return nil + } + + return fmt.Errorf("sdl: invalid value for attribute \"%s\". expected bool", key) +} + +func validateAttributeStorageClass(_ string, val *string) error { + if _, valid := allowedStorageClasses[*val]; valid { + return nil + } + + return fmt.Errorf("sdl: invalid value for attribute class") +} + +// UnmarshalYAML unmarshal storage config +// data can be present either as single entry mapping or an array of them +// nolint: gofmt +// e.g +// single entity +// ```yaml +// storage: +// +// size: 1Gi +// attributes: +// class: ssd +// +// ``` +// +// ```yaml +// storage: +// - size: 512Mi # ephemeral storage +// - size: 1Gi +// name: cache +// attributes: +// class: ssd +// - size: 100Gi +// name: data +// attributes: +// persistent: true # this volumes survives pod restart +// class: gp # aka general purpose +// +// ``` +func (sdl *v2ResourceStorageArray) UnmarshalYAML(node *yaml.Node) error { + var nodes v2ResourceStorageArray + + switch node.Kind { + case yaml.SequenceNode: + for _, content := range node.Content { + var nd v2ResourceStorage + if err := content.Decode(&nd); err != nil { + return err + } + + // set default name to ephemeral. 
later in validation error thrown if multiple + if nd.Name == "" { + nd.Name = "default" + } + nodes = append(nodes, nd) + } + case yaml.MappingNode: + var nd v2ResourceStorage + if err := node.Decode(&nd); err != nil { + return err + } + + nd.Name = "default" + nodes = append(nodes, nd) + } + + // check for duplicated volume names + names := make(map[string]string) + for _, nd := range nodes { + if _, exists := names[nd.Name]; exists { + return errStorageDuplicatedVolumeName + } + + names[nd.Name] = nd.Name + } + + nodes.sort() + + *sdl = nodes + + return nil +} + +func (sdl *v2StorageAttributes) UnmarshalYAML(node *yaml.Node) error { + var attr v2StorageAttributes + + var res map[string]string + + if err := node.Decode(&res); err != nil { + return err + } + + // set default + if _, set := res[StorageAttributePersistent]; !set { + res[StorageAttributePersistent] = valueFalse + } + + persistent := res[StorageAttributePersistent] + class := res[StorageAttributeClass] + + switch class { + case "": + if persistent == valueTrue { + res[StorageAttributeClass] = StorageClassDefault + } + case StorageClassRAM: + if persistent != valueFalse { + return errStorageRAMClass + } + default: + if persistent == valueFalse { + return errStorageEphemeralClass + } + } + + for k, v := range res { + validateFn, supportedAttr := validateStorageAttributes[k] + if !supportedAttr { + return fmt.Errorf("%w: %s", errUnsupportedStorageAttribute, k) + } + + val := v + if err := validateFn(k, &val); err != nil { + return err + } + + attr = append(attr, types.Attribute{ + Key: k, + Value: val, + }) + } + + // at this point keys are unique in attributes parsed from sdl so don't need to use sort.SliceStable + sort.Slice(attr, func(i, j int) bool { + return attr[i].Key < attr[j].Key + }) + + *sdl = attr + + return nil +} + +// sort storage slice in the following order +// 1. smaller size +// 2. if sizes are equal then one without class goes up +// 3. when both class present use lexicographic order +// 4. if no class in both cases check persistent attribute. one persistent = false goes up +// 5. 
volume name +func (sdl v2ResourceStorageArray) sort() { + sort.SliceStable(sdl, func(i, j int) bool { + if sdl[i].Quantity < sdl[j].Quantity { + return true + } + + if sdl[i].Quantity > sdl[j].Quantity { + return false + } + + iAttr := types.Attributes(sdl[i].Attributes) + jAttr := types.Attributes(sdl[j].Attributes) + + iClass, iExists := iAttr.Find(StorageAttributePersistent).AsString() + jClass, jExists := jAttr.Find(StorageAttributePersistent).AsString() + + if (!iExists && jExists) || + (jExists && iExists && iClass < jClass) { + return true + } else if iExists && !jExists { + return false + } + + iPersistent, _ := iAttr.Find(StorageAttributePersistent).AsBool() + jPersistent, _ := jAttr.Find(StorageAttributePersistent).AsBool() + + if !iPersistent { + return true + } else if !jPersistent { + return false + } + + return sdl[i].Name < sdl[j].Name + }) +} diff --git a/go/sdl/storage_test.go b/go/sdl/storage_test.go new file mode 100644 index 00000000..8649827a --- /dev/null +++ b/go/sdl/storage_test.go @@ -0,0 +1,230 @@ +package sdl + +import ( + "testing" + + "gopkg.in/yaml.v3" + + "github.com/stretchr/testify/require" + + types "pkg.akt.dev/go/node/types/attributes/v1" + "pkg.akt.dev/go/node/types/unit" +) + +func TestStorage_LegacyValid(t *testing.T) { + var stream = ` +size: 1Gi +` + var p v2ResourceStorageArray + + err := yaml.Unmarshal([]byte(stream), &p) + require.NoError(t, err) + + require.Len(t, p, 1) + require.Equal(t, byteQuantity(1*unit.Gi), p[0].Quantity) + require.Len(t, p[0].Attributes, 0) +} + +func TestStorage_ArraySingleElemValid(t *testing.T) { + var stream = ` +- size: 1Gi +` + var p v2ResourceStorageArray + + err := yaml.Unmarshal([]byte(stream), &p) + require.NoError(t, err) + + require.Len(t, p, 1) + require.Equal(t, byteQuantity(1*unit.Gi), p[0].Quantity) + require.Len(t, p[0].Attributes, 0) +} + +func TestStorage_AttributesPersistentValidClass(t *testing.T) { + var stream = ` +- size: 1Gi + attributes: + persistent: true + class: default +` + var p v2ResourceStorageArray + + err := yaml.Unmarshal([]byte(stream), &p) + require.NoError(t, err) + + require.Len(t, p, 1) + require.Equal(t, byteQuantity(1*unit.Gi), p[0].Quantity) + require.Len(t, p[0].Attributes, 2) + + attr := types.Attributes(p[0].Attributes) + require.Equal(t, attr[0].Key, "class") + require.Equal(t, attr[0].Value, "default") +} + +func TestStorage_AttributesUnknown(t *testing.T) { + var stream = ` +- size: 1Gi + attributes: + somefield: foo +` + var p v2ResourceStorageArray + + err := yaml.Unmarshal([]byte(stream), &p) + require.ErrorIs(t, err, errUnsupportedStorageAttribute) +} + +func TestStorage_MultipleUnnamedEphemeral(t *testing.T) { + var stream = ` +- size: 1Gi +- size: 2Gi +` + var p v2ResourceStorageArray + + err := yaml.Unmarshal([]byte(stream), &p) + require.EqualError(t, err, errStorageDuplicatedVolumeName.Error()) +} + +func TestStorage_EphemeralNoClass(t *testing.T) { + var stream = ` +- size: 1Gi +` + var p v2ResourceStorageArray + + err := yaml.Unmarshal([]byte(stream), &p) + require.NoError(t, err) +} + +func TestStorage_EphemeralClass(t *testing.T) { + var stream = ` +- size: 1Gi + attributes: + class: foo +` + + var p v2ResourceStorageArray + + err := yaml.Unmarshal([]byte(stream), &p) + require.EqualError(t, err, errStorageEphemeralClass.Error()) +} + +func TestStorage_PersistentDefaultClass(t *testing.T) { + var stream = ` +- size: 1Gi + attributes: + persistent: true +` + + var p v2ResourceStorageArray + + err := yaml.Unmarshal([]byte(stream), &p) + require.NoError(t, err) + 
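+	// Attributes are sorted by key during unmarshaling, so "class" (defaulted to "default" when persistent is true) sorts ahead of "persistent".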
require.Len(t, p[0].Attributes, 2) + + require.Equal(t, p[0].Attributes[0].Key, "class") + require.Equal(t, p[0].Attributes[0].Value, "default") +} + +func TestStorage_PersistentClass(t *testing.T) { + var stream = ` +- size: 1Gi + attributes: + persistent: true + class: beta1 +` + + var p v2ResourceStorageArray + + err := yaml.Unmarshal([]byte(stream), &p) + require.NoError(t, err) + require.Len(t, p[0].Attributes, 2) + + require.Equal(t, p[0].Attributes[0].Key, "class") + require.Equal(t, p[0].Attributes[0].Value, "beta1") +} + +func TestStorage_RAMClass_Valid(t *testing.T) { + var stream = ` +- size: 1Gi + attributes: + persistent: false + class: ram +` + + var p v2ResourceStorageArray + + err := yaml.Unmarshal([]byte(stream), &p) + require.NoError(t, err) + require.Len(t, p[0].Attributes, 2) + + require.Equal(t, p[0].Attributes[0].Key, "class") + require.Equal(t, p[0].Attributes[0].Value, "ram") +} + +func TestStorage_RamClass_Invalid(t *testing.T) { + var stream = ` +- size: 1Gi + attributes: + persistent: true + class: ram +` + + var p v2ResourceStorageArray + + err := yaml.Unmarshal([]byte(stream), &p) + require.Error(t, err) +} + +func TestStorage_StableSort(t *testing.T) { + storage := v2ResourceStorageArray{ + { + Quantity: 2 * unit.Gi, + Attributes: v2StorageAttributes{ + types.Attribute{ + Key: "persistent", + Value: "true", + }, + }, + }, + { + Quantity: 1 * unit.Gi, + }, + { + Quantity: 10 * unit.Gi, + }, + } + + storage.sort() + + require.Equal(t, byteQuantity(1*unit.Gi), storage[0].Quantity) + require.Equal(t, byteQuantity(2*unit.Gi), storage[1].Quantity) + require.Equal(t, byteQuantity(10*unit.Gi), storage[2].Quantity) +} + +func TestStorage_Invalid_InvalidMount(t *testing.T) { + _, err := ReadFile("./_testdata/storageClass1.yaml") + require.Error(t, err) + require.Contains(t, err.Error(), "expected absolute path") +} + +func TestStorage_Invalid_MountNotAbsolute(t *testing.T) { + _, err := ReadFile("./_testdata/storageClass2.yaml") + require.Error(t, err) + require.Contains(t, err.Error(), "expected absolute path") +} + +func TestStorage_Invalid_VolumeReference(t *testing.T) { + _, err := ReadFile("./_testdata/storageClass3.yaml") + require.Error(t, err) + require.Contains(t, err.Error(), "references to no-existing compute volume") +} + +func TestStorage_Invalid_DuplicatedMount(t *testing.T) { + _, err := ReadFile("./_testdata/storageClass4.yaml") + require.Error(t, err) + require.Contains(t, err.Error(), "already in use by volume") +} + +func TestStorage_Invalid_NoMount(t *testing.T) { + _, err := ReadFile("./_testdata/storageClass5.yaml") + require.Error(t, err) + require.Contains(t, err.Error(), "to have mount") +} diff --git a/go/sdl/units.go b/go/sdl/units.go new file mode 100644 index 00000000..16c59e53 --- /dev/null +++ b/go/sdl/units.go @@ -0,0 +1,154 @@ +package sdl + +import ( + "fmt" + "strconv" + "strings" + + "gopkg.in/yaml.v3" + + "pkg.akt.dev/go/node/types/unit" +) + +var ( + errNegativeValue = fmt.Errorf("invalid: negative value not allowed") +) + +var unitSuffixes = map[string]uint64{ + "k": unit.K, + "Ki": unit.Ki, + "M": unit.M, + "Mi": unit.Mi, + "G": unit.G, + "Gi": unit.Gi, + "T": unit.T, + "Ti": unit.Ti, + "P": unit.P, + "Pi": unit.Pi, + "E": unit.E, + "Ei": unit.Ei, +} + +var memorySuffixes = map[string]uint64{ + "Ki": unit.Ki, + "Mi": unit.Mi, + "Gi": unit.Gi, + "Ti": unit.Ti, + "Pi": unit.Pi, + "Ei": unit.Ei, +} + +// CPU shares. 
One CPUQuantity = 1/1000 of a CPU +type cpuQuantity uint32 + +type gpuQuantity uint64 + +func (u *cpuQuantity) UnmarshalYAML(node *yaml.Node) error { + sval := node.Value + if strings.HasSuffix(sval, "m") { + sval = strings.TrimSuffix(sval, "m") + val, err := strconv.ParseUint(sval, 10, 32) + if err != nil { + return err + } + *u = cpuQuantity(val) // nolint: gosec + return nil + } + + val, err := strconv.ParseFloat(sval, 64) + if err != nil { + return err + } + + val *= 1000 + + if val < 0 { + return errNegativeValue + } + + *u = cpuQuantity(val) + + return nil +} + +func (u *gpuQuantity) UnmarshalYAML(node *yaml.Node) error { + sval := node.Value + + val, err := strconv.ParseUint(sval, 10, 64) + if err != nil { + return err + } + + *u = gpuQuantity(val) + + return nil +} + +// Memory,Storage size in bytes. +type byteQuantity uint64 +type memoryQuantity uint64 + +func (u *byteQuantity) UnmarshalYAML(node *yaml.Node) error { + val, err := parseWithSuffix(node.Value, unitSuffixes) + if err != nil { + return err + } + *u = byteQuantity(val) + return nil +} + +func (u *memoryQuantity) UnmarshalYAML(node *yaml.Node) error { + val, err := parseWithSuffix(node.Value, memorySuffixes) + if err != nil { + return err + } + *u = memoryQuantity(val) + return nil +} + +func (u *memoryQuantity) StringWithSuffix(suffix string) string { + unit, exists := memorySuffixes[suffix] + + val := uint64(*u) / unit + + res := fmt.Sprintf("%d", val) + if exists { + res += suffix + } + + return res +} + +func parseWithSuffix(sval string, units map[string]uint64) (uint64, error) { + for suffix, unit := range units { + if !strings.HasSuffix(sval, suffix) { + continue + } + + sval := strings.TrimSuffix(sval, suffix) + + val, err := strconv.ParseFloat(sval, 64) + if err != nil { + return 0, err + } + + val *= float64(unit) + + if val < 0 { + return 0, errNegativeValue + } + + return uint64(val), nil + } + + val, err := strconv.ParseFloat(sval, 64) + if err != nil { + return 0, err + } + + if val < 0 { + return 0, errNegativeValue + } + + return uint64(val), nil +} diff --git a/go/sdl/units_test.go b/go/sdl/units_test.go new file mode 100644 index 00000000..1cdf98da --- /dev/null +++ b/go/sdl/units_test.go @@ -0,0 +1,100 @@ +package sdl + +import ( + "testing" + + "gopkg.in/yaml.v3" + + "github.com/stretchr/testify/assert" + + "pkg.akt.dev/go/node/types/unit" +) + +func TestCPUQuantity(t *testing.T) { + + type vtype struct { + Val cpuQuantity `yaml:"val"` + } + + tests := []struct { + text string + value uint32 + err bool + }{ + {`val: 1`, 1000, false}, + {`val: -1`, 1000, true}, + + {`val: 0.5`, 500, false}, + {`val: -0.5`, 500, true}, + + {`val: "100m"`, 100, false}, + {`val: "-100m"`, 100, true}, + + {`val: ""`, 0, true}, + } + + for idx, test := range tests { + buf := []byte(test.text) + obj := &vtype{} + + err := yaml.Unmarshal(buf, obj) + + if test.err { + assert.Error(t, err, "idx:%v text:`%v`", idx, test.text) + continue + } + + if !assert.NoError(t, err, "idx:%v text:`%v`", idx, test.text) { + continue + } + + assert.Equal(t, cpuQuantity(test.value), obj.Val, "idx:%v text:`%v`", idx, test.text) + } +} + +func TestByteQuantity(t *testing.T) { + type vtype struct { + Val byteQuantity `yaml:"val"` + } + + tests := []struct { + text string + value uint64 + err bool + }{ + {`val: 1`, 1, false}, + {`val: -1`, 1, true}, + + {`val: "1M"`, unit.M, false}, + {`val: "-1M"`, 0, true}, + + {`val: "0.5M"`, unit.M / 2, false}, + {`val: "-0.5M"`, 0, true}, + + {`val: "3M"`, 3 * unit.M, false}, + {`val: "3G"`, 3 * unit.G, false}, 
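+		// Fractional values such as "0.5M" are parsed as floats, scaled by the suffix, and truncated to a whole number of bytes (see parseWithSuffix).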
+ {`val: "3T"`, 3 * unit.T, false}, + {`val: "3P"`, 3 * unit.P, false}, + {`val: "3E"`, 3 * unit.E, false}, + + {`val: ""`, 0, true}, + } + + for idx, test := range tests { + buf := []byte(test.text) + obj := &vtype{} + + err := yaml.Unmarshal(buf, obj) + + if test.err { + assert.Error(t, err, "idx:%v text:`%v`", idx, test.text) + continue + } + + if !assert.NoError(t, err, "idx:%v text:`%v`", idx, test.text) { + continue + } + + assert.Equal(t, byteQuantity(test.value), obj.Val, "idx:%v text:`%v`", idx, test.text) + } +} diff --git a/go/sdl/util/util.go b/go/sdl/util/util.go new file mode 100644 index 00000000..02958d51 --- /dev/null +++ b/go/sdl/util/util.go @@ -0,0 +1,31 @@ +package util + +import ( + "math" + + sdk "github.com/cosmos/cosmos-sdk/types" + + rtypes "pkg.akt.dev/go/node/types/resources/v1beta4" +) + +func ComputeCommittedResources(factor float64, rv rtypes.ResourceValue) rtypes.ResourceValue { + // If the value is less than 1, commit the original value. There is no concept of under-commit + if factor <= 1.0 { + return rv + } + + v := rv.Val.Uint64() + fraction := 1.0 / factor + committedValue := math.Round(float64(v) * fraction) + + // Don't return a value of zero, since this is used as a resource request + if committedValue <= 0 { + committedValue = 1 + } + + result := rtypes.ResourceValue{ + Val: sdk.NewInt(int64(committedValue)), + } + + return result +} diff --git a/go/sdl/util/util_test.go b/go/sdl/util/util_test.go new file mode 100644 index 00000000..65ab0a39 --- /dev/null +++ b/go/sdl/util/util_test.go @@ -0,0 +1,29 @@ +package util_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + rtypes "pkg.akt.dev/go/node/types/resources/v1beta4" + "pkg.akt.dev/go/sdl/util" +) + +func TestComputeCommittedResources(t *testing.T) { + rv := rtypes.NewResourceValue(100) + // Negative factor returns original value + require.Equal(t, uint64(100), util.ComputeCommittedResources(-1.0, rv).Val.Uint64()) + + // Zero factor returns original value + require.Equal(t, uint64(100), util.ComputeCommittedResources(0.0, rv).Val.Uint64()) + + // Factor of one returns the original value + require.Equal(t, uint64(100), util.ComputeCommittedResources(1.0, rv).Val.Uint64()) + + require.Equal(t, uint64(50), util.ComputeCommittedResources(2.0, rv).Val.Uint64()) + + require.Equal(t, uint64(33), util.ComputeCommittedResources(3.0, rv).Val.Uint64()) + + // Even for huge overcommit values, zero is not returned + require.Equal(t, uint64(1), util.ComputeCommittedResources(10000.0, rv).Val.Uint64()) +} diff --git a/go/sdl/utils.go b/go/sdl/utils.go new file mode 100644 index 00000000..6f4ac1c5 --- /dev/null +++ b/go/sdl/utils.go @@ -0,0 +1,17 @@ +package sdl + +const ( + valueFalse = "false" + valueTrue = "true" +) + +// as per yaml following allowed as bool values +func unifyStringAsBool(val string) (string, bool) { + if val == valueTrue || val == "on" || val == "yes" { + return valueTrue, true + } else if val == valueFalse || val == "off" || val == "no" { + return valueFalse, true + } + + return "", false +} diff --git a/go/sdl/v2.go b/go/sdl/v2.go new file mode 100644 index 00000000..27ea8591 --- /dev/null +++ b/go/sdl/v2.go @@ -0,0 +1,582 @@ +package sdl + +import ( + "errors" + "fmt" + "path" + "regexp" + "sort" + "strconv" + "strings" + + "gopkg.in/yaml.v3" + + manifest "pkg.akt.dev/go/manifest/v2beta3" + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + types "pkg.akt.dev/go/node/types/attributes/v1" +) + +const ( + nextCaseError = "error" + nextCaseTimeout = "timeout" + 
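+	// Retryable HTTP status codes accepted in expose.http_options.next_cases; any other value is rejected by v2HTTPOptions.asManifest.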
nextCase500 = "500" + nextCase502 = "502" + nextCase503 = "503" + nextCase504 = "504" + nextCase403 = "403" + nextCase404 = "404" + nextCase400 = "429" + nextCaseOff = "off" + defaultMaxBodySize = uint32(1048576) + upperLimitBodySize = uint32(104857600) + defaultReadTimeout = uint32(60000) + upperLimitReadTimeout = defaultReadTimeout + defaultSendTimeout = uint32(60000) + upperLimitSendTimeout = defaultSendTimeout + defaultNextTries = uint32(3) + endpointKindIP = "ip" +) + +var ( + defaultNextCases = []string{nextCaseError, nextCaseTimeout} + errCannotSpecifyOffAndOtherCases = errors.New("if 'off' is specified, no other cases may be specified") + errUnknownNextCase = errors.New("next case is unknown") + errHTTPOptionNotAllowed = errors.New("http option not allowed") + errSDLInvalid = errors.New("SDL invalid") + errCredentialNoHost = errors.New("Service Credentials missing Host") + errCredentialNoUsername = errors.New("Service Credentials missing Username") + errCredentialNoPassword = errors.New("Service Credentials missing Password") +) + +var endpointNameValidationRegex = regexp.MustCompile(`^[[:lower:]]+[[:lower:]-_\d]+$`) + +var _ SDL = (*v2)(nil) + +type v2 struct { + Include []string `yaml:",omitempty"` + Services map[string]v2Service `yaml:"services,omitempty"` + Profiles v2profiles `yaml:"profiles,omitempty"` + Deployments v2Deployments `yaml:"deployment"` + Endpoints map[string]v2Endpoint `yaml:"endpoints"` + + result struct { + dgroups dtypes.GroupSpecs + mgroups manifest.Groups + } +} + +type v2Deployments map[string]v2Deployment + +type v2Endpoint struct { + Kind string `yaml:"kind"` +} + +type v2ExposeTo struct { + Service string `yaml:"service,omitempty"` + Global bool `yaml:"global,omitempty"` + HTTPOptions v2HTTPOptions `yaml:"http_options"` + IP string `yaml:"ip"` +} + +type v2HTTPOptions struct { + MaxBodySize uint32 `yaml:"max_body_size"` + ReadTimeout uint32 `yaml:"read_timeout"` + SendTimeout uint32 `yaml:"send_timeout"` + NextTries uint32 `yaml:"next_tries"` + NextTimeout uint32 `yaml:"next_timeout"` + NextCases []string `yaml:"next_cases"` +} + +func (ho v2HTTPOptions) asManifest() (manifest.ServiceExposeHTTPOptions, error) { + maxBodySize := ho.MaxBodySize + + if maxBodySize == 0 { + maxBodySize = defaultMaxBodySize + } else if maxBodySize > upperLimitBodySize { + return manifest.ServiceExposeHTTPOptions{}, fmt.Errorf("%w: body size cannot be greater than %d bytes", errHTTPOptionNotAllowed, upperLimitBodySize) + } + + readTimeout := ho.ReadTimeout + if readTimeout == 0 { + readTimeout = defaultReadTimeout + } else if readTimeout > upperLimitReadTimeout { + return manifest.ServiceExposeHTTPOptions{}, fmt.Errorf("%w: read timeout cannot be greater than %d ms", errHTTPOptionNotAllowed, upperLimitReadTimeout) + } + + sendTimeout := ho.SendTimeout + if sendTimeout == 0 { + sendTimeout = defaultSendTimeout + } else if sendTimeout > upperLimitSendTimeout { + return manifest.ServiceExposeHTTPOptions{}, fmt.Errorf("%w: send timeout cannot be greater than %d ms", errHTTPOptionNotAllowed, upperLimitSendTimeout) + } + + nextTries := ho.NextTries + if nextTries == 0 { + nextTries = defaultNextTries + } + + nextCases := ho.NextCases + if len(nextCases) == 0 { + nextCases = defaultNextCases + } else { + for _, nextCase := range nextCases { + switch nextCase { + case nextCaseOff: + if len(nextCases) != 1 { + return manifest.ServiceExposeHTTPOptions{}, errCannotSpecifyOffAndOtherCases + } + case nextCaseError: + case nextCaseTimeout: + case nextCase500: + case nextCase502: + case 
nextCase503: + case nextCase504: + case nextCase403: + case nextCase404: + case nextCase400: + default: + return manifest.ServiceExposeHTTPOptions{}, fmt.Errorf("%w: %q", errUnknownNextCase, nextCase) + } + } + } + + return manifest.ServiceExposeHTTPOptions{ + MaxBodySize: maxBodySize, + ReadTimeout: readTimeout, + SendTimeout: sendTimeout, + NextTries: nextTries, + NextTimeout: ho.NextTimeout, + NextCases: nextCases, + }, nil +} + +type v2Expose struct { + Port uint32 + As uint32 + Proto string `yaml:"proto,omitempty"` + To []v2ExposeTo `yaml:"to,omitempty"` + Accept v2Accept `yaml:"accept"` + HTTPOptions v2HTTPOptions `yaml:"http_options"` +} + +type v2Exposes []v2Expose + +type v2Dependency struct { + Service string `yaml:"service"` +} + +type v2ServiceParams struct { + Storage map[string]v2ServiceStorageParams `yaml:"storage,omitempty"` +} + +type v2Service struct { + Image string + Command []string `yaml:",omitempty"` + Args []string `yaml:",omitempty"` + Env []string `yaml:",omitempty"` + Expose v2Exposes `yaml:",omitempty"` + Dependencies []v2Dependency `yaml:",omitempty"` + Params *v2ServiceParams `yaml:",omitempty"` + Credentials *v2ServiceCredentials `yaml:",omitempty"` +} + +type v2ServiceCredentials struct { + Host string `yaml:",omitempty"` + Email string `yaml:",omitempty"` + Username string `yaml:",omitempty"` + Password string `yaml:",omitempty"` +} + +func (c v2ServiceCredentials) validate() error { + if strings.TrimSpace(c.Host) == "" { + return errCredentialNoHost + } + if strings.TrimSpace(c.Username) == "" { + return errCredentialNoUsername + } + if strings.TrimSpace(c.Password) == "" { + return errCredentialNoPassword + } + return nil +} + +type v2ServiceDeployment struct { + // Compute profile name + Profile string + + // Number of instances + Count uint32 +} + +// placement-profile -> { compute-profile, count } +type v2Deployment map[string]v2ServiceDeployment + +type v2ProfileCompute struct { + Resources *v2ComputeResources `yaml:"resources,omitempty"` +} + +type v2ProfilePlacement struct { + Attributes v2PlacementAttributes `yaml:"attributes"` + SignedBy types.SignedBy `yaml:"signedBy"` + Pricing v2PlacementPricing `yaml:"pricing"` +} + +type v2profiles struct { + Compute map[string]v2ProfileCompute `yaml:"compute"` + Placement map[string]v2ProfilePlacement `yaml:"placement"` +} + +func (sdl *v2) DeploymentGroups() (dtypes.GroupSpecs, error) { + return sdl.result.dgroups, nil +} + +func (sdl *v2) Manifest() (manifest.Manifest, error) { + return manifest.Manifest(sdl.result.mgroups), nil +} + +// Version creates the deterministic Deployment Version hash from the SDL. 
+func (sdl *v2) Version() ([]byte, error) { + return manifest.Manifest(sdl.result.mgroups).Version() +} + +func (sdl *v2) UnmarshalYAML(node *yaml.Node) error { + result := v2{} + +loop: + for i := 0; i < len(node.Content); i += 2 { + var val interface{} + switch node.Content[i].Value { + case "include": + val = &result.Include + case "services": + val = &result.Services + case "profiles": + val = &result.Profiles + case "deployment": + val = &result.Deployments + case "endpoints": + val = &result.Endpoints + case sdlVersionField: + // version is already verified + continue loop + default: + return fmt.Errorf("sdl: unexpected field %s", node.Content[i].Value) + } + + if err := node.Content[i+1].Decode(val); err != nil { + return err + } + } + + if err := result.buildGroups(); err != nil { + return err + } + + *sdl = result + + return nil +} + +func (sdl *v2) validate() error { + for endpointName, endpoint := range sdl.Endpoints { + if !endpointNameValidationRegex.MatchString(endpointName) { + return fmt.Errorf( + "%w: endpoint named %q is not a valid name", + errSDLInvalid, + endpointName, + ) + } + + if len(endpoint.Kind) == 0 { + return fmt.Errorf("%w: endpoint named %q has no kind", errSDLInvalid, endpointName) + } + + // Validate endpoint kind, there is only one allowed value for now + if endpoint.Kind != endpointKindIP { + return fmt.Errorf( + "%w: endpoint named %q, unknown kind %q", + errSDLInvalid, + endpointName, + endpoint.Kind, + ) + } + } + + endpointsUsed := make(map[string]struct{}) + portsUsed := make(map[string]string) + for _, svcName := range sdl.Deployments.svcNames() { + depl := sdl.Deployments[svcName] + + for _, placementName := range v2DeploymentPlacementNames(depl) { + svcdepl := depl[placementName] + + compute, ok := sdl.Profiles.Compute[svcdepl.Profile] + if !ok { + return fmt.Errorf( + "%w: %v.%v: no compute profile named %v", + errSDLInvalid, + svcName, + placementName, + svcdepl.Profile, + ) + } + + infra, ok := sdl.Profiles.Placement[placementName] + if !ok { + return fmt.Errorf( + "%w: %v.%v: no placement profile named %v", + errSDLInvalid, + svcName, + placementName, + placementName, + ) + } + + if _, ok := infra.Pricing[svcdepl.Profile]; !ok { + return fmt.Errorf( + "%w: %v.%v: no pricing for profile %v", + errSDLInvalid, + svcName, + placementName, + svcdepl.Profile, + ) + } + + svc, ok := sdl.Services[svcName] + if !ok { + return fmt.Errorf( + "%w: %v.%v: no service profile named %v", + errSDLInvalid, + svcName, + placementName, + svcName, + ) + } + + if svc.Credentials != nil { + if err := svc.Credentials.validate(); err != nil { + return fmt.Errorf( + "%w: %v.%v: %v", + errSDLInvalid, + svcName, + placementName, + err, + ) + } + } + + for _, serviceExpose := range svc.Expose { + for _, to := range serviceExpose.To { + // Check to see if an IP endpoint is also specified + if len(to.IP) != 0 { + if !to.Global { + return fmt.Errorf( + "%w: error on %q if an IP is declared the directive must be declared as global", + errSDLInvalid, + svcName, + ) + } + endpoint, endpointExists := sdl.Endpoints[to.IP] + if !endpointExists { + return fmt.Errorf( + "%w: error on service %q no endpoint named %q exists", + errSDLInvalid, + svcName, + to.IP, + ) + } + + if endpoint.Kind != endpointKindIP { + return fmt.Errorf( + "%w: error on service %q endpoint %q has type %q, should be %q", + errSDLInvalid, + svcName, + to.IP, + endpoint.Kind, + endpointKindIP, + ) + } + + endpointsUsed[to.IP] = struct{}{} + + // Endpoint exists. 
Now check for port collisions across a single endpoint, port, & protocol + portKey := fmt.Sprintf( + "%s-%d-%s", + to.IP, + serviceExpose.As, + serviceExpose.Proto, + ) + otherServiceName, inUse := portsUsed[portKey] + if inUse { + return fmt.Errorf( + "%w: IP endpoint %q port: %d protocol: %s specified by service %q already in use by %q", + errSDLInvalid, + to.IP, + serviceExpose.Port, + serviceExpose.Proto, + svcName, + otherServiceName, + ) + } + portsUsed[portKey] = svcName + } + } + } + + // validate storage's attributes and parameters + volumes := make(map[string]v2ResourceStorage) + for _, volume := range compute.Resources.Storage { + // making deepcopy here as we gonna merge compute attributes and service parameters for validation below + attr := make(v2StorageAttributes, len(volume.Attributes)) + + copy(attr, volume.Attributes) + + volumes[volume.Name] = v2ResourceStorage{ + Name: volume.Name, + Quantity: volume.Quantity, + Attributes: attr, + } + } + + attr := make(map[string]string) + mounts := make(map[string]string) + + if svc.Params != nil { + for name, params := range svc.Params.Storage { + if _, exists := volumes[name]; !exists { + return fmt.Errorf( + "%w: service \"%s\" references to no-existing compute volume named \"%s\"", + errSDLInvalid, + svcName, + name, + ) + } + + if !path.IsAbs(params.Mount) { + return fmt.Errorf( + "%w: invalid value for \"service.%s.params.%s.mount\" parameter. expected absolute path", + errSDLInvalid, + svcName, + name, + ) + } + + attr[StorageAttributeMount] = params.Mount + attr[StorageAttributeReadOnly] = strconv.FormatBool(params.ReadOnly) + + mount := attr[StorageAttributeMount] + if vlname, exists := mounts[mount]; exists { + if mount == "" { + return errStorageMultipleRootEphemeral + } + + return fmt.Errorf( + "%w: mount %q already in use by volume %q", + errStorageDupMountPoint, + mount, + vlname, + ) + } + + mounts[mount] = name + } + } + + for name, volume := range volumes { + for _, nd := range types.Attributes(volume.Attributes) { + attr[nd.Key] = nd.Value + } + + persistent, _ := strconv.ParseBool(attr[StorageAttributePersistent]) + + if persistent && attr[StorageAttributeMount] == "" { + return fmt.Errorf( + "%w: compute.storage.%s has persistent=true which requires service.%s.params.storage.%s to have mount", + errSDLInvalid, + name, + svcName, + name, + ) + } + } + } + } + + for endpointName := range sdl.Endpoints { + _, inUse := endpointsUsed[endpointName] + if !inUse { + return fmt.Errorf( + "%w: endpoint %q declared but never used", + errSDLInvalid, + endpointName, + ) + } + } + + return nil +} + +func (sdl *v2) computeEndpointSequenceNumbers() map[string]uint32 { + var endpointNames []string + res := make(map[string]uint32) + + for _, serviceName := range sdl.Deployments.svcNames() { + for _, expose := range sdl.Services[serviceName].Expose { + for _, to := range expose.To { + if to.Global && len(to.IP) == 0 { + continue + } + + endpointNames = append(endpointNames, to.IP) + } + } + } + + if len(endpointNames) == 0 { + return res + } + + // Make the assignment stable + sort.Strings(endpointNames) + + // Start at zero, so the first assigned one is 1 + endpointSeqNumber := uint32(0) + for _, name := range endpointNames { + endpointSeqNumber++ + seqNo := endpointSeqNumber + res[name] = seqNo + } + + return res +} + +func (sdl v2Deployments) svcNames() []string { + names := make([]string, 0, len(sdl)) + for name := range sdl { + names = append(names, name) + } + sort.Strings(names) + + return names +} + +// placementNames 
stable ordered placement names +func (sdl v2Deployment) placementNames() []string { + names := make([]string, 0, len(sdl)) + for name := range sdl { + names = append(names, name) + } + sort.Strings(names) + + return names +} + +func v2DeploymentPlacementNames(m v2Deployment) []string { + names := make([]string, 0, len(m)) + for name := range m { + names = append(names, name) + } + sort.Strings(names) + + return names +} diff --git a/go/sdl/v2_1.go b/go/sdl/v2_1.go new file mode 100644 index 00000000..f14e8831 --- /dev/null +++ b/go/sdl/v2_1.go @@ -0,0 +1,364 @@ +package sdl + +import ( + "fmt" + "path" + "sort" + "strconv" + + "gopkg.in/yaml.v3" + + manifest "pkg.akt.dev/go/manifest/v2beta3" + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + types "pkg.akt.dev/go/node/types/attributes/v1" +) + +var _ SDL = (*v2_1)(nil) + +type v2_1 struct { + Include []string `yaml:",omitempty"` + Services map[string]v2Service `yaml:"services,omitempty"` + Profiles v2profiles `yaml:"profiles,omitempty"` + Deployments v2Deployments `yaml:"deployment"` + Endpoints map[string]v2Endpoint `yaml:"endpoints"` + + result struct { + dgroups dtypes.GroupSpecs + mgroups manifest.Groups + } +} + +func (sdl *v2_1) DeploymentGroups() (dtypes.GroupSpecs, error) { + return sdl.result.dgroups, nil +} + +func (sdl *v2_1) Manifest() (manifest.Manifest, error) { + return manifest.Manifest(sdl.result.mgroups), nil +} + +// Version creates the deterministic Deployment Version hash from the SDL. +func (sdl *v2_1) Version() ([]byte, error) { + return manifest.Manifest(sdl.result.mgroups).Version() +} + +func (sdl *v2_1) UnmarshalYAML(node *yaml.Node) error { + result := v2_1{} + +loop: + for i := 0; i < len(node.Content); i += 2 { + var val interface{} + switch node.Content[i].Value { + case "include": + val = &result.Include + case "services": + val = &result.Services + case "profiles": + val = &result.Profiles + case "deployment": + val = &result.Deployments + case "endpoints": + val = &result.Endpoints + case sdlVersionField: + // version is already verified + continue loop + default: + return fmt.Errorf("sdl: unexpected field %s", node.Content[i].Value) + } + + if err := node.Content[i+1].Decode(val); err != nil { + return err + } + } + + if err := result.buildGroups(); err != nil { + return err + } + + *sdl = result + + return nil +} + +func (sdl *v2_1) validate() error { + for endpointName, endpoint := range sdl.Endpoints { + if !endpointNameValidationRegex.MatchString(endpointName) { + return fmt.Errorf( + "%w: endpoint named %q is not a valid name", + errSDLInvalid, + endpointName, + ) + } + + if len(endpoint.Kind) == 0 { + return fmt.Errorf("%w: endpoint named %q has no kind", errSDLInvalid, endpointName) + } + + // Validate endpoint kind, there is only one allowed value for now + if endpoint.Kind != endpointKindIP { + return fmt.Errorf( + "%w: endpoint named %q, unknown kind %q", + errSDLInvalid, + endpointName, + endpoint.Kind, + ) + } + } + + endpointsUsed := make(map[string]struct{}) + portsUsed := make(map[string]string) + for _, svcName := range sdl.Deployments.svcNames() { + depl := sdl.Deployments[svcName] + + for _, placementName := range v2DeploymentPlacementNames(depl) { + svcdepl := depl[placementName] + + compute, ok := sdl.Profiles.Compute[svcdepl.Profile] + if !ok { + return fmt.Errorf( + "%w: %v.%v: no compute profile named %v", + errSDLInvalid, + svcName, + placementName, + svcdepl.Profile, + ) + } + + infra, ok := sdl.Profiles.Placement[placementName] + if !ok { + return fmt.Errorf( + "%w: %v.%v: no 
placement profile named %v", + errSDLInvalid, + svcName, + placementName, + placementName, + ) + } + + if _, ok := infra.Pricing[svcdepl.Profile]; !ok { + return fmt.Errorf( + "%w: %v.%v: no pricing for profile %v", + errSDLInvalid, + svcName, + placementName, + svcdepl.Profile, + ) + } + + svc, ok := sdl.Services[svcName] + if !ok { + return fmt.Errorf( + "%w: %v.%v: no service profile named %v", + errSDLInvalid, + svcName, + placementName, + svcName, + ) + } + + if svc.Credentials != nil { + if err := svc.Credentials.validate(); err != nil { + return fmt.Errorf( + "%w: %v.%v: %v", + errSDLInvalid, + svcName, + placementName, + err, + ) + } + } + + for _, serviceExpose := range svc.Expose { + for _, to := range serviceExpose.To { + // Check to see if an IP endpoint is also specified + if len(to.IP) != 0 { + if !to.Global { + return fmt.Errorf( + "%w: error on %q if an IP is declared the directive must be declared as global", + errSDLInvalid, + svcName, + ) + } + endpoint, endpointExists := sdl.Endpoints[to.IP] + if !endpointExists { + return fmt.Errorf( + "%w: error on service %q no endpoint named %q exists", + errSDLInvalid, + svcName, + to.IP, + ) + } + + if endpoint.Kind != endpointKindIP { + return fmt.Errorf( + "%w: error on service %q endpoint %q has type %q, should be %q", + errSDLInvalid, + svcName, + to.IP, + endpoint.Kind, + endpointKindIP, + ) + } + + endpointsUsed[to.IP] = struct{}{} + + // Endpoint exists. Now check for port collisions across a single endpoint, port, & protocol + portKey := fmt.Sprintf( + "%s-%d-%s", + to.IP, + serviceExpose.As, + serviceExpose.Proto, + ) + otherServiceName, inUse := portsUsed[portKey] + if inUse { + return fmt.Errorf( + "%w: IP endpoint %q port: %d protocol: %s specified by service %q already in use by %q", + errSDLInvalid, + to.IP, + serviceExpose.Port, + serviceExpose.Proto, + svcName, + otherServiceName, + ) + } + portsUsed[portKey] = svcName + } + } + } + + // validate storage's attributes and parameters + volumes := make(map[string]v2ResourceStorage) + for _, volume := range compute.Resources.Storage { + // making deepcopy here as we gonna merge compute attributes and service parameters for validation below + attr := make(v2StorageAttributes, len(volume.Attributes)) + + copy(attr, volume.Attributes) + + volumes[volume.Name] = v2ResourceStorage{ + Name: volume.Name, + Quantity: volume.Quantity, + Attributes: attr, + } + } + + if svc.Params != nil { + mounts := make(map[string]string) + + for name, params := range svc.Params.Storage { + + volume, exists := volumes[name] + + if !exists { + return fmt.Errorf( + "%w: service \"%s\" references to no-existing compute volume named \"%s\"", + errSDLInvalid, + svcName, + name, + ) + } + + if !path.IsAbs(params.Mount) { + return fmt.Errorf( + "%w: invalid value for \"service.%s.params.%s.mount\" parameter. 
expected absolute path", + errSDLInvalid, + svcName, + name, + ) + } + + if vlname, exists := mounts[params.Mount]; exists { + if params.Mount == "" { + return errStorageMultipleRootEphemeral + } + + return fmt.Errorf( + "%w: mount %q already in use by volume %q", + errStorageDupMountPoint, + params.Mount, + vlname, + ) + } + + mounts[params.Mount] = name + + attr := make(map[string]string) + attr[StorageAttributeMount] = params.Mount + attr[StorageAttributeReadOnly] = strconv.FormatBool(params.ReadOnly) + + for _, nd := range types.Attributes(volume.Attributes) { + attr[nd.Key] = nd.Value + } + + persistent, _ := strconv.ParseBool(attr[StorageAttributePersistent]) + class := attr[StorageAttributeClass] + + if persistent && params.Mount == "" { + return fmt.Errorf( + "%w: compute.storage.%s has persistent=true which requires service.%s.params.storage.%s to have mount", + errSDLInvalid, + name, + svcName, + name, + ) + } + + if class == StorageClassRAM && params.ReadOnly { + return fmt.Errorf( + "%w: services.%s.params.storage.%s has readOnly=true which is not allowed for storage class \"%s\"", + errSDLInvalid, + svcName, + name, + class, + ) + } + } + } + } + } + + for endpointName := range sdl.Endpoints { + _, inUse := endpointsUsed[endpointName] + if !inUse { + return fmt.Errorf( + "%w: endpoint %q declared but never used", + errSDLInvalid, + endpointName, + ) + } + } + + return nil +} + +func (sdl *v2_1) computeEndpointSequenceNumbers() map[string]uint32 { + var endpointNames []string + res := make(map[string]uint32) + + for _, serviceName := range sdl.Deployments.svcNames() { + for _, expose := range sdl.Services[serviceName].Expose { + for _, to := range expose.To { + if to.Global && len(to.IP) == 0 { + continue + } + + endpointNames = append(endpointNames, to.IP) + } + } + } + + if len(endpointNames) == 0 { + return res + } + + // Make the assignment stable + sort.Strings(endpointNames) + + // Start at zero, so the first assigned one is 1 + endpointSeqNumber := uint32(0) + for _, name := range endpointNames { + endpointSeqNumber++ + seqNo := endpointSeqNumber + res[name] = seqNo + } + + return res +} diff --git a/go/sdl/v2_1_ip_test.go b/go/sdl/v2_1_ip_test.go new file mode 100644 index 00000000..8b9bce78 --- /dev/null +++ b/go/sdl/v2_1_ip_test.go @@ -0,0 +1,279 @@ +package sdl + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + manifest "pkg.akt.dev/go/manifest/v2beta3" + rtypes "pkg.akt.dev/go/node/types/resources/v1beta4" +) + +func TestV2_1_ParseSimpleWithIP(t *testing.T) { + sdl, err := ReadFile("./_testdata/v2.1-simple-with-ip.yaml") + require.NoError(t, err) + require.NotNil(t, sdl) + + groups, err := sdl.DeploymentGroups() + require.NoError(t, err) + require.Len(t, groups, 1) + group := groups[0] + resources := group.GetResourceUnits() + require.Len(t, resources, 1) + resource := resources[0] + + ipEndpoint := findIPEndpoint(t, resource.Resources.Endpoints, 1) + + require.Equal(t, ipEndpoint.Kind, rtypes.Endpoint_LEASED_IP) + + mani, err := sdl.Manifest() + require.NoError(t, err) + var exposeIP manifest.ServiceExpose + for _, expose := range mani[0].Services[0].Expose { + if len(expose.IP) != 0 { + exposeIP = expose + break + } + } + require.NotEmpty(t, exposeIP.IP) + require.Equal(t, exposeIP.Proto, manifest.UDP) + require.Equal(t, exposeIP.Port, uint32(12345)) + require.True(t, exposeIP.Global) +} + +func TestV2_1_Parse_IP(t *testing.T) { + sdl1, err := ReadFile("_testdata/legacy/deployment-v2.1-ip-endpoint.yaml") + require.NoError(t, 
err) + groups, err := sdl1.DeploymentGroups() + require.NoError(t, err) + + require.Len(t, groups, 1) + group := groups[0] + + resources := group.GetResourceUnits() + require.Len(t, resources, 1) + resource := resources[0] + endpoints := resource.Resources.Endpoints + require.Len(t, endpoints, 2) + + var ipEndpoint rtypes.Endpoint + for _, endpoint := range endpoints { + if endpoint.Kind == rtypes.Endpoint_LEASED_IP { + ipEndpoint = endpoint + } + } + + require.Equal(t, ipEndpoint.Kind, rtypes.Endpoint_LEASED_IP) + require.Greater(t, ipEndpoint.SequenceNumber, uint32(0)) + + mani, err := sdl1.Manifest() + require.NoError(t, err) + maniGroups := mani.GetGroups() + require.Len(t, maniGroups, 1) + maniGroup := maniGroups[0] + services := maniGroup.Services + require.Len(t, services, 1) + + service := services[0] + exposes := service.Expose + require.Len(t, exposes, 1) + + expose := exposes[0] + + require.True(t, expose.Global) + require.Equal(t, expose.IP, "meow") + require.Greater(t, expose.EndpointSequenceNumber, uint32(0)) +} + +func TestV2_1_Parse_SharedIP(t *testing.T) { + // Read a file with 1 group having 1 endpoint shared amongst containers + sdl1, err := ReadFile("_testdata/legacy/deployment-v2.1-shared-ip-endpoint.yaml") + require.NoError(t, err) + + groups, err := sdl1.DeploymentGroups() + require.NoError(t, err) + require.Len(t, groups, 1) + + group := groups[0] + + resources := group.GetResourceUnits() + require.Len(t, resources, 1) + + resource := resources[0] + ipEndpoint1 := findIPEndpoint(t, resource.Resources.Endpoints, 1) + require.Greater(t, ipEndpoint1.SequenceNumber, uint32(0)) + + ipEndpoint2 := findIPEndpoint(t, resource.Resources.Endpoints, 2) + require.Greater(t, ipEndpoint2.SequenceNumber, uint32(0)) + + mani, err := sdl1.Manifest() + require.NoError(t, err) + + maniGroups := mani.GetGroups() + require.Len(t, maniGroups, 1) + maniGroup := maniGroups[0] + + services := maniGroup.Services + require.Len(t, services, 2) + serviceA := services[0] + + serviceIPEndpoint := findIPEndpoint(t, serviceA.Resources.Endpoints, 1) + require.Equal(t, serviceIPEndpoint.SequenceNumber, ipEndpoint1.SequenceNumber) + + serviceB := services[1] + serviceIPEndpoint = findIPEndpoint(t, serviceB.Resources.Endpoints, 1) + require.Equal(t, serviceIPEndpoint.SequenceNumber, ipEndpoint2.SequenceNumber) +} + +func TestV2_1_Parse_MultipleIP(t *testing.T) { + // Read a file with 1 group having two endpoints + sdl1, err := ReadFile("_testdata/legacy/deployment-v2.1-multi-ip-endpoint.yaml") + require.NoError(t, err) + + groups, err := sdl1.DeploymentGroups() + require.NoError(t, err) + require.Len(t, groups, 1) + + group := groups[0] + + resources := group.GetResourceUnits() + require.Len(t, resources, 1) + + mani, err := sdl1.Manifest() + require.NoError(t, err) + _ = mani +} + +func TestV2_1_Parse_MultipleGroupsIP(t *testing.T) { + // Read a file with two groups, each one having an IP endpoint that is distinct + sdl1, err := ReadFile("_testdata/legacy/deployment-v2.1-multi-groups-ip-endpoint.yaml") + require.NoError(t, err) + + groups, err := sdl1.DeploymentGroups() + require.NoError(t, err) + require.Len(t, groups, 2) + + resources := groups[0].GetResourceUnits() + require.Len(t, resources, 1) + + resource := resources[0] + require.Len(t, resource.Resources.Endpoints, 2) + ipEndpointFirstGroup := findIPEndpoint(t, resource.Resources.Endpoints, 1) + require.Greater(t, ipEndpointFirstGroup.SequenceNumber, uint32(0)) + + resources = groups[1].GetResourceUnits() + require.Len(t, resources, 1) + + 
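The SequenceNumber assertions in these tests hold because `computeEndpointSequenceNumbers` sorts the referenced endpoint names before numbering them from 1, so the assignment does not depend on Go's randomized map iteration order. A standalone sketch of that assignment (the function name here is illustrative, not part of the package):

```go
package main

import (
	"fmt"
	"sort"
)

// assignEndpointSequenceNumbers mirrors the numbering scheme used above:
// sort the endpoint names for a stable order, then hand out 1..N in that order.
func assignEndpointSequenceNumbers(names []string) map[string]uint32 {
	res := make(map[string]uint32, len(names))

	sorted := append([]string(nil), names...)
	sort.Strings(sorted)

	seq := uint32(0)
	for _, name := range sorted {
		seq++
		res[name] = seq
	}
	return res
}

func main() {
	fmt.Println(assignEndpointSequenceNumbers([]string{"zeta-ip", "alpha-ip"}))
	// map[alpha-ip:1 zeta-ip:2], regardless of input order
}
```

Without the sort, endpoint numbering, and with it the deterministic deployment version, could change from run to run.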
resource = resources[0] + require.Len(t, resource.Resources.Endpoints, 2) + ipEndpointSecondGroup := findIPEndpoint(t, resource.Resources.Endpoints, 1) + require.Greater(t, ipEndpointSecondGroup.SequenceNumber, uint32(0)) + require.NotEqual(t, ipEndpointFirstGroup.SequenceNumber, ipEndpointSecondGroup.SequenceNumber) + + mani, err := sdl1.Manifest() + require.NoError(t, err) + maniGroups := mani.GetGroups() + require.Len(t, maniGroups, 2) + + maniGroup := maniGroups[0] + mresources := maniGroup.GetResourceUnits() + require.Len(t, mresources, 1) + mresource := mresources[0] + require.Equal(t, findIPEndpoint(t, mresource.Endpoints, 1).SequenceNumber, ipEndpointFirstGroup.SequenceNumber) + + maniGroup = maniGroups[1] + mresources = maniGroup.GetResourceUnits() + require.Len(t, mresources, 1) + mresource = mresources[0] + require.Equal(t, findIPEndpoint(t, mresource.Endpoints, 1).SequenceNumber, ipEndpointSecondGroup.SequenceNumber) + +} + +func TestV2_1_Parse_IPEndpointNaming(t *testing.T) { + makeSDLWithEndpointName := func(name string) []byte { + const originalSDL = `--- +version: "2.1" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + ip: %q + accept: + - test.localhost + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 10 + +deployment: + web: + global: + profile: web + count: 1 + +endpoints: + %q: + kind: ip +` + buf := &bytes.Buffer{} + _, err := fmt.Fprintf(buf, originalSDL, name, name) + require.NoError(t, err) + return buf.Bytes() + } + + _, err := Read(makeSDLWithEndpointName("meow72-memes")) + require.NoError(t, err) + + _, err = Read(makeSDLWithEndpointName("meow72-mem_es")) + require.NoError(t, err) + + _, err = Read(makeSDLWithEndpointName("!important")) + require.Error(t, err) + require.ErrorIs(t, err, errSDLInvalid) + require.Contains(t, err.Error(), "not a valid name") + + _, err = Read(makeSDLWithEndpointName("foo^bar")) + require.Error(t, err) + require.ErrorIs(t, err, errSDLInvalid) + require.Contains(t, err.Error(), "not a valid name") + + _, err = Read(makeSDLWithEndpointName("ROAR")) + require.Error(t, err) + require.ErrorIs(t, err, errSDLInvalid) + require.Contains(t, err.Error(), "not a valid name") + + _, err = Read(makeSDLWithEndpointName("996")) + require.Error(t, err) + require.ErrorIs(t, err, errSDLInvalid) + require.Contains(t, err.Error(), "not a valid name") + + _, err = Read(makeSDLWithEndpointName("_kittens")) + require.Error(t, err) + require.ErrorIs(t, err, errSDLInvalid) + require.Contains(t, err.Error(), "not a valid name") + + _, err = Read(makeSDLWithEndpointName("-kittens")) + require.Error(t, err) + require.ErrorIs(t, err, errSDLInvalid) + require.Contains(t, err.Error(), "not a valid name") +} diff --git a/go/sdl/v2_1_test.go b/go/sdl/v2_1_test.go new file mode 100644 index 00000000..bc9cc319 --- /dev/null +++ b/go/sdl/v2_1_test.go @@ -0,0 +1,761 @@ +package sdl + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + manifest "pkg.akt.dev/go/manifest/v2beta3" + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + atypes "pkg.akt.dev/go/node/types/attributes/v1" + rtypes "pkg.akt.dev/go/node/types/resources/v1beta4" + "pkg.akt.dev/go/node/types/unit" +) + +func TestV2_1_ParseSimpleGPU(t *testing.T) { + sdl, err := ReadFile("./_testdata/v2.1-simple-gpu.yaml") + require.NoError(t, err) + + groups, err := 
sdl.DeploymentGroups() + require.NoError(t, err) + assert.Len(t, groups, 1) + + group := groups[0] + assert.Len(t, group.GetResourceUnits(), 1) + assert.Len(t, group.Requirements.Attributes, 2) + + assert.Equal(t, atypes.Attribute{ + Key: "region", + Value: "us-west", + }, group.Requirements.Attributes[1]) + + assert.Len(t, group.GetResourceUnits(), 1) + + assert.Equal(t, dtypes.ResourceUnit{ + Count: 2, + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(randCPU), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(randGPU), + Attributes: atypes.Attributes{ + { + Key: "vendor/nvidia/model/a100", + Value: "true", + }, + }, + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(randMemory), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(randStorage), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Price: AkashDecCoin(t, 50), + }, group.GetResourceUnits()[0]) + + mani, err := sdl.Manifest() + require.NoError(t, err) + + assert.Len(t, mani.GetGroups(), 1) + + expectedHosts := make([]string, 1) + expectedHosts[0] = "ahostname.com" // nolint: goconst + assert.Equal(t, manifest.Group{ + Name: "westcoast", + Services: []manifest.Service{ + { + Name: "web", + Image: "nginx", + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(100), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(1), + Attributes: atypes.Attributes{ + { + Key: "vendor/nvidia/model/a100", + Value: "true", + }, + }, + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(128 * unit.Mi), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(1 * unit.Gi), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Count: 2, + Expose: []manifest.ServiceExpose{ + {Port: 80, Global: true, Proto: manifest.TCP, Hosts: expectedHosts, + HTTPOptions: manifest.ServiceExposeHTTPOptions{ + MaxBodySize: 1048576, + ReadTimeout: 60000, + SendTimeout: 60000, + NextTries: 3, + NextTimeout: 0, + NextCases: []string{"error", "timeout"}, + }}, + {Port: 12345, Global: true, Proto: manifest.UDP, + HTTPOptions: manifest.ServiceExposeHTTPOptions{ + MaxBodySize: 1048576, + ReadTimeout: 60000, + SendTimeout: 60000, + NextTries: 3, + NextTimeout: 0, + NextCases: []string{"error", "timeout"}, + }}, + }, + }, + }, + }, mani.GetGroups()[0]) +} + +func TestV2_1_Parse_Deployments(t *testing.T) { + sdl1, err := ReadFile("_testdata/legacy/deployment-v2.1.yaml") + require.NoError(t, err) + _, err = sdl1.DeploymentGroups() + require.NoError(t, err) + + _, err = sdl1.Manifest() + require.NoError(t, err) + + sha1, err := sdl1.Version() + require.NoError(t, err) + assert.Len(t, sha1, 32) + + sdl2, err := ReadFile("_testdata/legacy/deployment-v2.yaml") + require.NoError(t, err) + sha2, err := sdl2.Version() + + require.NoError(t, err) + assert.Len(t, sha2, 32) + require.NotEqual(t, sha1, sha2) +} + +func Test_V2_1_Cross_Validates(t *testing.T) { + sdl2, err := ReadFile("_testdata/legacy/deployment-v2.yaml") + require.NoError(t, err) + dgroups, err := sdl2.DeploymentGroups() + require.NoError(t, err) + m, err := sdl2.Manifest() + require.NoError(t, err) + + // This is a single document producing both the manifest & deployment groups + // These should always agree with each other. 
If this test fails at least one of the + // following is ture + // 1. Cross validation logic is wrong + // 2. The DeploymentGroups() & Manifest() code do not agree with one another + err = m.CheckAgainstGSpecs(dgroups) + require.NoError(t, err) + + // Repeat the same test with another file + sdl2, err = ReadFile("./_testdata/v2.1-simple.yaml") + require.NoError(t, err) + dgroups, err = sdl2.DeploymentGroups() + require.NoError(t, err) + m, err = sdl2.Manifest() + require.NoError(t, err) + + // This is a single document producing both the manifest & deployment groups + // These should always agree with each other + err = m.CheckAgainstGSpecs(dgroups) + require.NoError(t, err) + + // Repeat the same test with another file + sdl2, err = ReadFile("./_testdata/v2.1-simple3.yaml") + require.NoError(t, err) + dgroups, err = sdl2.DeploymentGroups() + require.NoError(t, err) + m, err = sdl2.Manifest() + require.NoError(t, err) + + // This is a single document producing both the manifest & deployment groups + // These should always agree with each other + err = m.CheckAgainstGSpecs(dgroups) + require.NoError(t, err) + + // Repeat the same test with another file + sdl2, err = ReadFile("./_testdata/v2.1-private_service.yaml") + require.NoError(t, err) + dgroups, err = sdl2.DeploymentGroups() + require.NoError(t, err) + m, err = sdl2.Manifest() + require.NoError(t, err) + + // This is a single document producing both the manifest & deployment groups + // These should always agree with each other + err = m.CheckAgainstGSpecs(dgroups) + require.NoError(t, err) + +} + +func Test_V2_1_Parse_simple(t *testing.T) { + sdl, err := ReadFile("./_testdata/v2.1-simple.yaml") + require.NoError(t, err) + + groups, err := sdl.DeploymentGroups() + require.NoError(t, err) + assert.Len(t, groups, 1) + + group := groups[0] + assert.Len(t, group.GetResourceUnits(), 1) + + assert.Equal(t, atypes.Attribute{ + Key: "region", + Value: "us-west", + }, group.Requirements.Attributes[0]) + + assert.Len(t, group.GetResourceUnits(), 1) + + assert.Equal(t, dtypes.ResourceUnit{ + Count: 2, + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(randCPU), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(0), + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(randMemory), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(randStorage), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Price: AkashDecCoin(t, 50), + }, group.GetResourceUnits()[0]) + + mani, err := sdl.Manifest() + require.NoError(t, err) + + assert.Len(t, mani.GetGroups(), 1) + + expectedHosts := make([]string, 1) + expectedHosts[0] = "ahostname.com" + assert.Equal(t, manifest.Group{ + Name: "westcoast", + Services: []manifest.Service{ + { + Name: "web", + Image: "nginx", + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(100), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(0), + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(128 * unit.Mi), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(1 * unit.Gi), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Count: 2, + Expose: []manifest.ServiceExpose{ + {Port: 80, Global: true, Proto: manifest.TCP, Hosts: expectedHosts, + 
HTTPOptions: manifest.ServiceExposeHTTPOptions{ + MaxBodySize: 1048576, + ReadTimeout: 60000, + SendTimeout: 60000, + NextTries: 3, + NextTimeout: 0, + NextCases: []string{"error", "timeout"}, + }}, + {Port: 12345, Global: true, Proto: manifest.UDP, + HTTPOptions: manifest.ServiceExposeHTTPOptions{ + MaxBodySize: 1048576, + ReadTimeout: 60000, + SendTimeout: 60000, + NextTries: 3, + NextTimeout: 0, + NextCases: []string{"error", "timeout"}, + }}, + }, + }, + }, + }, mani.GetGroups()[0]) + + assert.Nil(t, mani.GetGroups()[0].Services[0].Credentials) + +} + +func Test_V2_1_Parse_credentials(t *testing.T) { + sdl, err := ReadFile("./_testdata/v2.1-credentials.yaml") + require.NoError(t, err) + + mani, err := sdl.Manifest() + require.NoError(t, err) + + assert.Len(t, mani.GetGroups(), 1) + + grp := mani.GetGroups()[0] + assert.Len(t, grp.Services, 1) + + svc := grp.Services[0] + + assert.NotNil(t, svc) + + creds := svc.Credentials + assert.NotNil(t, creds) + + assert.Equal(t, "https://test.com/v1", creds.Host) + assert.Equal(t, "foo", creds.Username) + assert.Equal(t, "foo", creds.Password) +} + +func Test_V2_1_Parse_credentials_error(t *testing.T) { + _, err := ReadFile("./_testdata/v2.1-credentials-error.yaml") + require.Error(t, err) +} + +func Test_v2_1_Parse_ProfileNameNotServiceName(t *testing.T) { + sdl, err := ReadFile("./_testdata/v2.1-profile-svc-name-mismatch.yaml") + require.NoError(t, err) + + dgroups, err := sdl.DeploymentGroups() + require.NoError(t, err) + assert.Len(t, dgroups, 1) + + mani, err := sdl.Manifest() + require.NoError(t, err) + assert.Len(t, mani.GetGroups(), 1) +} + +func Test_v2_1_Parse_DeploymentNameServiceNameMismatch(t *testing.T) { + sdl, err := ReadFile("./_testdata/v2.1-deployment-svc-mismatch.yaml") + require.Error(t, err) + require.Nil(t, sdl) + require.Contains(t, err.Error(), "no service profile named") + + sdl, err = ReadFile("./_testdata/v2.1-simple2.yaml") + require.NoError(t, err) + require.NotNil(t, sdl) + + dgroups, err := sdl.DeploymentGroups() + require.NoError(t, err) + assert.Len(t, dgroups, 1) + + mani, err := sdl.Manifest() + require.NoError(t, err) + assert.Len(t, mani.GetGroups(), 1) + + require.Equal(t, dgroups[0].Name, mani.GetGroups()[0].Name) + // SDL lists 2 services, but particular deployment specifies only one + require.Len(t, mani.GetGroups()[0].Services, 1) + + // make sure deployment maps to the right service + require.Len(t, mani.GetGroups()[0].Services[0].Expose, 2) + require.Len(t, mani.GetGroups()[0].Services[0].Expose[0].Hosts, 1) + require.Equal(t, mani.GetGroups()[0].Services[0].Expose[0].Hosts[0], "ahostname.com") +} + +func TestV2_1_ParseServiceMix(t *testing.T) { + sdl, err := ReadFile("./_testdata/v2.1-service-mix.yaml") + require.NoError(t, err) + + groups, err := sdl.DeploymentGroups() + require.NoError(t, err) + assert.Len(t, groups, 1) + + group := groups[0] + assert.Len(t, group.GetResourceUnits(), 2) + assert.Len(t, group.Requirements.Attributes, 2) + + assert.Equal(t, atypes.Attribute{ + Key: "region", + Value: "us-west", + }, group.Requirements.Attributes[1]) + + assert.Equal(t, dtypes.ResourceUnits{ + { + Count: 1, + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(randCPU), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(randGPU), + Attributes: atypes.Attributes{ + { + Key: "vendor/nvidia/model/*", + Value: "true", + }, + }, + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(randMemory), + }, + Storage: rtypes.Volumes{ + { + Name: "default", 
+ Quantity: rtypes.NewResourceValue(randStorage), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Price: AkashDecCoin(t, 50), + }, + { + Count: 1, + Resources: rtypes.Resources{ + ID: 2, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(randCPU), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(0), + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(randMemory), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(randStorage), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Price: AkashDecCoin(t, 50), + }, + }, group.GetResourceUnits()) + + mani, err := sdl.Manifest() + require.NoError(t, err) + + assert.Len(t, mani.GetGroups(), 1) + + assert.Equal(t, manifest.Group{ + Name: "westcoast", + Services: []manifest.Service{ + { + Name: "svca", + Image: "nginx", + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(100), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(1), + Attributes: atypes.Attributes{ + { + Key: "vendor/nvidia/model/*", + Value: "true", + }, + }, + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(128 * unit.Mi), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(1 * unit.Gi), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Count: 1, + Expose: []manifest.ServiceExpose{ + { + Port: 80, Global: true, Proto: manifest.TCP, Hosts: []string{"ahostname.com"}, + HTTPOptions: defaultHTTPOptions, + }, + { + Port: 12345, Global: true, Proto: manifest.UDP, + HTTPOptions: defaultHTTPOptions, + }, + }, + }, + { + Name: "svcb", + Image: "nginx", + Resources: rtypes.Resources{ + ID: 2, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(100), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(0), + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(128 * unit.Mi), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(1 * unit.Gi), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Count: 1, + Expose: []manifest.ServiceExpose{ + { + Port: 80, Global: true, Proto: manifest.TCP, Hosts: []string{"bhostname.com"}, + HTTPOptions: defaultHTTPOptions, + }, + { + Port: 12346, Global: true, Proto: manifest.UDP, + HTTPOptions: defaultHTTPOptions, + }, + }, + }, + }, + }, mani.GetGroups()[0]) +} + +func TestV2_1_ParseServiceMix2(t *testing.T) { + sdl, err := ReadFile("./_testdata/v2.1-service-mix2.yaml") + require.NoError(t, err) + + groups, err := sdl.DeploymentGroups() + require.NoError(t, err) + assert.Len(t, groups, 1) + + group := groups[0] + assert.Len(t, group.GetResourceUnits(), 1) + assert.Len(t, group.Requirements.Attributes, 2) + + assert.Equal(t, atypes.Attribute{ + Key: "region", + Value: "us-west", + }, group.Requirements.Attributes[1]) + + assert.Equal(t, dtypes.ResourceUnits{ + { + Count: 2, + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(randCPU), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(randGPU), + Attributes: atypes.Attributes{ + { + Key: "vendor/nvidia/model/*", + Value: "true", + }, + }, + }, + Memory: &rtypes.Memory{ + 
Quantity: rtypes.NewResourceValue(randMemory), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(randStorage), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Price: AkashDecCoin(t, 50), + }, + }, group.GetResourceUnits()) + + mani, err := sdl.Manifest() + require.NoError(t, err) + + assert.Len(t, mani.GetGroups(), 1) + + assert.Equal(t, manifest.Group{ + Name: "westcoast", + Services: []manifest.Service{ + { + Name: "svca", + Image: "nginx", + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(100), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(1), + Attributes: atypes.Attributes{ + { + Key: "vendor/nvidia/model/*", + Value: "true", + }, + }, + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(128 * unit.Mi), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(1 * unit.Gi), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Count: 1, + Expose: []manifest.ServiceExpose{ + { + Port: 80, Global: true, Proto: manifest.TCP, Hosts: []string{"ahostname.com"}, + HTTPOptions: defaultHTTPOptions, + }, + { + Port: 12345, Global: true, Proto: manifest.UDP, + HTTPOptions: defaultHTTPOptions, + }, + }, + }, + { + Name: "svcb", + Image: "nginx", + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(100), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(1), + Attributes: atypes.Attributes{ + { + Key: "vendor/nvidia/model/*", + Value: "true", + }, + }, + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(128 * unit.Mi), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(1 * unit.Gi), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Count: 1, + Expose: []manifest.ServiceExpose{ + { + Port: 80, Global: true, Proto: manifest.TCP, Hosts: []string{"bhostname.com"}, + HTTPOptions: defaultHTTPOptions, + }, + { + Port: 12346, Global: true, Proto: manifest.UDP, + HTTPOptions: defaultHTTPOptions, + }, + }, + }, + }, + }, mani.GetGroups()[0]) +} diff --git a/go/sdl/v2_ip_test.go b/go/sdl/v2_ip_test.go new file mode 100644 index 00000000..c0de408f --- /dev/null +++ b/go/sdl/v2_ip_test.go @@ -0,0 +1,297 @@ +package sdl + +import ( + "bytes" + "fmt" + "testing" + + "github.com/stretchr/testify/require" + + manifest "pkg.akt.dev/go/manifest/v2beta3" + rtypes "pkg.akt.dev/go/node/types/resources/v1beta4" +) + +func findIPEndpoint(t *testing.T, endpoints rtypes.Endpoints, id int) rtypes.Endpoint { + t.Helper() + + idx := 0 + for _, endpoint := range endpoints { + if endpoint.Kind == rtypes.Endpoint_LEASED_IP { + idx++ + if id == idx { + return endpoint + } + } + } + + t.Fatal("did not find any IP endpoints") + return rtypes.Endpoint{} +} + +func TestV2ParseSimpleWithIP(t *testing.T) { + sdl, err := ReadFile("./_testdata/simple-with-ip.yaml") + require.NoError(t, err) + require.NotNil(t, sdl) + + groups, err := sdl.DeploymentGroups() + require.NoError(t, err) + require.Len(t, groups, 1) + group := groups[0] + resources := group.GetResourceUnits() + require.Len(t, resources, 1) + resource := resources[0] + + 
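The fixture behind this test pairs a `global: true` expose carrying an `ip:` reference with a matching `endpoints:` entry of kind `ip`; the validator rejects either half on its own. A minimal SDL in the same shape, fed through `Read`, is sketched below. The endpoint name, image, and import path are placeholders rather than the fixture's actual contents.

```go
package main

import (
	"fmt"
	"log"

	sdl "pkg.akt.dev/go/sdl"
)

// Placeholder SDL: an expose that names an IP endpoint must also be
// global: true, and the endpoint must be declared under `endpoints:`
// with kind `ip`. Names and image are illustrative only.
const sdlWithLeasedIP = `---
version: "2.0"
services:
  web:
    image: nginx
    expose:
      - port: 12345
        proto: udp
        to:
          - global: true
            ip: myip
profiles:
  compute:
    web:
      resources:
        cpu:
          units: "0.01"
        memory:
          size: "128Mi"
        storage:
          size: "512Mi"
  placement:
    global:
      pricing:
        web:
          denom: uakt
          amount: 10
deployment:
  web:
    global:
      profile: web
      count: 1
endpoints:
  myip:
    kind: ip
`

func main() {
	s, err := sdl.Read([]byte(sdlWithLeasedIP))
	if err != nil {
		log.Fatal(err)
	}
	groups, err := s.DeploymentGroups()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("groups:", len(groups)) // expected: 1, with a LEASED_IP endpoint
}
```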
ipEndpoint := findIPEndpoint(t, resource.Resources.Endpoints, 1) + + require.Equal(t, ipEndpoint.Kind, rtypes.Endpoint_LEASED_IP) + + mani, err := sdl.Manifest() + require.NoError(t, err) + var exposeIP manifest.ServiceExpose + for _, expose := range mani[0].Services[0].Expose { + if len(expose.IP) != 0 { + exposeIP = expose + break + } + } + require.NotEmpty(t, exposeIP.IP) + require.Equal(t, exposeIP.Proto, manifest.UDP) + require.Equal(t, exposeIP.Port, uint32(12345)) + require.True(t, exposeIP.Global) +} + +func TestV2Parse_IP(t *testing.T) { + sdl1, err := ReadFile("_testdata/legacy/deployment-v2-ip-endpoint.yaml") + require.NoError(t, err) + groups, err := sdl1.DeploymentGroups() + require.NoError(t, err) + + require.Len(t, groups, 1) + group := groups[0] + + resources := group.GetResourceUnits() + require.Len(t, resources, 1) + resource := resources[0] + endpoints := resource.Resources.Endpoints + require.Len(t, endpoints, 2) + + var ipEndpoint rtypes.Endpoint + for _, endpoint := range endpoints { + if endpoint.Kind == rtypes.Endpoint_LEASED_IP { + ipEndpoint = endpoint + } + } + + require.Equal(t, ipEndpoint.Kind, rtypes.Endpoint_LEASED_IP) + require.Greater(t, ipEndpoint.SequenceNumber, uint32(0)) + + mani, err := sdl1.Manifest() + require.NoError(t, err) + maniGroups := mani.GetGroups() + require.Len(t, maniGroups, 1) + maniGroup := maniGroups[0] + services := maniGroup.Services + require.Len(t, services, 1) + + service := services[0] + exposes := service.Expose + require.Len(t, exposes, 1) + + expose := exposes[0] + + require.True(t, expose.Global) + require.Equal(t, expose.IP, "meow") + require.Greater(t, expose.EndpointSequenceNumber, uint32(0)) +} + +func TestV2Parse_SharedIP(t *testing.T) { + // Read a file with 1 group having 1 endpoint shared amongst containers + sdl1, err := ReadFile("_testdata/legacy/deployment-v2-shared-ip-endpoint.yaml") + require.NoError(t, err) + + groups, err := sdl1.DeploymentGroups() + require.NoError(t, err) + require.Len(t, groups, 1) + + group := groups[0] + + resources := group.GetResourceUnits() + require.Len(t, resources, 2) + + // resource := resources[0] + ipEndpoint1 := findIPEndpoint(t, resources[0].Resources.Endpoints, 1) + require.Greater(t, ipEndpoint1.SequenceNumber, uint32(0)) + + ipEndpoint2 := findIPEndpoint(t, resources[1].Resources.Endpoints, 1) + require.Greater(t, ipEndpoint2.SequenceNumber, uint32(0)) + + mani, err := sdl1.Manifest() + require.NoError(t, err) + + maniGroups := mani.GetGroups() + require.Len(t, maniGroups, 1) + maniGroup := maniGroups[0] + + services := maniGroup.Services + require.Len(t, services, 2) + serviceA := services[0] + + serviceIPEndpoint := findIPEndpoint(t, serviceA.Resources.Endpoints, 1) + require.Equal(t, serviceIPEndpoint.SequenceNumber, ipEndpoint1.SequenceNumber) + + serviceB := services[1] + serviceIPEndpoint = findIPEndpoint(t, serviceB.Resources.Endpoints, 1) + require.Equal(t, serviceIPEndpoint.SequenceNumber, ipEndpoint2.SequenceNumber) +} + +func TestV2Parse_MultipleIP(t *testing.T) { + // Read a file with 1 group having two endpoints + sdl1, err := ReadFile("_testdata/legacy/deployment-v2-multi-ip-endpoint.yaml") + require.NoError(t, err) + + groups, err := sdl1.DeploymentGroups() + require.NoError(t, err) + require.Len(t, groups, 1) + + group := groups[0] + + resources := group.GetResourceUnits() + require.Len(t, resources, 2) + + mani, err := sdl1.Manifest() + require.NoError(t, err) + _ = mani +} + +func TestV2Parse_MultipleGroupsIP(t *testing.T) { + // Read a file with two groups, 
each one having an IP endpoint that is distinct + sdl1, err := ReadFile("_testdata/legacy/deployment-v2-multi-groups-ip-endpoint.yaml") + require.NoError(t, err) + + groups, err := sdl1.DeploymentGroups() + require.NoError(t, err) + require.Len(t, groups, 2) + + resources := groups[0].GetResourceUnits() + require.Len(t, resources, 1) + + resource := resources[0] + require.Len(t, resource.Resources.Endpoints, 2) + ipEndpointFirstGroup := findIPEndpoint(t, resource.Resources.Endpoints, 1) + require.Greater(t, ipEndpointFirstGroup.SequenceNumber, uint32(0)) + + resources = groups[1].GetResourceUnits() + require.Len(t, resources, 1) + + resource = resources[0] + require.Len(t, resource.Resources.Endpoints, 2) + ipEndpointSecondGroup := findIPEndpoint(t, resource.Resources.Endpoints, 1) + require.Greater(t, ipEndpointSecondGroup.SequenceNumber, uint32(0)) + require.NotEqual(t, ipEndpointFirstGroup.SequenceNumber, ipEndpointSecondGroup.SequenceNumber) + + mani, err := sdl1.Manifest() + require.NoError(t, err) + maniGroups := mani.GetGroups() + require.Len(t, maniGroups, 2) + + maniGroup := maniGroups[0] + mresources := maniGroup.GetResourceUnits() + require.Len(t, mresources, 1) + mresource := mresources[0] + require.Equal(t, findIPEndpoint(t, mresource.Endpoints, 1).SequenceNumber, ipEndpointFirstGroup.SequenceNumber) + + maniGroup = maniGroups[1] + mresources = maniGroup.GetResourceUnits() + require.Len(t, mresources, 1) + mresource = mresources[0] + require.Equal(t, findIPEndpoint(t, mresource.Endpoints, 1).SequenceNumber, ipEndpointSecondGroup.SequenceNumber) + +} + +func TestV2Parse_IPEndpointNaming(t *testing.T) { + makeSDLWithEndpointName := func(name string) []byte { + const originalSDL = `--- +version: "2.0" + +services: + web: + image: ghcr.io/akash-network/demo-app + expose: + - port: 80 + to: + - global: true + ip: %q + accept: + - test.localhost + +profiles: + compute: + web: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + size: "512Mi" + + placement: + global: + pricing: + web: + denom: uakt + amount: 10 + +deployment: + web: + global: + profile: web + count: 1 + +endpoints: + %q: + kind: ip +` + buf := &bytes.Buffer{} + _, err := fmt.Fprintf(buf, originalSDL, name, name) + require.NoError(t, err) + return buf.Bytes() + } + + _, err := Read(makeSDLWithEndpointName("meow72-memes")) + require.NoError(t, err) + + _, err = Read(makeSDLWithEndpointName("meow72-mem_es")) + require.NoError(t, err) + + _, err = Read(makeSDLWithEndpointName("!important")) + require.Error(t, err) + require.ErrorIs(t, err, errSDLInvalid) + require.Contains(t, err.Error(), "not a valid name") + + _, err = Read(makeSDLWithEndpointName("foo^bar")) + require.Error(t, err) + require.ErrorIs(t, err, errSDLInvalid) + require.Contains(t, err.Error(), "not a valid name") + + _, err = Read(makeSDLWithEndpointName("ROAR")) + require.Error(t, err) + require.ErrorIs(t, err, errSDLInvalid) + require.Contains(t, err.Error(), "not a valid name") + + _, err = Read(makeSDLWithEndpointName("996")) + require.Error(t, err) + require.ErrorIs(t, err, errSDLInvalid) + require.Contains(t, err.Error(), "not a valid name") + + _, err = Read(makeSDLWithEndpointName("_kittens")) + require.Error(t, err) + require.ErrorIs(t, err, errSDLInvalid) + require.Contains(t, err.Error(), "not a valid name") + + _, err = Read(makeSDLWithEndpointName("-kittens")) + require.Error(t, err) + require.ErrorIs(t, err, errSDLInvalid) + require.Contains(t, err.Error(), "not a valid name") + +} diff --git a/go/sdl/v2_test.go 
b/go/sdl/v2_test.go new file mode 100644 index 00000000..bf9d4c42 --- /dev/null +++ b/go/sdl/v2_test.go @@ -0,0 +1,942 @@ +package sdl + +import ( + "testing" + + "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/yaml.v3" + + manifest "pkg.akt.dev/go/manifest/v2beta3" + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + atypes "pkg.akt.dev/go/node/types/attributes/v1" + rtypes "pkg.akt.dev/go/node/types/resources/v1beta4" + "pkg.akt.dev/go/node/types/unit" +) + +func TestV2Expose(t *testing.T) { + var stream = ` +- port: 80 + as: 80 + accept: + - hello.localhost + to: + - global: true +` + + var p []v2Expose + + err := yaml.Unmarshal([]byte(stream), &p) + require.NoError(t, err) +} + +func AkashDecCoin(t testing.TB, amount int64) sdk.DecCoin { + t.Helper() + amt := math.NewInt(amount) + + return sdk.NewDecCoin("uakt", amt) +} + +const ( + randCPU uint64 = 100 + randGPU uint64 = 1 + randMemory uint64 = 128 * unit.Mi + randStorage uint64 = 1 * unit.Gi +) + +var ( + defaultHTTPOptions = manifest.ServiceExposeHTTPOptions{ + MaxBodySize: defaultMaxBodySize, + ReadTimeout: defaultReadTimeout, + SendTimeout: defaultSendTimeout, + NextTries: defaultNextTries, + NextCases: []string{"error", "timeout"}, + } +) + +func TestV2ParseSimpleGPU(t *testing.T) { + sdl, err := ReadFile("./_testdata/simple-gpu.yaml") + require.NoError(t, err) + + groups, err := sdl.DeploymentGroups() + require.NoError(t, err) + assert.Len(t, groups, 1) + + group := groups[0] + assert.Len(t, group.GetResourceUnits(), 1) + assert.Len(t, group.Requirements.Attributes, 2) + + assert.Equal(t, atypes.Attribute{ + Key: "region", + Value: "us-west", + }, group.Requirements.Attributes[1]) + + assert.Len(t, group.GetResourceUnits(), 1) + + assert.Equal(t, dtypes.ResourceUnit{ + Count: 2, + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(randCPU), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(randGPU), + Attributes: atypes.Attributes{ + { + Key: "vendor/nvidia/model/a100", + Value: "true", + }, + }, + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(randMemory), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(randStorage), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Price: AkashDecCoin(t, 50), + }, group.GetResourceUnits()[0]) + + mani, err := sdl.Manifest() + require.NoError(t, err) + + assert.Len(t, mani.GetGroups(), 1) + + expectedHosts := make([]string, 1) + expectedHosts[0] = "ahostname.com" + assert.Equal(t, manifest.Group{ + Name: "westcoast", + Services: manifest.Services{ + { + Name: "web", + Image: "nginx", + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(100), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(1), + Attributes: atypes.Attributes{ + { + Key: "vendor/nvidia/model/a100", + Value: "true", + }, + }, + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(128 * unit.Mi), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(1 * unit.Gi), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Count: 2, + Expose: []manifest.ServiceExpose{ + {Port: 80, Global: true, Proto: manifest.TCP, Hosts: expectedHosts, + 
HTTPOptions: manifest.ServiceExposeHTTPOptions{ + MaxBodySize: 1048576, + ReadTimeout: 60000, + SendTimeout: 60000, + NextTries: 3, + NextTimeout: 0, + NextCases: []string{"error", "timeout"}, + }}, + {Port: 12345, Global: true, Proto: manifest.UDP, + HTTPOptions: manifest.ServiceExposeHTTPOptions{ + MaxBodySize: 1048576, + ReadTimeout: 60000, + SendTimeout: 60000, + NextTries: 3, + NextTimeout: 0, + NextCases: []string{"error", "timeout"}, + }}, + }, + }, + }, + }, mani.GetGroups()[0]) +} + +func TestV2Parse_Deployments(t *testing.T) { + sdl1, err := ReadFile("_testdata/legacy/deployment.yaml") + require.NoError(t, err) + _, err = sdl1.DeploymentGroups() + require.NoError(t, err) + + _, err = sdl1.Manifest() + require.NoError(t, err) + + sha1, err := sdl1.Version() + require.NoError(t, err) + assert.Len(t, sha1, 32) + + sdl2, err := ReadFile("_testdata/legacy/deployment-v2.yaml") + require.NoError(t, err) + sha2, err := sdl2.Version() + + require.NoError(t, err) + assert.Len(t, sha2, 32) + require.NotEqual(t, sha1, sha2) +} + +func Test_V2_Cross_Validates(t *testing.T) { + sdl2, err := ReadFile("_testdata/legacy/deployment-v2.yaml") + require.NoError(t, err) + dgroups, err := sdl2.DeploymentGroups() + require.NoError(t, err) + m, err := sdl2.Manifest() + require.NoError(t, err) + + // This is a single document producing both the manifest & deployment groups + // These should always agree with each other. If this test fails at least one of the + // following is ture + // 1. Cross validation logic is wrong + // 2. The DeploymentGroups() & Manifest() code do not agree with one another + err = m.CheckAgainstGSpecs(dgroups) + require.NoError(t, err) + + // Repeat the same test with another file + sdl2, err = ReadFile("./_testdata/simple.yaml") + require.NoError(t, err) + dgroups, err = sdl2.DeploymentGroups() + require.NoError(t, err) + m, err = sdl2.Manifest() + require.NoError(t, err) + + // This is a single document producing both the manifest & deployment groups + // These should always agree with each other + err = m.CheckAgainstGSpecs(dgroups) + require.NoError(t, err) + + // Repeat the same test with another file + sdl2, err = ReadFile("./_testdata/simple3.yaml") + require.NoError(t, err) + dgroups, err = sdl2.DeploymentGroups() + require.NoError(t, err) + m, err = sdl2.Manifest() + require.NoError(t, err) + + // This is a single document producing both the manifest & deployment groups + // These should always agree with each other + err = m.CheckAgainstGSpecs(dgroups) + require.NoError(t, err) + + // Repeat the same test with another file + sdl2, err = ReadFile("./_testdata/private_service.yaml") + require.NoError(t, err) + dgroups, err = sdl2.DeploymentGroups() + require.NoError(t, err) + m, err = sdl2.Manifest() + require.NoError(t, err) + + // This is a single document producing both the manifest & deployment groups + // These should always agree with each other + err = m.CheckAgainstGSpecs(dgroups) + require.NoError(t, err) + +} + +func Test_V2_Parse_simple(t *testing.T) { + sdl, err := ReadFile("./_testdata/simple.yaml") + require.NoError(t, err) + + groups, err := sdl.DeploymentGroups() + require.NoError(t, err) + assert.Len(t, groups, 1) + + group := groups[0] + assert.Len(t, group.GetResourceUnits(), 1) + + assert.Equal(t, atypes.Attribute{ + Key: "region", + Value: "us-west", + }, group.Requirements.Attributes[0]) + + assert.Len(t, group.GetResourceUnits(), 1) + + assert.Equal(t, dtypes.ResourceUnit{ + Count: 2, + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + 
Units: rtypes.NewResourceValue(randCPU), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(0), + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(randMemory), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(randStorage), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Price: AkashDecCoin(t, 50), + }, group.GetResourceUnits()[0]) + + mani, err := sdl.Manifest() + require.NoError(t, err) + + assert.Len(t, mani.GetGroups(), 1) + + expectedHosts := make([]string, 1) + expectedHosts[0] = "ahostname.com" + assert.Equal(t, manifest.Group{ + Name: "westcoast", + Services: manifest.Services{ + { + Name: "web", + Image: "nginx", + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(100), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(0), + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(128 * unit.Mi), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(1 * unit.Gi), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Count: 2, + Expose: []manifest.ServiceExpose{ + {Port: 80, Global: true, Proto: manifest.TCP, Hosts: expectedHosts, + HTTPOptions: manifest.ServiceExposeHTTPOptions{ + MaxBodySize: 1048576, + ReadTimeout: 60000, + SendTimeout: 60000, + NextTries: 3, + NextTimeout: 0, + NextCases: []string{"error", "timeout"}, + }}, + {Port: 12345, Global: true, Proto: manifest.UDP, + HTTPOptions: manifest.ServiceExposeHTTPOptions{ + MaxBodySize: 1048576, + ReadTimeout: 60000, + SendTimeout: 60000, + NextTries: 3, + NextTimeout: 0, + NextCases: []string{"error", "timeout"}, + }}, + }, + }, + }, + }, mani.GetGroups()[0]) +} + +func Test_v1_Parse_ProfileNameNotServiceName(t *testing.T) { + sdl, err := ReadFile("./_testdata/profile-svc-name-mismatch.yaml") + require.NoError(t, err) + + dgroups, err := sdl.DeploymentGroups() + require.NoError(t, err) + assert.Len(t, dgroups, 1) + + mani, err := sdl.Manifest() + require.NoError(t, err) + assert.Len(t, mani.GetGroups(), 1) +} + +func Test_v2_Parse_DeploymentNameServiceNameMismatch(t *testing.T) { + sdl, err := ReadFile("./_testdata/deployment-svc-mismatch.yaml") + require.Error(t, err) + require.Nil(t, sdl) + require.Contains(t, err.Error(), "no service profile named") + + sdl, err = ReadFile("./_testdata/simple2.yaml") + require.NoError(t, err) + require.NotNil(t, sdl) + + dgroups, err := sdl.DeploymentGroups() + require.NoError(t, err) + assert.Len(t, dgroups, 1) + + mani, err := sdl.Manifest() + require.NoError(t, err) + assert.Len(t, mani.GetGroups(), 1) + + require.Equal(t, dgroups[0].Name, mani.GetGroups()[0].Name) + // SDL lists 2 services, but particular deployment specifies only one + require.Len(t, mani.GetGroups()[0].Services, 1) + + // make sure deployment maps to the right service + require.Len(t, mani.GetGroups()[0].Services[0].Expose, 2) + require.Len(t, mani.GetGroups()[0].Services[0].Expose[0].Hosts, 1) + require.Equal(t, mani.GetGroups()[0].Services[0].Expose[0].Hosts[0], "ahostname.com") +} + +func TestV2ParseServiceMix(t *testing.T) { + sdl, err := ReadFile("./_testdata/service-mix.yaml") + require.NoError(t, err) + + groups, err := sdl.DeploymentGroups() + require.NoError(t, err) + assert.Len(t, groups, 1) + + group := groups[0] + assert.Len(t, group.GetResourceUnits(), 2) + 
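In the assertions that follow, a GPU request from the compute profile surfaces as a single attribute whose key encodes vendor and model: `vendor/nvidia/model/a100` for an exact model, `vendor/nvidia/model/*` for any model, always with the value `"true"`. A sketch of that key shape is below; the helper and the empty-model-means-wildcard rule are assumptions for illustration, not the package's parser.

```go
package main

import "fmt"

// gpuAttributeKey illustrates the key format asserted in these tests:
// "vendor/<vendor>/model/<model>", where "*" stands for "any model".
// Treating an empty model as the wildcard is an assumption of this sketch.
func gpuAttributeKey(vendor, model string) string {
	if model == "" {
		model = "*"
	}
	return fmt.Sprintf("vendor/%s/model/%s", vendor, model)
}

func main() {
	fmt.Println(gpuAttributeKey("nvidia", "a100")) // vendor/nvidia/model/a100
	fmt.Println(gpuAttributeKey("nvidia", ""))     // vendor/nvidia/model/*
}
```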
assert.Len(t, group.Requirements.Attributes, 2) + + assert.Equal(t, atypes.Attribute{ + Key: "region", + Value: "us-west", + }, group.Requirements.Attributes[1]) + + assert.Equal(t, dtypes.ResourceUnits{ + { + Count: 1, + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(randCPU), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(randGPU), + Attributes: atypes.Attributes{ + { + Key: "vendor/nvidia/model/*", + Value: "true", + }, + }, + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(randMemory), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(randStorage), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Price: AkashDecCoin(t, 50), + }, + { + Count: 1, + Resources: rtypes.Resources{ + ID: 2, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(randCPU), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(0), + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(randMemory), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(randStorage), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Price: AkashDecCoin(t, 50), + }, + }, group.GetResourceUnits()) + + mani, err := sdl.Manifest() + require.NoError(t, err) + + assert.Len(t, mani.GetGroups(), 1) + + assert.Equal(t, manifest.Group{ + Name: "westcoast", + Services: []manifest.Service{ + { + Name: "svca", + Image: "nginx", + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(100), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(1), + Attributes: atypes.Attributes{ + { + Key: "vendor/nvidia/model/*", + Value: "true", + }, + }, + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(128 * unit.Mi), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(1 * unit.Gi), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Count: 1, + Expose: []manifest.ServiceExpose{ + { + Port: 80, Global: true, Proto: manifest.TCP, Hosts: []string{"ahostname.com"}, + HTTPOptions: defaultHTTPOptions, + }, + { + Port: 12345, Global: true, Proto: manifest.UDP, + HTTPOptions: defaultHTTPOptions, + }, + }, + }, + { + Name: "svcb", + Image: "nginx", + Resources: rtypes.Resources{ + ID: 2, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(100), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(0), + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(128 * unit.Mi), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(1 * unit.Gi), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Count: 1, + Expose: []manifest.ServiceExpose{ + { + Port: 80, Global: true, Proto: manifest.TCP, Hosts: []string{"bhostname.com"}, + HTTPOptions: defaultHTTPOptions, + }, + { + Port: 12346, Global: true, Proto: manifest.UDP, + HTTPOptions: defaultHTTPOptions, + }, + }, + }, + }, + }, mani.GetGroups()[0]) +} + +func TestV2ParseServiceMix2(t *testing.T) { + sdl, err := ReadFile("./_testdata/service-mix2.yaml") + require.NoError(t, err) + + groups, err := sdl.DeploymentGroups() + require.NoError(t, err) + 
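The `defaultHTTPOptions` value used in the Expose assertions below matches the literals spelled out in the earlier tests (1048576, 60000, 60000, 3, with NextCases of "error" and "timeout"). The constants below are inferred from those assertions only, for reference; the authoritative values are the package's default* constants, which are not part of this hunk.

```go
package main

import "fmt"

// Defaults as inferred from the test assertions in this file, not from the package source.
const (
	inferredDefaultMaxBodySize = 1 << 20 // 1048576 bytes
	inferredDefaultReadTimeout = 60000   // presumably milliseconds
	inferredDefaultSendTimeout = 60000   // presumably milliseconds
	inferredDefaultNextTries   = 3
)

func main() {
	fmt.Println(inferredDefaultMaxBodySize, inferredDefaultReadTimeout,
		inferredDefaultSendTimeout, inferredDefaultNextTries)
}
```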
assert.Len(t, groups, 1) + + group := groups[0] + assert.Len(t, group.GetResourceUnits(), 2) + assert.Len(t, group.Requirements.Attributes, 2) + + assert.Equal(t, atypes.Attribute{ + Key: "region", + Value: "us-west", + }, group.Requirements.Attributes[1]) + + assert.Equal(t, dtypes.ResourceUnits{ + { + Count: 1, + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(randCPU), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(randGPU), + Attributes: atypes.Attributes{ + { + Key: "vendor/nvidia/model/*", + Value: "true", + }, + }, + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(randMemory), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(randStorage), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Price: AkashDecCoin(t, 50), + }, + { + Count: 1, + Resources: rtypes.Resources{ + ID: 2, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(randCPU), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(randGPU), + Attributes: atypes.Attributes{ + { + Key: "vendor/nvidia/model/*", + Value: "true", + }, + }, + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(randMemory), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(randStorage), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Price: AkashDecCoin(t, 50), + }, + }, group.GetResourceUnits()) + + mani, err := sdl.Manifest() + require.NoError(t, err) + + assert.Len(t, mani.GetGroups(), 1) + + assert.Equal(t, manifest.Group{ + Name: "westcoast", + Services: []manifest.Service{ + { + Name: "svca", + Image: "nginx", + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(100), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(1), + Attributes: atypes.Attributes{ + { + Key: "vendor/nvidia/model/*", + Value: "true", + }, + }, + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(128 * unit.Mi), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(1 * unit.Gi), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Count: 1, + Expose: []manifest.ServiceExpose{ + { + Port: 80, Global: true, Proto: manifest.TCP, Hosts: []string{"ahostname.com"}, + HTTPOptions: defaultHTTPOptions, + }, + { + Port: 12345, Global: true, Proto: manifest.UDP, + HTTPOptions: defaultHTTPOptions, + }, + }, + }, + { + Name: "svcb", + Image: "nginx", + Resources: rtypes.Resources{ + ID: 2, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(100), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(1), + Attributes: atypes.Attributes{ + { + Key: "vendor/nvidia/model/*", + Value: "true", + }, + }, + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(128 * unit.Mi), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(1 * unit.Gi), + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Count: 1, + Expose: []manifest.ServiceExpose{ + { + Port: 80, Global: true, Proto: manifest.TCP, Hosts: []string{"bhostname.com"}, + HTTPOptions: defaultHTTPOptions, + }, + { + Port: 12346, Global: true, Proto: 
manifest.UDP, + HTTPOptions: defaultHTTPOptions, + }, + }, + }, + }, + }, mani.GetGroups()[0]) +} + +func TestV2ParseStorageName(t *testing.T) { + sdl, err := ReadFile("./_testdata/storageClass6.yaml") + require.NoError(t, err) + + groups, err := sdl.DeploymentGroups() + require.NoError(t, err) + assert.Len(t, groups, 1) + + group := groups[0] + assert.Len(t, group.GetResourceUnits(), 1) + assert.Len(t, group.Requirements.Attributes, 1) + + assert.Equal(t, atypes.Attribute{ + Key: "region", + Value: "us-west", + }, group.Requirements.Attributes[0]) + + assert.Equal(t, dtypes.ResourceUnits{ + { + Count: 1, + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(randCPU), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(0), + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(randMemory), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(randStorage), + }, + { + Name: "configs", + Quantity: rtypes.NewResourceValue(randStorage), + Attributes: atypes.Attributes{ + { + Key: "class", + Value: "default", + }, + { + Key: "persistent", + Value: "true", + }, + }, + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Price: AkashDecCoin(t, 50), + }, + }, group.GetResourceUnits()) + + mani, err := sdl.Manifest() + require.NoError(t, err) + + assert.Len(t, mani.GetGroups(), 1) + + assert.Equal(t, manifest.Group{ + Name: "westcoast", + Services: []manifest.Service{ + { + Name: "web", + Image: "nginx", + Resources: rtypes.Resources{ + ID: 1, + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(100), + }, + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(0), + }, + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(128 * unit.Mi), + }, + Storage: rtypes.Volumes{ + { + Name: "default", + Quantity: rtypes.NewResourceValue(1 * unit.Gi), + }, + { + Name: "configs", + Quantity: rtypes.NewResourceValue(1 * unit.Gi), + Attributes: atypes.Attributes{ + { + Key: "class", + Value: "default", + }, + { + Key: "persistent", + Value: "true", + }, + }, + }, + }, + Endpoints: rtypes.Endpoints{ + { + Kind: rtypes.Endpoint_SHARED_HTTP, + }, + { + Kind: rtypes.Endpoint_RANDOM_PORT, + }, + }, + }, + Params: &manifest.ServiceParams{ + Storage: []manifest.StorageParams{ + { + Name: "configs", + Mount: "/test", + ReadOnly: false, + }, + }, + }, + Count: 1, + Expose: []manifest.ServiceExpose{ + { + Port: 80, Global: true, Proto: manifest.TCP, Hosts: []string{"ahostname.com"}, + HTTPOptions: defaultHTTPOptions, + }, + { + Port: 12345, Global: true, Proto: manifest.UDP, + HTTPOptions: defaultHTTPOptions, + }, + }, + }, + }, + }, mani.GetGroups()[0]) +} diff --git a/go/testutil/base.go b/go/testutil/base.go index d1d5c3be..3fd6dc02 100644 --- a/go/testutil/base.go +++ b/go/testutil/base.go @@ -4,14 +4,15 @@ import ( "fmt" "testing" + "github.com/cometbft/cometbft/libs/rand" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/tendermint/tendermint/libs/rand" - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - types "github.com/akash-network/akash-api/go/node/types/v1beta3" + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + attr "pkg.akt.dev/go/node/types/attributes/v1" + types "pkg.akt.dev/go/node/types/resources/v1beta4" // ensure sdkutil.init() to seal SDK config for the tests - _ "github.com/akash-network/akash-api/go/sdkutil" + _ "pkg.akt.dev/go/sdkutil" ) // CoinDenom provides ability to create coins 
in test functions and @@ -36,17 +37,17 @@ func ProviderHostname(t testing.TB) string { } // Attribute generates a random sdk.Attribute -func Attribute(t testing.TB) types.Attribute { +func Attribute(t testing.TB) attr.Attribute { t.Helper() - return types.NewStringAttribute(Name(t, "attr-key"), Name(t, "attr-value")) + return attr.NewStringAttribute(Name(t, "attr-key"), Name(t, "attr-value")) } // Attributes generates a set of sdk.Attribute -func Attributes(t testing.TB) []types.Attribute { +func Attributes(t testing.TB) []attr.Attribute { t.Helper() count := rand.Intn(10) + 1 - vals := make([]types.Attribute, 0, count) + vals := make(attr.Attributes, 0, count) for i := 0; i < count; i++ { vals = append(vals, Attribute(t)) } @@ -54,8 +55,8 @@ func Attributes(t testing.TB) []types.Attribute { } // PlacementRequirements generates placement requirements -func PlacementRequirements(t testing.TB) types.PlacementRequirements { - return types.PlacementRequirements{ +func PlacementRequirements(t testing.TB) attr.PlacementRequirements { + return attr.PlacementRequirements{ Attributes: Attributes(t), } } @@ -95,7 +96,7 @@ func Resources(t testing.TB) []dtypes.ResourceUnit { coin := sdk.NewDecCoin(CoinDenom, sdk.NewInt(rand.Int63n(9999)+1)) res := dtypes.ResourceUnit{ Resources: types.Resources{ - ID: uint32(i) + 1, + ID: uint32(i) + 1, // nolint: gosec CPU: &types.CPU{ Units: types.NewResourceValue(uint64(dtypes.GetValidationConfig().Unit.Min.CPU)), }, diff --git a/go/testutil/cert.go b/go/testutil/cert.go index b767fb02..20f0d9b5 100644 --- a/go/testutil/cert.go +++ b/go/testutil/cert.go @@ -17,9 +17,9 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - types "github.com/akash-network/akash-api/go/node/cert/v1beta3" - certutils "github.com/akash-network/akash-api/go/node/cert/v1beta3/utils" - clientmocks "github.com/akash-network/akash-api/go/node/client/v1beta2/mocks" + types "pkg.akt.dev/go/node/cert/v1" + certutils "pkg.akt.dev/go/node/cert/v1/utils" + clientmocks "pkg.akt.dev/go/node/client/v1beta3/mocks" ) type TestCertificate struct { @@ -183,7 +183,7 @@ func Certificate(t testing.TB, addr sdk.Address, opts ...CertificateOption) Test Filter: types.CertificateFilter{ Owner: addr.String(), Serial: res.Serial.String(), - State: "valid", + State: types.CertificateValid.String(), }, }). 
Return(&types.QueryCertificatesResponse{ @@ -202,7 +202,7 @@ func Certificate(t testing.TB, addr sdk.Address, opts ...CertificateOption) Test return res } -func CertificateRequireEqualResponse(t *testing.T, cert TestCertificate, resp types.CertificateResponse, state types.Certificate_State) { +func CertificateRequireEqualResponse(t *testing.T, cert TestCertificate, resp types.CertificateResponse, state types.State) { t.Helper() require.Equal(t, state, resp.Certificate.State) diff --git a/go/testutil/deployment.go b/go/testutil/deployment.go index 9e12036a..ab502a91 100644 --- a/go/testutil/deployment.go +++ b/go/testutil/deployment.go @@ -5,33 +5,34 @@ import ( "math/rand" "testing" - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" + dtypes "pkg.akt.dev/go/node/deployment/v1" + dtypesv1beta4 "pkg.akt.dev/go/node/deployment/v1beta4" ) // sum256Seed provides a consistent sha256 value for initial Deployment.Version const sum256Seed = "hihi" -// DefaultDeploymentVersion provides consistent sha256 sum for initial Deployment.Version -var DefaultDeploymentVersion = sha256.Sum256([]byte(sum256Seed)) +// DefaultDeploymentHash provides consistent sha256 sum for initial Deployment.Version +var DefaultDeploymentHash = sha256.Sum256([]byte(sum256Seed)) // Deployment generates a dtype.Deployment in state `DeploymentActive` func Deployment(t testing.TB) dtypes.Deployment { t.Helper() return dtypes.Deployment{ - DeploymentID: DeploymentID(t), - State: dtypes.DeploymentActive, - Version: DefaultDeploymentVersion[:], + ID: DeploymentID(t), + State: dtypes.DeploymentActive, + Hash: DefaultDeploymentHash[:], } } // DeploymentGroup generates a dtype.DepDeploymentGroup in state `GroupOpen` // with a set of random required attributes -func DeploymentGroup(t testing.TB, did dtypes.DeploymentID, gseq uint32) dtypes.Group { +func DeploymentGroup(t testing.TB, did dtypes.DeploymentID, gseq uint32) dtypesv1beta4.Group { t.Helper() - return dtypes.Group{ - GroupID: dtypes.MakeGroupID(did, gseq), - State: dtypes.GroupOpen, - GroupSpec: dtypes.GroupSpec{ + return dtypesv1beta4.Group{ + ID: dtypes.MakeGroupID(did, gseq), + State: dtypesv1beta4.GroupOpen, + GroupSpec: dtypesv1beta4.GroupSpec{ Name: Name(t, "dgroup"), Requirements: PlacementRequirements(t), Resources: Resources(t), @@ -40,9 +41,9 @@ func DeploymentGroup(t testing.TB, did dtypes.DeploymentID, gseq uint32) dtypes. 
} // GroupSpec generator -func GroupSpec(t testing.TB) dtypes.GroupSpec { +func GroupSpec(t testing.TB) dtypesv1beta4.GroupSpec { t.Helper() - return dtypes.GroupSpec{ + return dtypesv1beta4.GroupSpec{ Name: Name(t, "dgroup"), Requirements: PlacementRequirements(t), Resources: Resources(t), @@ -50,12 +51,12 @@ func GroupSpec(t testing.TB) dtypes.GroupSpec { } // DeploymentGroups returns a set of deployment groups generated by DeploymentGroup -func DeploymentGroups(t testing.TB, did dtypes.DeploymentID, gseq uint32) []dtypes.Group { +func DeploymentGroups(t testing.TB, did dtypes.DeploymentID, gseq uint32) dtypesv1beta4.Groups { t.Helper() count := rand.Intn(5) + 5 // nolint:gosec - vals := make([]dtypes.Group, 0, count) + vals := make(dtypesv1beta4.Groups, 0, count) for i := 0; i < count; i++ { - vals = append(vals, DeploymentGroup(t, did, gseq+uint32(i))) + vals = append(vals, DeploymentGroup(t, did, gseq+uint32(i))) // nolint: gosec } return vals } diff --git a/go/testutil/ids.go b/go/testutil/ids.go index 6ed7fa10..0f4d9bba 100644 --- a/go/testutil/ids.go +++ b/go/testutil/ids.go @@ -6,17 +6,18 @@ import ( "math/rand" "testing" + "github.com/cometbft/cometbft/crypto/ed25519" + "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/crypto/keyring" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/tendermint/tendermint/crypto/ed25519" - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - mtypes "github.com/akash-network/akash-api/go/node/market/v1beta4" + dtypes "pkg.akt.dev/go/node/deployment/v1" + mtypes "pkg.akt.dev/go/node/market/v1" ) -func Keyring(t testing.TB) keyring.Keyring { +func Keyring(t testing.TB, cdc codec.Codec) keyring.Keyring { t.Helper() - obj := keyring.NewInMemory() + obj := keyring.NewInMemory(cdc) return obj } diff --git a/go/testutil/log.go b/go/testutil/log.go index 3935d5e0..294aeaa8 100644 --- a/go/testutil/log.go +++ b/go/testutil/log.go @@ -4,7 +4,7 @@ import ( "sync" "testing" - "github.com/tendermint/tendermint/libs/log" + "github.com/cometbft/cometbft/libs/log" ) func Logger(t testing.TB) log.Logger { diff --git a/go/testutil/v1beta1/base.go b/go/testutil/v1beta1/base.go deleted file mode 100644 index 79fe5ff2..00000000 --- a/go/testutil/v1beta1/base.go +++ /dev/null @@ -1,88 +0,0 @@ -package testutil - -import ( - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/tendermint/tendermint/libs/rand" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta1" - types "github.com/akash-network/akash-api/go/node/types/v1beta1" - "github.com/akash-network/akash-api/go/testutil" -) - -func ProviderHostname(t testing.TB) string { - return "https://" + testutil.Hostname(t) -} - -// Attribute generates a random sdk.Attribute -func Attribute(t testing.TB) types.Attribute { - t.Helper() - return types.NewStringAttribute(testutil.Name(t, "attr-key"), testutil.Name(t, "attr-value")) -} - -// Attributes generates a set of sdk.Attribute -func Attributes(t testing.TB) []types.Attribute { - t.Helper() - count := rand.Intn(10) + 1 - - vals := make([]types.Attribute, 0, count) - for i := 0; i < count; i++ { - vals = append(vals, Attribute(t)) - } - return vals -} - -// PlacementRequirements generates placement requirements -func PlacementRequirements(t testing.TB) types.PlacementRequirements { - return types.PlacementRequirements{ - Attributes: Attributes(t), - } -} - -func RandCPUUnits() uint { - return testutil.RandRangeUint( - dtypes.GetValidationConfig().MinUnitCPU, - 
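For illustration, a minimal sketch of driving the updated testutil.Keyring helper, which now takes a codec; the proto-codec construction below is an assumption based on the stock cosmos-sdk API rather than anything in this change:

package example_test

import (
	"testing"

	"github.com/cosmos/cosmos-sdk/codec"
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"

	"pkg.akt.dev/go/testutil"
)

func TestKeyringSketch(t *testing.T) {
	// Assumed setup: a proto codec over an empty interface registry.
	cdc := codec.NewProtoCodec(codectypes.NewInterfaceRegistry())

	// The helper now forwards the codec to keyring.NewInMemory.
	kr := testutil.Keyring(t, cdc)
	_ = kr
}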
dtypes.GetValidationConfig().MaxUnitCPU) -} - -func RandMemoryQuantity() uint64 { - return testutil.RandRangeUint64( - dtypes.GetValidationConfig().MinUnitMemory, - dtypes.GetValidationConfig().MaxUnitMemory) -} - -func RandStorageQuantity() uint64 { - return testutil.RandRangeUint64( - dtypes.GetValidationConfig().MinUnitStorage, - dtypes.GetValidationConfig().MaxUnitStorage) -} - -// Resources produces an attribute list for populating a Group's -// 'Resources' fields. -func Resources(t testing.TB) []dtypes.Resource { - t.Helper() - count := rand.Intn(10) + 1 - - vals := make([]dtypes.Resource, 0, count) - for i := 0; i < count; i++ { - coin := sdk.NewCoin(testutil.CoinDenom, sdk.NewInt(rand.Int63n(9999)+1)) - res := dtypes.Resource{ - Resources: types.ResourceUnits{ - CPU: &types.CPU{ - Units: types.NewResourceValue(uint64(dtypes.GetValidationConfig().MinUnitCPU)), - }, - Memory: &types.Memory{ - Quantity: types.NewResourceValue(dtypes.GetValidationConfig().MinUnitMemory), - }, - Storage: &types.Storage{ - Quantity: types.NewResourceValue(dtypes.GetValidationConfig().MinUnitStorage), - }, - }, - Count: 1, - Price: coin, - } - vals = append(vals, res) - } - return vals -} diff --git a/go/testutil/v1beta1/deployment.go b/go/testutil/v1beta1/deployment.go deleted file mode 100644 index f70bd0f7..00000000 --- a/go/testutil/v1beta1/deployment.go +++ /dev/null @@ -1,62 +0,0 @@ -package testutil - -import ( - "crypto/sha256" - "math/rand" - "testing" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta1" - "github.com/akash-network/akash-api/go/testutil" -) - -// sum256Seed provides a consistent sha256 value for initial Deployment.Version -const sum256Seed = "hihi" - -// DefaultDeploymentVersion provides consistent sha256 sum for initial Deployment.Version -var DefaultDeploymentVersion = sha256.Sum256([]byte(sum256Seed)) - -// Deployment generates a dtype.Deployment in state `DeploymentActive` -func Deployment(t testing.TB) dtypes.Deployment { - t.Helper() - return dtypes.Deployment{ - DeploymentID: DeploymentID(t), - State: dtypes.DeploymentActive, - Version: DefaultDeploymentVersion[:], - } -} - -// DeploymentGroup generates a dtype.DepDeploymentGroup in state `GroupOpen` -// with a set of random required attributes -func DeploymentGroup(t testing.TB, did dtypes.DeploymentID, gseq uint32) dtypes.Group { - t.Helper() - return dtypes.Group{ - GroupID: dtypes.MakeGroupID(did, gseq), - State: dtypes.GroupOpen, - GroupSpec: dtypes.GroupSpec{ - Name: testutil.Name(t, "dgroup"), - Requirements: PlacementRequirements(t), - Resources: Resources(t), - }, - } -} - -// GroupSpec generator -func GroupSpec(t testing.TB) dtypes.GroupSpec { - t.Helper() - return dtypes.GroupSpec{ - Name: testutil.Name(t, "dgroup"), - Requirements: PlacementRequirements(t), - Resources: Resources(t), - } -} - -// DeploymentGroups returns a set of deployment groups generated by DeploymentGroup -func DeploymentGroups(t testing.TB, did dtypes.DeploymentID, gseq uint32) []dtypes.Group { - t.Helper() - count := rand.Intn(5) + 5 // nolint:gosec - vals := make([]dtypes.Group, 0, count) - for i := 0; i < count; i++ { - vals = append(vals, DeploymentGroup(t, did, gseq+uint32(i))) - } - return vals -} diff --git a/go/testutil/v1beta1/event.go b/go/testutil/v1beta1/event.go deleted file mode 100644 index 805d36f7..00000000 --- a/go/testutil/v1beta1/event.go +++ /dev/null @@ -1,48 +0,0 @@ -package testutil - -import ( - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/stretchr/testify/require" - 
abci "github.com/tendermint/tendermint/abci/types" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta1" - ptypes "github.com/akash-network/akash-api/go/node/provider/v1beta1" - "github.com/akash-network/akash-api/go/sdkutil" -) - -func ParseEvent(t testing.TB, events []abci.Event) sdkutil.Event { - t.Helper() - - require.Equal(t, 1, len(events)) - - sev := sdk.StringifyEvent(events[0]) - ev, err := sdkutil.ParseEvent(sev) - - require.NoError(t, err) - - return ev -} - -func ParseDeploymentEvent(t testing.TB, events []abci.Event) sdkutil.ModuleEvent { - t.Helper() - - uev := ParseEvent(t, events) - - iev, err := dtypes.ParseEvent(uev) - require.NoError(t, err) - - return iev -} - -func ParseProviderEvent(t testing.TB, events []abci.Event) sdkutil.ModuleEvent { - t.Helper() - - uev := ParseEvent(t, events) - - iev, err := ptypes.ParseEvent(uev) - require.NoError(t, err) - - return iev -} diff --git a/go/testutil/v1beta1/ids.go b/go/testutil/v1beta1/ids.go deleted file mode 100644 index 454bb295..00000000 --- a/go/testutil/v1beta1/ids.go +++ /dev/null @@ -1,96 +0,0 @@ -package testutil - -import ( - cryptorand "crypto/rand" - "crypto/sha256" - "math/rand" - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/tendermint/tendermint/crypto/ed25519" - - mtypes "github.com/akash-network/akash-api/go/node/market/v1beta1" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta1" -) - -// AccAddress provides an Account's Address bytes from a ed25519 generated -// private key. -func AccAddress(t testing.TB) sdk.AccAddress { - t.Helper() - privKey := ed25519.GenPrivKey() - return sdk.AccAddress(privKey.PubKey().Address()) -} - -func Key(t testing.TB) ed25519.PrivKey { - t.Helper() - return ed25519.GenPrivKey() -} - -func DeploymentID(t testing.TB) dtypes.DeploymentID { - t.Helper() - return dtypes.DeploymentID{ - Owner: AccAddress(t).String(), - DSeq: uint64(rand.Uint32()), // nolint: gosec - } -} - -func DeploymentIDForAccount(t testing.TB, addr sdk.Address) dtypes.DeploymentID { - t.Helper() - return dtypes.DeploymentID{ - Owner: addr.String(), - DSeq: uint64(rand.Uint32()), // nolint: gosec - } -} - -// DeploymentVersion provides a random sha256 sum for simulating Deployments. 
-func DeploymentVersion(t testing.TB) []byte { - t.Helper() - src := make([]byte, 128) - _, err := cryptorand.Read(src) - if err != nil { - t.Fatal(err) - } - sum := sha256.Sum256(src) - return sum[:] -} - -func GroupID(t testing.TB) dtypes.GroupID { - t.Helper() - return dtypes.MakeGroupID(DeploymentID(t), rand.Uint32()) // nolint: gosec -} - -func GroupIDForAccount(t testing.TB, addr sdk.Address) dtypes.GroupID { - t.Helper() - return dtypes.MakeGroupID(DeploymentIDForAccount(t, addr), rand.Uint32()) // nolint: gosec -} - -func OrderID(t testing.TB) mtypes.OrderID { - t.Helper() - return mtypes.MakeOrderID(GroupID(t), rand.Uint32()) // nolint: gosec -} - -func OrderIDForAccount(t testing.TB, addr sdk.Address) mtypes.OrderID { - t.Helper() - return mtypes.MakeOrderID(GroupIDForAccount(t, addr), rand.Uint32()) // nolint: gosec -} - -func BidID(t testing.TB) mtypes.BidID { - t.Helper() - return mtypes.MakeBidID(OrderID(t), AccAddress(t)) -} - -func BidIDForAccount(t testing.TB, owner, provider sdk.Address) mtypes.BidID { - t.Helper() - return mtypes.MakeBidID(OrderIDForAccount(t, owner), provider.Bytes()) -} - -func LeaseID(t testing.TB) mtypes.LeaseID { - t.Helper() - return mtypes.MakeLeaseID(BidID(t)) -} - -func LeaseIDForAccount(t testing.TB, owner, provider sdk.Address) mtypes.LeaseID { - t.Helper() - return mtypes.MakeLeaseID(BidIDForAccount(t, owner, provider)) -} diff --git a/go/testutil/v1beta1/provider.go b/go/testutil/v1beta1/provider.go deleted file mode 100644 index 8ac77863..00000000 --- a/go/testutil/v1beta1/provider.go +++ /dev/null @@ -1,22 +0,0 @@ -package testutil - -import ( - "testing" - - ptypes "github.com/akash-network/akash-api/go/node/provider/v1beta1" - "github.com/akash-network/akash-api/go/testutil" -) - -func Provider(t testing.TB) ptypes.Provider { - t.Helper() - - return ptypes.Provider{ - Owner: AccAddress(t).String(), - HostURI: testutil.Hostname(t), - Attributes: Attributes(t), - Info: ptypes.ProviderInfo{ - EMail: "test@example.com", - Website: ProviderHostname(t), - }, - } -} diff --git a/go/testutil/v1beta1/types.go b/go/testutil/v1beta1/types.go deleted file mode 100644 index 319954ec..00000000 --- a/go/testutil/v1beta1/types.go +++ /dev/null @@ -1,21 +0,0 @@ -package testutil - -import ( - "testing" - - types "github.com/akash-network/akash-api/go/node/types/v1beta1" -) - -func ResourceUnits(_ testing.TB) types.ResourceUnits { - return types.ResourceUnits{ - CPU: &types.CPU{ - Units: types.NewResourceValue(uint64(RandCPUUnits())), - }, - Memory: &types.Memory{ - Quantity: types.NewResourceValue(RandMemoryQuantity()), - }, - Storage: &types.Storage{ - Quantity: types.NewResourceValue(RandStorageQuantity()), - }, - } -} diff --git a/go/testutil/v1beta2/base.go b/go/testutil/v1beta2/base.go deleted file mode 100644 index ab84b0f6..00000000 --- a/go/testutil/v1beta2/base.go +++ /dev/null @@ -1,90 +0,0 @@ -package testutil - -import ( - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/tendermint/tendermint/libs/rand" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta2" - types "github.com/akash-network/akash-api/go/node/types/v1beta2" - "github.com/akash-network/akash-api/go/testutil" -) - -func ProviderHostname(t testing.TB) string { - return "https://" + testutil.Hostname(t) -} - -// Attribute generates a random sdk.Attribute -func Attribute(t testing.TB) types.Attribute { - t.Helper() - return types.NewStringAttribute(testutil.Name(t, "attr-key"), testutil.Name(t, "attr-value")) -} - -// Attributes generates a 
set of sdk.Attribute -func Attributes(t testing.TB) []types.Attribute { - t.Helper() - count := rand.Intn(10) + 1 - - vals := make([]types.Attribute, 0, count) - for i := 0; i < count; i++ { - vals = append(vals, Attribute(t)) - } - return vals -} - -// PlacementRequirements generates placement requirements -func PlacementRequirements(t testing.TB) types.PlacementRequirements { - return types.PlacementRequirements{ - Attributes: Attributes(t), - } -} - -func RandCPUUnits() uint { - return testutil.RandRangeUint( - dtypes.GetValidationConfig().MinUnitCPU, - dtypes.GetValidationConfig().MaxUnitCPU) -} - -func RandMemoryQuantity() uint64 { - return testutil.RandRangeUint64( - dtypes.GetValidationConfig().MinUnitMemory, - dtypes.GetValidationConfig().MaxUnitMemory) -} - -func RandStorageQuantity() uint64 { - return testutil.RandRangeUint64( - dtypes.GetValidationConfig().MinUnitStorage, - dtypes.GetValidationConfig().MaxUnitStorage) -} - -// Resources produces an attribute list for populating a Group's -// 'Resources' fields. -func Resources(t testing.TB) []dtypes.Resource { - t.Helper() - count := rand.Intn(10) + 1 - - vals := make([]dtypes.Resource, 0, count) - for i := 0; i < count; i++ { - coin := sdk.NewDecCoin(testutil.CoinDenom, sdk.NewInt(rand.Int63n(9999)+1)) - res := dtypes.Resource{ - Resources: types.ResourceUnits{ - CPU: &types.CPU{ - Units: types.NewResourceValue(uint64(dtypes.GetValidationConfig().MinUnitCPU)), - }, - Memory: &types.Memory{ - Quantity: types.NewResourceValue(dtypes.GetValidationConfig().MinUnitMemory), - }, - Storage: types.Volumes{ - types.Storage{ - Quantity: types.NewResourceValue(dtypes.GetValidationConfig().MinUnitStorage), - }, - }, - }, - Count: 1, - Price: coin, - } - vals = append(vals, res) - } - return vals -} diff --git a/go/testutil/v1beta2/deployment.go b/go/testutil/v1beta2/deployment.go deleted file mode 100644 index 3271225a..00000000 --- a/go/testutil/v1beta2/deployment.go +++ /dev/null @@ -1,62 +0,0 @@ -package testutil - -import ( - "crypto/sha256" - "math/rand" - "testing" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta2" - "github.com/akash-network/akash-api/go/testutil" -) - -// sum256Seed provides a consistent sha256 value for initial Deployment.Version -const sum256Seed = "hihi" - -// DefaultDeploymentVersion provides consistent sha256 sum for initial Deployment.Version -var DefaultDeploymentVersion = sha256.Sum256([]byte(sum256Seed)) - -// Deployment generates a dtype.Deployment in state `DeploymentActive` -func Deployment(t testing.TB) dtypes.Deployment { - t.Helper() - return dtypes.Deployment{ - DeploymentID: DeploymentID(t), - State: dtypes.DeploymentActive, - Version: DefaultDeploymentVersion[:], - } -} - -// DeploymentGroup generates a dtype.DepDeploymentGroup in state `GroupOpen` -// with a set of random required attributes -func DeploymentGroup(t testing.TB, did dtypes.DeploymentID, gseq uint32) dtypes.Group { - t.Helper() - return dtypes.Group{ - GroupID: dtypes.MakeGroupID(did, gseq), - State: dtypes.GroupOpen, - GroupSpec: dtypes.GroupSpec{ - Name: testutil.Name(t, "dgroup"), - Requirements: PlacementRequirements(t), - Resources: Resources(t), - }, - } -} - -// GroupSpec generator -func GroupSpec(t testing.TB) dtypes.GroupSpec { - t.Helper() - return dtypes.GroupSpec{ - Name: testutil.Name(t, "dgroup"), - Requirements: PlacementRequirements(t), - Resources: Resources(t), - } -} - -// DeploymentGroups returns a set of deployment groups generated by DeploymentGroup -func DeploymentGroups(t testing.TB, 
did dtypes.DeploymentID, gseq uint32) []dtypes.Group { - t.Helper() - count := rand.Intn(5) + 5 // nolint:gosec - vals := make([]dtypes.Group, 0, count) - for i := 0; i < count; i++ { - vals = append(vals, DeploymentGroup(t, did, gseq+uint32(i))) - } - return vals -} diff --git a/go/testutil/v1beta2/event.go b/go/testutil/v1beta2/event.go deleted file mode 100644 index d48b3e73..00000000 --- a/go/testutil/v1beta2/event.go +++ /dev/null @@ -1,60 +0,0 @@ -package testutil - -import ( - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta2" - mtypes "github.com/akash-network/akash-api/go/node/market/v1beta2" - ptypes "github.com/akash-network/akash-api/go/node/provider/v1beta2" - "github.com/akash-network/akash-api/go/sdkutil" -) - -func ParseEvent(t testing.TB, events []abci.Event) sdkutil.Event { - t.Helper() - - require.Equal(t, 1, len(events)) - - sev := sdk.StringifyEvent(events[0]) - ev, err := sdkutil.ParseEvent(sev) - - require.NoError(t, err) - - return ev -} - -func ParseDeploymentEvent(t testing.TB, events []abci.Event) sdkutil.ModuleEvent { - t.Helper() - - uev := ParseEvent(t, events) - - iev, err := dtypes.ParseEvent(uev) - require.NoError(t, err) - - return iev -} - -func ParseMarketEvent(t testing.TB, events []abci.Event) sdkutil.ModuleEvent { - t.Helper() - - uev := ParseEvent(t, events) - - iev, err := mtypes.ParseEvent(uev) - require.NoError(t, err) - - return iev -} - -func ParseProviderEvent(t testing.TB, events []abci.Event) sdkutil.ModuleEvent { - t.Helper() - - uev := ParseEvent(t, events) - - iev, err := ptypes.ParseEvent(uev) - require.NoError(t, err) - - return iev -} diff --git a/go/testutil/v1beta2/ids.go b/go/testutil/v1beta2/ids.go deleted file mode 100644 index 4d55ccb0..00000000 --- a/go/testutil/v1beta2/ids.go +++ /dev/null @@ -1,95 +0,0 @@ -package testutil - -import ( - cryptorand "crypto/rand" - "crypto/sha256" - "math/rand" - "testing" - - sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/tendermint/tendermint/crypto/ed25519" - - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta2" - mtypes "github.com/akash-network/akash-api/go/node/market/v1beta2" -) - -// AccAddress provides an Account's Address bytes from a ed25519 generated -// private key. -func AccAddress(t testing.TB) sdk.AccAddress { - t.Helper() - privKey := ed25519.GenPrivKey() - return sdk.AccAddress(privKey.PubKey().Address()) -} - -func Key(t testing.TB) ed25519.PrivKey { - t.Helper() - return ed25519.GenPrivKey() -} - -func DeploymentID(t testing.TB) dtypes.DeploymentID { - t.Helper() - return dtypes.DeploymentID{ - Owner: AccAddress(t).String(), - DSeq: uint64(rand.Uint32()), // nolint: gosec - } -} - -func DeploymentIDForAccount(t testing.TB, addr sdk.Address) dtypes.DeploymentID { - t.Helper() - return dtypes.DeploymentID{ - Owner: addr.String(), - DSeq: uint64(rand.Uint32()), // nolint: gosec - } -} - -// DeploymentVersion provides a random sha256 sum for simulating Deployments. 
-func DeploymentVersion(t testing.TB) []byte { - t.Helper() - src := make([]byte, 128) - _, err := cryptorand.Read(src) - if err != nil { - t.Fatal(err) - } - sum := sha256.Sum256(src) - return sum[:] -} - -func GroupID(t testing.TB) dtypes.GroupID { - t.Helper() - return dtypes.MakeGroupID(DeploymentID(t), rand.Uint32()) // nolint: gosec -} - -func GroupIDForAccount(t testing.TB, addr sdk.Address) dtypes.GroupID { - t.Helper() - return dtypes.MakeGroupID(DeploymentIDForAccount(t, addr), rand.Uint32()) // nolint: gosec -} - -func OrderID(t testing.TB) mtypes.OrderID { - t.Helper() - return mtypes.MakeOrderID(GroupID(t), rand.Uint32()) // nolint: gosec -} - -func OrderIDForAccount(t testing.TB, addr sdk.Address) mtypes.OrderID { - t.Helper() - return mtypes.MakeOrderID(GroupIDForAccount(t, addr), rand.Uint32()) // nolint: gosec -} - -func BidID(t testing.TB) mtypes.BidID { - t.Helper() - return mtypes.MakeBidID(OrderID(t), AccAddress(t)) -} - -func BidIDForAccount(t testing.TB, owner, provider sdk.Address) mtypes.BidID { - t.Helper() - return mtypes.MakeBidID(OrderIDForAccount(t, owner), provider.Bytes()) -} - -func LeaseID(t testing.TB) mtypes.LeaseID { - t.Helper() - return mtypes.MakeLeaseID(BidID(t)) -} - -func LeaseIDForAccount(t testing.TB, owner, provider sdk.Address) mtypes.LeaseID { - t.Helper() - return mtypes.MakeLeaseID(BidIDForAccount(t, owner, provider)) -} diff --git a/go/testutil/v1beta2/provider.go b/go/testutil/v1beta2/provider.go deleted file mode 100644 index 5759cca9..00000000 --- a/go/testutil/v1beta2/provider.go +++ /dev/null @@ -1,22 +0,0 @@ -package testutil - -import ( - "testing" - - ptypes "github.com/akash-network/akash-api/go/node/provider/v1beta2" - "github.com/akash-network/akash-api/go/testutil" -) - -func Provider(t testing.TB) ptypes.Provider { - t.Helper() - - return ptypes.Provider{ - Owner: AccAddress(t).String(), - HostURI: testutil.Hostname(t), - Attributes: Attributes(t), - Info: ptypes.ProviderInfo{ - EMail: "test@example.com", - Website: ProviderHostname(t), - }, - } -} diff --git a/go/testutil/v1beta2/types.go b/go/testutil/v1beta2/types.go deleted file mode 100644 index 265d3306..00000000 --- a/go/testutil/v1beta2/types.go +++ /dev/null @@ -1,23 +0,0 @@ -package testutil - -import ( - "testing" - - types "github.com/akash-network/akash-api/go/node/types/v1beta2" -) - -func ResourceUnits(_ testing.TB) types.ResourceUnits { - return types.ResourceUnits{ - CPU: &types.CPU{ - Units: types.NewResourceValue(uint64(RandCPUUnits())), - }, - Memory: &types.Memory{ - Quantity: types.NewResourceValue(RandMemoryQuantity()), - }, - Storage: types.Volumes{ - types.Storage{ - Quantity: types.NewResourceValue(RandStorageQuantity()), - }, - }, - } -} diff --git a/go/testutil/v1beta3/base.go b/go/testutil/v1beta3/base.go index abb75d7d..1e8bf5ab 100644 --- a/go/testutil/v1beta3/base.go +++ b/go/testutil/v1beta3/base.go @@ -3,13 +3,15 @@ package testutil import ( "testing" - sdk "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/libs/rand" - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - types "github.com/akash-network/akash-api/go/node/types/v1beta3" - "github.com/akash-network/akash-api/go/testutil" + "github.com/cometbft/cometbft/libs/rand" + sdk "github.com/cosmos/cosmos-sdk/types" + + dtypes "pkg.akt.dev/go/node/deployment/v1beta4" + attr "pkg.akt.dev/go/node/types/attributes/v1" + rtypes "pkg.akt.dev/go/node/types/resources/v1beta4" + "pkg.akt.dev/go/testutil" ) func 
ProviderHostname(t testing.TB) string { @@ -17,17 +19,17 @@ func ProviderHostname(t testing.TB) string { } // Attribute generates a random sdk.Attribute -func Attribute(t testing.TB) types.Attribute { +func Attribute(t testing.TB) attr.Attribute { t.Helper() - return types.NewStringAttribute(testutil.Name(t, "attr-key"), testutil.Name(t, "attr-value")) + return attr.NewStringAttribute(testutil.Name(t, "attr-key"), testutil.Name(t, "attr-value")) } // Attributes generates a set of sdk.Attribute -func Attributes(t testing.TB) []types.Attribute { +func Attributes(t testing.TB) attr.Attributes { t.Helper() count := rand.Intn(10) + 1 - vals := make([]types.Attribute, 0, count) + vals := make(attr.Attributes, 0, count) for i := 0; i < count; i++ { vals = append(vals, Attribute(t)) } @@ -35,8 +37,8 @@ func Attributes(t testing.TB) []types.Attribute { } // PlacementRequirements generates placement requirements -func PlacementRequirements(t testing.TB) types.PlacementRequirements { - return types.PlacementRequirements{ +func PlacementRequirements(t testing.TB) attr.PlacementRequirements { + return attr.PlacementRequirements{ Attributes: Attributes(t), } } @@ -70,29 +72,29 @@ func RandStorageQuantity() uint64 { func ResourcesList(t testing.TB, startID uint32) dtypes.ResourceUnits { require.GreaterOrEqual(t, startID, uint32(1)) - count := uint32(rand.Intn(10)) + 1 + count := uint32(rand.Intn(10)) + 1 // nolint: gosec vals := make(dtypes.ResourceUnits, 0, count) for i := uint32(0); i < count; i++ { coin := sdk.NewDecCoin(testutil.CoinDenom, sdk.NewInt(rand.Int63n(9999)+1)) res := dtypes.ResourceUnit{ - Resources: types.Resources{ + Resources: rtypes.Resources{ ID: i + startID, - CPU: &types.CPU{ - Units: types.NewResourceValue(uint64(dtypes.GetValidationConfig().Unit.Min.CPU)), + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(uint64(dtypes.GetValidationConfig().Unit.Min.CPU)), }, - GPU: &types.GPU{ - Units: types.NewResourceValue(uint64(dtypes.GetValidationConfig().Unit.Min.GPU) + 1), + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(uint64(dtypes.GetValidationConfig().Unit.Min.GPU) + 1), }, - Memory: &types.Memory{ - Quantity: types.NewResourceValue(dtypes.GetValidationConfig().Unit.Min.Memory), + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(dtypes.GetValidationConfig().Unit.Min.Memory), }, - Storage: types.Volumes{ - types.Storage{ - Quantity: types.NewResourceValue(dtypes.GetValidationConfig().Unit.Min.Storage), + Storage: rtypes.Volumes{ + rtypes.Storage{ + Quantity: rtypes.NewResourceValue(dtypes.GetValidationConfig().Unit.Min.Storage), }, }, - Endpoints: types.Endpoints{}, + Endpoints: rtypes.Endpoints{}, }, Count: 1, Price: coin, diff --git a/go/testutil/v1beta3/deployment.go b/go/testutil/v1beta3/deployment.go index cda6ee27..3b78cf35 100644 --- a/go/testutil/v1beta3/deployment.go +++ b/go/testutil/v1beta3/deployment.go @@ -5,34 +5,35 @@ import ( "math/rand" "testing" - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - "github.com/akash-network/akash-api/go/testutil" + dtypes "pkg.akt.dev/go/node/deployment/v1" + dtypesv1beta4 "pkg.akt.dev/go/node/deployment/v1beta4" + "pkg.akt.dev/go/testutil" ) // sum256Seed provides a consistent sha256 value for initial Deployment.Version const sum256Seed = "hihi" -// DefaultDeploymentVersion provides consistent sha256 sum for initial Deployment.Version -var DefaultDeploymentVersion = sha256.Sum256([]byte(sum256Seed)) +// DefaultDeploymentHash provides consistent sha256 sum for initial Deployment.Version +var 
DefaultDeploymentHash = sha256.Sum256([]byte(sum256Seed)) // Deployment generates a dtype.Deployment in state `DeploymentActive` func Deployment(t testing.TB) dtypes.Deployment { t.Helper() return dtypes.Deployment{ - DeploymentID: DeploymentID(t), - State: dtypes.DeploymentActive, - Version: DefaultDeploymentVersion[:], + ID: DeploymentID(t), + State: dtypes.DeploymentActive, + Hash: DefaultDeploymentHash[:], } } // DeploymentGroup generates a dtype.DepDeploymentGroup in state `GroupOpen` // with a set of random required attributes -func DeploymentGroup(t testing.TB, did dtypes.DeploymentID, gseq uint32) dtypes.Group { +func DeploymentGroup(t testing.TB, did dtypes.DeploymentID, gseq uint32) dtypesv1beta4.Group { t.Helper() - return dtypes.Group{ - GroupID: dtypes.MakeGroupID(did, gseq), - State: dtypes.GroupOpen, - GroupSpec: dtypes.GroupSpec{ + return dtypesv1beta4.Group{ + ID: dtypes.MakeGroupID(did, gseq), + State: dtypesv1beta4.GroupOpen, + GroupSpec: dtypesv1beta4.GroupSpec{ Name: testutil.Name(t, "dgroup"), Requirements: PlacementRequirements(t), Resources: ResourcesList(t, 1), @@ -41,9 +42,9 @@ func DeploymentGroup(t testing.TB, did dtypes.DeploymentID, gseq uint32) dtypes. } // GroupSpec generator -func GroupSpec(t testing.TB) dtypes.GroupSpec { +func GroupSpec(t testing.TB) dtypesv1beta4.GroupSpec { t.Helper() - return dtypes.GroupSpec{ + return dtypesv1beta4.GroupSpec{ Name: testutil.Name(t, "dgroup"), Requirements: PlacementRequirements(t), Resources: ResourcesList(t, 1), @@ -51,12 +52,12 @@ func GroupSpec(t testing.TB) dtypes.GroupSpec { } // DeploymentGroups returns a set of deployment groups generated by DeploymentGroup -func DeploymentGroups(t testing.TB, did dtypes.DeploymentID, gseq uint32) []dtypes.Group { +func DeploymentGroups(t testing.TB, did dtypes.DeploymentID, gseq uint32) dtypesv1beta4.Groups { t.Helper() count := rand.Intn(5) + 5 // nolint:gosec - vals := make([]dtypes.Group, 0, count) + vals := make(dtypesv1beta4.Groups, 0, count) for i := 0; i < count; i++ { - vals = append(vals, DeploymentGroup(t, did, gseq+uint32(i))) + vals = append(vals, DeploymentGroup(t, did, gseq+uint32(i))) // nolint: gosec } return vals } diff --git a/go/testutil/v1beta3/event.go b/go/testutil/v1beta3/event.go index 32e694c7..d89623e2 100644 --- a/go/testutil/v1beta3/event.go +++ b/go/testutil/v1beta3/event.go @@ -3,14 +3,11 @@ package testutil import ( "testing" + abci "github.com/cometbft/cometbft/abci/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - mtypes "github.com/akash-network/akash-api/go/node/market/v1beta3" - ptypes "github.com/akash-network/akash-api/go/node/provider/v1beta3" - "github.com/akash-network/akash-api/go/sdkutil" + "pkg.akt.dev/go/sdkutil" ) func ParseEvent(t testing.TB, events []abci.Event) sdkutil.Event { @@ -26,35 +23,35 @@ func ParseEvent(t testing.TB, events []abci.Event) sdkutil.Event { return ev } -func ParseDeploymentEvent(t testing.TB, events []abci.Event) sdkutil.ModuleEvent { - t.Helper() - - uev := ParseEvent(t, events) - - iev, err := dtypes.ParseEvent(uev) - require.NoError(t, err) - - return iev -} - -func ParseMarketEvent(t testing.TB, events []abci.Event) sdkutil.ModuleEvent { - t.Helper() - - uev := ParseEvent(t, events) - - iev, err := mtypes.ParseEvent(uev) - require.NoError(t, err) - - return iev -} - -func ParseProviderEvent(t testing.TB, events []abci.Event) 
sdkutil.ModuleEvent { - t.Helper() - - uev := ParseEvent(t, events) - - iev, err := ptypes.ParseEvent(uev) - require.NoError(t, err) - - return iev -} +// func ParseDeploymentEvent(t testing.TB, events []abci.Event) sdkutil.ModuleEvent { +// t.Helper() +// +// uev := ParseEvent(t, events) +// +// iev, err := dtypes.ParseEvent(uev) +// require.NoError(t, err) +// +// return iev +// } +// +// func ParseMarketEvent(t testing.TB, events []abci.Event) sdkutil.ModuleEvent { +// t.Helper() +// +// uev := ParseEvent(t, events) +// +// iev, err := mtypes.ParseEvent(uev) +// require.NoError(t, err) +// +// return iev +// } +// +// func ParseProviderEvent(t testing.TB, events []abci.Event) sdkutil.ModuleEvent { +// t.Helper() +// +// uev := ParseEvent(t, events) +// +// iev, err := ptypes.ParseEvent(uev) +// require.NoError(t, err) +// +// return iev +// } diff --git a/go/testutil/v1beta3/ids.go b/go/testutil/v1beta3/ids.go index 570426de..2dcefa40 100644 --- a/go/testutil/v1beta3/ids.go +++ b/go/testutil/v1beta3/ids.go @@ -6,11 +6,11 @@ import ( "math/rand" "testing" + "github.com/cometbft/cometbft/crypto/ed25519" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/tendermint/tendermint/crypto/ed25519" - dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" - mtypes "github.com/akash-network/akash-api/go/node/market/v1beta3" + dtypes "pkg.akt.dev/go/node/deployment/v1" + mtypes "pkg.akt.dev/go/node/market/v1" ) // AccAddress provides an Account's Address bytes from a ed25519 generated diff --git a/go/testutil/v1beta3/provider.go b/go/testutil/v1beta3/provider.go index e1a53c23..4d4ff0fc 100644 --- a/go/testutil/v1beta3/provider.go +++ b/go/testutil/v1beta3/provider.go @@ -3,8 +3,8 @@ package testutil import ( "testing" - ptypes "github.com/akash-network/akash-api/go/node/provider/v1beta3" - "github.com/akash-network/akash-api/go/testutil" + ptypes "pkg.akt.dev/go/node/provider/v1beta4" + "pkg.akt.dev/go/testutil" ) func Provider(t testing.TB) ptypes.Provider { @@ -14,7 +14,7 @@ func Provider(t testing.TB) ptypes.Provider { Owner: AccAddress(t).String(), HostURI: testutil.Hostname(t), Attributes: Attributes(t), - Info: ptypes.ProviderInfo{ + Info: ptypes.Info{ EMail: "test@example.com", Website: ProviderHostname(t), }, diff --git a/go/testutil/v1beta3/types.go b/go/testutil/v1beta3/types.go index 63ea3a58..2e2d5cc0 100644 --- a/go/testutil/v1beta3/types.go +++ b/go/testutil/v1beta3/types.go @@ -3,24 +3,24 @@ package testutil import ( "testing" - types "github.com/akash-network/akash-api/go/node/types/v1beta3" + rtypes "pkg.akt.dev/go/node/types/resources/v1beta4" ) -func Resources(_ testing.TB) types.Resources { - return types.Resources{ +func Resources(_ testing.TB) rtypes.Resources { + return rtypes.Resources{ ID: 1, - CPU: &types.CPU{ - Units: types.NewResourceValue(uint64(RandCPUUnits())), + CPU: &rtypes.CPU{ + Units: rtypes.NewResourceValue(uint64(RandCPUUnits())), }, - Memory: &types.Memory{ - Quantity: types.NewResourceValue(RandMemoryQuantity()), + Memory: &rtypes.Memory{ + Quantity: rtypes.NewResourceValue(RandMemoryQuantity()), }, - GPU: &types.GPU{ - Units: types.NewResourceValue(uint64(RandGPUUnits())), + GPU: &rtypes.GPU{ + Units: rtypes.NewResourceValue(uint64(RandGPUUnits())), }, - Storage: types.Volumes{ - types.Storage{ - Quantity: types.NewResourceValue(RandStorageQuantity()), + Storage: rtypes.Volumes{ + rtypes.Storage{ + Quantity: rtypes.NewResourceValue(RandStorageQuantity()), }, }, } diff --git a/go/tools.go b/go/tools.go new file mode 100644 index 
00000000..ea0d775b --- /dev/null +++ b/go/tools.go @@ -0,0 +1,27 @@ +//go:build tools +// +build tools + +package _go + +import ( + _ "github.com/99designs/keyring" + _ "github.com/grpc-ecosystem/grpc-gateway/runtime" + _ "google.golang.org/grpc" + + _ "github.com/pseudomuto/protoc-gen-doc" + + _ "github.com/cosmos/gogoproto/gogoproto" + _ "github.com/cosmos/gogoproto/protoc-gen-combo" + _ "github.com/cosmos/gogoproto/protoc-gen-gocosmos" + _ "github.com/cosmos/gogoproto/protoc-gen-gofast" + _ "github.com/cosmos/gogoproto/protoc-gen-gogo" + _ "github.com/cosmos/gogoproto/protoc-gen-gogofast" + _ "github.com/cosmos/gogoproto/protoc-gen-gogofaster" + _ "github.com/cosmos/gogoproto/protoc-gen-gogoslick" + _ "github.com/cosmos/gogoproto/protoc-gen-gogotypes" + _ "github.com/cosmos/gogoproto/protoc-gen-gostring" + _ "github.com/cosmos/gogoproto/protoc-min-version" + + _ "k8s.io/api" + _ "k8s.io/apimachinery" +) diff --git a/go/util/ctxlog/ctxlog.go b/go/util/ctxlog/ctxlog.go index 09b25062..085f6003 100644 --- a/go/util/ctxlog/ctxlog.go +++ b/go/util/ctxlog/ctxlog.go @@ -3,7 +3,7 @@ package ctxlog import ( "context" - "github.com/tendermint/tendermint/libs/log" + "github.com/cometbft/cometbft/libs/log" ) type ctxKey string diff --git a/go/util/pubsub/bus.go b/go/util/pubsub/bus.go new file mode 100644 index 00000000..a26748d4 --- /dev/null +++ b/go/util/pubsub/bus.go @@ -0,0 +1,202 @@ +package pubsub + +import ( + "errors" + + "github.com/boz/go-lifecycle" +) + +// ErrNotRunning is the error with message "not running" +var ErrNotRunning = errors.New("not running") + +// Event interface +type Event interface{} + +type Publisher interface { + Publish(Event) error +} + +// Bus is an async event bus that allows subscriptions to behave as a bus themselves. +// When an event is published, it is sent to all subscribers asynchronously - a subscriber +// cannot block other subscribers. +// +// NOTE: this should probably be in util/event or something (not in provider/event) +type Bus interface { + Publisher + Subscribe() (Subscriber, error) + Close() + Done() <-chan struct{} +} + +// Subscriber emits events it sees on the channel returned by Events(). +// A Clone() of a subscriber will emit all events that have not been emitted +// from the cloned subscriber. 
This is important so that events are not missed +// when adding subscribers for sub-components (see `provider/bidengine/{service,order}.go`) +type Subscriber interface { + Events() <-chan Event + Clone() (Subscriber, error) + Close() + Done() <-chan struct{} +} + +type bus struct { + subscriptions map[*bus]bool + + evbuf []Event + + eventch chan Event + parentch chan *bus + + pubch chan Event + subch chan chan<- Subscriber + unsubch chan *bus + + lc lifecycle.Lifecycle +} + +// NewBus runs a new bus and returns bus details +func NewBus() Bus { + bus := &bus{ + subscriptions: make(map[*bus]bool), + pubch: make(chan Event), + subch: make(chan chan<- Subscriber), + unsubch: make(chan *bus), + lc: lifecycle.New(), + } + + go bus.run() + + return bus +} + +func (b *bus) Publish(ev Event) error { + select { + case b.pubch <- ev: + return nil + case <-b.lc.ShuttingDown(): + return ErrNotRunning + } +} + +func (b *bus) Subscribe() (Subscriber, error) { + ch := make(chan Subscriber, 1) + + select { + case b.subch <- ch: + return <-ch, nil + case <-b.lc.ShuttingDown(): + return nil, ErrNotRunning + } +} + +func (b *bus) Clone() (Subscriber, error) { + return b.Subscribe() +} + +func (b *bus) Events() <-chan Event { + return b.eventch +} + +func (b *bus) Close() { + b.lc.Shutdown(nil) +} + +func (b *bus) Done() <-chan struct{} { + return b.lc.Done() +} + +func (b *bus) run() { + defer b.lc.ShutdownCompleted() + + var outch chan<- Event + var curev Event + +loop: + for { + + if b.eventch != nil && len(b.evbuf) > 0 { + // If we're emitting events (Subscriber mode) and there + // are events to emit, set up the output channel and output + // event accordingly. + outch = b.eventch + curev = b.evbuf[0] + } else { + // otherwise block the output (sending to a nil channel always blocks) + outch = nil + } + + select { + case err := <-b.lc.ShutdownRequest(): + b.lc.ShutdownInitiated(err) + break loop + + case outch <- curev: + // Event was emitted. Shrink current event buffer. + b.evbuf = b.evbuf[1:] + + case ev := <-b.pubch: + // publish event + + // Buffer event. + if b.eventch != nil { + b.evbuf = append(b.evbuf, ev) + } + + // Publish to children. + for sub := range b.subscriptions { + if err := sub.Publish(ev); err != nil && !errors.Is(err, ErrNotRunning) { + panic(err) + } + } + + case ch := <-b.subch: + // new subscription + + sub := newSubscriber(b) + b.subscriptions[sub] = true + + ch <- sub + + case sub := <-b.unsubch: + // subscription closed + delete(b.subscriptions, sub) + } + } + + for sub := range b.subscriptions { + sub.lc.ShutdownAsync(nil) + } + + for len(b.subscriptions) > 0 { + sub := <-b.unsubch + delete(b.subscriptions, sub) + } + + if b.parentch != nil { + b.parentch <- b + } +} + +func newSubscriber(parent *bus) *bus { + // Re-use bus struct, but populate output channel (eventch) + // to enable subscriber mode. 
+ + evbuf := make([]Event, len(parent.evbuf)) + copy(evbuf, parent.evbuf) + + sub := &bus{ + eventch: make(chan Event), + parentch: parent.unsubch, + evbuf: evbuf, + + subscriptions: make(map[*bus]bool), + pubch: make(chan Event), + subch: make(chan chan<- Subscriber), + unsubch: make(chan *bus), + lc: lifecycle.New(), + } + + go sub.run() + + return sub +} diff --git a/go/util/pubsub/bus_test.go b/go/util/pubsub/bus_test.go new file mode 100644 index 00000000..641ea459 --- /dev/null +++ b/go/util/pubsub/bus_test.go @@ -0,0 +1,150 @@ +package pubsub_test + +import ( + "testing" + + "github.com/cometbft/cometbft/crypto/ed25519" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "pkg.akt.dev/go/util/pubsub" +) + +func TestBus(t *testing.T) { + bus := pubsub.NewBus() + defer bus.Close() + + did := ed25519.GenPrivKey().PubKey().Address() + + ev := newEvent(did) + + assert.NoError(t, bus.Publish(ev)) + + sub1, err := bus.Subscribe() + require.NoError(t, err) + + sub2, err := bus.Subscribe() + require.NoError(t, err) + + assert.NoError(t, bus.Publish(ev)) + + select { + case newEv := <-sub1.Events(): + assert.Equal(t, ev, newEv) + case <-pubsub.AfterThreadStart(t): + require.Fail(t, "time out") + } + + select { + case newEv := <-sub2.Events(): + assert.Equal(t, ev, newEv) + case <-pubsub.AfterThreadStart(t): + require.Fail(t, "time out") + } + + sub2.Close() + + select { + case <-sub2.Done(): + case <-pubsub.AfterThreadStart(t): + require.Fail(t, "time out") + } + + assert.NoError(t, bus.Publish(ev)) + + select { + case newEv := <-sub1.Events(): + assert.Equal(t, ev, newEv) + case <-pubsub.AfterThreadStart(t): + require.Fail(t, "time out") + } + + select { + case <-sub2.Events(): + require.Fail(t, "spurious event") + case <-pubsub.AfterThreadStart(t): + } + + bus.Close() + + select { + case <-sub1.Done(): + case <-pubsub.AfterThreadStart(t): + require.Fail(t, "time out") + } + + assert.Equal(t, pubsub.ErrNotRunning, bus.Publish(ev)) + +} + +func TestClone(t *testing.T) { + bus := pubsub.NewBus() + defer bus.Close() + + did1 := ed25519.GenPrivKey().PubKey().Address() + ev1 := newEvent(did1) + + did2 := ed25519.GenPrivKey().PubKey().Address() + ev2 := newEvent(did2) + + assert.NoError(t, bus.Publish(ev1)) + + sub1, err := bus.Subscribe() + require.NoError(t, err) + + select { + case <-sub1.Events(): + require.Fail(t, "spurious event") + case <-pubsub.AfterThreadStart(t): + } + + assert.NoError(t, bus.Publish(ev1)) + assert.NoError(t, bus.Publish(ev2)) + + // allow event propagation + pubsub.SleepForThreadStart(t) + + // clone subscription + sub2, err := sub1.Clone() + require.NoError(t, err) + + // both subscriptions should receive both events + + for i, pev := range []pubsub.Event{ev1, ev2} { + select { + case ev := <-sub1.Events(): + assert.Equal(t, pev, ev, "sub1 event %v", i+1) + case <-pubsub.AfterThreadStart(t): + require.Fail(t, "timeout sub1 event %v", i+1) + } + + select { + case ev := <-sub2.Events(): + assert.Equal(t, pev, ev, "sub2 event %v", i+1) + case <-pubsub.AfterThreadStart(t): + require.Fail(t, "timeout sub2 event %v", i+1) + } + } + + // sub1 should close sub2 + sub1.Close() + + select { + case <-sub2.Done(): + case <-pubsub.AfterThreadStart(t): + require.Fail(t, "time out closing sub2") + } + + select { + case <-sub1.Done(): + case <-pubsub.AfterThreadStart(t): + require.Fail(t, "time out closing sub1") + } + +} + +type testEvent []byte + +func newEvent(addr []byte) testEvent { + return testEvent(addr) +} diff --git a/go/util/pubsub/util.go 
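A minimal usage sketch of the pubsub Bus introduced above, assuming the pkg.akt.dev/go/util/pubsub import path its own test uses; a subscriber only buffers events published after it was created, so subscribe before publishing:

package main

import (
	"fmt"

	"pkg.akt.dev/go/util/pubsub"
)

func main() {
	bus := pubsub.NewBus()
	defer bus.Close()

	sub, err := bus.Subscribe()
	if err != nil {
		panic(err)
	}

	// Delivery is asynchronous: each subscriber buffers its own copy,
	// so a slow subscriber cannot block the others.
	if err := bus.Publish("deployment-created"); err != nil {
		panic(err)
	}

	fmt.Println("got event:", <-sub.Events())

	sub.Close()
	<-sub.Done()
}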
b/go/util/pubsub/util.go new file mode 100644 index 00000000..937c9131 --- /dev/null +++ b/go/util/pubsub/util.go @@ -0,0 +1,34 @@ +package pubsub + +import ( + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +const ( + defaultDelayThreadStart = time.Millisecond * 6 +) + +// AfterThreadStart waits for the duration of delay thread start +func AfterThreadStart(t *testing.T) <-chan time.Time { + return time.After(delayThreadStart(t)) +} + +// SleepForThreadStart pass go routine for the duration of delay thread start +func SleepForThreadStart(t *testing.T) { + time.Sleep(delayThreadStart(t)) +} + +func delayThreadStart(t *testing.T) time.Duration { + if val := os.Getenv("TEST_DELAY_THREAD_START"); val != "" { + d, err := time.ParseDuration(val) + require.NoError(t, err) + + return d + } + + return defaultDelayThreadStart +} diff --git a/go/util/runner/runner.go b/go/util/runner/runner.go new file mode 100644 index 00000000..0fae3d6b --- /dev/null +++ b/go/util/runner/runner.go @@ -0,0 +1,40 @@ +package runner + +// Task is a function type which returns result instance +type Task func() Result + +// Do executes task and send output to channel +func Do(task Task) <-chan Result { + ch := make(chan Result, 1) + go func() { + ch <- task() + }() + return ch +} + +// Result interface wraps Value and Error methods. +type Result interface { + Value() interface{} + Error() error +} + +// NewResult returns result instance with value as input +func NewResult(value interface{}, err error) Result { + return result{ + value: value, + err: err, + } +} + +type result struct { + value interface{} + err error +} + +func (r result) Value() interface{} { + return r.value +} + +func (r result) Error() error { + return r.err +} diff --git a/go/util/tls/key_pair_manager.go b/go/util/tls/key_pair_manager.go new file mode 100644 index 00000000..621984eb --- /dev/null +++ b/go/util/tls/key_pair_manager.go @@ -0,0 +1,315 @@ +package tls + +import ( + "bytes" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/pem" + "errors" + "fmt" + "io" + "math/big" + "net" + "os" + "time" + + "go.step.sm/crypto/pemutil" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + + types "pkg.akt.dev/go/node/cert/v1" +) + +var ( + AuthVersionOID = asn1.ObjectIdentifier{2, 23, 133, 2, 6} + + ErrCertificate = errors.New("certificate error") + ErrInvalidSerialFlag = fmt.Errorf("%w: invalid value in serial flag. 
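A small sketch of the runner helper defined above: Do runs the task on its own goroutine and delivers exactly one Result on the returned channel; the value 42 is only a stand-in:

package main

import (
	"fmt"

	"pkg.akt.dev/go/util/runner"
)

func main() {
	ch := runner.Do(func() runner.Result {
		// Any blocking work goes here; wrap the outcome in a Result.
		return runner.NewResult(42, nil)
	})

	res := <-ch
	if err := res.Error(); err != nil {
		fmt.Println("task failed:", err)
		return
	}
	fmt.Println("task value:", res.Value())
}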
expected integer", ErrCertificate) + errCertificateNotFoundInPEM = fmt.Errorf("%w: certificate not found in PEM", ErrCertificate) + errPrivateKeyNotFoundInPEM = fmt.Errorf("%w: private key not found in PEM", ErrCertificate) + errPublicKeyNotFoundInPEM = fmt.Errorf("%w: public key not found in PEM", ErrCertificate) + errUnsupportedEncryptedPEM = errors.New("unsupported encrypted PEM") +) + +type KeyPairManager interface { + KeyExists() (bool, error) + Generate(notBefore, notAfter time.Time, domains []string) error + + // Read the PEM blocks, containing the cert, private key, & public key + Read(fin ...io.Reader) ([]byte, []byte, []byte, error) + + ReadX509KeyPair(fin ...io.Reader) (*x509.Certificate, tls.Certificate, error) +} + +type keyPairManager struct { + addr sdk.AccAddress + passwordBytes []byte + passwordLegacy []byte + homeDir string +} + +func NewKeyPairManager(cctx sdkclient.Context, fromAddress sdk.AccAddress) (KeyPairManager, error) { + sig, _, err := cctx.Keyring.SignByAddress(fromAddress, []byte(fromAddress.String())) + if err != nil { + return nil, err + } + + // ignore error if ledger device is being used + // due to its jsonparser not liking bech address sent as data in binary format + // if test or file keyring used it will allow to decode old private keys for the mTLS cert + sigLegacy, _, _ := cctx.Keyring.SignByAddress(fromAddress, fromAddress.Bytes()) + + return &keyPairManager{ + addr: fromAddress, + passwordBytes: sig, + passwordLegacy: sigLegacy, + homeDir: cctx.HomeDir, + }, nil +} + +func (kpm *keyPairManager) getKeyPath() string { + return kpm.homeDir + "/" + kpm.addr.String() + ".pem" +} + +func (kpm *keyPairManager) ReadX509KeyPair(fin ...io.Reader) (*x509.Certificate, tls.Certificate, error) { + certData, privKeyData, _, err := kpm.Read(fin...) 
+ if err != nil { + return nil, tls.Certificate{}, err + } + + x509cert, err := x509.ParseCertificate(certData) + if err != nil { + return nil, tls.Certificate{}, fmt.Errorf("could not parse x509 cert: %w", err) + } + + result := tls.Certificate{ + Certificate: [][]byte{certData}, + } + + result.PrivateKey, err = x509.ParsePKCS8PrivateKey(privKeyData) + if err != nil { + return nil, tls.Certificate{}, fmt.Errorf("%w: failed parsing private key data", err) + } + + return x509cert, result, err +} + +func (kpm *keyPairManager) KeyExists() (bool, error) { + _, err := os.Stat(kpm.getKeyPath()) + if err == nil { + return true, nil + } + + if os.IsNotExist(err) { + return false, nil + } + + return false, err +} + +func (kpm *keyPairManager) Generate(notBefore, notAfter time.Time, domains []string) error { + var err error + var pemOut *os.File + if pemOut, err = os.OpenFile(kpm.getKeyPath(), os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600); err != nil { + return err + } + + err = kpm.generateImpl(notBefore, notAfter, domains, pemOut) + + closeErr := pemOut.Close() + if closeErr != nil { + return closeErr + } + + return err +} + +func (kpm *keyPairManager) generateImpl(notBefore, notAfter time.Time, domains []string, fout io.Writer) error { + var err error + // Generate the private key + var priv *ecdsa.PrivateKey + if priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader); err != nil { + return fmt.Errorf("could not generate key: %w", err) + } + + serialNumber := new(big.Int).SetInt64(time.Now().UTC().UnixNano()) + + extKeyUsage := []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + } + + if len(domains) != 0 { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: kpm.addr.String(), + ExtraNames: []pkix.AttributeTypeAndValue{ + { + Type: AuthVersionOID, + Value: "v0.0.1", + }, + }, + }, + Issuer: pkix.Name{ + CommonName: kpm.addr.String(), + }, + NotBefore: notBefore, + NotAfter: notAfter, + KeyUsage: x509.KeyUsageDataEncipherment | x509.KeyUsageKeyEncipherment, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: true, + } + + var ips []net.IP + + for i := len(domains) - 1; i >= 0; i-- { + if ip := net.ParseIP(domains[i]); ip != nil { + ips = append(ips, ip) + domains = append(domains[:i], domains[i+1:]...) 
+ } + } + + if len(domains) != 0 || len(ips) != 0 { + template.PermittedDNSDomainsCritical = true + template.PermittedDNSDomains = domains + template.DNSNames = domains + template.IPAddresses = ips + } + + var certDer []byte + if certDer, err = x509.CreateCertificate(rand.Reader, &template, &template, priv.Public(), priv); err != nil { + return fmt.Errorf("could not create certificate: %w", err) + } + + var keyDer []byte + if keyDer, err = x509.MarshalPKCS8PrivateKey(priv); err != nil { + return fmt.Errorf("could not create private key: %w", err) + } + + var blk *pem.Block + blk, err = pemutil.EncryptPKCS8PrivateKey(rand.Reader, keyDer, kpm.passwordBytes, x509.PEMCipherAES256) + if err != nil { + return fmt.Errorf("could not encrypt private key as PEM: %w", err) + } + + // Write the certificate + if err = pem.Encode(fout, &pem.Block{Type: types.PemBlkTypeCertificate, Bytes: certDer}); err != nil { + return fmt.Errorf("could not encode certificate as PEM: %w", err) + } + + // Write the encrypted private key + if err = pem.Encode(fout, blk); err != nil { + return fmt.Errorf("could not encode private key as PEM: %w", err) + } + + return nil +} + +func (kpm *keyPairManager) Read(fin ...io.Reader) ([]byte, []byte, []byte, error) { + var pemIn io.Reader + var closeMe io.ReadCloser + + if len(fin) != 0 { + if len(fin) != 1 { + return nil, nil, nil, fmt.Errorf("%w: Read() takes exactly 1 or 0 arguments, not %d", ErrCertificate, len(fin)) + } + pemIn = fin[0] + } + + if pemIn == nil { + fopen, err := os.OpenFile(kpm.getKeyPath(), os.O_RDONLY, 0x0) + if err != nil { + return nil, nil, nil, fmt.Errorf("could not open certificate PEM file: %w", err) + } + closeMe = fopen + pemIn = fopen + } + + cert, privKey, pubKey, err := kpm.readImpl(pemIn) + + if closeMe != nil { + closeErr := closeMe.Close() + if closeErr != nil { + return nil, nil, nil, fmt.Errorf("could not close PEM file: %w", closeErr) + } + } + + return cert, privKey, pubKey, err +} + +func (kpm *keyPairManager) readImpl(fin io.Reader) ([]byte, []byte, []byte, error) { + buf := &bytes.Buffer{} + _, err := io.Copy(buf, fin) + if err != nil { + return nil, nil, nil, fmt.Errorf("failed reading certificate PEM file: %w", err) + } + data := buf.Bytes() + + // Read certificate + block, remaining := pem.Decode(data) + if block == nil { + return nil, nil, nil, errCertificateNotFoundInPEM + } + cert := block.Bytes + + // Read private key + block, _ = pem.Decode(remaining) + if block == nil { + return nil, nil, nil, errPrivateKeyNotFoundInPEM + } + + var privKeyPlaintext []byte + var privKeyI interface{} + + // PKCS#8 header defined in RFC7468 section 11 + // nolint: gocritic + if block.Type == "ENCRYPTED PRIVATE KEY" { + privKeyPlaintext, err = pemutil.DecryptPKCS8PrivateKey(block.Bytes, kpm.passwordBytes) + } else if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + // nolint:staticcheck + privKeyPlaintext, _ = x509.DecryptPEMBlock(block, kpm.passwordBytes) + + // DecryptPEMBlock may not return IncorrectPasswordError. 
+ // Try parse private key instead and if it fails give another try with legacy password + privKeyI, err = x509.ParsePKCS8PrivateKey(privKeyPlaintext) + if err != nil { + // nolint:staticcheck + privKeyPlaintext, err = x509.DecryptPEMBlock(block, kpm.passwordLegacy) + } + } else { + return nil, nil, nil, errUnsupportedEncryptedPEM + } + if err != nil { + return nil, nil, nil, fmt.Errorf("%w: failed decrypting x509 block with private key", err) + } + + if privKeyI == nil { + if privKeyI, err = x509.ParsePKCS8PrivateKey(privKeyPlaintext); err != nil { + return nil, nil, nil, fmt.Errorf("%w: failed parsing private key data", err) + } + } + + eckey, valid := privKeyI.(*ecdsa.PrivateKey) + if !valid { + return nil, nil, nil, fmt.Errorf("%w: unexpected private key type, expected %T but got %T", + errPublicKeyNotFoundInPEM, + &ecdsa.PrivateKey{}, + privKeyI) + } + + var pubKey []byte + if pubKey, err = x509.MarshalPKIXPublicKey(eckey.Public()); err != nil { + return nil, nil, nil, fmt.Errorf("%w: failed extracting public key", err) + } + + return cert, privKeyPlaintext, pubKey, nil +} diff --git a/go/util/tls/utils.go b/go/util/tls/utils.go new file mode 100644 index 00000000..e0af7023 --- /dev/null +++ b/go/util/tls/utils.go @@ -0,0 +1,58 @@ +package tls + +import ( + "context" + "crypto/tls" + "fmt" + "io" + "time" + + "github.com/cosmos/cosmos-sdk/client" + + ctypes "pkg.akt.dev/go/node/cert/v1" +) + +// LoadAndQueryCertificateForAccount wraps LoadAndQueryPEMForAccount and tls.X509KeyPair +func LoadAndQueryCertificateForAccount(ctx context.Context, cctx client.Context, fin io.Reader) (tls.Certificate, error) { + kpm, err := NewKeyPairManager(cctx, cctx.FromAddress) + if err != nil { + return tls.Certificate{}, err + } + + x509cert, tlsCert, err := kpm.ReadX509KeyPair(fin) + if err != nil { + return tls.Certificate{}, err + } + + // Check if valid according to time + if x509cert.NotBefore.After(time.Now().UTC()) { + return tls.Certificate{}, fmt.Errorf("%w: certificate is not yet active, start ts %s", ErrCertificate, x509cert.NotBefore) + } + + if time.Now().UTC().After(x509cert.NotAfter) { + return tls.Certificate{}, fmt.Errorf("%w: certificate has been expired since %s", ErrCertificate, x509cert.NotAfter) + } + + params := &ctypes.QueryCertificatesRequest{ + Filter: ctypes.CertificateFilter{ + Owner: x509cert.Subject.CommonName, + Serial: x509cert.SerialNumber.String(), + }, + } + + certs, err := ctypes.NewQueryClient(cctx).Certificates(ctx, params) + if err != nil { + return tls.Certificate{}, err + } + + if len(certs.Certificates) == 0 { + return tls.Certificate{}, fmt.Errorf("%w: certificate has not been committed to blockchain", ErrCertificate) + } + + foundCert := certs.Certificates[0] + if foundCert.GetCertificate().State != ctypes.CertificateValid { + return tls.Certificate{}, fmt.Errorf("%w: certificate is not valid", ErrCertificate) + } + + return tlsCert, nil +} diff --git a/go/util/tls/verify.go b/go/util/tls/verify.go index 30fb5008..7ebdcbf6 100644 --- a/go/util/tls/verify.go +++ b/go/util/tls/verify.go @@ -9,7 +9,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" - ctypes "github.com/akash-network/akash-api/go/node/cert/v1beta3" + ctypes "pkg.akt.dev/go/node/cert/v1" ) type InvalidReason int diff --git a/go/util/units/units.go b/go/util/units/units.go index e74ab695..0712a6d0 100644 --- a/go/util/units/units.go +++ b/go/util/units/units.go @@ -9,7 +9,7 @@ import ( "gopkg.in/yaml.v3" - "github.com/akash-network/akash-api/go/node/types/unit" + "pkg.akt.dev/go/node/types/unit" 
) type ByteQuantity uint64 diff --git a/go/util/units/units_test.go b/go/util/units/units_test.go index c21943cf..1524cc77 100644 --- a/go/util/units/units_test.go +++ b/go/util/units/units_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/assert" "gopkg.in/yaml.v3" - "github.com/akash-network/akash-api/go/node/types/unit" + "pkg.akt.dev/go/node/types/unit" ) func TestByteQuantity(t *testing.T) { diff --git a/make/codegen.mk b/make/codegen.mk index 234bc851..4cbfa269 100644 --- a/make/codegen.mk +++ b/make/codegen.mk @@ -1,24 +1,33 @@ +PROTO_GEN_MODS ?= go \ +ts \ +doc + .PHONY: proto-gen -ifeq ($(PROTO_LEGACY), true) -proto-gen: modvendor $(PROTOC) $(PROTOC_GEN_GOCOSMOS) $(PROTOC_GEN_GRPC_GATEWAY) $(PROTOC_GEN_DOC) $(AKASH_TS_NODE_MODULES) - ./script/protocgen-legacy.sh -else -proto-gen: modvendor gogoproto $(BUF) $(PROTOC_GEN_GRPC_GATEWAY) $(PROTOC_GEN_GO) - ./script/protocgen.sh -endif - -.PHONY: proto-gen-swagger -proto-gen-swagger: modvendor $(BUF) $(PROTOC_GEN_SWAGGER) $(SWAGGER_COMBINE) - ./script/protoc-gen-swagger.sh +proto-gen: $(patsubst %, proto-gen-%,$(PROTO_GEN_MODS)) + +.PHONY: proto-gen-go +proto-gen-go: $(BUF) $(GOGOPROTO) $(PROTOC_GEN_GOCOSMOS) $(PROTOC_GEN_GRPC_GATEWAY) $(PROTOC_GEN_GO) + ./script/protocgen.sh go $(GO_MOD_NAME) $(GO_ROOT) + +.PHONY: proto-gen-pulsar +proto-gen-pulsar: $(BUF) $(PROTOC_GEN_GO) $(PROTOC_GEN_PULSAR) + ./script/protocgen.sh pulsar $(GO_MOD_NAME) + +.PHONY: proto-gen-ts +proto-gen-ts: $(BUF) $(AKASH_TS_NODE_MODULES) + ./script/protocgen.sh ts + +.PHONY: proto-gen-doc +proto-gen-doc: $(BUF) $(SWAGGER_COMBINE) $(PROTOC_GEN_DOC) $(PROTOC_GEN_SWAGGER) + ./script/protocgen.sh doc $(GO_MOD_NAME) mocks: $(MOCKERY) - $(GO) generate ./... + (cd $(GO_ROOT); $(GO) generate ./...) .PHONY: codegen -codegen: proto-gen proto-gen-swagger mocks +codegen: proto-gen mocks .PHONY: changelog changelog: $(GIT_CHGLOG) @echo "generating changelog to changelog" ./script/changelog.sh $(shell git describe --tags --abbrev=0) changelog.md - diff --git a/make/lint.mk b/make/lint.mk index e6695d11..f161d98e 100644 --- a/make/lint.mk +++ b/make/lint.mk @@ -8,11 +8,7 @@ node .PHONY: lint-go lint-go: $(GOLANGCI_LINT) - $(GOLANGCI_LINT_RUN) ./... --issues-exit-code=0 --deadline=20m - -.PHONY: lint-go-% -lint-go-%: $(GOLANGCI_LINT) - $(GOLINT) $* + @$(TOOLS) golint "$(GO_MODULES)" "$(GO_TEST_DIRS)" .PHONY: lint-proto-% lint-proto-%: @@ -32,13 +28,13 @@ lint-shell: .PHONY: lint lint: $(patsubst %, lint-%,$(SUB_LINT)) -.PHONY: check-breaking +.PHONY: proto-check-breaking proto-check-breaking: $(BUF) $(BUF) breaking --against '.git#branch=main' -.PHONY: format +.PHONY: proto-format proto-format: - $(DOCKER_CLANG) find ./ ! -path "./vendor/*" -name *.proto -exec clang-format -i {} \; + $(DOCKER_CLANG) find ./ ! -path "./go/vendor/*" -name *.proto -exec clang-format -i {} \; .PHONY: lint-ts lint-ts: $(AKASH_TS_NODE_MODULES) diff --git a/make/mod.mk b/make/mod.mk index a1668340..abcb4ea8 100644 --- a/make/mod.mk +++ b/make/mod.mk @@ -18,26 +18,23 @@ endef .PHONY: deps-tidy deps-tidy: - $(GO) mod tidy + (cd $(GO_ROOT); $(GO) mod tidy) .PHONY: deps-vendor deps-vendor: - go mod vendor + (cd $(GO_ROOT); GOWORK=off go mod vendor) .PHONY: modsensure modsensure: deps-tidy deps-vendor .PHONY: modvendor modvendor: export VENDOR_BUF:=$(VENDOR_BUF) -modvendor: $(MODVENDOR) $(PROTOC) modsensure +modvendor: $(MODVENDOR) modsensure @echo "vendoring non-go files..." 
- $(MODVENDOR) -copy="**/*.proto" -include=github.com/cosmos/cosmos-sdk/proto,github.com/cosmos/cosmos-sdk/third_party/proto - $(MODVENDOR) -copy="**/Makefile" -include=github.com/cosmos/gogoproto - $(MODVENDOR) -copy="**/*.proto" -include=github.com/cosmos/cosmos-proto/proto - $(MODVENDOR) -copy="**/swagger.yaml" -include=github.com/cosmos/cosmos-proto/client/docs/swagger-ui - $(MODVENDOR) -copy="**/*.proto" -include=k8s.io/apimachinery - @ln -snf ../../vendor/k8s.io .cache/include/k8s.io - @echo "$${VENDOR_BUF}" > vendor/k8s.io/buf.yaml - @echo "$${VENDOR_BUF}" > .cache/include/google/buf.yaml - @echo "$${VENDOR_BUF}" > vendor/github.com/cosmos/cosmos-sdk/proto/buf.yaml - @echo "$${VENDOR_BUF}" > vendor/github.com/cosmos/cosmos-sdk/third_party/proto/buf.yaml + @(cd $(GO_ROOT); \ + $(MODVENDOR) -copy="**/*.proto" -include=k8s.io/apimachinery; \ + $(MODVENDOR) -copy="**/swagger.yaml" -include=github.com/cosmos/cosmos-sdk/client/docs/swagger-ui \ + ) + @mkdir -p .cache/include/k8s + ln -snf ../../../$(GO_ROOT)/vendor/k8s.io .cache/include/k8s/io + echo "$${VENDOR_BUF}" > $(GO_ROOT)/vendor/k8s.io/buf.yaml diff --git a/make/setup-cache.mk b/make/setup-cache.mk index 6c04189b..8f299216 100644 --- a/make/setup-cache.mk +++ b/make/setup-cache.mk @@ -16,7 +16,6 @@ $(AKASH_DEVCACHE): @echo "creating .cache dir structure..." mkdir -p $@ mkdir -p $(AKASH_DEVCACHE_BIN) - mkdir -p $(AKASH_DEVCACHE_BIN)/legacy mkdir -p $(AKASH_DEVCACHE_INCLUDE) mkdir -p $(AKASH_DEVCACHE_VERSIONS) mkdir -p $(AKASH_DEVCACHE_NODE_MODULES) @@ -27,7 +26,7 @@ cache: $(AKASH_DEVCACHE) $(BUF_VERSION_FILE): $(AKASH_DEVCACHE) @echo "installing buf v$(BUF_VERSION) ..." rm -f $(BUF) - GOBIN=$(AKASH_DEVCACHE_BIN) $(GO) install github.com/bufbuild/buf/cmd/buf@v$(BUF_VERSION) + (cd $(GO_ROOT); GOBIN=$(AKASH_DEVCACHE_BIN) $(GO) install github.com/bufbuild/buf/cmd/buf@v$(BUF_VERSION)) rm -rf "$(dir $@)" mkdir -p "$(dir $@)" touch $@ @@ -39,43 +38,34 @@ $(PROTOC_VERSION_FILE): $(AKASH_DEVCACHE) (cd /tmp; \ curl -sOL "https://github.com/protocolbuffers/protobuf/releases/download/v${PROTOC_VERSION}/${PROTOC_ZIP}"; \ unzip -oq ${PROTOC_ZIP} -d $(AKASH_DEVCACHE) bin/protoc; \ - unzip -oq ${PROTOC_ZIP} -d $(AKASH_DEVCACHE) \ - 'include/google/protobuf/descriptor.proto' \ - 'include/google/protobuf/struct.proto' \ - 'include/google/protobuf/empty.proto' \ - 'include/google/protobuf/timestamp.proto'; \ rm -f ${PROTOC_ZIP}) rm -rf "$(dir $@)" mkdir -p "$(dir $@)" touch $@ - # TODO https://github.com/akash-network/support/issues/77 - cp -rf $(PROTOC) $(AKASH_DEVCACHE_BIN)/legacy/ $(PROTOC): $(PROTOC_VERSION_FILE) -# TODO https://github.com/akash-network/support/issues/77 - $(PROTOC_GEN_GOCOSMOS_VERSION_FILE): $(AKASH_DEVCACHE) @echo "installing protoc-gen-gocosmos $(PROTOC_GEN_GOCOSMOS_VERSION) ..." rm -f $(PROTOC_GEN_GOCOSMOS) - GOBIN=$(AKASH_DEVCACHE_BIN)/legacy $(GO) install $(ROOT_DIR)/vendor/github.com/regen-network/cosmos-proto/protoc-gen-gocosmos + (cd $(GO_ROOT); GOBIN=$(AKASH_DEVCACHE_BIN) $(GO) install github.com/cosmos/gogoproto/protoc-gen-gocosmos) rm -rf "$(dir $@)" mkdir -p "$(dir $@)" touch $@ -$(PROTOC_GEN_GOCOSMOS): $(PROTOC_GEN_GOCOSMOS_VERSION_FILE) modvendor +$(PROTOC_GEN_GOCOSMOS): $(PROTOC_GEN_GOCOSMOS_VERSION_FILE) #modvendor $(GOGOPROTO_VERSION_FILE): $(AKASH_DEVCACHE) - @echo "installing gogoproto binaries $(GOGOPROTO_VERSION) ..." + @echo "installing gogoproto $(GOGOPROTO_VERSION) ..." 
rm -f $(BUF) - GOBIN=$(AKASH_DEVCACHE_BIN) make -C vendor/github.com/cosmos/gogoproto install + (cd $(GO_ROOT); GOBIN=$(AKASH_DEVCACHE_BIN) $(GO) install github.com/cosmos/gogoproto/gogoproto) rm -rf "$(dir $@)" mkdir -p "$(dir $@)" touch $@ -gogoproto: $(GOGOPROTO_VERSION_FILE) +$(GOGOPROTO): $(GOGOPROTO_VERSION_FILE) $(PROTOC_GEN_GO_PULSAR_VERSION_FILE): $(AKASH_DEVCACHE) @echo "installing protoc-gen-go-pulsar $(PROTOC_GEN_GO_PULSAR_VERSION) ..." rm -f $(PROTOC_GEN_GO_PULSAR) - GOBIN=$(AKASH_DEVCACHE_BIN) $(GO) install github.com/cosmos/cosmos-proto/cmd/protoc-gen-go-pulsar@$(PROTOC_GEN_GO_PULSAR_VERSION) + (cd $(GO_ROOT); GOBIN=$(AKASH_DEVCACHE_BIN) $(GO) install github.com/cosmos/cosmos-proto/cmd/protoc-gen-go-pulsar@$(PROTOC_GEN_GO_PULSAR_VERSION)) rm -rf "$(dir $@)" mkdir -p "$(dir $@)" touch $@ @@ -84,7 +74,7 @@ $(PROTOC_GEN_GO_PULSAR): $(PROTOC_GEN_GO_PULSAR_VERSION_FILE) $(PROTOC_GEN_GO_VERSION_FILE): $(AKASH_DEVCACHE) @echo "installing protoc-gen-go $(PROTOC_GEN_GO_VERSION) ..." rm -f $(PROTOC_GEN_GO) - GOBIN=$(AKASH_DEVCACHE_BIN) $(GO) install google.golang.org/protobuf/cmd/protoc-gen-go@$(PROTOC_GEN_GO_VERSION) + (cd $(GO_ROOT); GOBIN=$(AKASH_DEVCACHE_BIN) $(GO) install google.golang.org/protobuf/cmd/protoc-gen-go@$(PROTOC_GEN_GO_VERSION)) rm -rf "$(dir $@)" mkdir -p "$(dir $@)" touch $@ @@ -93,7 +83,7 @@ $(PROTOC_GEN_GO): $(PROTOC_GEN_GO_VERSION_FILE) $(PROTOC_GEN_DOC_VERSION_FILE): $(AKASH_DEVCACHE) @echo "installing protoc-gen-doc $(PROTOC_GEN_DOC_VERSION) ..." rm -f $(PROTOC_GEN_DOC) - GOBIN=$(AKASH_DEVCACHE_BIN) $(GO) install github.com/pseudomuto/protoc-gen-doc/cmd/protoc-gen-doc@$(PROTOC_GEN_DOC_VERSION) + (cd $(GO_ROOT); GOBIN=$(AKASH_DEVCACHE_BIN) $(GO) install github.com/pseudomuto/protoc-gen-doc/cmd/protoc-gen-doc@$(PROTOC_GEN_DOC_VERSION)) rm -rf "$(dir $@)" mkdir -p "$(dir $@)" touch $@ @@ -102,7 +92,7 @@ $(PROTOC_GEN_DOC): $(PROTOC_GEN_DOC_VERSION_FILE) $(PROTOC_GEN_GRPC_GATEWAY_VERSION_FILE): $(AKASH_DEVCACHE) @echo "Installing protoc-gen-grpc-gateway $(PROTOC_GEN_GRPC_GATEWAY_VERSION) ..." rm -f $(PROTOC_GEN_GRPC_GATEWAY) - GOBIN=$(AKASH_DEVCACHE_BIN) $(GO) install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway@$(PROTOC_GEN_GRPC_GATEWAY_VERSION) + (cd $(GO_ROOT); GOBIN=$(AKASH_DEVCACHE_BIN) $(GO) install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway@$(PROTOC_GEN_GRPC_GATEWAY_VERSION)) rm -rf "$(dir $@)" mkdir -p "$(dir $@)" touch $@ @@ -111,7 +101,7 @@ $(PROTOC_GEN_GRPC_GATEWAY): $(PROTOC_GEN_GRPC_GATEWAY_VERSION_FILE) $(PROTOC_GEN_SWAGGER_VERSION_FILE): $(AKASH_DEVCACHE) @echo "Installing protoc-gen-grpc-gateway $(PROTOC_GEN_SWAGGER_VERSION) ..." rm -f $(PROTOC_GEN_GRPC_GATEWAY) - GOBIN=$(AKASH_DEVCACHE_BIN) $(GO) install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger@$(PROTOC_GEN_SWAGGER_VERSION) + (cd $(GO_ROOT); GOBIN=$(AKASH_DEVCACHE_BIN) $(GO) install github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger@$(PROTOC_GEN_SWAGGER_VERSION)) rm -rf "$(dir $@)" mkdir -p "$(dir $@)" touch $@ @@ -120,7 +110,7 @@ $(PROTOC_GEN_SWAGGER): $(PROTOC_GEN_SWAGGER_VERSION_FILE) $(MODVENDOR_VERSION_FILE): $(AKASH_DEVCACHE) @echo "installing modvendor $(MODVENDOR_VERSION) ..." 
rm -f $(MODVENDOR) - GOBIN=$(AKASH_DEVCACHE_BIN) $(GO) install github.com/goware/modvendor@$(MODVENDOR_VERSION) + (cd $(GO_ROOT); GOBIN=$(AKASH_DEVCACHE_BIN) $(GO) install github.com/goware/modvendor@$(MODVENDOR_VERSION)) rm -rf "$(dir $@)" mkdir -p "$(dir $@)" touch $@ @@ -129,7 +119,7 @@ $(MODVENDOR): $(MODVENDOR_VERSION_FILE) $(GIT_CHGLOG_VERSION_FILE): $(AKASH_DEVCACHE) @echo "installing git-chglog $(GIT_CHGLOG_VERSION) ..." rm -f $(GIT_CHGLOG) - GOBIN=$(AKASH_DEVCACHE_BIN) go install github.com/git-chglog/git-chglog/cmd/git-chglog@$(GIT_CHGLOG_VERSION) + (cd $(GO_ROOT); GOBIN=$(AKASH_DEVCACHE_BIN) go install github.com/git-chglog/git-chglog/cmd/git-chglog@$(GIT_CHGLOG_VERSION)) rm -rf "$(dir $@)" mkdir -p "$(dir $@)" touch $@ @@ -138,7 +128,7 @@ $(GIT_CHGLOG): $(GIT_CHGLOG_VERSION_FILE) $(MOCKERY_VERSION_FILE): $(AKASH_DEVCACHE) @echo "installing mockery $(MOCKERY_VERSION) ..." rm -f $(MOCKERY) - GOBIN=$(AKASH_DEVCACHE_BIN) go install -ldflags '-s -w -X github.com/vektra/mockery/v2/pkg/config.SemVer=$(MOCKERY_VERSION)' github.com/vektra/mockery/v2@v$(MOCKERY_VERSION) + (cd $(GO_ROOT); GOBIN=$(AKASH_DEVCACHE_BIN) go install -ldflags '-s -w -X github.com/vektra/mockery/v2/pkg/config.SemVer=$(MOCKERY_VERSION)' github.com/vektra/mockery/v2@v$(MOCKERY_VERSION)) rm -rf "$(dir $@)" mkdir -p "$(dir $@)" touch $@ @@ -147,7 +137,7 @@ $(MOCKERY): $(MOCKERY_VERSION_FILE) $(GOLANGCI_LINT_VERSION_FILE): $(AP_DEVCACHE) @echo "installing golangci-lint $(GOLANGCI_LINT_VERSION) ..." rm -f $(MOCKERY) - GOBIN=$(AKASH_DEVCACHE_BIN) go install github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION) + (cd $(GO_ROOT); GOBIN=$(AKASH_DEVCACHE_BIN) go install github.com/golangci/golangci-lint/cmd/golangci-lint@$(GOLANGCI_LINT_VERSION)) rm -rf "$(dir $@)" mkdir -p "$(dir $@)" touch $@ diff --git a/make/test.mk b/make/test.mk index 87ad400b..d9b6f0bc 100644 --- a/make/test.mk +++ b/make/test.mk @@ -1,26 +1,25 @@ -TEST_MODULE ?= ./... -SUB_TESTS ?= go \ -ts +GO_COVER_PACKAGES = $(shell cd $(GO_ROOT); go list $(GO_TEST_DIRS) | grep -v mock | paste -sd, -) + +GO_CURR_MODULE = $(shell go list -m 2>/dev/null) -COVER_PACKAGES = $(shell go list ./... 
| grep -v mock | paste -sd, -) +SUB_TESTS ?= go \ +ts -TEST_TIMEOUT ?= 300 -TEST_RACE ?= 0 -TEST_NOCACHE ?= 0 -TEST_VERBOSE ?= 0 +GO_TEST_OPTS ?= +GO_TEST_TIMEOUT ?= 300 -test_flags := -timeout $(TEST_TIMEOUT)s +test_go_flags := -mod=$(GOMOD) -timeout $(GO_TEST_TIMEOUT)s -ifeq ($(TEST_NOCACHE), 1) -test_flags += -count=1 +ifneq (,$(findstring nocache,$(GO_TEST_OPTS))) +test_go_flags += -count=1 endif -ifeq ($(TEST_RACE), 1) -test_flags += -race +ifneq (,$(findstring race,$(GO_TEST_OPTS))) +test_go_flags += -race endif -ifeq ($(TEST_VERBOSE), 1) -test_flags += -v +ifneq (,$(findstring verbose,$(GO_TEST_OPTS))) +test_go_flags += -v endif .PHONY: test @@ -31,23 +30,20 @@ test-coverage: $(patsubst %, test-coverage-%,$(SUB_TESTS)) .PHONY: test-ts test-ts: $(AKASH_TS_NODE_MODULES) - cd ts && npm run test + cd $(TS_ROOT) && npm run test .PHONY: test-coverage-ts test-coverage-ts: $(AKASH_TS_NODE_MODULES) - cd ts && npm run test:cov + cd $(TS_ROOT) && npm run test:cov .PHONY: test-go +test-go: export GO111MODULE := $(GO111MODULE) +test-coverage-go: export GOWORK := $(GOWORK) test-go: - $(GO) test $(test_flags) $(TEST_MODULE) + @$(TOOLS) gotest "$(GO_MODULES)" "$(test_go_flags)" "$(GO_TEST_DIRS)" .PHONY: test-coverage-go +test-coverage-go: export GO111MODULE := $(GO111MODULE) +test-coverage-go: export GOWORK := $(GOWORK) test-coverage-go: - $(GO) test -coverprofile=coverage.txt \ - -covermode=count \ - -coverpkg="$(COVER_PACKAGES)" \ - $(TEST_MODULE) - -.PHONY: test-go-vet -test-go-vet: - $(GO) vet $(TEST_MODULE) + @$(TOOLS) gocoverage "$(GO_MODULES)" "$(test_go_flags)" "$(GO_TEST_DIRS)" diff --git a/proto/node/akash/audit/v1/audit.proto b/proto/node/akash/audit/v1/audit.proto new file mode 100644 index 00000000..3ad92634 --- /dev/null +++ b/proto/node/akash/audit/v1/audit.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; +package akash.audit.v1; + +import "gogoproto/gogo.proto"; +import "akash/base/attributes/v1/attribute.proto"; + +option go_package = "pkg.akt.dev/go/node/audit/v1"; + +// Provider stores owner auditor and attributes details +message AuditedProvider { + string owner = 1 [ + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + string auditor = 2 [ + (gogoproto.jsontag) = "auditor", + (gogoproto.moretags) = "yaml:\"auditor\"" + ]; + repeated akash.base.attributes.v1.Attribute attributes = 4 [ + (gogoproto.castrepeated) = "pkg.akt.dev/go/node/types/attributes/v1.Attributes", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes", + (gogoproto.moretags) = "yaml:\"attributes\"" + ]; +} + +// Attributes +message AuditedAttributesStore { + option (gogoproto.equal) = false; + repeated akash.base.attributes.v1.Attribute attributes = 1 [ + (gogoproto.castrepeated) = "pkg.akt.dev/go/node/types/attributes/v1.Attributes", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes", + (gogoproto.moretags) = "yaml:\"attributes\"" + ]; +} + +// AttributesFilters defines filters used to filter deployments +message AttributesFilters { + option (gogoproto.equal) = false; + + repeated string auditors = 1 [ + (gogoproto.jsontag) = "auditors", + (gogoproto.moretags) = "yaml:\"auditors\"" + ]; + repeated string owners = 2 [ + (gogoproto.jsontag) = "owners", + (gogoproto.moretags) = "yaml:\"owners\"" + ]; +} diff --git a/proto/node/akash/audit/v1/event.proto b/proto/node/akash/audit/v1/event.proto new file mode 100644 index 00000000..eb036f71 --- /dev/null +++ b/proto/node/akash/audit/v1/event.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; +package akash.audit.v1; + 
+import "gogoproto/gogo.proto"; + +import "cosmos_proto/cosmos.proto"; + +option go_package = "pkg.akt.dev/go/node/audit/v1"; + +// EventTrustedAuditorCreated defines an SDK message for signing a provider attributes +message EventTrustedAuditorCreated { + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + string auditor = 2 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "auditor", + (gogoproto.moretags) = "yaml:\"auditor\"" + ]; +} + +// EventTrustedAuditorCreated defines an SDK message for signing a provider attributes +message EventTrustedAuditorDeleted { + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + string auditor = 2 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "auditor", + (gogoproto.moretags) = "yaml:\"auditor\"" + ]; +} diff --git a/proto/node/akash/audit/v1/genesis.proto b/proto/node/akash/audit/v1/genesis.proto new file mode 100644 index 00000000..932109fc --- /dev/null +++ b/proto/node/akash/audit/v1/genesis.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; +package akash.audit.v1; + +import "gogoproto/gogo.proto"; + +import "akash/audit/v1/audit.proto"; + +option go_package = "pkg.akt.dev/go/node/audit/v1"; + +// GenesisState defines the basic genesis state used by audit module +message GenesisState { + repeated AuditedProvider providers = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "providers", + (gogoproto.moretags) = "yaml:\"providers\"" + ]; +} diff --git a/proto/node/akash/audit/v1/msg.proto b/proto/node/akash/audit/v1/msg.proto new file mode 100644 index 00000000..6d554f7a --- /dev/null +++ b/proto/node/akash/audit/v1/msg.proto @@ -0,0 +1,61 @@ +syntax = "proto3"; +package akash.audit.v1; + +import "gogoproto/gogo.proto"; + +import "cosmos_proto/cosmos.proto"; +import "cosmos/msg/v1/msg.proto"; + +import "akash/base/attributes/v1/attribute.proto"; + +option go_package = "pkg.akt.dev/go/node/audit/v1"; + +// MsgSignProviderAttributes defines an SDK message for signing a provider attributes +message MsgSignProviderAttributes { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "auditor"; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + string auditor = 2 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "auditor", + (gogoproto.moretags) = "yaml:\"auditor\"" + ]; + repeated akash.base.attributes.v1.Attribute attributes = 3 [ + (gogoproto.castrepeated) = "pkg.akt.dev/go/node/types/attributes/v1.Attributes", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes", + (gogoproto.moretags) = "yaml:\"attributes\"" + ]; +} + +// MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. 
+message MsgSignProviderAttributesResponse {} + +// MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes +message MsgDeleteProviderAttributes { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "auditor"; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + string auditor = 2 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "auditor", + (gogoproto.moretags) = "yaml:\"auditor\"" + ]; + repeated string keys = 3 [ + (gogoproto.jsontag) = "keys", + (gogoproto.moretags) = "yaml:\"keys\"" + ]; +} + +// MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. +message MsgDeleteProviderAttributesResponse {} diff --git a/proto/node/akash/audit/v1/query.proto b/proto/node/akash/audit/v1/query.proto new file mode 100644 index 00000000..267a685c --- /dev/null +++ b/proto/node/akash/audit/v1/query.proto @@ -0,0 +1,83 @@ +syntax = "proto3"; + +// buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + +package akash.audit.v1; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "cosmos/base/query/v1beta1/pagination.proto"; + +import "akash/audit/v1/audit.proto"; + +option go_package = "pkg.akt.dev/go/node/audit/v1"; + +// Query defines the gRPC querier service +service Query { + // AllProvidersAttributes queries all providers + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + rpc AllProvidersAttributes(QueryAllProvidersAttributesRequest) returns (QueryProvidersResponse) { + option (google.api.http).get = "/akash/audit/v1/audit/attributes/list"; + } + + // ProviderAttributes queries all provider signed attributes + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + rpc ProviderAttributes(QueryProviderAttributesRequest) returns (QueryProvidersResponse) { + option (google.api.http).get = "/akash/audit/v1/audit/attributes/{owner}/list"; + } + + // ProviderAuditorAttributes queries provider signed attributes by specific auditor + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + rpc ProviderAuditorAttributes(QueryProviderAuditorRequest) returns (QueryProvidersResponse) { + option (google.api.http).get = "/akash/audit/v1/audit/attributes/{auditor}/{owner}"; + } + + // AuditorAttributes queries all providers signed by this auditor + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + rpc AuditorAttributes(QueryAuditorAttributesRequest) returns (QueryProvidersResponse) { + option (google.api.http).get = "/akash/provider/v1/auditor/{auditor}/list"; + } +} + +// QueryProvidersResponse is response type for the Query/Providers RPC method +message QueryProvidersResponse { + repeated AuditedProvider providers = 1 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "AuditedProviders" + ]; + + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryProviderRequest is request type for the Query/Provider RPC method +message QueryProviderRequest { + string auditor = 1; + string owner = 2; +} + +// QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method +message QueryAllProvidersAttributesRequest { + cosmos.base.query.v1beta1.PageRequest pagination = 1; +} + +// QueryProviderAttributesRequest is request type for the Query/Provider RPC method +message 
QueryProviderAttributesRequest { + string owner = 1; + cosmos.base.query.v1beta1.PageRequest pagination = 2; +} + +// QueryProviderAuditorRequest is request type for the Query/Providers RPC method +message QueryProviderAuditorRequest { + string auditor = 1; + string owner = 2; +} + +// QueryAuditorAttributesRequest is request type for the Query/Providers RPC method +message QueryAuditorAttributesRequest { + string auditor = 1; + cosmos.base.query.v1beta1.PageRequest pagination = 2; +} diff --git a/proto/node/akash/audit/v1/service.proto b/proto/node/akash/audit/v1/service.proto new file mode 100644 index 00000000..36e6ba77 --- /dev/null +++ b/proto/node/akash/audit/v1/service.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; +package akash.audit.v1; + +import "cosmos/msg/v1/msg.proto"; + +import "akash/audit/v1/msg.proto"; + +option go_package = "pkg.akt.dev/go/node/audit/v1"; + +// Msg defines the provider Msg service +service Msg { + option (cosmos.msg.v1.service) = true; + + // SignProviderAttributes defines a method that signs provider attributes + rpc SignProviderAttributes(MsgSignProviderAttributes) returns (MsgSignProviderAttributesResponse); + + // DeleteProviderAttributes defines a method that deletes provider attributes + rpc DeleteProviderAttributes(MsgDeleteProviderAttributes) returns (MsgDeleteProviderAttributesResponse); +} diff --git a/proto/node/akash/audit/v1beta1/audit.proto b/proto/node/akash/audit/v1beta1/audit.proto deleted file mode 100644 index 0443e341..00000000 --- a/proto/node/akash/audit/v1beta1/audit.proto +++ /dev/null @@ -1,126 +0,0 @@ -syntax = "proto3"; -package akash.audit.v1beta1; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta1/attribute.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/audit/v1beta1"; - -// Msg defines the provider Msg service -service Msg { - // SignProviderAttributes defines a method that signs provider attributes - rpc SignProviderAttributes(MsgSignProviderAttributes) returns (MsgSignProviderAttributesResponse); - - // DeleteProviderAttributes defines a method that deletes provider attributes - rpc DeleteProviderAttributes(MsgDeleteProviderAttributes) returns (MsgDeleteProviderAttributesResponse); -} - -// Provider stores owner auditor and attributes details -message Provider { - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - string auditor = 2 [ - (gogoproto.jsontag) = "auditor", - (gogoproto.moretags) = "yaml:\"auditor\"" - ]; - - repeated akash.base.v1beta1.Attribute attributes = 4 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta1.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; -} - -// Attributes -message AuditedAttributes { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = true; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - string auditor = 2 [ - (gogoproto.jsontag) = "auditor", - (gogoproto.moretags) = "yaml:\"auditor\"" - ]; - repeated akash.base.v1beta1.Attribute attributes = 3 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta1.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; -} - -// AttributesResponse represents details of deployment along with group details -message AttributesResponse { - option 
(gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = true; - - repeated AuditedAttributes attributes = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; -} - -// AttributesFilters defines filters used to filter deployments -message AttributesFilters { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = true; - - repeated string auditors = 1 [ - (gogoproto.jsontag) = "auditors", - (gogoproto.moretags) = "yaml:\"auditors\"" - ]; - repeated string owners = 2 [ - (gogoproto.jsontag) = "owners", - (gogoproto.moretags) = "yaml:\"owners\"" - ]; -} - -// MsgSignProviderAttributes defines an SDK message for signing a provider attributes -message MsgSignProviderAttributes { - option (gogoproto.equal) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - string auditor = 2 [ - (gogoproto.jsontag) = "auditor", - (gogoproto.moretags) = "yaml:\"auditor\"" - ]; - repeated akash.base.v1beta1.Attribute attributes = 3 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta1.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; -} - -// MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. -message MsgSignProviderAttributesResponse {} - -// MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes -message MsgDeleteProviderAttributes { - option (gogoproto.equal) = false; - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - string auditor = 2 [ - (gogoproto.jsontag) = "auditor", - (gogoproto.moretags) = "yaml:\"auditor\"" - ]; - repeated string keys = 3 [ - (gogoproto.jsontag) = "keys", - (gogoproto.moretags) = "yaml:\"keys\"" - ]; -} - -// MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. 
-message MsgDeleteProviderAttributesResponse {} diff --git a/proto/node/akash/audit/v1beta2/audit.proto b/proto/node/akash/audit/v1beta2/audit.proto deleted file mode 100644 index ca3fc86e..00000000 --- a/proto/node/akash/audit/v1beta2/audit.proto +++ /dev/null @@ -1,99 +0,0 @@ -syntax = "proto3"; -package akash.audit.v1beta2; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta2/attribute.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/audit/v1beta2"; - -// Msg defines the provider Msg service -service Msg { - // SignProviderAttributes defines a method that signs provider attributes - rpc SignProviderAttributes(MsgSignProviderAttributes) returns (MsgSignProviderAttributesResponse); - - // DeleteProviderAttributes defines a method that deletes provider attributes - rpc DeleteProviderAttributes(MsgDeleteProviderAttributes) returns (MsgDeleteProviderAttributesResponse); -} - -// Provider stores owner auditor and attributes details -message Provider { - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - - string auditor = 2 [(gogoproto.jsontag) = "auditor", (gogoproto.moretags) = "yaml:\"auditor\""]; - - repeated akash.base.v1beta2.Attribute attributes = 4 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta2.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; -} - -// Attributes -message AuditedAttributes { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = true; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - - string auditor = 2 [(gogoproto.jsontag) = "auditor", (gogoproto.moretags) = "yaml:\"auditor\""]; - - repeated akash.base.v1beta2.Attribute attributes = 3 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta2.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; -} - -// AttributesResponse represents details of deployment along with group details -message AttributesResponse { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = true; - - repeated AuditedAttributes attributes = 1 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "attributes", (gogoproto.moretags) = "yaml:\"attributes\""]; -} - -// AttributesFilters defines filters used to filter deployments -message AttributesFilters { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = true; - - repeated string auditors = 1 [(gogoproto.jsontag) = "auditors", (gogoproto.moretags) = "yaml:\"auditors\""]; - - repeated string owners = 2 [(gogoproto.jsontag) = "owners", (gogoproto.moretags) = "yaml:\"owners\""]; -} - -// MsgSignProviderAttributes defines an SDK message for signing a provider attributes -message MsgSignProviderAttributes { - option (gogoproto.equal) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - - string auditor = 2 [(gogoproto.jsontag) = "auditor", (gogoproto.moretags) = "yaml:\"auditor\""]; - - repeated akash.base.v1beta2.Attribute attributes = 3 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta2.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; -} - -// MsgSignProviderAttributesResponse defines the 
Msg/CreateProvider response type. -message MsgSignProviderAttributesResponse {} - -// MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes -message MsgDeleteProviderAttributes { - option (gogoproto.equal) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - - string auditor = 2 [(gogoproto.jsontag) = "auditor", (gogoproto.moretags) = "yaml:\"auditor\""]; - - repeated string keys = 3 [(gogoproto.jsontag) = "keys", (gogoproto.moretags) = "yaml:\"keys\""]; -} - -// MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. -message MsgDeleteProviderAttributesResponse {} diff --git a/proto/node/akash/audit/v1beta2/genesis.proto b/proto/node/akash/audit/v1beta2/genesis.proto deleted file mode 100644 index 04cb43c9..00000000 --- a/proto/node/akash/audit/v1beta2/genesis.proto +++ /dev/null @@ -1,13 +0,0 @@ -syntax = "proto3"; -package akash.audit.v1beta2; - -import "gogoproto/gogo.proto"; -import "akash/audit/v1beta2/audit.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/audit/v1beta2"; - -// GenesisState defines the basic genesis state used by audit module -message GenesisState { - repeated AuditedAttributes attributes = 1 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "attributes", (gogoproto.moretags) = "yaml:\"attributes\""]; -} diff --git a/proto/node/akash/audit/v1beta2/query.proto b/proto/node/akash/audit/v1beta2/query.proto deleted file mode 100644 index bd29153b..00000000 --- a/proto/node/akash/audit/v1beta2/query.proto +++ /dev/null @@ -1,79 +0,0 @@ -syntax = "proto3"; - -// buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - -package akash.audit.v1beta2; - -import "gogoproto/gogo.proto"; -import "google/api/annotations.proto"; -import "cosmos/base/query/v1beta1/pagination.proto"; -import "akash/audit/v1beta2/audit.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/audit/v1beta2"; - -// Query defines the gRPC querier service -service Query { - // AllProvidersAttributes queries all providers - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - rpc AllProvidersAttributes(QueryAllProvidersAttributesRequest) returns (QueryProvidersResponse) { - option (google.api.http).get = "/akash/audit/v1beta2/audit/attributes/list"; - } - - // ProviderAttributes queries all provider signed attributes - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - rpc ProviderAttributes(QueryProviderAttributesRequest) returns (QueryProvidersResponse) { - option (google.api.http).get = "/akash/audit/v1beta2/audit/attributes/{owner}/list"; - } - - // ProviderAuditorAttributes queries provider signed attributes by specific auditor - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - rpc ProviderAuditorAttributes(QueryProviderAuditorRequest) returns (QueryProvidersResponse) { - option (google.api.http).get = "/akash/audit/v1beta2/audit/attributes/{auditor}/{owner}"; - } - - // AuditorAttributes queries all providers signed by this auditor - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - rpc AuditorAttributes(QueryAuditorAttributesRequest) returns (QueryProvidersResponse) { - option (google.api.http).get = "/akash/provider/v1beta2/auditor/{auditor}/list"; - } -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -message QueryProvidersResponse { - repeated 
Provider providers = 1 [(gogoproto.nullable) = false, (gogoproto.castrepeated) = "Providers"]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryProviderRequest is request type for the Query/Provider RPC method -message QueryProviderRequest { - string auditor = 1; - string owner = 2; -} - -// QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method -message QueryAllProvidersAttributesRequest { - cosmos.base.query.v1beta1.PageRequest pagination = 1; -} - -// QueryProviderAttributesRequest is request type for the Query/Provider RPC method -message QueryProviderAttributesRequest { - string owner = 1; - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryProviderAuditorRequest is request type for the Query/Providers RPC method -message QueryProviderAuditorRequest { - string auditor = 1; - string owner = 2; -} - -// QueryAuditorAttributesRequest is request type for the Query/Providers RPC method -message QueryAuditorAttributesRequest { - string auditor = 1; - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} diff --git a/proto/node/akash/audit/v1beta3/audit.proto b/proto/node/akash/audit/v1beta3/audit.proto deleted file mode 100644 index ebe5c4ce..00000000 --- a/proto/node/akash/audit/v1beta3/audit.proto +++ /dev/null @@ -1,124 +0,0 @@ -syntax = "proto3"; -package akash.audit.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta3/attribute.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/audit/v1beta3"; - -// Msg defines the provider Msg service -service Msg { - // SignProviderAttributes defines a method that signs provider attributes - rpc SignProviderAttributes(MsgSignProviderAttributes) returns (MsgSignProviderAttributesResponse); - - // DeleteProviderAttributes defines a method that deletes provider attributes - rpc DeleteProviderAttributes(MsgDeleteProviderAttributes) returns (MsgDeleteProviderAttributesResponse); -} - -// Provider stores owner auditor and attributes details -message Provider { - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - string auditor = 2 [ - (gogoproto.jsontag) = "auditor", - (gogoproto.moretags) = "yaml:\"auditor\"" - ]; - repeated akash.base.v1beta3.Attribute attributes = 4 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta3.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; -} - -// Attributes -message AuditedAttributes { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = true; - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - string auditor = 2 [ - (gogoproto.jsontag) = "auditor", - (gogoproto.moretags) = "yaml:\"auditor\"" - ]; - repeated akash.base.v1beta3.Attribute attributes = 3 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta3.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; -} - -// AttributesResponse represents details of deployment along with group details -message AttributesResponse { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = true; - - repeated AuditedAttributes attributes = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; -} - -// AttributesFilters 
defines filters used to filter deployments -message AttributesFilters { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = true; - - repeated string auditors = 1 [ - (gogoproto.jsontag) = "auditors", - (gogoproto.moretags) = "yaml:\"auditors\"" - ]; - repeated string owners = 2 [ - (gogoproto.jsontag) = "owners", - (gogoproto.moretags) = "yaml:\"owners\"" - ]; -} - -// MsgSignProviderAttributes defines an SDK message for signing a provider attributes -message MsgSignProviderAttributes { - option (gogoproto.equal) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - string auditor = 2 [ - (gogoproto.jsontag) = "auditor", - (gogoproto.moretags) = "yaml:\"auditor\"" - ]; - repeated akash.base.v1beta3.Attribute attributes = 3 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta3.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; -} - -// MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. -message MsgSignProviderAttributesResponse {} - -// MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes -message MsgDeleteProviderAttributes { - option (gogoproto.equal) = false; - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - string auditor = 2 [ - (gogoproto.jsontag) = "auditor", - (gogoproto.moretags) = "yaml:\"auditor\"" - ]; - repeated string keys = 3 [ - (gogoproto.jsontag) = "keys", - (gogoproto.moretags) = "yaml:\"keys\"" - ]; -} - -// MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. -message MsgDeleteProviderAttributesResponse {} diff --git a/proto/node/akash/audit/v1beta3/genesis.proto b/proto/node/akash/audit/v1beta3/genesis.proto deleted file mode 100644 index c176f1a7..00000000 --- a/proto/node/akash/audit/v1beta3/genesis.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; -package akash.audit.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/audit/v1beta3/audit.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/audit/v1beta3"; - -// GenesisState defines the basic genesis state used by audit module -message GenesisState { - repeated AuditedAttributes attributes = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; -} diff --git a/proto/node/akash/audit/v1beta3/query.proto b/proto/node/akash/audit/v1beta3/query.proto deleted file mode 100644 index 9991acb4..00000000 --- a/proto/node/akash/audit/v1beta3/query.proto +++ /dev/null @@ -1,82 +0,0 @@ -syntax = "proto3"; - -// buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - -package akash.audit.v1beta3; - -import "gogoproto/gogo.proto"; -import "google/api/annotations.proto"; -import "cosmos/base/query/v1beta1/pagination.proto"; -import "akash/audit/v1beta3/audit.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/audit/v1beta3"; - -// Query defines the gRPC querier service -service Query { - // AllProvidersAttributes queries all providers - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - rpc AllProvidersAttributes(QueryAllProvidersAttributesRequest) returns (QueryProvidersResponse) { - option (google.api.http).get = "/akash/audit/v1beta3/audit/attributes/list"; - } - - // ProviderAttributes queries all provider signed attributes - // 
buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - rpc ProviderAttributes(QueryProviderAttributesRequest) returns (QueryProvidersResponse) { - option (google.api.http).get = "/akash/audit/v1beta3/audit/attributes/{owner}/list"; - } - - // ProviderAuditorAttributes queries provider signed attributes by specific auditor - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - rpc ProviderAuditorAttributes(QueryProviderAuditorRequest) returns (QueryProvidersResponse) { - option (google.api.http).get = "/akash/audit/v1beta3/audit/attributes/{auditor}/{owner}"; - } - - // AuditorAttributes queries all providers signed by this auditor - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - rpc AuditorAttributes(QueryAuditorAttributesRequest) returns (QueryProvidersResponse) { - option (google.api.http).get = "/akash/provider/v1beta3/auditor/{auditor}/list"; - } -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -message QueryProvidersResponse { - repeated Provider providers = 1 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Providers" - ]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryProviderRequest is request type for the Query/Provider RPC method -message QueryProviderRequest { - string auditor = 1; - string owner = 2; -} - -// QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method -message QueryAllProvidersAttributesRequest { - cosmos.base.query.v1beta1.PageRequest pagination = 1; -} - -// QueryProviderAttributesRequest is request type for the Query/Provider RPC method -message QueryProviderAttributesRequest { - string owner = 1; - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryProviderAuditorRequest is request type for the Query/Providers RPC method -message QueryProviderAuditorRequest { - string auditor = 1; - string owner = 2; -} - -// QueryAuditorAttributesRequest is request type for the Query/Providers RPC method -message QueryAuditorAttributesRequest { - string auditor = 1; - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} diff --git a/proto/node/akash/base/attributes/v1/attribute.proto b/proto/node/akash/base/attributes/v1/attribute.proto new file mode 100644 index 00000000..dc5bd411 --- /dev/null +++ b/proto/node/akash/base/attributes/v1/attribute.proto @@ -0,0 +1,57 @@ +syntax = "proto3"; + +package akash.base.attributes.v1; + +import "gogoproto/gogo.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = false; +option go_package = "pkg.akt.dev/go/node/types/attributes/v1"; + +// Attribute represents key value pair +message Attribute { + option (gogoproto.goproto_getters) = false; + string key = 1 [(gogoproto.moretags) = "yaml:\"key\""]; + string value = 2 [(gogoproto.moretags) = "yaml:\"value\""]; +} + +// SignedBy represents validation accounts that tenant expects signatures for provider attributes +// AllOf has precedence i.e. 
if there is at least one entry AnyOf is ignored regardless to how many +// entries there +// this behaviour to be discussed +message SignedBy { + option (gogoproto.goproto_getters) = false; + option (gogoproto.stringer) = false; + + // all_of all keys in this list must have signed attributes + repeated string all_of = 1 [ + (gogoproto.jsontag) = "all_of", + (gogoproto.moretags) = "yaml:\"allOf\"" + ]; + // any_of at least of of the keys from the list must have signed attributes + repeated string any_of = 2 [ + (gogoproto.jsontag) = "any_of", + (gogoproto.moretags) = "yaml:\"anyOf\"" + ]; +} + +// PlacementRequirements +message PlacementRequirements { + option (gogoproto.goproto_getters) = false; + option (gogoproto.stringer) = false; + + // SignedBy list of keys that tenants expect to have signatures from + SignedBy signed_by = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "signed_by", + (gogoproto.moretags) = "yaml:\"signed_by\"" + ]; + + // Attribute list of attributes tenant expects from the provider + repeated Attribute attributes = 2 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Attributes", + (gogoproto.jsontag) = "attributes", + (gogoproto.moretags) = "yaml:\"attributes\"" + ]; +} diff --git a/proto/node/akash/base/resources/v1beta4/cpu.proto b/proto/node/akash/base/resources/v1beta4/cpu.proto new file mode 100644 index 00000000..9be985e7 --- /dev/null +++ b/proto/node/akash/base/resources/v1beta4/cpu.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; +package akash.base.resources.v1beta4; + +import "gogoproto/gogo.proto"; + +import "akash/base/attributes/v1/attribute.proto"; +import "akash/base/resources/v1beta4/resourcevalue.proto"; + +option go_package = "pkg.akt.dev/go/node/types/resources/v1beta4"; + +// CPU stores resource units and cpu config attributes +message CPU { + option (gogoproto.equal) = true; + ResourceValue units = 1 [ + (gogoproto.nullable) = false + ]; + repeated akash.base.attributes.v1.Attribute attributes = 2 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "pkg.akt.dev/go/node/types/attributes/v1.Attributes", + (gogoproto.jsontag) = "attributes,omitempty", + (gogoproto.moretags) = "yaml:\"attributes,omitempty\"" + ]; +} diff --git a/proto/node/akash/base/resources/v1beta4/endpoint.proto b/proto/node/akash/base/resources/v1beta4/endpoint.proto new file mode 100644 index 00000000..8c1dc297 --- /dev/null +++ b/proto/node/akash/base/resources/v1beta4/endpoint.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; +package akash.base.resources.v1beta4; + +import "gogoproto/gogo.proto"; + +option go_package = "pkg.akt.dev/go/node/types/resources/v1beta4"; + +// Endpoint describes a publicly accessible IP service +message Endpoint { + option (gogoproto.equal) = true; + + // This describes how the endpoint is implemented when the lease is deployed + enum Kind { + // Describes an endpoint that becomes a Kubernetes Ingress + SHARED_HTTP = 0; + // Describes an endpoint that becomes a Kubernetes NodePort + RANDOM_PORT = 1; + // Describes an endpoint that becomes a leased IP + LEASED_IP = 2; + } + + Kind kind = 1; + uint32 sequence_number = 2 [ + (gogoproto.customname) = "SequenceNumber", + (gogoproto.jsontag) = "sequence_number", + (gogoproto.moretags) = "yaml:\"sequence_number\"" + ]; +} diff --git a/proto/node/akash/base/resources/v1beta4/gpu.proto b/proto/node/akash/base/resources/v1beta4/gpu.proto new file mode 100644 index 00000000..01c40105 --- /dev/null +++ b/proto/node/akash/base/resources/v1beta4/gpu.proto @@ -0,0 +1,22 @@ +syntax = 
"proto3"; +package akash.base.resources.v1beta4; + +import "gogoproto/gogo.proto"; +import "akash/base/attributes/v1/attribute.proto"; +import "akash/base/resources/v1beta4/resourcevalue.proto"; + +option go_package = "pkg.akt.dev/go/node/types/resources/v1beta4"; + +// GPU stores resource units and cpu config attributes +message GPU { + option (gogoproto.equal) = true; + ResourceValue units = 1 [ + (gogoproto.nullable) = false + ]; + repeated akash.base.attributes.v1.Attribute attributes = 2 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "pkg.akt.dev/go/node/types/attributes/v1.Attributes", + (gogoproto.jsontag) = "attributes,omitempty", + (gogoproto.moretags) = "yaml:\"attributes,omitempty\"" + ]; +} diff --git a/proto/node/akash/base/resources/v1beta4/memory.proto b/proto/node/akash/base/resources/v1beta4/memory.proto new file mode 100644 index 00000000..1b9ad7ba --- /dev/null +++ b/proto/node/akash/base/resources/v1beta4/memory.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; +package akash.base.resources.v1beta4; + +import "gogoproto/gogo.proto"; + +import "akash/base/attributes/v1/attribute.proto"; +import "akash/base/resources/v1beta4/resourcevalue.proto"; + +option go_package = "pkg.akt.dev/go/node/types/resources/v1beta4"; + +// Memory stores resource quantity and memory attributes +message Memory { + option (gogoproto.equal) = true; + ResourceValue quantity = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "size", + (gogoproto.moretags) = "yaml:\"size\"" + ]; + repeated akash.base.attributes.v1.Attribute attributes = 2 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "pkg.akt.dev/go/node/types/attributes/v1.Attributes", + (gogoproto.jsontag) = "attributes,omitempty", + (gogoproto.moretags) = "yaml:\"attributes,omitempty\"" + ]; +} diff --git a/proto/node/akash/base/resources/v1beta4/resources.proto b/proto/node/akash/base/resources/v1beta4/resources.proto new file mode 100644 index 00000000..cc580a95 --- /dev/null +++ b/proto/node/akash/base/resources/v1beta4/resources.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; +package akash.base.resources.v1beta4; + +import "gogoproto/gogo.proto"; + +import "akash/base/resources/v1beta4/cpu.proto"; +import "akash/base/resources/v1beta4/gpu.proto"; +import "akash/base/resources/v1beta4/memory.proto"; +import "akash/base/resources/v1beta4/storage.proto"; +import "akash/base/resources/v1beta4/endpoint.proto"; + +option go_package = "pkg.akt.dev/go/node/types/resources/v1beta4"; + +// Resources describes all available resources types for deployment/node etc +// if field is nil resource is not present in the given data-structure +message Resources { + option (gogoproto.equal) = true; + uint32 id = 1 [ + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + CPU cpu = 2 [ + (gogoproto.nullable) = true, + (gogoproto.customname) = "CPU", + (gogoproto.jsontag) = "cpu,omitempty", + (gogoproto.moretags) = "yaml:\"cpu,omitempty\"" + ]; + Memory memory = 3 [ + (gogoproto.nullable) = true, + (gogoproto.jsontag) = "memory,omitempty", + (gogoproto.moretags) = "yaml:\"memory,omitempty\"" + ]; + repeated Storage storage = 4 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Volumes", + (gogoproto.jsontag) = "storage,omitempty", + (gogoproto.moretags) = "yaml:\"storage,omitempty\"" + ]; + GPU gpu = 5 [ + (gogoproto.nullable) = true, + (gogoproto.customname) = "GPU", + (gogoproto.jsontag) = "gpu,omitempty", + (gogoproto.moretags) = "yaml:\"gpu,omitempty\"" + ]; + 
repeated Endpoint endpoints = 6 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Endpoints", + (gogoproto.jsontag) = "endpoints,omitempty", + (gogoproto.moretags) = "yaml:\"endpoints,omitempty\"" + ]; +} diff --git a/proto/node/akash/base/resources/v1beta4/resourcevalue.proto b/proto/node/akash/base/resources/v1beta4/resourcevalue.proto new file mode 100644 index 00000000..c969c4c8 --- /dev/null +++ b/proto/node/akash/base/resources/v1beta4/resourcevalue.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package akash.base.resources.v1beta4; + +import "gogoproto/gogo.proto"; + +option go_package = "pkg.akt.dev/go/node/types/resources/v1beta4"; + +// Unit stores cpu, memory and storage metrics +message ResourceValue { + option (gogoproto.equal) = true; + bytes val = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int" + ]; +} diff --git a/proto/node/akash/base/resources/v1beta4/storage.proto b/proto/node/akash/base/resources/v1beta4/storage.proto new file mode 100644 index 00000000..2f37082d --- /dev/null +++ b/proto/node/akash/base/resources/v1beta4/storage.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; +package akash.base.resources.v1beta4; + +import "gogoproto/gogo.proto"; +import "akash/base/attributes/v1/attribute.proto"; +import "akash/base/resources/v1beta4/resourcevalue.proto"; + +option go_package = "pkg.akt.dev/go/node/types/resources/v1beta4"; + +// Storage stores resource quantity and storage attributes +message Storage { + option (gogoproto.equal) = true; + string name = 1 [ + (gogoproto.jsontag) = "name", + (gogoproto.moretags) = "yaml:\"name\"" + ]; + ResourceValue quantity = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "size", + (gogoproto.moretags) = "yaml:\"size\"" + ]; + repeated akash.base.attributes.v1.Attribute attributes = 3 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "pkg.akt.dev/go/node/types/attributes/v1.Attributes", + (gogoproto.jsontag) = "attributes,omitempty", + (gogoproto.moretags) = "yaml:\"attributes,omitempty\"" + ]; +} diff --git a/proto/node/akash/base/v1beta1/attribute.proto b/proto/node/akash/base/v1beta1/attribute.proto deleted file mode 100644 index f2a40fe5..00000000 --- a/proto/node/akash/base/v1beta1/attribute.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -package akash.base.v1beta1; - -import "gogoproto/gogo.proto"; - -option (gogoproto.goproto_stringer_all) = false; -option (gogoproto.stringer_all) = false; -option go_package = "github.com/akash-network/akash-api/go/node/types/v1beta1"; - -// Attribute represents key value pair -message Attribute { - option (gogoproto.goproto_getters) = false; - string key = 1 [(gogoproto.moretags) = "yaml:\"key\""]; - string value = 2 [(gogoproto.moretags) = "yaml:\"value\""]; -} - -// SignedBy represents validation accounts that tenant expects signatures for provider attributes -// AllOf has precedence i.e. 
if there is at least one entry AnyOf is ignored regardless to how many -// entries there -// this behaviour to be discussed -message SignedBy { - option (gogoproto.goproto_getters) = false; - // all_of all keys in this list must have signed attributes - repeated string all_of = 1 [(gogoproto.jsontag) = "all_of", (gogoproto.moretags) = "yaml:\"allOf\""]; - // any_of at least of of the keys from the list must have signed attributes - repeated string any_of = 2 [(gogoproto.jsontag) = "any_of", (gogoproto.moretags) = "yaml:\"anyOf\""]; -} - -// PlacementRequirements -message PlacementRequirements { - option (gogoproto.goproto_getters) = false; - - // SignedBy list of keys that tenants expect to have signatures from - SignedBy signed_by = 1 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "signed_by", (gogoproto.moretags) = "yaml:\"signed_by\""]; - - // Attribute list of attributes tenant expects from the provider - repeated Attribute attributes = 2 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "attributes", (gogoproto.moretags) = "yaml:\"attributes\""]; -} diff --git a/proto/node/akash/base/v1beta1/endpoint.proto b/proto/node/akash/base/v1beta1/endpoint.proto deleted file mode 100644 index f06c8161..00000000 --- a/proto/node/akash/base/v1beta1/endpoint.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; -package akash.base.v1beta1; - -import "gogoproto/gogo.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/types/v1beta1"; - -// Endpoint describes a publicly accessible IP service -message Endpoint { - option (gogoproto.equal) = true; - // This describes how the endpoint is implemented when the lease is deployed - enum Kind { - // Describes an endpoint that becomes a Kubernetes Ingress - SHARED_HTTP = 0; - // Describes an endpoint that becomes a Kubernetes NodePort - RANDOM_PORT = 1; - } - Kind kind = 1; -} diff --git a/proto/node/akash/base/v1beta1/resource.proto b/proto/node/akash/base/v1beta1/resource.proto deleted file mode 100644 index 9a969391..00000000 --- a/proto/node/akash/base/v1beta1/resource.proto +++ /dev/null @@ -1,68 +0,0 @@ -syntax = "proto3"; -package akash.base.v1beta1; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta1/attribute.proto"; -import "akash/base/v1beta1/resourcevalue.proto"; -import "akash/base/v1beta1/endpoint.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/types/v1beta1"; - -// CPU stores resource units and cpu config attributes -message CPU { - option (gogoproto.equal) = true; - ResourceValue units = 1 [(gogoproto.nullable) = false]; - repeated Attribute attributes = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes,omitempty", - (gogoproto.moretags) = "yaml:\"cpu,omitempty\"" - ]; -} - -// Memory stores resource quantity and memory attributes -message Memory { - option (gogoproto.equal) = true; - ResourceValue quantity = 1 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "size", (gogoproto.moretags) = "yaml:\"size\""]; - repeated Attribute attributes = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes,omitempty", - (gogoproto.moretags) = "yaml:\"cpu,omitempty\"" - ]; -} - -// Storage stores resource quantity and storage attributes -message Storage { - option (gogoproto.equal) = true; - ResourceValue quantity = 1 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "size", (gogoproto.moretags) = "yaml:\"size\""]; - repeated Attribute attributes = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes,omitempty", - 
(gogoproto.moretags) = "yaml:\"cpu,omitempty\"" - ]; -} - -// ResourceUnits describes all available resources types for deployment/node etc -// if field is nil resource is not present in the given data-structure -message ResourceUnits { - option (gogoproto.equal) = true; - CPU cpu = 1 [ - (gogoproto.nullable) = true, - (gogoproto.customname) = "CPU", - (gogoproto.jsontag) = "cpu,omitempty", - (gogoproto.moretags) = "yaml:\"cpu,omitempty\"" - ]; - Memory memory = 2 [ - (gogoproto.nullable) = true, - (gogoproto.jsontag) = "memory,omitempty", - (gogoproto.moretags) = "yaml:\"memory,omitempty\"" - ]; - Storage storage = 3 [ - (gogoproto.nullable) = true, - (gogoproto.jsontag) = "storage,omitempty", - (gogoproto.moretags) = "yaml:\"storage,omitempty\"" - ]; - repeated akash.base.v1beta1.Endpoint endpoints = 4 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "endpoints", (gogoproto.moretags) = "yaml:\"endpoints\""]; -} diff --git a/proto/node/akash/base/v1beta1/resourcevalue.proto b/proto/node/akash/base/v1beta1/resourcevalue.proto deleted file mode 100644 index 7333afd2..00000000 --- a/proto/node/akash/base/v1beta1/resourcevalue.proto +++ /dev/null @@ -1,12 +0,0 @@ -syntax = "proto3"; -package akash.base.v1beta1; - -import "gogoproto/gogo.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/types/v1beta1"; - -// Unit stores cpu, memory and storage metrics -message ResourceValue { - option (gogoproto.equal) = true; - bytes val = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int"]; -} diff --git a/proto/node/akash/base/v1beta2/attribute.proto b/proto/node/akash/base/v1beta2/attribute.proto deleted file mode 100644 index b8bc9ad4..00000000 --- a/proto/node/akash/base/v1beta2/attribute.proto +++ /dev/null @@ -1,41 +0,0 @@ -syntax = "proto3"; - -package akash.base.v1beta2; - -import "gogoproto/gogo.proto"; - -option (gogoproto.goproto_stringer_all) = false; -option (gogoproto.stringer_all) = false; -option go_package = "github.com/akash-network/akash-api/go/node/types/v1beta2"; - -// Attribute represents key value pair -message Attribute { - option (gogoproto.goproto_getters) = false; - string key = 1 [(gogoproto.moretags) = "yaml:\"key\""]; - string value = 2 [(gogoproto.moretags) = "yaml:\"value\""]; -} - -// SignedBy represents validation accounts that tenant expects signatures for provider attributes -// AllOf has precedence i.e. 
if there is at least one entry AnyOf is ignored regardless to how many -// entries there -// this behaviour to be discussed -message SignedBy { - option (gogoproto.goproto_getters) = false; - // all_of all keys in this list must have signed attributes - repeated string all_of = 1 [(gogoproto.jsontag) = "all_of", (gogoproto.moretags) = "yaml:\"allOf\""]; - // any_of at least of of the keys from the list must have signed attributes - repeated string any_of = 2 [(gogoproto.jsontag) = "any_of", (gogoproto.moretags) = "yaml:\"anyOf\""]; -} - -// PlacementRequirements -message PlacementRequirements { - option (gogoproto.goproto_getters) = false; - - // SignedBy list of keys that tenants expect to have signatures from - SignedBy signed_by = 1 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "signed_by", (gogoproto.moretags) = "yaml:\"signed_by\""]; - - // Attribute list of attributes tenant expects from the provider - repeated Attribute attributes = 2 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "attributes", (gogoproto.moretags) = "yaml:\"attributes\""]; -} diff --git a/proto/node/akash/base/v1beta2/endpoint.proto b/proto/node/akash/base/v1beta2/endpoint.proto deleted file mode 100644 index 7f17000d..00000000 --- a/proto/node/akash/base/v1beta2/endpoint.proto +++ /dev/null @@ -1,23 +0,0 @@ -syntax = "proto3"; -package akash.base.v1beta2; - -import "gogoproto/gogo.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/types/v1beta2"; - -// Endpoint describes a publicly accessible IP service -message Endpoint { - option (gogoproto.equal) = true; - // This describes how the endpoint is implemented when the lease is deployed - enum Kind { - // Describes an endpoint that becomes a Kubernetes Ingress - SHARED_HTTP = 0; - // Describes an endpoint that becomes a Kubernetes NodePort - RANDOM_PORT = 1; - // Describes an endpoint that becomes a leased IP - LEASED_IP = 2; - } - Kind kind = 1; - uint32 sequence_number = 2 - [(gogoproto.customname) = "SequenceNumber", (gogoproto.jsontag) = "sequence_number", (gogoproto.moretags) = "yaml:\"sequence_number\""]; -} diff --git a/proto/node/akash/base/v1beta2/resource.proto b/proto/node/akash/base/v1beta2/resource.proto deleted file mode 100644 index deb472f0..00000000 --- a/proto/node/akash/base/v1beta2/resource.proto +++ /dev/null @@ -1,56 +0,0 @@ -syntax = "proto3"; -package akash.base.v1beta2; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta2/attribute.proto"; -import "akash/base/v1beta2/resourcevalue.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/types/v1beta2"; - -// CPU stores resource units and cpu config attributes -message CPU { - option (gogoproto.equal) = true; - ResourceValue units = 1 [(gogoproto.nullable) = false]; - repeated Attribute attributes = 2 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Attributes", - (gogoproto.jsontag) = "attributes,omitempty", - (gogoproto.moretags) = "yaml:\"attributes,omitempty\"" - ]; -} - -// Memory stores resource quantity and memory attributes -message Memory { - option (gogoproto.equal) = true; - ResourceValue quantity = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "size", - (gogoproto.moretags) = "yaml:\"size\"" - ]; - repeated Attribute attributes = 2 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Attributes", - (gogoproto.jsontag) = "attributes,omitempty", - (gogoproto.moretags) = "yaml:\"attributes,omitempty\"" - ]; -} - -// Storage stores resource quantity and storage attributes 
-message Storage { - option (gogoproto.equal) = true; - string name = 1 [ - (gogoproto.jsontag) = "name", - (gogoproto.moretags) = "yaml:\"name\"" - ]; - ResourceValue quantity = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "size", - (gogoproto.moretags) = "yaml:\"size\"" - ]; - repeated Attribute attributes = 3 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Attributes", - (gogoproto.jsontag) = "attributes,omitempty", - (gogoproto.moretags) = "yaml:\"attributes,omitempty\"" - ]; -} diff --git a/proto/node/akash/base/v1beta2/resourceunits.proto b/proto/node/akash/base/v1beta2/resourceunits.proto deleted file mode 100644 index 55c06141..00000000 --- a/proto/node/akash/base/v1beta2/resourceunits.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; -package akash.base.v1beta2; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta2/resource.proto"; -import "akash/base/v1beta2/endpoint.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/types/v1beta2"; - -// ResourceUnits describes all available resources types for deployment/node etc -// if field is nil resource is not present in the given data-structure -message ResourceUnits { - option (gogoproto.equal) = true; - CPU cpu = 1 [ - (gogoproto.nullable) = true, - (gogoproto.customname) = "CPU", - (gogoproto.jsontag) = "cpu,omitempty", - (gogoproto.moretags) = "yaml:\"cpu,omitempty\"" - ]; - Memory memory = 2 [ - (gogoproto.nullable) = true, - (gogoproto.jsontag) = "memory,omitempty", - (gogoproto.moretags) = "yaml:\"memory,omitempty\"" - ]; - repeated Storage storage = 3 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Volumes", - (gogoproto.jsontag) = "storage,omitempty", - (gogoproto.moretags) = "yaml:\"storage,omitempty\"" - ]; - repeated akash.base.v1beta2.Endpoint endpoints = 4 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Endpoints", - (gogoproto.jsontag) = "endpoints", - (gogoproto.moretags) = "yaml:\"endpoints\"" - ]; -} diff --git a/proto/node/akash/base/v1beta2/resourcevalue.proto b/proto/node/akash/base/v1beta2/resourcevalue.proto deleted file mode 100644 index e8315484..00000000 --- a/proto/node/akash/base/v1beta2/resourcevalue.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; -package akash.base.v1beta2; - -import "gogoproto/gogo.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/types/v1beta2"; - -// Unit stores cpu, memory and storage metrics -message ResourceValue { - option (gogoproto.equal) = true; - bytes val = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int" - ]; -} diff --git a/proto/node/akash/base/v1beta3/attribute.proto b/proto/node/akash/base/v1beta3/attribute.proto deleted file mode 100644 index 5c02a80d..00000000 --- a/proto/node/akash/base/v1beta3/attribute.proto +++ /dev/null @@ -1,54 +0,0 @@ -syntax = "proto3"; - -package akash.base.v1beta3; - -import "gogoproto/gogo.proto"; - -option (gogoproto.goproto_stringer_all) = false; -option (gogoproto.stringer_all) = false; -option go_package = "github.com/akash-network/akash-api/go/node/types/v1beta3"; - -// Attribute represents key value pair -message Attribute { - option (gogoproto.goproto_getters) = false; - string key = 1 [(gogoproto.moretags) = "yaml:\"key\""]; - string value = 2 [(gogoproto.moretags) = "yaml:\"value\""]; -} - -// SignedBy represents validation accounts that tenant expects signatures for provider attributes -// AllOf has precedence i.e. 
if there is at least one entry AnyOf is ignored regardless to how many -// entries there -// this behaviour to be discussed -message SignedBy { - option (gogoproto.goproto_getters) = false; - // all_of all keys in this list must have signed attributes - repeated string all_of = 1 [ - (gogoproto.jsontag) = "all_of", - (gogoproto.moretags) = "yaml:\"allOf\"" - ]; - // any_of at least of of the keys from the list must have signed attributes - repeated string any_of = 2 [ - (gogoproto.jsontag) = "any_of", - (gogoproto.moretags) = "yaml:\"anyOf\"" - ]; -} - -// PlacementRequirements -message PlacementRequirements { - option (gogoproto.goproto_getters) = false; - - // SignedBy list of keys that tenants expect to have signatures from - SignedBy signed_by = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "signed_by", - (gogoproto.moretags) = "yaml:\"signed_by\"" - ]; - - // Attribute list of attributes tenant expects from the provider - repeated Attribute attributes = 2 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Attributes", - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; -} diff --git a/proto/node/akash/base/v1beta3/cpu.proto b/proto/node/akash/base/v1beta3/cpu.proto deleted file mode 100644 index 40ab2b33..00000000 --- a/proto/node/akash/base/v1beta3/cpu.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto3"; -package akash.base.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta3/attribute.proto"; -import "akash/base/v1beta3/resourcevalue.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/types/v1beta3"; - -// CPU stores resource units and cpu config attributes -message CPU { - option (gogoproto.equal) = true; - ResourceValue units = 1 [(gogoproto.nullable) = false]; - repeated Attribute attributes = 2 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Attributes", - (gogoproto.jsontag) = "attributes,omitempty", - (gogoproto.moretags) = "yaml:\"attributes,omitempty\"" - ]; -} diff --git a/proto/node/akash/base/v1beta3/endpoint.proto b/proto/node/akash/base/v1beta3/endpoint.proto deleted file mode 100644 index 59db93c6..00000000 --- a/proto/node/akash/base/v1beta3/endpoint.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; -package akash.base.v1beta3; - -import "gogoproto/gogo.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/types/v1beta3"; - -// Endpoint describes a publicly accessible IP service -message Endpoint { - option (gogoproto.equal) = true; - - // This describes how the endpoint is implemented when the lease is deployed - enum Kind { - // Describes an endpoint that becomes a Kubernetes Ingress - SHARED_HTTP = 0; - // Describes an endpoint that becomes a Kubernetes NodePort - RANDOM_PORT = 1; - // Describes an endpoint that becomes a leased IP - LEASED_IP = 2; - } - - Kind kind = 1; - uint32 sequence_number = 2 [ - (gogoproto.customname) = "SequenceNumber", - (gogoproto.jsontag) = "sequence_number", - (gogoproto.moretags) = "yaml:\"sequence_number\"" - ]; -} diff --git a/proto/node/akash/base/v1beta3/gpu.proto b/proto/node/akash/base/v1beta3/gpu.proto deleted file mode 100644 index 8141d0bd..00000000 --- a/proto/node/akash/base/v1beta3/gpu.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; -package akash.base.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta3/attribute.proto"; -import "akash/base/v1beta3/resourcevalue.proto"; - -option go_package = 
"github.com/akash-network/akash-api/go/node/types/v1beta3"; - -// GPU stores resource units and cpu config attributes -message GPU { - option (gogoproto.equal) = true; - ResourceValue units = 1 [ - (gogoproto.nullable) = false - ]; - repeated Attribute attributes = 2 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Attributes", - (gogoproto.jsontag) = "attributes,omitempty", - (gogoproto.moretags) = "yaml:\"attributes,omitempty\"" - ]; -} diff --git a/proto/node/akash/base/v1beta3/memory.proto b/proto/node/akash/base/v1beta3/memory.proto deleted file mode 100644 index bc778a48..00000000 --- a/proto/node/akash/base/v1beta3/memory.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; -package akash.base.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta3/attribute.proto"; -import "akash/base/v1beta3/resourcevalue.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/types/v1beta3"; - -// Memory stores resource quantity and memory attributes -message Memory { - option (gogoproto.equal) = true; - ResourceValue quantity = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "size", - (gogoproto.moretags) = "yaml:\"size\"" - ]; - repeated Attribute attributes = 2 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Attributes", - (gogoproto.jsontag) = "attributes,omitempty", - (gogoproto.moretags) = "yaml:\"attributes,omitempty\"" - ]; -} diff --git a/proto/node/akash/base/v1beta3/resources.proto b/proto/node/akash/base/v1beta3/resources.proto deleted file mode 100644 index 050ed269..00000000 --- a/proto/node/akash/base/v1beta3/resources.proto +++ /dev/null @@ -1,51 +0,0 @@ -syntax = "proto3"; -package akash.base.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta3/cpu.proto"; -import "akash/base/v1beta3/gpu.proto"; -import "akash/base/v1beta3/memory.proto"; -import "akash/base/v1beta3/storage.proto"; -import "akash/base/v1beta3/endpoint.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/types/v1beta3"; - -// Resources describes all available resources types for deployment/node etc -// if field is nil resource is not present in the given data-structure -message Resources { - option (gogoproto.equal) = true; - uint32 id = 1 [ - (gogoproto.customname) = "ID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - CPU cpu = 2 [ - (gogoproto.nullable) = true, - (gogoproto.customname) = "CPU", - (gogoproto.jsontag) = "cpu,omitempty", - (gogoproto.moretags) = "yaml:\"cpu,omitempty\"" - ]; - Memory memory = 3 [ - (gogoproto.nullable) = true, - (gogoproto.jsontag) = "memory,omitempty", - (gogoproto.moretags) = "yaml:\"memory,omitempty\"" - ]; - repeated Storage storage = 4 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Volumes", - (gogoproto.jsontag) = "storage,omitempty", - (gogoproto.moretags) = "yaml:\"storage,omitempty\"" - ]; - GPU gpu = 5 [ - (gogoproto.nullable) = true, - (gogoproto.customname) = "GPU", - (gogoproto.jsontag) = "gpu,omitempty", - (gogoproto.moretags) = "yaml:\"gpu,omitempty\"" - ]; - repeated Endpoint endpoints = 6 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Endpoints", - (gogoproto.jsontag) = "endpoints", - (gogoproto.moretags) = "yaml:\"endpoints\"" - ]; -} diff --git a/proto/node/akash/base/v1beta3/resourcevalue.proto b/proto/node/akash/base/v1beta3/resourcevalue.proto deleted file mode 100644 index 9b4a1e91..00000000 --- a/proto/node/akash/base/v1beta3/resourcevalue.proto +++ /dev/null @@ -1,15 +0,0 @@ 
-syntax = "proto3"; -package akash.base.v1beta3; - -import "gogoproto/gogo.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/types/v1beta3"; - -// Unit stores cpu, memory and storage metrics -message ResourceValue { - option (gogoproto.equal) = true; - bytes val = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int" - ]; -} diff --git a/proto/node/akash/base/v1beta3/storage.proto b/proto/node/akash/base/v1beta3/storage.proto deleted file mode 100644 index a1d09e3c..00000000 --- a/proto/node/akash/base/v1beta3/storage.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; -package akash.base.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta3/attribute.proto"; -import "akash/base/v1beta3/resourcevalue.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/types/v1beta3"; - -// Storage stores resource quantity and storage attributes -message Storage { - option (gogoproto.equal) = true; - string name = 1 [ - (gogoproto.jsontag) = "name", - (gogoproto.moretags) = "yaml:\"name\"" - ]; - ResourceValue quantity = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "size", - (gogoproto.moretags) = "yaml:\"size\"" - ]; - repeated Attribute attributes = 3 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Attributes", - (gogoproto.jsontag) = "attributes,omitempty", - (gogoproto.moretags) = "yaml:\"attributes,omitempty\"" - ]; -} diff --git a/proto/node/akash/cert/v1/cert.proto b/proto/node/akash/cert/v1/cert.proto new file mode 100644 index 00000000..d8826ff5 --- /dev/null +++ b/proto/node/akash/cert/v1/cert.proto @@ -0,0 +1,60 @@ +syntax = "proto3"; +package akash.cert.v1; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; + +option go_package = "pkg.akt.dev/go/node/cert/v1"; + +// State is an enum which refers to state of deployment +enum State { + option (gogoproto.goproto_enum_prefix) = false; + + // Prefix should start with 0 in enum. 
So declaring dummy state + invalid = 0 [ + (gogoproto.enumvalue_customname) = "CertificateStateInvalid" + ]; + // CertificateValid denotes state for deployment active + valid = 1 [ + (gogoproto.enumvalue_customname) = "CertificateValid" + ]; + // CertificateRevoked denotes state for deployment closed + revoked = 2 [ + (gogoproto.enumvalue_customname) = "CertificateRevoked" + ]; +} + +// ID stores owner and sequence number +message ID { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + + string serial = 2 [ + (gogoproto.jsontag) = "serial", + (gogoproto.moretags) = "yaml:\"serial\"" + ]; +} + +// Certificate stores state, certificate and it's public key +message Certificate { + State state = 2 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; + + bytes cert = 3 [ + (gogoproto.jsontag) = "cert", + (gogoproto.moretags) = "yaml:\"cert\"" + ]; + + bytes pubkey = 4 [ + (gogoproto.jsontag) = "pubkey", + (gogoproto.moretags) = "yaml:\"pubkey\"" + ]; +} diff --git a/proto/node/akash/cert/v1/filters.proto b/proto/node/akash/cert/v1/filters.proto new file mode 100644 index 00000000..a4473f7e --- /dev/null +++ b/proto/node/akash/cert/v1/filters.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; +package akash.cert.v1; + +import "gogoproto/gogo.proto"; + +import "cosmos_proto/cosmos.proto"; + +option go_package = "pkg.akt.dev/go/node/cert/v1"; + +// CertificateFilter defines filters used to filter certificates +message CertificateFilter { + option (gogoproto.equal) = false; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + + string serial = 2 [ + (gogoproto.jsontag) = "serial", + (gogoproto.moretags) = "yaml:\"serial\"" + ]; + + string state = 3 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; +} diff --git a/proto/node/akash/cert/v1/genesis.proto b/proto/node/akash/cert/v1/genesis.proto new file mode 100644 index 00000000..e7034117 --- /dev/null +++ b/proto/node/akash/cert/v1/genesis.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; +package akash.cert.v1; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; +import "akash/cert/v1/cert.proto"; + +option go_package = "pkg.akt.dev/go/node/cert/v1"; + +// GenesisCertificate defines certificate entry at genesis +message GenesisCertificate { + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + + akash.cert.v1.Certificate certificate = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "certificate", + (gogoproto.moretags) = "yaml:\"certificate\"" + ]; +} + +// GenesisState defines the basic genesis state used by cert module +message GenesisState { + repeated GenesisCertificate certificates = 1 [ + (gogoproto.castrepeated) = "GenesisCertificates", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "certificates", + (gogoproto.moretags) = "yaml:\"certificates\"" + ]; +} diff --git a/proto/node/akash/cert/v1/msg.proto b/proto/node/akash/cert/v1/msg.proto new file mode 100644 index 00000000..c69a24f5 --- /dev/null +++ b/proto/node/akash/cert/v1/msg.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; +package akash.cert.v1; + +import "gogoproto/gogo.proto"; + +import "cosmos_proto/cosmos.proto"; +import 
"cosmos/msg/v1/msg.proto"; + +import "akash/cert/v1/cert.proto"; + +option go_package = "pkg.akt.dev/go/node/cert/v1"; + +// MsgCreateCertificate defines an SDK message for creating certificate +message MsgCreateCertificate { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "owner"; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + + bytes cert = 2 [ + (gogoproto.jsontag) = "cert", + (gogoproto.moretags) = "yaml:\"cert\"" + ]; + + bytes pubkey = 3 [ + (gogoproto.jsontag) = "pubkey", + (gogoproto.moretags) = "yaml:\"pubkey\"" + ]; +} + +// MsgCreateCertificateResponse defines the Msg/CreateCertificate response type. +message MsgCreateCertificateResponse {} + +// MsgRevokeCertificate defines an SDK message for revoking certificate +message MsgRevokeCertificate { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "id.owner"; + + akash.cert.v1.ID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type. +message MsgRevokeCertificateResponse {} diff --git a/proto/node/akash/cert/v1/query.proto b/proto/node/akash/cert/v1/query.proto new file mode 100644 index 00000000..04b058a3 --- /dev/null +++ b/proto/node/akash/cert/v1/query.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; +package akash.cert.v1; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "cosmos/base/query/v1beta1/pagination.proto"; +import "akash/cert/v1/cert.proto"; +import "akash/cert/v1/filters.proto"; + +option go_package = "pkg.akt.dev/go/node/cert/v1"; + +// Query defines the gRPC querier service +service Query { + // Certificates queries certificates + rpc Certificates(QueryCertificatesRequest) returns (QueryCertificatesResponse) { + option (google.api.http).get = "/akash/cert/v1/certificates/list"; + } +} + +// CertificateResponse contains a single X509 certificate and its serial number +message CertificateResponse { + akash.cert.v1.Certificate certificate = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "certificate", + (gogoproto.moretags) = "yaml:\"certificate\"" + ]; + + string serial = 2 [ + (gogoproto.jsontag) = "serial", + (gogoproto.moretags) = "yaml:\"serial\"" + ]; +} + +// QueryDeploymentsRequest is request type for the Query/Deployments RPC method +message QueryCertificatesRequest { + CertificateFilter filter = 1 [ + (gogoproto.nullable) = false + ]; + + cosmos.base.query.v1beta1.PageRequest pagination = 2; +} + +// QueryCertificatesResponse is response type for the Query/Certificates RPC method +message QueryCertificatesResponse { + repeated CertificateResponse certificates = 1 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "CertificatesResponse" + ]; + + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} diff --git a/proto/node/akash/cert/v1/service.proto b/proto/node/akash/cert/v1/service.proto new file mode 100644 index 00000000..7b613251 --- /dev/null +++ b/proto/node/akash/cert/v1/service.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; +package akash.cert.v1; + +import "cosmos/msg/v1/msg.proto"; + +import "akash/cert/v1/msg.proto"; + +option go_package = "pkg.akt.dev/go/node/cert/v1"; + +// Msg defines the provider Msg service +service Msg { + option (cosmos.msg.v1.service) = true; + + // CreateCertificate defines a method to create new 
certificate given proper inputs. + rpc CreateCertificate(MsgCreateCertificate) returns(MsgCreateCertificateResponse); + // RevokeCertificate defines a method to revoke the certificate + rpc RevokeCertificate(MsgRevokeCertificate) returns(MsgRevokeCertificateResponse); +} diff --git a/proto/node/akash/cert/v1beta2/cert.proto b/proto/node/akash/cert/v1beta2/cert.proto deleted file mode 100644 index ddee422a..00000000 --- a/proto/node/akash/cert/v1beta2/cert.proto +++ /dev/null @@ -1,118 +0,0 @@ -syntax = "proto3"; -package akash.cert.v1beta2; - -import "gogoproto/gogo.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/cert/v1beta2"; - -// Msg defines the provider Msg service -service Msg { - // CreateCertificate defines a method to create new certificate given proper inputs. - rpc CreateCertificate(MsgCreateCertificate) returns(MsgCreateCertificateResponse); - // RevokeCertificate defines a method to revoke the certificate - rpc RevokeCertificate(MsgRevokeCertificate) returns(MsgRevokeCertificateResponse); -} - -// CertificateID stores owner and sequence number -message CertificateID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - - string serial = 2 [ - (gogoproto.jsontag) = "serial", - (gogoproto.moretags) = "yaml:\"serial\"" - ]; -} - -// Certificate stores state, certificate and it's public key -message Certificate { - // State is an enum which refers to state of deployment - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. So declaring dummy state - invalid = 0 [(gogoproto.enumvalue_customname) = "CertificateStateInvalid"]; - // CertificateValid denotes state for deployment active - valid = 1 [(gogoproto.enumvalue_customname) = "CertificateValid"]; - // CertificateRevoked denotes state for deployment closed - revoked = 2 [(gogoproto.enumvalue_customname) = "CertificateRevoked"]; - } - - State state = 2 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; - - bytes cert = 3 [ - (gogoproto.jsontag) = "cert", - (gogoproto.moretags) = "yaml:\"cert\"" - ]; - - bytes pubkey = 4 [ - (gogoproto.jsontag) = "pubkey", - (gogoproto.moretags) = "yaml:\"pubkey\"" - ]; -} - -// CertificateFilter defines filters used to filter certificates -message CertificateFilter { - option (gogoproto.equal) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - - string serial = 2 [ - (gogoproto.jsontag) = "serial", - (gogoproto.moretags) = "yaml:\"serial\"" - ]; - - string state = 3 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; -} - -// MsgCreateCertificate defines an SDK message for creating certificate -message MsgCreateCertificate { - option (gogoproto.equal) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - - bytes cert = 2 [ - (gogoproto.jsontag) = "cert", - (gogoproto.moretags) = "yaml:\"cert\"" - ]; - - bytes pubkey = 3 [ - (gogoproto.jsontag) = "pubkey", - (gogoproto.moretags) = "yaml:\"pubkey\"" - ]; -} - -// MsgCreateCertificateResponse defines the Msg/CreateCertificate response type. 
-message MsgCreateCertificateResponse {} - -// MsgRevokeCertificate defines an SDK message for revoking certificate -message MsgRevokeCertificate { - option (gogoproto.equal) = false; - - CertificateID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type. -message MsgRevokeCertificateResponse {} diff --git a/proto/node/akash/cert/v1beta2/genesis.proto b/proto/node/akash/cert/v1beta2/genesis.proto deleted file mode 100644 index c0ff6f06..00000000 --- a/proto/node/akash/cert/v1beta2/genesis.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; -package akash.cert.v1beta2; - -import "akash/cert/v1beta2/cert.proto"; -import "gogoproto/gogo.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/cert/v1beta2"; - -// GenesisCertificate defines certificate entry at genesis -message GenesisCertificate { - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - - Certificate certificate = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "certificate", - (gogoproto.moretags) = "yaml:\"certificate\"" - ]; -} - -// GenesisState defines the basic genesis state used by cert module -message GenesisState { - repeated GenesisCertificate certificates = 1 [ - (gogoproto.castrepeated) = "GenesisCertificates", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "certificates", - (gogoproto.moretags) = "yaml:\"certificates\"" - ]; -} diff --git a/proto/node/akash/cert/v1beta2/query.proto b/proto/node/akash/cert/v1beta2/query.proto deleted file mode 100644 index 4dd3391a..00000000 --- a/proto/node/akash/cert/v1beta2/query.proto +++ /dev/null @@ -1,50 +0,0 @@ -syntax = "proto3"; -package akash.cert.v1beta2; - -import "gogoproto/gogo.proto"; -import "google/api/annotations.proto"; -import "cosmos/base/query/v1beta1/pagination.proto"; -import "akash/cert/v1beta2/cert.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/cert/v1beta2"; - -// Query defines the gRPC querier service -service Query { - // Certificates queries certificates - rpc Certificates(QueryCertificatesRequest) returns (QueryCertificatesResponse) { - option (google.api.http).get = "/akash/cert/v1beta3/certificates/list"; - } -} - -// CertificateResponse contains a single X509 certificate and its serial number -message CertificateResponse { - Certificate certificate = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "certificate", - (gogoproto.moretags) = "yaml:\"certificate\"" - ]; - - string serial = 2 [ - (gogoproto.jsontag) = "serial", - (gogoproto.moretags) = "yaml:\"serial\"" - ]; -} - -// QueryDeploymentsRequest is request type for the Query/Deployments RPC method -message QueryCertificatesRequest { - CertificateFilter filter = 1 [ - (gogoproto.nullable) = false - ]; - - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryCertificatesResponse is response type for the Query/Certificates RPC method -message QueryCertificatesResponse { - repeated CertificateResponse certificates = 1 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "CertificatesResponse" - ]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} diff --git a/proto/node/akash/cert/v1beta3/cert.proto b/proto/node/akash/cert/v1beta3/cert.proto deleted file mode 100644 index 07282aa1..00000000 --- a/proto/node/akash/cert/v1beta3/cert.proto +++ /dev/null @@ 
-1,118 +0,0 @@ -syntax = "proto3"; -package akash.cert.v1beta3; - -import "gogoproto/gogo.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/cert/v1beta3"; - -// Msg defines the provider Msg service -service Msg { - // CreateCertificate defines a method to create new certificate given proper inputs. - rpc CreateCertificate(MsgCreateCertificate) returns(MsgCreateCertificateResponse); - // RevokeCertificate defines a method to revoke the certificate - rpc RevokeCertificate(MsgRevokeCertificate) returns(MsgRevokeCertificateResponse); -} - -// CertificateID stores owner and sequence number -message CertificateID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - - string serial = 2 [ - (gogoproto.jsontag) = "serial", - (gogoproto.moretags) = "yaml:\"serial\"" - ]; -} - -// Certificate stores state, certificate and it's public key -message Certificate { - // State is an enum which refers to state of deployment - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. So declaring dummy state - invalid = 0 [(gogoproto.enumvalue_customname) = "CertificateStateInvalid"]; - // CertificateValid denotes state for deployment active - valid = 1 [(gogoproto.enumvalue_customname) = "CertificateValid"]; - // CertificateRevoked denotes state for deployment closed - revoked = 2 [(gogoproto.enumvalue_customname) = "CertificateRevoked"]; - } - - State state = 2 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; - - bytes cert = 3 [ - (gogoproto.jsontag) = "cert", - (gogoproto.moretags) = "yaml:\"cert\"" - ]; - - bytes pubkey = 4 [ - (gogoproto.jsontag) = "pubkey", - (gogoproto.moretags) = "yaml:\"pubkey\"" - ]; -} - -// CertificateFilter defines filters used to filter certificates -message CertificateFilter { - option (gogoproto.equal) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - - string serial = 2 [ - (gogoproto.jsontag) = "serial", - (gogoproto.moretags) = "yaml:\"serial\"" - ]; - - string state = 3 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; -} - -// MsgCreateCertificate defines an SDK message for creating certificate -message MsgCreateCertificate { - option (gogoproto.equal) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - - bytes cert = 2 [ - (gogoproto.jsontag) = "cert", - (gogoproto.moretags) = "yaml:\"cert\"" - ]; - - bytes pubkey = 3 [ - (gogoproto.jsontag) = "pubkey", - (gogoproto.moretags) = "yaml:\"pubkey\"" - ]; -} - -// MsgCreateCertificateResponse defines the Msg/CreateCertificate response type. -message MsgCreateCertificateResponse {} - -// MsgRevokeCertificate defines an SDK message for revoking certificate -message MsgRevokeCertificate { - option (gogoproto.equal) = false; - - CertificateID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type. 
-message MsgRevokeCertificateResponse {} diff --git a/proto/node/akash/cert/v1beta3/genesis.proto b/proto/node/akash/cert/v1beta3/genesis.proto deleted file mode 100644 index 1a6ae53d..00000000 --- a/proto/node/akash/cert/v1beta3/genesis.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; -package akash.cert.v1beta3; - -import "akash/cert/v1beta3/cert.proto"; -import "gogoproto/gogo.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/cert/v1beta3"; - -// GenesisCertificate defines certificate entry at genesis -message GenesisCertificate { - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - - Certificate certificate = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "certificate", - (gogoproto.moretags) = "yaml:\"certificate\"" - ]; -} - -// GenesisState defines the basic genesis state used by cert module -message GenesisState { - repeated GenesisCertificate certificates = 1 [ - (gogoproto.castrepeated) = "GenesisCertificates", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "certificates", - (gogoproto.moretags) = "yaml:\"certificates\"" - ]; -} diff --git a/proto/node/akash/cert/v1beta3/query.proto b/proto/node/akash/cert/v1beta3/query.proto deleted file mode 100644 index d72129d9..00000000 --- a/proto/node/akash/cert/v1beta3/query.proto +++ /dev/null @@ -1,50 +0,0 @@ -syntax = "proto3"; -package akash.cert.v1beta3; - -import "gogoproto/gogo.proto"; -import "google/api/annotations.proto"; -import "cosmos/base/query/v1beta1/pagination.proto"; -import "akash/cert/v1beta3/cert.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/cert/v1beta3"; - -// Query defines the gRPC querier service -service Query { - // Certificates queries certificates - rpc Certificates(QueryCertificatesRequest) returns (QueryCertificatesResponse) { - option (google.api.http).get = "/akash/cert/v1beta3/certificates/list"; - } -} - -// CertificateResponse contains a single X509 certificate and its serial number -message CertificateResponse { - Certificate certificate = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "certificate", - (gogoproto.moretags) = "yaml:\"certificate\"" - ]; - - string serial = 2 [ - (gogoproto.jsontag) = "serial", - (gogoproto.moretags) = "yaml:\"serial\"" - ]; -} - -// QueryDeploymentsRequest is request type for the Query/Deployments RPC method -message QueryCertificatesRequest { - CertificateFilter filter = 1 [ - (gogoproto.nullable) = false - ]; - - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryCertificatesResponse is response type for the Query/Certificates RPC method -message QueryCertificatesResponse { - repeated CertificateResponse certificates = 1 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "CertificatesResponse" - ]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} diff --git a/proto/node/akash/deployment/v1/authz.proto b/proto/node/akash/deployment/v1/authz.proto new file mode 100644 index 00000000..c7529c67 --- /dev/null +++ b/proto/node/akash/deployment/v1/authz.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; +package akash.deployment.v1; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; +import "cosmos/base/v1beta1/coin.proto"; + +option go_package = "pkg.akt.dev/go/node/deployment/v1"; + +// DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from +// the granter's account for a deployment. 
+message DepositAuthorization { + option (cosmos_proto.implements_interface) = "Authorization"; + + // SpendLimit is the amount the grantee is authorized to spend from the granter's account for + // the purpose of deployment. + cosmos.base.v1beta1.Coin spend_limit = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "spend_limit" + ]; +} diff --git a/proto/node/akash/deployment/v1/deployment.proto b/proto/node/akash/deployment/v1/deployment.proto new file mode 100644 index 00000000..2e7026fd --- /dev/null +++ b/proto/node/akash/deployment/v1/deployment.proto @@ -0,0 +1,58 @@ +syntax = "proto3"; +package akash.deployment.v1; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; + +option go_package = "pkg.akt.dev/go/node/deployment/v1"; + +// DeploymentID stores owner and sequence number +message DeploymentID { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = "yaml:\"dseq\"" + ]; +} + +// Deployment stores deploymentID, state and checksum details +message Deployment { + option (gogoproto.equal) = false; + + // State is an enum which refers to state of deployment + enum State { + option (gogoproto.goproto_enum_prefix) = false; + + // Prefix should start with 0 in enum. So declaring dummy state + invalid = 0 [(gogoproto.enumvalue_customname) = "DeploymentStateInvalid"]; + // DeploymentActive denotes state for deployment active + active = 1 [(gogoproto.enumvalue_customname) = "DeploymentActive"]; + // DeploymentClosed denotes state for deployment closed + closed = 2 [(gogoproto.enumvalue_customname) = "DeploymentClosed"]; + } + + DeploymentID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + + State state = 2 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; + bytes hash = 3 [ + (gogoproto.jsontag) = "hash", + (gogoproto.moretags) = "yaml:\"hash\"" + ]; + int64 created_at = 4; +} diff --git a/proto/node/akash/deployment/v1/event.proto b/proto/node/akash/deployment/v1/event.proto new file mode 100644 index 00000000..672e17ce --- /dev/null +++ b/proto/node/akash/deployment/v1/event.proto @@ -0,0 +1,92 @@ +syntax = "proto3"; +package akash.deployment.v1; + +import "gogoproto/gogo.proto"; + +import "akash/deployment/v1/deployment.proto"; +import "akash/deployment/v1/group.proto"; + +option go_package = "pkg.akt.dev/go/node/deployment/v1"; + +// EventDeploymentCreated event is triggered when deployment is created on chain +message EventDeploymentCreated { + option (gogoproto.equal) = false; + + DeploymentID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + + bytes hash = 2 [ + (gogoproto.jsontag) = "hash", + (gogoproto.moretags) = "yaml:\"hash\"" + ]; +} + +// EventDeploymentUpdated is triggered when deployment is updated on chain +message EventDeploymentUpdated { + option (gogoproto.equal) = false; + + DeploymentID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + + bytes hash = 2 [ + (gogoproto.jsontag) = "hash", + (gogoproto.moretags) = 
"yaml:\"hash\"" + ]; +} + +// EventDeploymentClosed is triggered when deployment is closed on chain +message EventDeploymentClosed { + option (gogoproto.equal) = false; + + DeploymentID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// EventGroupStarted is triggered when deployment group is started +message EventGroupStarted { + option (gogoproto.equal) = false; + + GroupID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// EventGroupPaused is triggered when deployment group is paused +message EventGroupPaused { + option (gogoproto.equal) = false; + + GroupID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// EventGroupClosed is triggered when deployment group is closed +message EventGroupClosed { + option (gogoproto.equal) = false; + + GroupID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + diff --git a/proto/node/akash/deployment/v1/group.proto b/proto/node/akash/deployment/v1/group.proto new file mode 100644 index 00000000..582291e4 --- /dev/null +++ b/proto/node/akash/deployment/v1/group.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; +package akash.deployment.v1; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; + +option go_package = "pkg.akt.dev/go/node/deployment/v1"; + +// GroupID stores owner, deployment sequence number and group sequence number +message GroupID { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = "yaml:\"dseq\"" + ]; + uint32 gseq = 3 [ + (gogoproto.customname) = "GSeq", + (gogoproto.jsontag) = "gseq", + (gogoproto.moretags) = "yaml:\"gseq\"" + ]; +} diff --git a/proto/node/akash/deployment/v1/msg.proto b/proto/node/akash/deployment/v1/msg.proto new file mode 100644 index 00000000..34f6cf24 --- /dev/null +++ b/proto/node/akash/deployment/v1/msg.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; +package akash.deployment.v1; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; + +import "cosmos/base/v1beta1/coin.proto"; + +import "akash/deployment/v1/deployment.proto"; + +option go_package = "pkg.akt.dev/go/node/deployment/v1"; + +// MsgDepositDeployment deposits more funds into the deposit account +message MsgDepositDeployment { + option (gogoproto.equal) = false; + + DeploymentID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + + cosmos.base.v1beta1.Coin amount = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "amount", + (gogoproto.moretags) = "yaml:\"amount\"" + ]; + + // Depositor pays for the deposit + string depositor = 3 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "depositor", + (gogoproto.moretags) = "yaml:\"depositor\"" + ]; +} + +// MsgDepositDeploymentResponse defines response type for the MsgDepositDeployment. 
+message MsgDepositDeploymentResponse {} diff --git a/proto/node/akash/deployment/v1beta1/authz.proto b/proto/node/akash/deployment/v1beta1/authz.proto deleted file mode 100644 index ed294bf8..00000000 --- a/proto/node/akash/deployment/v1beta1/authz.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta1; - -import "gogoproto/gogo.proto"; -import "cosmos_proto/cosmos.proto"; -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta1"; - -// DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from -// the granter's account for a deployment. -message DepositDeploymentAuthorization { - option (cosmos_proto.implements_interface) = "Authorization"; - - // SpendLimit is the amount the grantee is authorized to spend from the granter's account for - // the purpose of deployment. - cosmos.base.v1beta1.Coin spend_limit = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "spend_limit" - ]; -} diff --git a/proto/node/akash/deployment/v1beta1/deployment.proto b/proto/node/akash/deployment/v1beta1/deployment.proto deleted file mode 100644 index 0d4e811b..00000000 --- a/proto/node/akash/deployment/v1beta1/deployment.proto +++ /dev/null @@ -1,152 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta1; - -import "gogoproto/gogo.proto"; -import "akash/deployment/v1beta1/group.proto"; -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta1"; - -// Msg defines the deployment Msg service. -service Msg { - // CreateDeployment defines a method to create new deployment given proper inputs. - rpc CreateDeployment(MsgCreateDeployment) returns (MsgCreateDeploymentResponse); - - // DepositDeployment deposits more funds into the deployment account - rpc DepositDeployment(MsgDepositDeployment) returns (MsgDepositDeploymentResponse); - - // UpdateDeployment defines a method to update a deployment given proper inputs. - rpc UpdateDeployment(MsgUpdateDeployment) returns (MsgUpdateDeploymentResponse); - - // CloseDeployment defines a method to close a deployment given proper inputs. - rpc CloseDeployment(MsgCloseDeployment) returns (MsgCloseDeploymentResponse); - - // CloseGroup defines a method to close a group of a deployment given proper inputs. - rpc CloseGroup(MsgCloseGroup) returns (MsgCloseGroupResponse); - - // PauseGroup defines a method to close a group of a deployment given proper inputs. - rpc PauseGroup(MsgPauseGroup) returns (MsgPauseGroupResponse); - - // StartGroup defines a method to close a group of a deployment given proper inputs. - rpc StartGroup(MsgStartGroup) returns (MsgStartGroupResponse); -} - -// MsgCreateDeployment defines an SDK message for creating deployment -message MsgCreateDeployment { - option (gogoproto.equal) = false; - - DeploymentID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - repeated GroupSpec groups = 2 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "groups", (gogoproto.moretags) = "yaml:\"groups\""]; - bytes version = 3 [(gogoproto.jsontag) = "version", (gogoproto.moretags) = "yaml:\"version\""]; - - cosmos.base.v1beta1.Coin deposit = 4 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "deposit", (gogoproto.moretags) = "yaml:\"deposit\""]; -} - -// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. 
-message MsgCreateDeploymentResponse {} - -// MsgDepositDeployment deposits more funds into the deposit account -message MsgDepositDeployment { - option (gogoproto.equal) = false; - - DeploymentID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - - cosmos.base.v1beta1.Coin amount = 2 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "amount", (gogoproto.moretags) = "yaml:\"amount\""]; -} - -// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. -message MsgDepositDeploymentResponse {} - -// MsgUpdateDeployment defines an SDK message for updating deployment -message MsgUpdateDeployment { - option (gogoproto.equal) = false; - - DeploymentID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - repeated GroupSpec groups = 2 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "groups", (gogoproto.moretags) = "yaml:\"groups\""]; - bytes version = 3 [(gogoproto.jsontag) = "version", (gogoproto.moretags) = "yaml:\"version\""]; -} - -// MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. -message MsgUpdateDeploymentResponse {} - -// MsgCloseDeployment defines an SDK message for closing deployment -message MsgCloseDeployment { - option (gogoproto.equal) = false; - - DeploymentID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. -message MsgCloseDeploymentResponse {} - -// DeploymentID stores owner and sequence number -message DeploymentID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - uint64 dseq = 2 - [(gogoproto.customname) = "DSeq", (gogoproto.jsontag) = "dseq", (gogoproto.moretags) = "yaml:\"dseq\""]; -} - -// Deployment stores deploymentID, state and version details -message Deployment { - option (gogoproto.equal) = false; - - DeploymentID deployment_id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "DeploymentID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - - // State is an enum which refers to state of deployment - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. 
So declaring dummy state - invalid = 0 [(gogoproto.enumvalue_customname) = "DeploymentStateInvalid"]; - // DeploymentActive denotes state for deployment active - active = 1 [(gogoproto.enumvalue_customname) = "DeploymentActive"]; - // DeploymentClosed denotes state for deployment closed - closed = 2 [(gogoproto.enumvalue_customname) = "DeploymentClosed"]; - } - - State state = 2 [(gogoproto.jsontag) = "state", (gogoproto.moretags) = "yaml:\"state\""]; - bytes version = 3 [(gogoproto.jsontag) = "version", (gogoproto.moretags) = "yaml:\"version\""]; - int64 created_at = 4; -} - -// DeploymentFilters defines filters used to filter deployments -message DeploymentFilters { - option (gogoproto.equal) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - uint64 dseq = 2 - [(gogoproto.customname) = "DSeq", (gogoproto.jsontag) = "dseq", (gogoproto.moretags) = "yaml:\"dseq\""]; - string state = 3 [(gogoproto.jsontag) = "state", (gogoproto.moretags) = "yaml:\"state\""]; -} diff --git a/proto/node/akash/deployment/v1beta1/genesis.proto b/proto/node/akash/deployment/v1beta1/genesis.proto deleted file mode 100644 index 81f74b56..00000000 --- a/proto/node/akash/deployment/v1beta1/genesis.proto +++ /dev/null @@ -1,33 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta1; - -import "gogoproto/gogo.proto"; -import "akash/deployment/v1beta1/deployment.proto"; -import "akash/deployment/v1beta1/group.proto"; -import "akash/deployment/v1beta1/params.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta1"; - -// GenesisDeployment defines the basic genesis state used by deployment module -message GenesisDeployment { - Deployment deployment = 1 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "deployment", (gogoproto.moretags) = "yaml:\"deployment\""]; - - repeated Group groups = 2 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "groups", (gogoproto.moretags) = "yaml:\"groups\""]; -} - -// GenesisState stores slice of genesis deployment instance -message GenesisState { - repeated GenesisDeployment deployments = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "deployments", - (gogoproto.moretags) = "yaml:\"deployments\"" - ]; - - Params params = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "params", - (gogoproto.moretags) = "yaml:\"params\"" - ]; -} diff --git a/proto/node/akash/deployment/v1beta1/group.proto b/proto/node/akash/deployment/v1beta1/group.proto deleted file mode 100644 index 7c27172e..00000000 --- a/proto/node/akash/deployment/v1beta1/group.proto +++ /dev/null @@ -1,128 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta1; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta1/resource.proto"; -import "akash/base/v1beta1/attribute.proto"; -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta1"; - -// MsgCloseGroup defines SDK message to close a single Group within a Deployment. -message MsgCloseGroup { - option (gogoproto.equal) = false; - - GroupID id = 1 [ - (gogoproto.customname) = "ID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgCloseGroupResponse defines the Msg/CloseGroup response type. -message MsgCloseGroupResponse {} - -// MsgPauseGroup defines SDK message to close a single Group within a Deployment. 
-message MsgPauseGroup { - option (gogoproto.equal) = false; - - GroupID id = 1 [ - (gogoproto.customname) = "ID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgPauseGroupResponse defines the Msg/PauseGroup response type. -message MsgPauseGroupResponse {} - -// MsgStartGroup defines SDK message to close a single Group within a Deployment. -message MsgStartGroup { - option (gogoproto.equal) = false; - - GroupID id = 1 [ - (gogoproto.customname) = "ID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgStartGroupResponse defines the Msg/StartGroup response type. -message MsgStartGroupResponse {} - -// GroupID stores owner, deployment sequence number and group sequence number -message GroupID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - uint64 dseq = 2 - [(gogoproto.customname) = "DSeq", (gogoproto.jsontag) = "dseq", (gogoproto.moretags) = "yaml:\"dseq\""]; - uint32 gseq = 3 - [(gogoproto.customname) = "GSeq", (gogoproto.jsontag) = "gseq", (gogoproto.moretags) = "yaml:\"gseq\""]; -} - -// GroupSpec stores group specifications -message GroupSpec { - option (gogoproto.equal) = false; - option (gogoproto.goproto_getters) = false; - - string name = 1 [(gogoproto.jsontag) = "name", (gogoproto.moretags) = "yaml:\"name\""]; - - akash.base.v1beta1.PlacementRequirements requirements = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "requirements", - (gogoproto.moretags) = "yaml:\"requirements\"" - ]; - - repeated Resource resources = 3 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "resources", (gogoproto.moretags) = "yaml:\"resources\""]; -} - -// Group stores group id, state and specifications of group -message Group { - option (gogoproto.equal) = false; - - GroupID group_id = 1 [ - (gogoproto.customname) = "GroupID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - - // State is an enum which refers to state of group - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. 
So declaring dummy state - invalid = 0 [(gogoproto.enumvalue_customname) = "GroupStateInvalid"]; - // GroupOpen denotes state for group open - open = 1 [(gogoproto.enumvalue_customname) = "GroupOpen"]; - // GroupOrdered denotes state for group ordered - paused = 2 [(gogoproto.enumvalue_customname) = "GroupPaused"]; - // GroupInsufficientFunds denotes state for group insufficient_funds - insufficient_funds = 3 [(gogoproto.enumvalue_customname) = "GroupInsufficientFunds"]; - // GroupClosed denotes state for group closed - closed = 4 [(gogoproto.enumvalue_customname) = "GroupClosed"]; - } - - State state = 2 [(gogoproto.jsontag) = "state", (gogoproto.moretags) = "yaml:\"state\""]; - GroupSpec group_spec = 3 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "spec", (gogoproto.moretags) = "yaml:\"spec\""]; - - int64 created_at = 4; -} - -// Resource stores unit, total count and price of resource -message Resource { - option (gogoproto.equal) = false; - - akash.base.v1beta1.ResourceUnits resources = 1 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "unit", (gogoproto.moretags) = "yaml:\"unit\""]; - uint32 count = 2 [(gogoproto.jsontag) = "count", (gogoproto.moretags) = "yaml:\"count\""]; - cosmos.base.v1beta1.Coin price = 3 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "price", (gogoproto.moretags) = "yaml:\"price\""]; -} diff --git a/proto/node/akash/deployment/v1beta1/params.proto b/proto/node/akash/deployment/v1beta1/params.proto deleted file mode 100644 index 453c1ca7..00000000 --- a/proto/node/akash/deployment/v1beta1/params.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta1; -import "gogoproto/gogo.proto"; -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta1"; - -// Params defines the parameters for the x/deployment package -message Params { - cosmos.base.v1beta1.Coin deployment_min_deposit = 1 [ - (gogoproto.customname) = "DeploymentMinDeposit", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "deployment_min_deposit", - (gogoproto.moretags) = "yaml:\"deployment_min_deposit\"" - ]; -} diff --git a/proto/node/akash/deployment/v1beta1/query.proto b/proto/node/akash/deployment/v1beta1/query.proto deleted file mode 100644 index 8230a0e8..00000000 --- a/proto/node/akash/deployment/v1beta1/query.proto +++ /dev/null @@ -1,71 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta1; - -import "gogoproto/gogo.proto"; -import "google/api/annotations.proto"; -import "cosmos/base/query/v1beta1/pagination.proto"; -import "akash/deployment/v1beta1/deployment.proto"; -import "akash/deployment/v1beta1/group.proto"; -import "akash/escrow/v1beta1/types.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta1"; - -// Query defines the gRPC querier service -service Query { - // Deployments queries deployments - rpc Deployments(QueryDeploymentsRequest) returns (QueryDeploymentsResponse) { - option (google.api.http).get = "/akash/deployment/v1beta1/deployments/list"; - } - - // Deployment queries deployment details - rpc Deployment(QueryDeploymentRequest) returns (QueryDeploymentResponse) { - option (google.api.http).get = "/akash/deployment/v1beta1/deployments/info"; - } - - // Group queries group details - rpc Group(QueryGroupRequest) returns (QueryGroupResponse) { - option (google.api.http).get = "/akash/deployment/v1beta1/groups/info"; - } -} - -// QueryDeploymentsRequest is request type for the Query/Deployments RPC method 
-message QueryDeploymentsRequest { - DeploymentFilters filters = 1 [(gogoproto.nullable) = false]; - - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryDeploymentsResponse is response type for the Query/Deployments RPC method -message QueryDeploymentsResponse { - repeated QueryDeploymentResponse deployments = 1 - [(gogoproto.nullable) = false, (gogoproto.castrepeated) = "DeploymentResponses"]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryDeploymentRequest is request type for the Query/Deployment RPC method -message QueryDeploymentRequest { - DeploymentID id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"]; -} - -// QueryDeploymentResponse is response type for the Query/Deployment RPC method -message QueryDeploymentResponse { - option (gogoproto.equal) = false; - Deployment deployment = 1 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "deployment", (gogoproto.moretags) = "yaml:\"deployment\""]; - repeated Group groups = 2 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "groups", (gogoproto.moretags) = "yaml:\"groups\""]; - akash.escrow.v1beta1.Account escrow_account = 3 [ - (gogoproto.nullable) = false - ]; -} - -// QueryGroupRequest is request type for the Query/Group RPC method -message QueryGroupRequest { - GroupID id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"]; -} - -// QueryGroupResponse is response type for the Query/Group RPC method -message QueryGroupResponse { - Group group = 1 [(gogoproto.nullable) = false]; -} diff --git a/proto/node/akash/deployment/v1beta2/authz.proto b/proto/node/akash/deployment/v1beta2/authz.proto deleted file mode 100644 index 1862e8c4..00000000 --- a/proto/node/akash/deployment/v1beta2/authz.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta2; - -import "gogoproto/gogo.proto"; -import "cosmos_proto/cosmos.proto"; -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta2"; - -// DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from -// the granter's account for a deployment. -message DepositDeploymentAuthorization { - option (cosmos_proto.implements_interface) = "Authorization"; - - // SpendLimit is the amount the grantee is authorized to spend from the granter's account for - // the purpose of deployment. 
- cosmos.base.v1beta1.Coin spend_limit = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "spend_limit" - ]; -} diff --git a/proto/node/akash/deployment/v1beta2/deployment.proto b/proto/node/akash/deployment/v1beta2/deployment.proto deleted file mode 100644 index 3a8973b8..00000000 --- a/proto/node/akash/deployment/v1beta2/deployment.proto +++ /dev/null @@ -1,75 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta2; - -import "gogoproto/gogo.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta2"; - -// DeploymentID stores owner and sequence number -message DeploymentID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = "yaml:\"dseq\"" - ]; -} - -// Deployment stores deploymentID, state and version details -message Deployment { - option (gogoproto.equal) = false; - - DeploymentID deployment_id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "DeploymentID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - - // State is an enum which refers to state of deployment - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. So declaring dummy state - invalid = 0 [(gogoproto.enumvalue_customname) = "DeploymentStateInvalid"]; - // DeploymentActive denotes state for deployment active - active = 1 [(gogoproto.enumvalue_customname) = "DeploymentActive"]; - // DeploymentClosed denotes state for deployment closed - closed = 2 [(gogoproto.enumvalue_customname) = "DeploymentClosed"]; - } - - State state = 2 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; - bytes version = 3 [ - (gogoproto.jsontag) = "version", - (gogoproto.moretags) = "yaml:\"version\"" - ]; - int64 created_at = 4; -} - -// DeploymentFilters defines filters used to filter deployments -message DeploymentFilters { - option (gogoproto.equal) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = "yaml:\"dseq\"" - ]; - string state = 3 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; -} diff --git a/proto/node/akash/deployment/v1beta2/deploymentmsg.proto b/proto/node/akash/deployment/v1beta2/deploymentmsg.proto deleted file mode 100644 index 9ace46f4..00000000 --- a/proto/node/akash/deployment/v1beta2/deploymentmsg.proto +++ /dev/null @@ -1,106 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta2; - -import "gogoproto/gogo.proto"; - -import "akash/deployment/v1beta2/deployment.proto"; -import "akash/deployment/v1beta2/groupspec.proto"; - -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta2"; - -// MsgCreateDeployment defines an SDK message for creating deployment -message MsgCreateDeployment { - option (gogoproto.equal) = false; - - DeploymentID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - repeated GroupSpec groups = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "groups", - (gogoproto.moretags) = "yaml:\"groups\"" - 
]; - bytes version = 3 [ - (gogoproto.jsontag) = "version", - (gogoproto.moretags) = "yaml:\"version\"" - ]; - cosmos.base.v1beta1.Coin deposit = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "deposit", - (gogoproto.moretags) = "yaml:\"deposit\"" - ]; - // Depositor pays for the deposit - string depositor = 5 [ - (gogoproto.jsontag) = "depositor", - (gogoproto.moretags) = "yaml:\"depositor\"" - ]; -} - -// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. -message MsgCreateDeploymentResponse {} - -// MsgDepositDeployment deposits more funds into the deposit account -message MsgDepositDeployment { - option (gogoproto.equal) = false; - - DeploymentID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - - cosmos.base.v1beta1.Coin amount = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "amount", - (gogoproto.moretags) = "yaml:\"amount\"" - ]; - - // Depositor pays for the deposit - string depositor = 3 [ - (gogoproto.jsontag) = "depositor", - (gogoproto.moretags) = "yaml:\"depositor\"" - ]; -} - -// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. -message MsgDepositDeploymentResponse {} - -// MsgUpdateDeployment defines an SDK message for updating deployment -message MsgUpdateDeployment { - option (gogoproto.equal) = false; - - DeploymentID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - bytes version = 3 [ - (gogoproto.jsontag) = "version", - (gogoproto.moretags) = "yaml:\"version\"" - ]; -} - -// MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. -message MsgUpdateDeploymentResponse {} - -// MsgCloseDeployment defines an SDK message for closing deployment -message MsgCloseDeployment { - option (gogoproto.equal) = false; - - DeploymentID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. 
-message MsgCloseDeploymentResponse {} diff --git a/proto/node/akash/deployment/v1beta2/genesis.proto b/proto/node/akash/deployment/v1beta2/genesis.proto deleted file mode 100644 index 0338d5f7..00000000 --- a/proto/node/akash/deployment/v1beta2/genesis.proto +++ /dev/null @@ -1,39 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta2; - -import "gogoproto/gogo.proto"; -import "akash/deployment/v1beta2/deployment.proto"; -import "akash/deployment/v1beta2/group.proto"; -import "akash/deployment/v1beta2/params.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta2"; - -// GenesisDeployment defines the basic genesis state used by deployment module -message GenesisDeployment { - Deployment deployment = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "deployment", - (gogoproto.moretags) = "yaml:\"deployment\"" - ]; - - repeated Group groups = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "groups", - (gogoproto.moretags) = "yaml:\"groups\"" - ]; -} - -// GenesisState stores slice of genesis deployment instance -message GenesisState { - repeated GenesisDeployment deployments = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "deployments", - (gogoproto.moretags) = "yaml:\"deployments\"" - ]; - - Params params = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "params", - (gogoproto.moretags) = "yaml:\"params\"" - ]; -} diff --git a/proto/node/akash/deployment/v1beta2/group.proto b/proto/node/akash/deployment/v1beta2/group.proto deleted file mode 100644 index cfe341b1..00000000 --- a/proto/node/akash/deployment/v1beta2/group.proto +++ /dev/null @@ -1,48 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta2; - -import "gogoproto/gogo.proto"; -import "akash/deployment/v1beta2/groupid.proto"; -import "akash/deployment/v1beta2/groupspec.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta2"; - -// Group stores group id, state and specifications of group -message Group { - option (gogoproto.equal) = false; - - GroupID group_id = 1 [ - (gogoproto.customname) = "GroupID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - - // State is an enum which refers to state of group - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. 
So declaring dummy state - invalid = 0 [(gogoproto.enumvalue_customname) = "GroupStateInvalid"]; - // GroupOpen denotes state for group open - open = 1 [(gogoproto.enumvalue_customname) = "GroupOpen"]; - // GroupOrdered denotes state for group ordered - paused = 2 [(gogoproto.enumvalue_customname) = "GroupPaused"]; - // GroupInsufficientFunds denotes state for group insufficient_funds - insufficient_funds = 3 [(gogoproto.enumvalue_customname) = "GroupInsufficientFunds"]; - // GroupClosed denotes state for group closed - closed = 4 [(gogoproto.enumvalue_customname) = "GroupClosed"]; - } - - State state = 2 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; - GroupSpec group_spec = 3 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "spec", - (gogoproto.moretags) = "yaml:\"spec\"" - ]; - - int64 created_at = 4; -} diff --git a/proto/node/akash/deployment/v1beta2/groupid.proto b/proto/node/akash/deployment/v1beta2/groupid.proto deleted file mode 100644 index f1247a59..00000000 --- a/proto/node/akash/deployment/v1beta2/groupid.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta2; - -import "gogoproto/gogo.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta2"; - -// GroupID stores owner, deployment sequence number and group sequence number -message GroupID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = "yaml:\"dseq\"" - ]; - uint32 gseq = 3 [ - (gogoproto.customname) = "GSeq", - (gogoproto.jsontag) = "gseq", - (gogoproto.moretags) = "yaml:\"gseq\"" - ]; -} diff --git a/proto/node/akash/deployment/v1beta2/groupmsg.proto b/proto/node/akash/deployment/v1beta2/groupmsg.proto deleted file mode 100644 index 04c85943..00000000 --- a/proto/node/akash/deployment/v1beta2/groupmsg.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta2; - -import "gogoproto/gogo.proto"; -import "akash/deployment/v1beta2/groupid.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta2"; - -// MsgCloseGroup defines SDK message to close a single Group within a Deployment. -message MsgCloseGroup { - option (gogoproto.equal) = false; - - GroupID id = 1 [ - (gogoproto.customname) = "ID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgCloseGroupResponse defines the Msg/CloseGroup response type. -message MsgCloseGroupResponse {} - -// MsgPauseGroup defines SDK message to close a single Group within a Deployment. -message MsgPauseGroup { - option (gogoproto.equal) = false; - - GroupID id = 1 [ - (gogoproto.customname) = "ID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgPauseGroupResponse defines the Msg/PauseGroup response type. -message MsgPauseGroupResponse {} - -// MsgStartGroup defines SDK message to close a single Group within a Deployment. -message MsgStartGroup { - option (gogoproto.equal) = false; - - GroupID id = 1 [ - (gogoproto.customname) = "ID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgStartGroupResponse defines the Msg/StartGroup response type. 
-message MsgStartGroupResponse {} diff --git a/proto/node/akash/deployment/v1beta2/groupspec.proto b/proto/node/akash/deployment/v1beta2/groupspec.proto deleted file mode 100644 index 9f5aa771..00000000 --- a/proto/node/akash/deployment/v1beta2/groupspec.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta2; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta2/attribute.proto"; -import "akash/deployment/v1beta2/resource.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta2"; - -// GroupSpec stores group specifications -message GroupSpec { - option (gogoproto.equal) = false; - option (gogoproto.goproto_getters) = false; - - string name = 1 [ - (gogoproto.jsontag) = "name", - (gogoproto.moretags) = "yaml:\"name\"" - ]; - - akash.base.v1beta2.PlacementRequirements requirements = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "requirements", - (gogoproto.moretags) = "yaml:\"requirements\"" - ]; - - repeated Resource resources = 3 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "resources", - (gogoproto.moretags) = "yaml:\"resources\"" - ]; -} diff --git a/proto/node/akash/deployment/v1beta2/params.proto b/proto/node/akash/deployment/v1beta2/params.proto deleted file mode 100644 index c15181d8..00000000 --- a/proto/node/akash/deployment/v1beta2/params.proto +++ /dev/null @@ -1,18 +0,0 @@ -syntax = "proto3"; - -package akash.deployment.v1beta2; - -import "gogoproto/gogo.proto"; -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta2"; - -// Params defines the parameters for the x/deployment package -message Params { - cosmos.base.v1beta1.Coin deployment_min_deposit = 1 [ - (gogoproto.customname) = "DeploymentMinDeposit", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "deployment_min_deposit", - (gogoproto.moretags) = "yaml:\"deployment_min_deposit\"" - ]; -} diff --git a/proto/node/akash/deployment/v1beta2/query.proto b/proto/node/akash/deployment/v1beta2/query.proto deleted file mode 100644 index c2ccf115..00000000 --- a/proto/node/akash/deployment/v1beta2/query.proto +++ /dev/null @@ -1,72 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta2; - -import "gogoproto/gogo.proto"; -import "google/api/annotations.proto"; -import "cosmos/base/query/v1beta1/pagination.proto"; -import "akash/deployment/v1beta2/deployment.proto"; -import "akash/deployment/v1beta2/group.proto"; -import "akash/deployment/v1beta2/groupid.proto"; -import "akash/escrow/v1beta2/types.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta2"; - -// Query defines the gRPC querier service -service Query { - // Deployments queries deployments - rpc Deployments(QueryDeploymentsRequest) returns (QueryDeploymentsResponse) { - option (google.api.http).get = "/akash/deployment/v1beta2/deployments/list"; - } - - // Deployment queries deployment details - rpc Deployment(QueryDeploymentRequest) returns (QueryDeploymentResponse) { - option (google.api.http).get = "/akash/deployment/v1beta2/deployments/info"; - } - - // Group queries group details - rpc Group(QueryGroupRequest) returns (QueryGroupResponse) { - option (google.api.http).get = "/akash/deployment/v1beta2/groups/info"; - } -} - -// QueryDeploymentsRequest is request type for the Query/Deployments RPC method -message QueryDeploymentsRequest { - DeploymentFilters filters = 1 [(gogoproto.nullable) = false]; - - cosmos.base.query.v1beta1.PageRequest 
pagination = 2; -} - -// QueryDeploymentsResponse is response type for the Query/Deployments RPC method -message QueryDeploymentsResponse { - repeated QueryDeploymentResponse deployments = 1 - [(gogoproto.nullable) = false, (gogoproto.castrepeated) = "DeploymentResponses"]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryDeploymentRequest is request type for the Query/Deployment RPC method -message QueryDeploymentRequest { - DeploymentID id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"]; -} - -// QueryDeploymentResponse is response type for the Query/Deployment RPC method -message QueryDeploymentResponse { - option (gogoproto.equal) = false; - Deployment deployment = 1 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "deployment", (gogoproto.moretags) = "yaml:\"deployment\""]; - repeated Group groups = 2 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "groups", (gogoproto.moretags) = "yaml:\"groups\""]; - akash.escrow.v1beta2.Account escrow_account = 3 [ - (gogoproto.nullable) = false - ]; -} - -// QueryGroupRequest is request type for the Query/Group RPC method -message QueryGroupRequest { - GroupID id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"]; -} - -// QueryGroupResponse is response type for the Query/Group RPC method -message QueryGroupResponse { - Group group = 1 [(gogoproto.nullable) = false]; -} diff --git a/proto/node/akash/deployment/v1beta2/resource.proto b/proto/node/akash/deployment/v1beta2/resource.proto deleted file mode 100644 index 3610be2e..00000000 --- a/proto/node/akash/deployment/v1beta2/resource.proto +++ /dev/null @@ -1,28 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta2; - -import "gogoproto/gogo.proto"; -import "cosmos/base/v1beta1/coin.proto"; -import "akash/base/v1beta2/resourceunits.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta2"; - -// Resource stores unit, total count and price of resource -message Resource { - option (gogoproto.equal) = false; - - akash.base.v1beta2.ResourceUnits resources = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "unit", - (gogoproto.moretags) = "yaml:\"unit\"" - ]; - uint32 count = 2 [ - (gogoproto.jsontag) = "count", - (gogoproto.moretags) = "yaml:\"count\"" - ]; - cosmos.base.v1beta1.DecCoin price = 3 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "price", - (gogoproto.moretags) = "yaml:\"price\"" - ]; -} diff --git a/proto/node/akash/deployment/v1beta2/service.proto b/proto/node/akash/deployment/v1beta2/service.proto deleted file mode 100644 index cea1aa2c..00000000 --- a/proto/node/akash/deployment/v1beta2/service.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta2; - -import "akash/deployment/v1beta2/deploymentmsg.proto"; -import "akash/deployment/v1beta2/groupmsg.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta2"; - -// Msg defines the deployment Msg service. -service Msg { - // CreateDeployment defines a method to create new deployment given proper inputs. - rpc CreateDeployment(MsgCreateDeployment) returns (MsgCreateDeploymentResponse); - - // DepositDeployment deposits more funds into the deployment account - rpc DepositDeployment(MsgDepositDeployment) returns (MsgDepositDeploymentResponse); - - // UpdateDeployment defines a method to update a deployment given proper inputs. 
- rpc UpdateDeployment(MsgUpdateDeployment) returns (MsgUpdateDeploymentResponse); - - // CloseDeployment defines a method to close a deployment given proper inputs. - rpc CloseDeployment(MsgCloseDeployment) returns (MsgCloseDeploymentResponse); - - // CloseGroup defines a method to close a group of a deployment given proper inputs. - rpc CloseGroup(MsgCloseGroup) returns (MsgCloseGroupResponse); - - // PauseGroup defines a method to close a group of a deployment given proper inputs. - rpc PauseGroup(MsgPauseGroup) returns (MsgPauseGroupResponse); - - // StartGroup defines a method to close a group of a deployment given proper inputs. - rpc StartGroup(MsgStartGroup) returns (MsgStartGroupResponse); -} diff --git a/proto/node/akash/deployment/v1beta3/authz.proto b/proto/node/akash/deployment/v1beta3/authz.proto deleted file mode 100644 index a52c520c..00000000 --- a/proto/node/akash/deployment/v1beta3/authz.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta3; - -import "gogoproto/gogo.proto"; -import "cosmos_proto/cosmos.proto"; -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta3"; - -// DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from -// the granter's account for a deployment. -message DepositDeploymentAuthorization { - option (cosmos_proto.implements_interface) = "Authorization"; - - // SpendLimit is the amount the grantee is authorized to spend from the granter's account for - // the purpose of deployment. - cosmos.base.v1beta1.Coin spend_limit = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "spend_limit" - ]; -} diff --git a/proto/node/akash/deployment/v1beta3/deployment.proto b/proto/node/akash/deployment/v1beta3/deployment.proto deleted file mode 100644 index 5616cd66..00000000 --- a/proto/node/akash/deployment/v1beta3/deployment.proto +++ /dev/null @@ -1,75 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta3; - -import "gogoproto/gogo.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta3"; - -// DeploymentID stores owner and sequence number -message DeploymentID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = "yaml:\"dseq\"" - ]; -} - -// Deployment stores deploymentID, state and version details -message Deployment { - option (gogoproto.equal) = false; - - DeploymentID deployment_id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "DeploymentID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - - // State is an enum which refers to state of deployment - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. 
So declaring dummy state - invalid = 0 [(gogoproto.enumvalue_customname) = "DeploymentStateInvalid"]; - // DeploymentActive denotes state for deployment active - active = 1 [(gogoproto.enumvalue_customname) = "DeploymentActive"]; - // DeploymentClosed denotes state for deployment closed - closed = 2 [(gogoproto.enumvalue_customname) = "DeploymentClosed"]; - } - - State state = 2 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; - bytes version = 3 [ - (gogoproto.jsontag) = "version", - (gogoproto.moretags) = "yaml:\"version\"" - ]; - int64 created_at = 4; -} - -// DeploymentFilters defines filters used to filter deployments -message DeploymentFilters { - option (gogoproto.equal) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = "yaml:\"dseq\"" - ]; - string state = 3 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; -} diff --git a/proto/node/akash/deployment/v1beta3/deploymentmsg.proto b/proto/node/akash/deployment/v1beta3/deploymentmsg.proto deleted file mode 100644 index 47443ae9..00000000 --- a/proto/node/akash/deployment/v1beta3/deploymentmsg.proto +++ /dev/null @@ -1,106 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta3; - -import "gogoproto/gogo.proto"; - -import "akash/deployment/v1beta3/deployment.proto"; -import "akash/deployment/v1beta3/groupspec.proto"; - -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta3"; - -// MsgCreateDeployment defines an SDK message for creating deployment -message MsgCreateDeployment { - option (gogoproto.equal) = false; - - DeploymentID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - repeated GroupSpec groups = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "groups", - (gogoproto.moretags) = "yaml:\"groups\"" - ]; - bytes version = 3 [ - (gogoproto.jsontag) = "version", - (gogoproto.moretags) = "yaml:\"version\"" - ]; - cosmos.base.v1beta1.Coin deposit = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "deposit", - (gogoproto.moretags) = "yaml:\"deposit\"" - ]; - // Depositor pays for the deposit - string depositor = 5 [ - (gogoproto.jsontag) = "depositor", - (gogoproto.moretags) = "yaml:\"depositor\"" - ]; -} - -// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. -message MsgCreateDeploymentResponse {} - -// MsgDepositDeployment deposits more funds into the deposit account -message MsgDepositDeployment { - option (gogoproto.equal) = false; - - DeploymentID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - - cosmos.base.v1beta1.Coin amount = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "amount", - (gogoproto.moretags) = "yaml:\"amount\"" - ]; - - // Depositor pays for the deposit - string depositor = 3 [ - (gogoproto.jsontag) = "depositor", - (gogoproto.moretags) = "yaml:\"depositor\"" - ]; -} - -// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. 
-message MsgDepositDeploymentResponse {} - -// MsgUpdateDeployment defines an SDK message for updating deployment -message MsgUpdateDeployment { - option (gogoproto.equal) = false; - - DeploymentID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - bytes version = 3 [ - (gogoproto.jsontag) = "version", - (gogoproto.moretags) = "yaml:\"version\"" - ]; -} - -// MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. -message MsgUpdateDeploymentResponse {} - -// MsgCloseDeployment defines an SDK message for closing deployment -message MsgCloseDeployment { - option (gogoproto.equal) = false; - - DeploymentID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID", - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. -message MsgCloseDeploymentResponse {} diff --git a/proto/node/akash/deployment/v1beta3/genesis.proto b/proto/node/akash/deployment/v1beta3/genesis.proto deleted file mode 100644 index 8375cb9a..00000000 --- a/proto/node/akash/deployment/v1beta3/genesis.proto +++ /dev/null @@ -1,39 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/deployment/v1beta3/deployment.proto"; -import "akash/deployment/v1beta3/group.proto"; -import "akash/deployment/v1beta3/params.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta3"; - -// GenesisDeployment defines the basic genesis state used by deployment module -message GenesisDeployment { - Deployment deployment = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "deployment", - (gogoproto.moretags) = "yaml:\"deployment\"" - ]; - - repeated Group groups = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "groups", - (gogoproto.moretags) = "yaml:\"groups\"" - ]; -} - -// GenesisState stores slice of genesis deployment instance -message GenesisState { - repeated GenesisDeployment deployments = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "deployments", - (gogoproto.moretags) = "yaml:\"deployments\"" - ]; - - Params params = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "params", - (gogoproto.moretags) = "yaml:\"params\"" - ]; -} diff --git a/proto/node/akash/deployment/v1beta3/group.proto b/proto/node/akash/deployment/v1beta3/group.proto deleted file mode 100644 index 2a9b1df6..00000000 --- a/proto/node/akash/deployment/v1beta3/group.proto +++ /dev/null @@ -1,58 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/deployment/v1beta3/groupid.proto"; -import "akash/deployment/v1beta3/groupspec.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta3"; - -// Group stores group id, state and specifications of group -message Group { - option (gogoproto.equal) = false; - - GroupID group_id = 1 [ - (gogoproto.customname) = "GroupID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - - // State is an enum which refers to state of group - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. 
So declaring dummy state - invalid = 0 [ - (gogoproto.enumvalue_customname) = "GroupStateInvalid" - ]; - // GroupOpen denotes state for group open - open = 1 [ - (gogoproto.enumvalue_customname) = "GroupOpen" - ]; - // GroupOrdered denotes state for group ordered - paused = 2 [ - (gogoproto.enumvalue_customname) = "GroupPaused" - ]; - // GroupInsufficientFunds denotes state for group insufficient_funds - insufficient_funds = 3 [ - (gogoproto.enumvalue_customname) = "GroupInsufficientFunds" - ]; - // GroupClosed denotes state for group closed - closed = 4 [ - (gogoproto.enumvalue_customname) = "GroupClosed" - ]; - } - - State state = 2 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; - GroupSpec group_spec = 3 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "spec", - (gogoproto.moretags) = "yaml:\"spec\"" - ]; - - int64 created_at = 4; -} diff --git a/proto/node/akash/deployment/v1beta3/groupid.proto b/proto/node/akash/deployment/v1beta3/groupid.proto deleted file mode 100644 index 01d8e5a3..00000000 --- a/proto/node/akash/deployment/v1beta3/groupid.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta3; - -import "gogoproto/gogo.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta3"; - -// GroupID stores owner, deployment sequence number and group sequence number -message GroupID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = "yaml:\"dseq\"" - ]; - uint32 gseq = 3 [ - (gogoproto.customname) = "GSeq", - (gogoproto.jsontag) = "gseq", - (gogoproto.moretags) = "yaml:\"gseq\"" - ]; -} diff --git a/proto/node/akash/deployment/v1beta3/groupmsg.proto b/proto/node/akash/deployment/v1beta3/groupmsg.proto deleted file mode 100644 index ad647b84..00000000 --- a/proto/node/akash/deployment/v1beta3/groupmsg.proto +++ /dev/null @@ -1,52 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/deployment/v1beta3/groupid.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta3"; - -// MsgCloseGroup defines SDK message to close a single Group within a Deployment. -message MsgCloseGroup { - option (gogoproto.equal) = false; - - GroupID id = 1 [ - (gogoproto.customname) = "ID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgCloseGroupResponse defines the Msg/CloseGroup response type. -message MsgCloseGroupResponse {} - -// MsgPauseGroup defines SDK message to close a single Group within a Deployment. -message MsgPauseGroup { - option (gogoproto.equal) = false; - - GroupID id = 1 [ - (gogoproto.customname) = "ID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgPauseGroupResponse defines the Msg/PauseGroup response type. -message MsgPauseGroupResponse {} - -// MsgStartGroup defines SDK message to close a single Group within a Deployment. 
-message MsgStartGroup { - option (gogoproto.equal) = false; - - GroupID id = 1 [ - (gogoproto.customname) = "ID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgStartGroupResponse defines the Msg/StartGroup response type. -message MsgStartGroupResponse {} diff --git a/proto/node/akash/deployment/v1beta3/groupspec.proto b/proto/node/akash/deployment/v1beta3/groupspec.proto deleted file mode 100644 index 851d37a7..00000000 --- a/proto/node/akash/deployment/v1beta3/groupspec.proto +++ /dev/null @@ -1,32 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta3/attribute.proto"; -import "akash/deployment/v1beta3/resourceunit.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta3"; - -// GroupSpec stores group specifications -message GroupSpec { - option (gogoproto.equal) = false; - option (gogoproto.goproto_getters) = false; - - string name = 1 [ - (gogoproto.jsontag) = "name", - (gogoproto.moretags) = "yaml:\"name\"" - ]; - - akash.base.v1beta3.PlacementRequirements requirements = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "requirements", - (gogoproto.moretags) = "yaml:\"requirements\"" - ]; - - repeated ResourceUnit resources = 3 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "ResourceUnits", - (gogoproto.jsontag) = "resources", - (gogoproto.moretags) = "yaml:\"resources\"" - ]; -} diff --git a/proto/node/akash/deployment/v1beta3/params.proto b/proto/node/akash/deployment/v1beta3/params.proto deleted file mode 100644 index 91ed80ca..00000000 --- a/proto/node/akash/deployment/v1beta3/params.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package akash.deployment.v1beta3; - -import "gogoproto/gogo.proto"; -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta3"; - -// Params defines the parameters for the x/deployment package -message Params { - repeated cosmos.base.v1beta1.Coin min_deposits = 1[ - (gogoproto.castrepeated) = "github.com/cosmos/cosmos-sdk/types.Coins", - (gogoproto.customname) = "MinDeposits", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "min_deposits", - (gogoproto.moretags) = "yaml:\"min_deposits\"" - ]; -} diff --git a/proto/node/akash/deployment/v1beta3/query.proto b/proto/node/akash/deployment/v1beta3/query.proto deleted file mode 100644 index d80d16eb..00000000 --- a/proto/node/akash/deployment/v1beta3/query.proto +++ /dev/null @@ -1,90 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta3; - -import "gogoproto/gogo.proto"; -import "google/api/annotations.proto"; -import "cosmos/base/query/v1beta1/pagination.proto"; -import "akash/deployment/v1beta3/deployment.proto"; -import "akash/deployment/v1beta3/group.proto"; -import "akash/deployment/v1beta3/groupid.proto"; -import "akash/escrow/v1beta3/types.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta3"; - -// Query defines the gRPC querier service -service Query { - // Deployments queries deployments - rpc Deployments(QueryDeploymentsRequest) returns (QueryDeploymentsResponse) { - option (google.api.http).get = "/akash/deployment/v1beta3/deployments/list"; - } - - // Deployment queries deployment details - rpc Deployment(QueryDeploymentRequest) returns (QueryDeploymentResponse) { - option (google.api.http).get = "/akash/deployment/v1beta3/deployments/info"; - } - - // Group 
queries group details - rpc Group(QueryGroupRequest) returns (QueryGroupResponse) { - option (google.api.http).get = "/akash/deployment/v1beta3/groups/info"; - } -} - -// QueryDeploymentsRequest is request type for the Query/Deployments RPC method -message QueryDeploymentsRequest { - DeploymentFilters filters = 1 [ - (gogoproto.nullable) = false - ]; - - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryDeploymentsResponse is response type for the Query/Deployments RPC method -message QueryDeploymentsResponse { - repeated QueryDeploymentResponse deployments = 1 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "DeploymentResponses" - ]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryDeploymentRequest is request type for the Query/Deployment RPC method -message QueryDeploymentRequest { - DeploymentID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID" - ]; -} - -// QueryDeploymentResponse is response type for the Query/Deployment RPC method -message QueryDeploymentResponse { - option (gogoproto.equal) = false; - Deployment deployment = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "deployment", - (gogoproto.moretags) = "yaml:\"deployment\"" - ]; - repeated Group groups = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "groups", - (gogoproto.moretags) = "yaml:\"groups\"" - ]; - akash.escrow.v1beta3.Account escrow_account = 3 [ - (gogoproto.nullable) = false - ]; -} - -// QueryGroupRequest is request type for the Query/Group RPC method -message QueryGroupRequest { - GroupID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID" - ]; -} - -// QueryGroupResponse is response type for the Query/Group RPC method -message QueryGroupResponse { - Group group = 1 [ - (gogoproto.nullable) = false - ]; -} diff --git a/proto/node/akash/deployment/v1beta3/service.proto b/proto/node/akash/deployment/v1beta3/service.proto deleted file mode 100644 index cf89f664..00000000 --- a/proto/node/akash/deployment/v1beta3/service.proto +++ /dev/null @@ -1,31 +0,0 @@ -syntax = "proto3"; -package akash.deployment.v1beta3; - -import "akash/deployment/v1beta3/deploymentmsg.proto"; -import "akash/deployment/v1beta3/groupmsg.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta3"; - -// Msg defines the deployment Msg service. -service Msg { - // CreateDeployment defines a method to create new deployment given proper inputs. - rpc CreateDeployment(MsgCreateDeployment) returns (MsgCreateDeploymentResponse); - - // DepositDeployment deposits more funds into the deployment account - rpc DepositDeployment(MsgDepositDeployment) returns (MsgDepositDeploymentResponse); - - // UpdateDeployment defines a method to update a deployment given proper inputs. - rpc UpdateDeployment(MsgUpdateDeployment) returns (MsgUpdateDeploymentResponse); - - // CloseDeployment defines a method to close a deployment given proper inputs. - rpc CloseDeployment(MsgCloseDeployment) returns (MsgCloseDeploymentResponse); - - // CloseGroup defines a method to close a group of a deployment given proper inputs. - rpc CloseGroup(MsgCloseGroup) returns (MsgCloseGroupResponse); - - // PauseGroup defines a method to close a group of a deployment given proper inputs. - rpc PauseGroup(MsgPauseGroup) returns (MsgPauseGroupResponse); - - // StartGroup defines a method to close a group of a deployment given proper inputs. 
- rpc StartGroup(MsgStartGroup) returns (MsgStartGroupResponse); -} diff --git a/proto/node/akash/deployment/v1beta4/deploymentmsg.proto b/proto/node/akash/deployment/v1beta4/deploymentmsg.proto new file mode 100644 index 00000000..57ad8192 --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/deploymentmsg.proto @@ -0,0 +1,84 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; +import "cosmos/msg/v1/msg.proto"; +import "cosmos/base/v1beta1/coin.proto"; + +import "akash/deployment/v1/deployment.proto"; +import "akash/deployment/v1beta4/groupspec.proto"; + +option go_package = "pkg.akt.dev/go/node/deployment/v1beta4"; + +// MsgCreateDeployment defines an SDK message for creating deployment +message MsgCreateDeployment { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "id.owner"; + + akash.deployment.v1.DeploymentID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + repeated GroupSpec groups = 2 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "GroupSpecs", + (gogoproto.jsontag) = "groups", + (gogoproto.moretags) = "yaml:\"groups\"" + ]; + bytes hash = 3 [ + (gogoproto.jsontag) = "hash", + (gogoproto.moretags) = "yaml:\"hash\"" + ]; + cosmos.base.v1beta1.Coin deposit = 4 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"deposit\"" + ]; + // Depositor pays for the deposit + string depositor = 5 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "depositor", + (gogoproto.moretags) = "yaml:\"depositor\"" + ]; +} + +// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. +message MsgCreateDeploymentResponse {} + +// MsgUpdateDeployment defines an SDK message for updating deployment +message MsgUpdateDeployment { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "id.owner"; + + akash.deployment.v1.DeploymentID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + bytes hash = 3 [ + (gogoproto.jsontag) = "hash", + (gogoproto.moretags) = "yaml:\"hash\"" + ]; +} + +// MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. +message MsgUpdateDeploymentResponse {} + +// MsgCloseDeployment defines an SDK message for closing deployment +message MsgCloseDeployment { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "id.owner"; + + akash.deployment.v1.DeploymentID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. 
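
Compared with the deleted v1beta3 definition, the v1beta4 MsgCreateDeployment above reuses the shared akash.deployment.v1.DeploymentID, declares the owner as the transaction signer via (cosmos.msg.v1.signer), and replaces the version bytes with hash. A hedged Go sketch of assembling it; the generated type names (GroupSpecs, MsgCreateDeployment) and import paths are assumptions derived from the options shown above.

package example

import (
	sdk "github.com/cosmos/cosmos-sdk/types"

	dv1 "pkg.akt.dev/go/node/deployment/v1"
	dv1beta4 "pkg.akt.dev/go/node/deployment/v1beta4"
)

// newCreateDeployment assembles the v1beta4 create message: the ID comes from
// the shared akash.deployment.v1 package and the former "version" bytes are
// now carried as "hash".
func newCreateDeployment(owner string, dseq uint64, groups dv1beta4.GroupSpecs, manifestHash []byte) dv1beta4.MsgCreateDeployment {
	return dv1beta4.MsgCreateDeployment{
		ID:      dv1.DeploymentID{Owner: owner, DSeq: dseq},
		Groups:  groups,
		Hash:    manifestHash,
		Deposit: sdk.NewInt64Coin("uakt", 5000000),
		// (cosmos.msg.v1.signer) = "id.owner": the owner signs the transaction,
		// while the depositor may be a separate account funding the escrow.
		Depositor: owner,
	}
}
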
+message MsgCloseDeploymentResponse {} diff --git a/proto/node/akash/deployment/v1beta4/filters.proto b/proto/node/akash/deployment/v1beta4/filters.proto new file mode 100644 index 00000000..2319480f --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/filters.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; + +option go_package = "pkg.akt.dev/go/node/deployment/v1beta4"; + +// DeploymentFilters defines filters used to filter deployments +message DeploymentFilters { + option (gogoproto.equal) = false; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = "yaml:\"dseq\"" + ]; + string state = 3 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; +} + +// GroupFilters defines filters used to filter groups +message GroupFilters { + option (gogoproto.equal) = false; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = "yaml:\"dseq\"" + ]; + uint64 gseq = 3 [ + (gogoproto.customname) = "GSeq", + (gogoproto.jsontag) = "gseq", + (gogoproto.moretags) = "yaml:\"gseq\"" + ]; + string state = 4 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; +} diff --git a/proto/node/akash/deployment/v1beta4/genesis.proto b/proto/node/akash/deployment/v1beta4/genesis.proto new file mode 100644 index 00000000..2839cb60 --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/genesis.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; + +import "akash/deployment/v1/deployment.proto"; + +import "akash/deployment/v1beta4/group.proto"; +import "akash/deployment/v1beta4/params.proto"; + +option go_package = "pkg.akt.dev/go/node/deployment/v1beta4"; + +// GenesisDeployment defines the basic genesis state used by deployment module +message GenesisDeployment { + akash.deployment.v1.Deployment deployment = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "deployment", + (gogoproto.moretags) = "yaml:\"deployment\"" + ]; + + repeated Group groups = 2 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Groups", + (gogoproto.jsontag) = "groups", + (gogoproto.moretags) = "yaml:\"groups\"" + ]; +} + +// GenesisState stores slice of genesis deployment instance +message GenesisState { + repeated GenesisDeployment deployments = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "deployments", + (gogoproto.moretags) = "yaml:\"deployments\"" + ]; + + Params params = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "params", + (gogoproto.moretags) = "yaml:\"params\"" + ]; +} diff --git a/proto/node/akash/deployment/v1beta4/group.proto b/proto/node/akash/deployment/v1beta4/group.proto new file mode 100644 index 00000000..21b3d56a --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/group.proto @@ -0,0 +1,60 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; +import "akash/deployment/v1/group.proto"; +import "akash/deployment/v1beta4/groupspec.proto"; + +option go_package = "pkg.akt.dev/go/node/deployment/v1beta4"; + 
+// Group stores group id, state and specifications of group +message Group { + option (gogoproto.equal) = false; + + // State is an enum which refers to state of group + enum State { + option (gogoproto.goproto_enum_prefix) = false; + + // Prefix should start with 0 in enum. So declaring dummy state + invalid = 0 [ + (gogoproto.enumvalue_customname) = "GroupStateInvalid" + ]; + // GroupOpen denotes state for group open + open = 1 [ + (gogoproto.enumvalue_customname) = "GroupOpen" + ]; + // GroupPaused denotes state for group paused + paused = 2 [ + (gogoproto.enumvalue_customname) = "GroupPaused" + ]; + // GroupInsufficientFunds denotes state for group insufficient_funds + insufficient_funds = 3 [ + (gogoproto.enumvalue_customname) = "GroupInsufficientFunds" + ]; + // GroupClosed denotes state for group closed + closed = 4 [ + (gogoproto.enumvalue_customname) = "GroupClosed" + ]; + } + + akash.deployment.v1.GroupID id = 1 [ + (gogoproto.customname) = "ID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + + State state = 2 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; + + GroupSpec group_spec = 3 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "GroupSpecs", + (gogoproto.jsontag) = "spec", + (gogoproto.moretags) = "yaml:\"spec\"" + ]; + + int64 created_at = 4; +} diff --git a/proto/node/akash/deployment/v1beta4/groupmsg.proto b/proto/node/akash/deployment/v1beta4/groupmsg.proto new file mode 100644 index 00000000..fb83931a --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/groupmsg.proto @@ -0,0 +1,58 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; + +import "cosmos/msg/v1/msg.proto"; + +import "akash/deployment/v1/group.proto"; + +option go_package = "pkg.akt.dev/go/node/deployment/v1beta4"; + +// MsgCloseGroup defines SDK message to close a single Group within a Deployment. +message MsgCloseGroup { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "id.owner"; + + akash.deployment.v1.GroupID id = 1 [ + (gogoproto.customname) = "ID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// MsgCloseGroupResponse defines the Msg/CloseGroup response type. +message MsgCloseGroupResponse {} + +// MsgPauseGroup defines SDK message to pause a single Group within a Deployment. +message MsgPauseGroup { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "id.owner"; + + akash.deployment.v1.GroupID id = 1 [ + (gogoproto.customname) = "ID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// MsgPauseGroupResponse defines the Msg/PauseGroup response type. +message MsgPauseGroupResponse {} + +// MsgStartGroup defines SDK message to start a single Group within a Deployment. +message MsgStartGroup { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "id.owner"; + + akash.deployment.v1.GroupID id = 1 [ + (gogoproto.customname) = "ID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// MsgStartGroupResponse defines the Msg/StartGroup response type.
+message MsgStartGroupResponse {} diff --git a/proto/node/akash/deployment/v1beta4/groupspec.proto b/proto/node/akash/deployment/v1beta4/groupspec.proto new file mode 100644 index 00000000..f75128c5 --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/groupspec.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; + +import "akash/base/attributes/v1/attribute.proto"; +import "akash/deployment/v1beta4/resourceunit.proto"; + +option go_package = "pkg.akt.dev/go/node/deployment/v1beta4"; + +// Spec stores group specifications +message GroupSpec { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string name = 1 [ + (gogoproto.jsontag) = "name", + (gogoproto.moretags) = "yaml:\"name\"" + ]; + + akash.base.attributes.v1.PlacementRequirements requirements = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "requirements", + (gogoproto.moretags) = "yaml:\"requirements\"" + ]; + + repeated ResourceUnit resources = 3 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "ResourceUnits", + (gogoproto.jsontag) = "resources", + (gogoproto.moretags) = "yaml:\"resources\"" + ]; +} diff --git a/proto/node/akash/deployment/v1beta4/params.proto b/proto/node/akash/deployment/v1beta4/params.proto new file mode 100644 index 00000000..608f7455 --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/params.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; +import "cosmos/base/v1beta1/coin.proto"; + +option go_package = "pkg.akt.dev/go/node/deployment/v1beta4"; + +// Params defines the parameters for the x/deployment module +message Params { + repeated cosmos.base.v1beta1.Coin min_deposits = 1[ + (gogoproto.castrepeated) = "github.com/cosmos/cosmos-sdk/types.Coins", + (gogoproto.customname) = "MinDeposits", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "min_deposits", + (gogoproto.moretags) = "yaml:\"min_deposits\"" + ]; +} diff --git a/proto/node/akash/deployment/v1beta4/paramsmsg.proto b/proto/node/akash/deployment/v1beta4/paramsmsg.proto new file mode 100644 index 00000000..4e47bd30 --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/paramsmsg.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; + +import "cosmos/msg/v1/msg.proto"; +import "cosmos_proto/cosmos.proto"; + +import "akash/deployment/v1beta4/params.proto"; + +option go_package = "pkg.akt.dev/go/node/deployment/v1beta4"; + +// MsgUpdateParams is the Msg/UpdateParams request type. +// +// Since: akash v1.0.0 +message MsgUpdateParams { + option (cosmos.msg.v1.signer) = "authority"; + + // authority is the address of the governance account. + string authority = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString" + ]; + + // params defines the x/deployment parameters to update. + // + // NOTE: All parameters must be supplied. + Params params = 2 [ + (gogoproto.nullable) = false + ]; +} + +// MsgUpdateParamsResponse defines the response structure for executing a +// MsgUpdateParams message. 
+// +// Since: akash v1.0.0 +message MsgUpdateParamsResponse {} diff --git a/proto/node/akash/deployment/v1beta4/query.proto b/proto/node/akash/deployment/v1beta4/query.proto new file mode 100644 index 00000000..444f909c --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/query.proto @@ -0,0 +1,114 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; +import "amino/amino.proto"; +import "google/api/annotations.proto"; +import "cosmos/base/query/v1beta1/pagination.proto"; + +import "akash/deployment/v1beta4/filters.proto"; +import "akash/deployment/v1beta4/group.proto"; +import "akash/deployment/v1beta4/params.proto"; + +import "akash/deployment/v1/deployment.proto"; +import "akash/deployment/v1/group.proto"; + +import "akash/escrow/v1/account.proto"; + +option go_package = "pkg.akt.dev/go/node/deployment/v1beta4"; + +// Query defines the gRPC querier service +service Query { + // Deployments queries deployments + rpc Deployments(QueryDeploymentsRequest) returns (QueryDeploymentsResponse) { + option (google.api.http).get = "/akash/deployment/v1beta4/deployments/list"; + } + + // Deployment queries deployment details + rpc Deployment(QueryDeploymentRequest) returns (QueryDeploymentResponse) { + option (google.api.http).get = "/akash/deployment/v1beta4/deployments/info"; + } + + // Group queries group details + rpc Group(QueryGroupRequest) returns (QueryGroupResponse) { + option (google.api.http).get = "/akash/deployment/v1beta4/groups/info"; + } + + // Params returns the total set of deployment module parameters. + rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { + option (google.api.http).get = "/akash/deployment/v1beta4/params"; + } +} + +// QueryDeploymentsRequest is request type for the Query/Deployments RPC method +message QueryDeploymentsRequest { + DeploymentFilters filters = 1 [ + (gogoproto.nullable) = false + ]; + + cosmos.base.query.v1beta1.PageRequest pagination = 2; +} + +// QueryDeploymentsResponse is response type for the Query/Deployments RPC method +message QueryDeploymentsResponse { + repeated QueryDeploymentResponse deployments = 1 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "DeploymentResponses" + ]; + + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryDeploymentRequest is request type for the Query/Deployment RPC method +message QueryDeploymentRequest { + akash.deployment.v1.DeploymentID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID" + ]; +} + +// QueryDeploymentResponse is response type for the Query/Deployment RPC method +message QueryDeploymentResponse { + option (gogoproto.equal) = false; + akash.deployment.v1.Deployment deployment = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "deployment", + (gogoproto.moretags) = "yaml:\"deployment\"" + ]; + repeated Group groups = 2 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Groups", + (gogoproto.jsontag) = "groups", + (gogoproto.moretags) = "yaml:\"groups\"" + ]; + akash.escrow.v1.Account escrow_account = 3 [ + (gogoproto.nullable) = false + ]; +} + +// QueryGroupRequest is request type for the Query/Group RPC method +message QueryGroupRequest { + akash.deployment.v1.GroupID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID" + ]; +} + +// QueryGroupResponse is response type for the Query/Group RPC method +message QueryGroupResponse { + Group group = 1 [ + (gogoproto.nullable) = false + ]; +} + +// QueryParamsRequest is the request type for the Query/Params RPC
method. +message QueryParamsRequest {} + +// QueryParamsResponse is the response type for the Query/Params RPC method. +message QueryParamsResponse { + // params defines the parameters of the module. + Params params = 1 [ + (gogoproto.nullable) = false, + (amino.dont_omitempty) = true + ]; +} diff --git a/proto/node/akash/deployment/v1beta3/resourceunit.proto b/proto/node/akash/deployment/v1beta4/resourceunit.proto similarity index 75% rename from proto/node/akash/deployment/v1beta3/resourceunit.proto rename to proto/node/akash/deployment/v1beta4/resourceunit.proto index 29c2a7f9..a646869f 100644 --- a/proto/node/akash/deployment/v1beta3/resourceunit.proto +++ b/proto/node/akash/deployment/v1beta4/resourceunit.proto @@ -1,17 +1,19 @@ syntax = "proto3"; -package akash.deployment.v1beta3; +package akash.deployment.v1beta4; import "gogoproto/gogo.proto"; -import "akash/base/v1beta3/resources.proto"; + import "cosmos/base/v1beta1/coin.proto"; -option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta3"; +import "akash/base/resources/v1beta4/resources.proto"; + +option go_package = "pkg.akt.dev/go/node/deployment/v1beta4"; // ResourceUnit extends Resources and adds Count along with the Price message ResourceUnit { option (gogoproto.equal) = true; - akash.base.v1beta3.Resources resource = 1 [ + akash.base.resources.v1beta4.Resources resource = 1 [ (gogoproto.nullable) = false, (gogoproto.embed) = true, (gogoproto.jsontag) = "resource", diff --git a/proto/node/akash/deployment/v1beta4/service.proto b/proto/node/akash/deployment/v1beta4/service.proto new file mode 100644 index 00000000..24d458a3 --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/service.proto @@ -0,0 +1,43 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "akash/deployment/v1/msg.proto"; +import "akash/deployment/v1beta4/deploymentmsg.proto"; +import "akash/deployment/v1beta4/groupmsg.proto"; +import "akash/deployment/v1beta4/paramsmsg.proto"; + +import "cosmos/msg/v1/msg.proto"; + +option go_package = "pkg.akt.dev/go/node/deployment/v1beta4"; + +// Msg defines the x/deployment Msg service. +service Msg { + option (cosmos.msg.v1.service) = true; + + // CreateDeployment defines a method to create new deployment given proper inputs. + rpc CreateDeployment(MsgCreateDeployment) returns (MsgCreateDeploymentResponse); + + // DepositDeployment deposits more funds into the deployment account + rpc DepositDeployment(akash.deployment.v1.MsgDepositDeployment) returns (akash.deployment.v1.MsgDepositDeploymentResponse); + + // UpdateDeployment defines a method to update a deployment given proper inputs. + rpc UpdateDeployment(MsgUpdateDeployment) returns (MsgUpdateDeploymentResponse); + + // CloseDeployment defines a method to close a deployment given proper inputs. + rpc CloseDeployment(MsgCloseDeployment) returns (MsgCloseDeploymentResponse); + + // CloseGroup defines a method to close a group of a deployment given proper inputs. + rpc CloseGroup(MsgCloseGroup) returns (MsgCloseGroupResponse); + + // PauseGroup defines a method to pause a group of a deployment given proper inputs. + rpc PauseGroup(MsgPauseGroup) returns (MsgPauseGroupResponse); + + // StartGroup defines a method to start a group of a deployment given proper inputs. + rpc StartGroup(MsgStartGroup) returns (MsgStartGroupResponse); + + // UpdateParams defines a governance operation for updating the x/deployment module + // parameters. The authority is hard-coded to the x/gov module account.
+ // + // Since: akash v1.0.0 + rpc UpdateParams(MsgUpdateParams) returns (MsgUpdateParamsResponse); +} diff --git a/proto/node/akash/discovery/v1/akash.proto b/proto/node/akash/discovery/v1/akash.proto index 5bdf3630..0e4fd445 100644 --- a/proto/node/akash/discovery/v1/akash.proto +++ b/proto/node/akash/discovery/v1/akash.proto @@ -5,7 +5,7 @@ package akash.discovery.v1; import "akash/discovery/v1/client_info.proto"; import "gogoproto/gogo.proto"; -option go_package = "github.com/akash-network/akash-api/go/node/client"; +option go_package = "pkg.akt.dev/go/node/client"; // Akash akash specific RPC parameters message Akash { diff --git a/proto/node/akash/discovery/v1/client_info.proto b/proto/node/akash/discovery/v1/client_info.proto index d01ad9e8..be852158 100644 --- a/proto/node/akash/discovery/v1/client_info.proto +++ b/proto/node/akash/discovery/v1/client_info.proto @@ -4,7 +4,7 @@ package akash.discovery.v1; import "gogoproto/gogo.proto"; -option go_package = "github.com/akash-network/akash-api/go/node/client"; +option go_package = "pkg.akt.dev/go/node/client"; // ClientInfo akash specific client info message ClientInfo { diff --git a/proto/node/akash/escrow/v1/account.proto b/proto/node/akash/escrow/v1/account.proto new file mode 100644 index 00000000..671f7b61 --- /dev/null +++ b/proto/node/akash/escrow/v1/account.proto @@ -0,0 +1,94 @@ +syntax = "proto3"; +package akash.escrow.v1; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; + +import "cosmos/base/v1beta1/coin.proto"; + +import "akash/escrow/v1/accountid.proto"; + +option go_package = "pkg.akt.dev/go/node/escrow/v1"; + +// Account stores state for an escrow account +message Account { + // State stores state for an escrow account + enum State { + option (gogoproto.goproto_enum_prefix) = false; + // AccountStateInvalid is an invalid state + invalid = 0 [ + (gogoproto.enumvalue_customname) = "AccountStateInvalid" + ]; + // AccountOpen is the state when an account is open + open = 1 [ + (gogoproto.enumvalue_customname) = "AccountOpen" + ]; + // AccountClosed is the state when an account is closed + closed = 2 [ + (gogoproto.enumvalue_customname) = "AccountClosed" + ]; + // AccountOverdrawn is the state when an account is overdrawn + overdrawn = 3 [ + (gogoproto.enumvalue_customname) = "AccountOverdrawn" + ]; + } + + // unique identifier for this escrow account + AccountID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"", + (gogoproto.customname) = "ID" + ]; + + // bech32 encoded account address of the owner of this escrow account + string owner = 2 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + + // current state of this escrow account + State state = 3 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; + + // unspent coins received from the owner's wallet + cosmos.base.v1beta1.DecCoin balance = 4 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "balance", + (gogoproto.moretags) = "yaml:\"balance\"" + ]; + + // total coins spent by this account + cosmos.base.v1beta1.DecCoin transferred = 5 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "transferred", + (gogoproto.moretags) = "yaml:\"transferred\"" + ]; + + // block height at which this account was last settled + int64 settled_at = 6 [ + (gogoproto.jsontag) = "settledAt", + (gogoproto.moretags) = "yaml:\"settledAt\"", + (gogoproto.customname) = "SettledAt" + ]; + + 
// bech32 encoded account address of the depositor. + // If depositor is same as the owner, then any incoming coins are added to the Balance. + // If depositor isn't same as the owner, then any incoming coins are added to the Funds. + string depositor = 7 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "depositor", + (gogoproto.moretags) = "yaml:\"depositor\"" + ]; + + // Funds are unspent coins received from the (non-Owner) Depositor's wallet. + // If there are any funds, they should be spent before spending the Balance. + cosmos.base.v1beta1.DecCoin funds = 8 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "funds", + (gogoproto.moretags) = "yaml:\"funds\"" + ]; +} diff --git a/proto/node/akash/escrow/v1/accountid.proto b/proto/node/akash/escrow/v1/accountid.proto new file mode 100644 index 00000000..69be58fb --- /dev/null +++ b/proto/node/akash/escrow/v1/accountid.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; +package akash.escrow.v1; + +import "gogoproto/gogo.proto"; + +option go_package = "pkg.akt.dev/go/node/escrow/v1"; + +// AccountID is the account identifier +message AccountID { + string scope = 1 [ + (gogoproto.jsontag) = "scope", + (gogoproto.moretags) = "yaml:\"scope\"" + ]; + string xid = 2 [ + (gogoproto.jsontag) = "xid", + (gogoproto.moretags) = "yaml:\"xid\"", + (gogoproto.customname) = "XID" + ]; +} diff --git a/proto/node/akash/escrow/v1/fractional_payment.proto b/proto/node/akash/escrow/v1/fractional_payment.proto new file mode 100644 index 00000000..47b621ed --- /dev/null +++ b/proto/node/akash/escrow/v1/fractional_payment.proto @@ -0,0 +1,77 @@ +syntax = "proto3"; +package akash.escrow.v1; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; + +import "cosmos/base/v1beta1/coin.proto"; + +import "akash/escrow/v1/accountid.proto"; + +option go_package = "pkg.akt.dev/go/node/escrow/v1"; + +// Payment stores state for a payment +message FractionalPayment { + // State defines payment state + enum State { + option (gogoproto.goproto_enum_prefix) = false; + // PaymentStateInvalid is the state when the payment is invalid + invalid = 0 [ + (gogoproto.enumvalue_customname) = "PaymentStateInvalid" + ]; + // PaymentStateOpen is the state when the payment is open + open = 1 [ + (gogoproto.enumvalue_customname) = "PaymentOpen" + ]; + // PaymentStateClosed is the state when the payment is closed + closed = 2 [ + (gogoproto.enumvalue_customname) = "PaymentClosed" + ]; + // PaymentStateOverdrawn is the state when the payment is overdrawn + overdrawn = 3 [ + (gogoproto.enumvalue_customname) = "PaymentOverdrawn" + ]; + } + + AccountID account_id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "accountID", + (gogoproto.moretags) = "yaml:\"accountID\"", + (gogoproto.customname) = "AccountID" + ]; + + string payment_id = 2 [ + (gogoproto.jsontag) = "paymentID", + (gogoproto.moretags) = "yaml:\"paymentID\"", + (gogoproto.customname) = "PaymentID" + ]; + + string owner = 3 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + + State state = 4 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; + + cosmos.base.v1beta1.DecCoin rate = 5 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "rate", + (gogoproto.moretags) = "yaml:\"rate\"" + ]; + + cosmos.base.v1beta1.DecCoin balance = 6 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "balance", + (gogoproto.moretags) = "yaml:\"balance\"" + ]; + + 
cosmos.base.v1beta1.Coin withdrawn = 7 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "withdrawn", + (gogoproto.moretags) = "yaml:\"withdrawn\"" + ]; +} diff --git a/proto/node/akash/escrow/v1/genesis.proto b/proto/node/akash/escrow/v1/genesis.proto new file mode 100644 index 00000000..2616fca6 --- /dev/null +++ b/proto/node/akash/escrow/v1/genesis.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; +package akash.escrow.v1; + +import "gogoproto/gogo.proto"; +import "akash/escrow/v1/account.proto"; +import "akash/escrow/v1/fractional_payment.proto"; + +option go_package = "pkg.akt.dev/go/node/escrow/v1"; + +// GenesisState defines the basic genesis state used by the escrow module +message GenesisState { + repeated Account accounts = 1 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Accounts", + (gogoproto.jsontag) = "accounts", + (gogoproto.moretags) = "yaml:\"accounts\"" + ]; + + repeated FractionalPayment payments = 2 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "FractionalPayments", + (gogoproto.jsontag) = "payments", + (gogoproto.moretags) = "yaml:\"payments\"" + ]; +} diff --git a/proto/node/akash/escrow/v1/query.proto b/proto/node/akash/escrow/v1/query.proto new file mode 100644 index 00000000..6abe6d3d --- /dev/null +++ b/proto/node/akash/escrow/v1/query.proto @@ -0,0 +1,66 @@ +syntax = "proto3"; + +package akash.escrow.v1; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "cosmos/base/query/v1beta1/pagination.proto"; + +import "akash/escrow/v1/account.proto"; +import "akash/escrow/v1/fractional_payment.proto"; + +option go_package = "pkg.akt.dev/go/node/escrow/v1"; + +// Query defines the gRPC querier service +service Query { + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + // Accounts queries all accounts + rpc Accounts(QueryAccountsRequest) returns (QueryAccountsResponse) { + option (google.api.http).get = "/akash/escrow/v1/types/accounts/list"; + } + + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + // Payments queries all payments + rpc Payments(QueryPaymentsRequest) returns (QueryPaymentsResponse) { + option (google.api.http).get = "/akash/escrow/v1/types/payments/list"; + } +} + +// QueryAccountsRequest is request type for the Query/Accounts RPC method +message QueryAccountsRequest { + string scope = 1; + string xid = 2; + string owner = 3; + string state = 4; + cosmos.base.query.v1beta1.PageRequest pagination = 5; +} + +// QueryAccountsResponse is response type for the Query/Accounts RPC method +message QueryAccountsResponse { + repeated Account accounts = 1 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Accounts" + ]; + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryPaymentsRequest is request type for the Query/Payments RPC method +message QueryPaymentsRequest { + string scope = 1; + string xid = 2; + string id = 3; + string owner = 4; + string state = 5; + cosmos.base.query.v1beta1.PageRequest pagination = 6; +} + +// QueryPaymentsResponse is response type for the Query/Payments RPC method +message QueryPaymentsResponse { + repeated FractionalPayment payments = 1 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "FractionalPayments" + ]; + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} diff --git a/proto/node/akash/escrow/v1beta1/genesis.proto b/proto/node/akash/escrow/v1beta1/genesis.proto deleted file mode 100644 index e02f21e9..00000000 ---
a/proto/node/akash/escrow/v1beta1/genesis.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; -package akash.escrow.v1beta1; - -import "gogoproto/gogo.proto"; -import "akash/escrow/v1beta1/types.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/escrow/v1beta1"; - -// GenesisState defines the basic genesis state used by escrow module -message GenesisState { - repeated Account accounts = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "accounts", - (gogoproto.moretags) = "yaml:\"accounts\"" - ]; - - repeated Payment payments = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "payments", - (gogoproto.moretags) = "yaml:\"payments\"" - ]; -} diff --git a/proto/node/akash/escrow/v1beta1/query.proto b/proto/node/akash/escrow/v1beta1/query.proto deleted file mode 100644 index 7cf8ca3c..00000000 --- a/proto/node/akash/escrow/v1beta1/query.proto +++ /dev/null @@ -1,58 +0,0 @@ -syntax = "proto3"; - -package akash.escrow.v1beta1; - -import "gogoproto/gogo.proto"; -import "google/api/annotations.proto"; -import "cosmos/base/query/v1beta1/pagination.proto"; -import "akash/escrow/v1beta1/types.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/escrow/v1beta1"; - -// Query defines the gRPC querier service -service Query { - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Accounts queries all accounts - rpc Accounts(QueryAccountsRequest) returns (QueryAccountsResponse) { - option (google.api.http).get = "/akash/escrow/v1beta1/types/accounts/list"; - } - - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Payments queries all payments - rpc Payments(QueryPaymentsRequest) returns (QueryPaymentsResponse) { - option (google.api.http).get = "/akash/escrow/v1beta1/types/payments/list"; - } -} - -// QueryAccountRequest is request type for the Query/Account RPC method -message QueryAccountsRequest { - string scope = 1; - string xid = 2; - string owner = 3; - string state = 4; - cosmos.base.query.v1beta1.PageRequest pagination = 5; -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -message QueryAccountsResponse { - repeated Account accounts = 1 [(gogoproto.nullable) = false]; - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryPaymentRequest is request type for the Query/Payment RPC method -message QueryPaymentsRequest { - string scope = 1; - string xid = 2; - string id = 3; - string owner = 4; - string state = 5; - cosmos.base.query.v1beta1.PageRequest pagination = 6; -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -message QueryPaymentsResponse { - repeated Payment payments = 1 [(gogoproto.nullable) = false]; - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} diff --git a/proto/node/akash/escrow/v1beta1/types.proto b/proto/node/akash/escrow/v1beta1/types.proto deleted file mode 100644 index 5f24fcd5..00000000 --- a/proto/node/akash/escrow/v1beta1/types.proto +++ /dev/null @@ -1,139 +0,0 @@ -syntax = "proto3"; -package akash.escrow.v1beta1; - -import "gogoproto/gogo.proto"; -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/escrow/v1beta1"; - -// AccountID is the account identifier -message AccountID { - string scope = 1 [ - (gogoproto.jsontag) = "scope", - (gogoproto.moretags) = "yaml:\"scope\"" - ]; - string xid = 2 [ - (gogoproto.jsontag) = "xid", - (gogoproto.moretags) = 
"yaml:\"xid\"", - (gogoproto.customname) = "XID" - ]; -} - -// Account stores state for an escrow account -message Account { - - // State stores state for an escrow account - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // AccountStateInvalid is an invalid state - invalid = 0 [(gogoproto.enumvalue_customname) = "AccountStateInvalid"]; - // AccountOpen is the state when an account is open - open = 1 [(gogoproto.enumvalue_customname) = "AccountOpen"]; - // AccountClosed is the state when an account is closed - closed = 2 [(gogoproto.enumvalue_customname) = "AccountClosed"]; - // AccountOverdrawn is the state when an account is overdrawn - overdrawn = 3 [(gogoproto.enumvalue_customname) = "AccountOverdrawn"]; - } - - // unique identifier for this escrow account - AccountID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"", - (gogoproto.customname) = "ID" - ]; - - // bech32 encoded account address of the owner of this escrow account - string owner = 2 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - - // current state of this escrow account - State state = 3 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; - - // unspent coins received from the owner's wallet - cosmos.base.v1beta1.Coin balance = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "balance", - (gogoproto.moretags) = "yaml:\"balance\"" - ]; - - // total coins spent by this account - cosmos.base.v1beta1.Coin transferred = 5 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "transferred", - (gogoproto.moretags) = "yaml:\"transferred\"" - ]; - - // block height at which this account was last settled - int64 settled_at = 6 [ - (gogoproto.jsontag) = "settledAt", - (gogoproto.moretags) = "yaml:\"settledAt\"", - (gogoproto.customname) = "SettledAt" - ]; -} - -// Payment stores state for a payment -message Payment { - option (gogoproto.goproto_stringer) = true; - - // Payment State - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // PaymentStateInvalid is the state when the payment is invalid - invalid = 0 [(gogoproto.enumvalue_customname) = "PaymentStateInvalid"]; - // PaymentStateOpen is the state when the payment is open - open = 1 [(gogoproto.enumvalue_customname) = "PaymentOpen"]; - // PaymentStateClosed is the state when the payment is closed - closed = 2 [(gogoproto.enumvalue_customname) = "PaymentClosed"]; - // PaymentStateOverdrawn is the state when the payment is overdrawn - overdrawn = 3 [(gogoproto.enumvalue_customname) = "PaymentOverdrawn"]; - } - - AccountID account_id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "accountID", - (gogoproto.moretags) = "yaml:\"accountID\"", - (gogoproto.customname) = "AccountID" - ]; - - string payment_id = 2 [ - (gogoproto.jsontag) = "paymentID", - (gogoproto.moretags) = "yaml:\"paymentID\"", - (gogoproto.customname) = "PaymentID" - ]; - - string owner = 3 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - - State state = 4 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; - - cosmos.base.v1beta1.Coin rate = 5 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "rate", - (gogoproto.moretags) = "yaml:\"rate\"" - ]; - - cosmos.base.v1beta1.Coin balance = 6 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "balance", - (gogoproto.moretags) = "yaml:\"balance\"" - ]; - - cosmos.base.v1beta1.Coin withdrawn = 7 [ - 
(gogoproto.nullable) = false, - (gogoproto.jsontag) = "withdrawn", - (gogoproto.moretags) = "yaml:\"withdrawn\"" - ]; -} diff --git a/proto/node/akash/escrow/v1beta2/genesis.proto b/proto/node/akash/escrow/v1beta2/genesis.proto deleted file mode 100644 index e9a3a078..00000000 --- a/proto/node/akash/escrow/v1beta2/genesis.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; -package akash.escrow.v1beta2; - -import "gogoproto/gogo.proto"; -import "akash/escrow/v1beta2/types.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/escrow/v1beta2"; - -// GenesisState defines the basic genesis state used by escrow module -message GenesisState { - repeated Account accounts = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "accounts", - (gogoproto.moretags) = "yaml:\"accounts\"" - ]; - - repeated FractionalPayment payments = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "payments", - (gogoproto.moretags) = "yaml:\"payments\"" - ]; -} diff --git a/proto/node/akash/escrow/v1beta2/query.proto b/proto/node/akash/escrow/v1beta2/query.proto deleted file mode 100644 index df835805..00000000 --- a/proto/node/akash/escrow/v1beta2/query.proto +++ /dev/null @@ -1,58 +0,0 @@ -syntax = "proto3"; - -package akash.escrow.v1beta2; - -import "gogoproto/gogo.proto"; -import "google/api/annotations.proto"; -import "cosmos/base/query/v1beta1/pagination.proto"; -import "akash/escrow/v1beta2/types.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/escrow/v1beta2"; - -// Query defines the gRPC querier service -service Query { - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Accounts queries all accounts - rpc Accounts(QueryAccountsRequest) returns (QueryAccountsResponse) { - option (google.api.http).get = "/akash/escrow/v1beta2/types/accounts/list"; - } - - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Payments queries all payments - rpc Payments(QueryPaymentsRequest) returns (QueryPaymentsResponse) { - option (google.api.http).get = "/akash/escrow/v1beta2/types/payments/list"; - } -} - -// QueryAccountRequest is request type for the Query/Account RPC method -message QueryAccountsRequest { - string scope = 1; - string xid = 2; - string owner = 3; - string state = 4; - cosmos.base.query.v1beta1.PageRequest pagination = 5; -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -message QueryAccountsResponse { - repeated Account accounts = 1 [(gogoproto.nullable) = false]; - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryPaymentRequest is request type for the Query/Payment RPC method -message QueryPaymentsRequest { - string scope = 1; - string xid = 2; - string id = 3; - string owner = 4; - string state = 5; - cosmos.base.query.v1beta1.PageRequest pagination = 6; -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -message QueryPaymentsResponse { - repeated FractionalPayment payments = 1 [(gogoproto.nullable) = false]; - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} diff --git a/proto/node/akash/escrow/v1beta2/types.proto b/proto/node/akash/escrow/v1beta2/types.proto deleted file mode 100644 index c5ceeb1b..00000000 --- a/proto/node/akash/escrow/v1beta2/types.proto +++ /dev/null @@ -1,155 +0,0 @@ -syntax = "proto3"; -package akash.escrow.v1beta2; - -import "gogoproto/gogo.proto"; -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = 
"github.com/akash-network/akash-api/go/node/escrow/v1beta2"; - -// AccountID is the account identifier -message AccountID { - string scope = 1 [ - (gogoproto.jsontag) = "scope", - (gogoproto.moretags) = "yaml:\"scope\"" - ]; - string xid = 2 [ - (gogoproto.jsontag) = "xid", - (gogoproto.moretags) = "yaml:\"xid\"", - (gogoproto.customname) = "XID" - ]; -} - -// Account stores state for an escrow account -message Account { - - // State stores state for an escrow account - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // AccountStateInvalid is an invalid state - invalid = 0 [(gogoproto.enumvalue_customname) = "AccountStateInvalid"]; - // AccountOpen is the state when an account is open - open = 1 [(gogoproto.enumvalue_customname) = "AccountOpen"]; - // AccountClosed is the state when an account is closed - closed = 2 [(gogoproto.enumvalue_customname) = "AccountClosed"]; - // AccountOverdrawn is the state when an account is overdrawn - overdrawn = 3 [(gogoproto.enumvalue_customname) = "AccountOverdrawn"]; - } - - // unique identifier for this escrow account - AccountID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"", - (gogoproto.customname) = "ID" - ]; - - // bech32 encoded account address of the owner of this escrow account - string owner = 2 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - - // current state of this escrow account - State state = 3 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; - - // unspent coins received from the owner's wallet - cosmos.base.v1beta1.DecCoin balance = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "balance", - (gogoproto.moretags) = "yaml:\"balance\"" - ]; - - // total coins spent by this account - cosmos.base.v1beta1.DecCoin transferred = 5 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "transferred", - (gogoproto.moretags) = "yaml:\"transferred\"" - ]; - - // block height at which this account was last settled - int64 settled_at = 6 [ - (gogoproto.jsontag) = "settledAt", - (gogoproto.moretags) = "yaml:\"settledAt\"", - (gogoproto.customname) = "SettledAt" - ]; - - // bech32 encoded account address of the depositor. - // If depositor is same as the owner, then any incoming coins are added to the Balance. - // If depositor isn't same as the owner, then any incoming coins are added to the Funds. - string depositor = 7 [ - (gogoproto.jsontag) = "depositor", - (gogoproto.moretags) = "yaml:\"depositor\"" - ]; - - // Funds are unspent coins received from the (non-Owner) Depositor's wallet. - // If there are any funds, they should be spent before spending the Balance. 
- cosmos.base.v1beta1.DecCoin funds = 8 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "funds", - (gogoproto.moretags) = "yaml:\"funds\"" - ]; -} - -// Payment stores state for a payment -message FractionalPayment { - option (gogoproto.goproto_stringer) = true; - - // Payment State - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // PaymentStateInvalid is the state when the payment is invalid - invalid = 0 [(gogoproto.enumvalue_customname) = "PaymentStateInvalid"]; - // PaymentStateOpen is the state when the payment is open - open = 1 [(gogoproto.enumvalue_customname) = "PaymentOpen"]; - // PaymentStateClosed is the state when the payment is closed - closed = 2 [(gogoproto.enumvalue_customname) = "PaymentClosed"]; - // PaymentStateOverdrawn is the state when the payment is overdrawn - overdrawn = 3 [(gogoproto.enumvalue_customname) = "PaymentOverdrawn"]; - } - - AccountID account_id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "accountID", - (gogoproto.moretags) = "yaml:\"accountID\"", - (gogoproto.customname) = "AccountID" - ]; - - string payment_id = 2 [ - (gogoproto.jsontag) = "paymentID", - (gogoproto.moretags) = "yaml:\"paymentID\"", - (gogoproto.customname) = "PaymentID" - ]; - - string owner = 3 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - - State state = 4 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; - - cosmos.base.v1beta1.DecCoin rate = 5 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "rate", - (gogoproto.moretags) = "yaml:\"rate\"" - ]; - - cosmos.base.v1beta1.DecCoin balance = 6 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "balance", - (gogoproto.moretags) = "yaml:\"balance\"" - ]; - - cosmos.base.v1beta1.Coin withdrawn = 7 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "withdrawn", - (gogoproto.moretags) = "yaml:\"withdrawn\"" - ]; -} diff --git a/proto/node/akash/escrow/v1beta3/genesis.proto b/proto/node/akash/escrow/v1beta3/genesis.proto deleted file mode 100644 index e530adf8..00000000 --- a/proto/node/akash/escrow/v1beta3/genesis.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; -package akash.escrow.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/escrow/v1beta3/types.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/escrow/v1beta3"; - -// GenesisState defines the basic genesis state used by escrow module -message GenesisState { - repeated Account accounts = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "accounts", - (gogoproto.moretags) = "yaml:\"accounts\"" - ]; - - repeated FractionalPayment payments = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "payments", - (gogoproto.moretags) = "yaml:\"payments\"" - ]; -} diff --git a/proto/node/akash/escrow/v1beta3/query.proto b/proto/node/akash/escrow/v1beta3/query.proto deleted file mode 100644 index 10062c29..00000000 --- a/proto/node/akash/escrow/v1beta3/query.proto +++ /dev/null @@ -1,62 +0,0 @@ -syntax = "proto3"; - -package akash.escrow.v1beta3; - -import "gogoproto/gogo.proto"; -import "google/api/annotations.proto"; -import "cosmos/base/query/v1beta1/pagination.proto"; -import "akash/escrow/v1beta3/types.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/escrow/v1beta3"; - -// Query defines the gRPC querier service -service Query { - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Accounts queries all accounts - rpc 
Accounts(QueryAccountsRequest) returns (QueryAccountsResponse) { - option (google.api.http).get = "/akash/escrow/v1beta3/types/accounts/list"; - } - - // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - // Payments queries all payments - rpc Payments(QueryPaymentsRequest) returns (QueryPaymentsResponse) { - option (google.api.http).get = "/akash/escrow/v1beta3/types/payments/list"; - } -} - -// QueryAccountRequest is request type for the Query/Account RPC method -message QueryAccountsRequest { - string scope = 1; - string xid = 2; - string owner = 3; - string state = 4; - cosmos.base.query.v1beta1.PageRequest pagination = 5; -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -message QueryAccountsResponse { - repeated Account accounts = 1 [ - (gogoproto.nullable) = false - ]; - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryPaymentRequest is request type for the Query/Payment RPC method -message QueryPaymentsRequest { - string scope = 1; - string xid = 2; - string id = 3; - string owner = 4; - string state = 5; - cosmos.base.query.v1beta1.PageRequest pagination = 6; -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -message QueryPaymentsResponse { - repeated FractionalPayment payments = 1 [ - (gogoproto.nullable) = false - ]; - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} diff --git a/proto/node/akash/escrow/v1beta3/types.proto b/proto/node/akash/escrow/v1beta3/types.proto deleted file mode 100644 index f767a8a4..00000000 --- a/proto/node/akash/escrow/v1beta3/types.proto +++ /dev/null @@ -1,155 +0,0 @@ -syntax = "proto3"; -package akash.escrow.v1beta3; - -import "gogoproto/gogo.proto"; -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/escrow/v1beta3"; - -// AccountID is the account identifier -message AccountID { - string scope = 1 [ - (gogoproto.jsontag) = "scope", - (gogoproto.moretags) = "yaml:\"scope\"" - ]; - string xid = 2 [ - (gogoproto.jsontag) = "xid", - (gogoproto.moretags) = "yaml:\"xid\"", - (gogoproto.customname) = "XID" - ]; -} - -// Account stores state for an escrow account -message Account { - - // State stores state for an escrow account - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // AccountStateInvalid is an invalid state - invalid = 0 [(gogoproto.enumvalue_customname) = "AccountStateInvalid"]; - // AccountOpen is the state when an account is open - open = 1 [(gogoproto.enumvalue_customname) = "AccountOpen"]; - // AccountClosed is the state when an account is closed - closed = 2 [(gogoproto.enumvalue_customname) = "AccountClosed"]; - // AccountOverdrawn is the state when an account is overdrawn - overdrawn = 3 [(gogoproto.enumvalue_customname) = "AccountOverdrawn"]; - } - - // unique identifier for this escrow account - AccountID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"", - (gogoproto.customname) = "ID" - ]; - - // bech32 encoded account address of the owner of this escrow account - string owner = 2 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - - // current state of this escrow account - State state = 3 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; - - // unspent coins received from the owner's wallet - cosmos.base.v1beta1.DecCoin balance = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = 
"balance", - (gogoproto.moretags) = "yaml:\"balance\"" - ]; - - // total coins spent by this account - cosmos.base.v1beta1.DecCoin transferred = 5 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "transferred", - (gogoproto.moretags) = "yaml:\"transferred\"" - ]; - - // block height at which this account was last settled - int64 settled_at = 6 [ - (gogoproto.jsontag) = "settledAt", - (gogoproto.moretags) = "yaml:\"settledAt\"", - (gogoproto.customname) = "SettledAt" - ]; - - // bech32 encoded account address of the depositor. - // If depositor is same as the owner, then any incoming coins are added to the Balance. - // If depositor isn't same as the owner, then any incoming coins are added to the Funds. - string depositor = 7 [ - (gogoproto.jsontag) = "depositor", - (gogoproto.moretags) = "yaml:\"depositor\"" - ]; - - // Funds are unspent coins received from the (non-Owner) Depositor's wallet. - // If there are any funds, they should be spent before spending the Balance. - cosmos.base.v1beta1.DecCoin funds = 8 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "funds", - (gogoproto.moretags) = "yaml:\"funds\"" - ]; -} - -// Payment stores state for a payment -message FractionalPayment { - option (gogoproto.goproto_stringer) = true; - - // Payment State - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // PaymentStateInvalid is the state when the payment is invalid - invalid = 0 [(gogoproto.enumvalue_customname) = "PaymentStateInvalid"]; - // PaymentStateOpen is the state when the payment is open - open = 1 [(gogoproto.enumvalue_customname) = "PaymentOpen"]; - // PaymentStateClosed is the state when the payment is closed - closed = 2 [(gogoproto.enumvalue_customname) = "PaymentClosed"]; - // PaymentStateOverdrawn is the state when the payment is overdrawn - overdrawn = 3 [(gogoproto.enumvalue_customname) = "PaymentOverdrawn"]; - } - - AccountID account_id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "accountID", - (gogoproto.moretags) = "yaml:\"accountID\"", - (gogoproto.customname) = "AccountID" - ]; - - string payment_id = 2 [ - (gogoproto.jsontag) = "paymentID", - (gogoproto.moretags) = "yaml:\"paymentID\"", - (gogoproto.customname) = "PaymentID" - ]; - - string owner = 3 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - - State state = 4 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; - - cosmos.base.v1beta1.DecCoin rate = 5 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "rate", - (gogoproto.moretags) = "yaml:\"rate\"" - ]; - - cosmos.base.v1beta1.DecCoin balance = 6 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "balance", - (gogoproto.moretags) = "yaml:\"balance\"" - ]; - - cosmos.base.v1beta1.Coin withdrawn = 7 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "withdrawn", - (gogoproto.moretags) = "yaml:\"withdrawn\"" - ]; -} diff --git a/proto/node/akash/gov/v1beta3/genesis.proto b/proto/node/akash/gov/v1beta3/genesis.proto index 1765a325..48bfb06f 100644 --- a/proto/node/akash/gov/v1beta3/genesis.proto +++ b/proto/node/akash/gov/v1beta3/genesis.proto @@ -4,7 +4,7 @@ package akash.gov.v1beta3; import "gogoproto/gogo.proto"; import "akash/gov/v1beta3/params.proto"; -option go_package = "github.com/akash-network/akash-api/go/node/gov/v1beta3"; +option go_package = "pkg.akt.dev/go/node/gov/v1beta3"; // GenesisState stores slice of genesis deployment instance message GenesisState { diff --git a/proto/node/akash/gov/v1beta3/params.proto 
b/proto/node/akash/gov/v1beta3/params.proto index e325b854..ac40e3e6 100644 --- a/proto/node/akash/gov/v1beta3/params.proto +++ b/proto/node/akash/gov/v1beta3/params.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package akash.gov.v1beta3; import "gogoproto/gogo.proto"; -option go_package = "github.com/akash-network/akash-api/go/node/gov/v1beta3"; +option go_package = "pkg.akt.dev/go/node/gov/v1beta3"; // DepositParams defines the parameters for the x/gov module message DepositParams { diff --git a/proto/node/akash/inflation/v1beta2/genesis.proto b/proto/node/akash/inflation/v1beta2/genesis.proto index 9f66ece4..98607aa0 100644 --- a/proto/node/akash/inflation/v1beta2/genesis.proto +++ b/proto/node/akash/inflation/v1beta2/genesis.proto @@ -4,7 +4,7 @@ package akash.inflation.v1beta2; import "gogoproto/gogo.proto"; import "akash/inflation/v1beta2/params.proto"; -option go_package = "github.com/akash-network/akash-api/go/node/inflation/types/v1beta2"; +option go_package = "pkg.akt.dev/go/node/inflation/types/v1beta2"; // GenesisState stores slice of genesis deployment instance message GenesisState { diff --git a/proto/node/akash/inflation/v1beta2/params.proto b/proto/node/akash/inflation/v1beta2/params.proto index c1e3b5b6..f74bd905 100644 --- a/proto/node/akash/inflation/v1beta2/params.proto +++ b/proto/node/akash/inflation/v1beta2/params.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package akash.inflation.v1beta2; import "gogoproto/gogo.proto"; -option go_package = "github.com/akash-network/akash-api/go/node/inflation/types/v1beta2"; +option go_package = "pkg.akt.dev/go/node/inflation/types/v1beta2"; // Params defines the parameters for the x/deployment package message Params { diff --git a/proto/node/akash/inflation/v1beta3/genesis.proto b/proto/node/akash/inflation/v1beta3/genesis.proto index 2fa5fe9a..5690d621 100644 --- a/proto/node/akash/inflation/v1beta3/genesis.proto +++ b/proto/node/akash/inflation/v1beta3/genesis.proto @@ -4,7 +4,7 @@ package akash.inflation.v1beta3; import "gogoproto/gogo.proto"; import "akash/inflation/v1beta3/params.proto"; -option go_package = "github.com/akash-network/akash-api/go/node/inflation/v1beta3"; +option go_package = "pkg.akt.dev/go/node/inflation/v1beta3"; // GenesisState stores slice of genesis deployment instance message GenesisState { diff --git a/proto/node/akash/inflation/v1beta3/params.proto b/proto/node/akash/inflation/v1beta3/params.proto index 87cc6feb..c8c36bc2 100644 --- a/proto/node/akash/inflation/v1beta3/params.proto +++ b/proto/node/akash/inflation/v1beta3/params.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package akash.inflation.v1beta3; import "gogoproto/gogo.proto"; -option go_package = "github.com/akash-network/akash-api/go/node/inflation/v1beta3"; +option go_package = "pkg.akt.dev/go/node/inflation/v1beta3"; // Params defines the parameters for the x/deployment package message Params { diff --git a/proto/node/akash/market/v1/bid.proto b/proto/node/akash/market/v1/bid.proto new file mode 100644 index 00000000..99559499 --- /dev/null +++ b/proto/node/akash/market/v1/bid.proto @@ -0,0 +1,40 @@ +syntax = "proto3"; +package akash.market.v1; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; + +option go_package = "pkg.akt.dev/go/node/market/v1"; + +// BidID stores owner and all other seq numbers +// A successful bid becomes a Lease(ID). 
+message BidID { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = "yaml:\"dseq\"" + ]; + uint32 gseq = 3 [ + (gogoproto.customname) = "GSeq", + (gogoproto.jsontag) = "gseq", + (gogoproto.moretags) = "yaml:\"gseq\"" + ]; + uint32 oseq = 4 [ + (gogoproto.customname) = "OSeq", + (gogoproto.jsontag) = "oseq", + (gogoproto.moretags) = "yaml:\"oseq\"" + ]; + string provider = 5 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "provider", + (gogoproto.moretags) = "yaml:\"provider\"" + ]; +} diff --git a/proto/node/akash/market/v1/event.proto b/proto/node/akash/market/v1/event.proto new file mode 100644 index 00000000..d44874a3 --- /dev/null +++ b/proto/node/akash/market/v1/event.proto @@ -0,0 +1,96 @@ +syntax = "proto3"; +package akash.market.v1; + +import "gogoproto/gogo.proto"; + +import "cosmos/base/v1beta1/coin.proto"; + +import "akash/market/v1/bid.proto"; +import "akash/market/v1/order.proto"; +import "akash/market/v1/lease.proto"; + +option go_package = "pkg.akt.dev/go/node/market/v1"; + +// EventOrderCreated +message EventOrderCreated { + option (gogoproto.equal) = false; + + OrderID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// EventOrderClosed +message EventOrderClosed { + option (gogoproto.equal) = false; + + OrderID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// EventBidCreated +message EventBidCreated { + option (gogoproto.equal) = false; + + BidID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + cosmos.base.v1beta1.DecCoin price = 3 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "price", + (gogoproto.moretags) = "yaml:\"price\"" + ]; +} + +// EventBidClosed +message EventBidClosed { + option (gogoproto.equal) = false; + + BidID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// EventLeaseCreated +message EventLeaseCreated { + option (gogoproto.equal) = false; + + LeaseID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + + cosmos.base.v1beta1.DecCoin price = 3 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "price", + (gogoproto.moretags) = "yaml:\"price\"" + ]; +} + +// EventLeaseClosed +message EventLeaseClosed { + option (gogoproto.equal) = false; + + LeaseID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + diff --git a/proto/node/akash/market/v1/filters.proto b/proto/node/akash/market/v1/filters.proto new file mode 100644 index 00000000..aa36720a --- /dev/null +++ b/proto/node/akash/market/v1/filters.proto @@ -0,0 +1,42 @@ +syntax = "proto3"; +package akash.market.v1; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; + +option go_package = "pkg.akt.dev/go/node/market/v1"; + +// LeaseFilters defines 
flags for lease list filter +message LeaseFilters { + option (gogoproto.equal) = false; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = "yaml:\"dseq\"" + ]; + uint32 gseq = 3 [ + (gogoproto.customname) = "GSeq", + (gogoproto.jsontag) = "gseq", + (gogoproto.moretags) = "yaml:\"gseq\"" + ]; + uint32 oseq = 4 [ + (gogoproto.customname) = "OSeq", + (gogoproto.jsontag) = "oseq", + (gogoproto.moretags) = "yaml:\"oseq\"" + ]; + string provider = 5 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "provider", + (gogoproto.moretags) = "yaml:\"provider\"" + ]; + string state = 6 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; +} diff --git a/proto/node/akash/market/v1/lease.proto b/proto/node/akash/market/v1/lease.proto new file mode 100644 index 00000000..d47f0631 --- /dev/null +++ b/proto/node/akash/market/v1/lease.proto @@ -0,0 +1,95 @@ +syntax = "proto3"; +package akash.market.v1; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; + +import "cosmos/base/v1beta1/coin.proto"; + +option go_package = "pkg.akt.dev/go/node/market/v1"; + +// LeaseID stores bid details of lease +message LeaseID { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = "yaml:\"dseq\"" + ]; + uint32 gseq = 3 [ + (gogoproto.customname) = "GSeq", + (gogoproto.jsontag) = "gseq", + (gogoproto.moretags) = "yaml:\"gseq\"" + ]; + uint32 oseq = 4 [ + (gogoproto.customname) = "OSeq", + (gogoproto.jsontag) = "oseq", + (gogoproto.moretags) = "yaml:\"oseq\"" + ]; + string provider = 5 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "provider", + (gogoproto.moretags) = "yaml:\"provider\"" + ]; +} + +// Lease stores LeaseID, state of lease and price +message Lease { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + // State is an enum which refers to state of lease + enum State { + option (gogoproto.goproto_enum_prefix) = false; + + // Prefix should start with 0 in enum. 
So declaring dummy state + invalid = 0 [ + (gogoproto.enumvalue_customname) = "LeaseStateInvalid" + ]; + // LeaseActive denotes state for lease active + active = 1 [ + (gogoproto.enumvalue_customname) = "LeaseActive" + ]; + // LeaseInsufficientFunds denotes state for lease insufficient_funds + insufficient_funds = 2 [ + (gogoproto.enumvalue_customname) = "LeaseInsufficientFunds" + ]; + // LeaseClosed denotes state for lease closed + closed = 3 [ + (gogoproto.enumvalue_customname) = "LeaseClosed" + ]; + } + + LeaseID id = 1 [ + (gogoproto.customname) = "ID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + + State state = 2 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; + + cosmos.base.v1beta1.DecCoin price = 3 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "price", + (gogoproto.moretags) = "yaml:\"price\"" + ]; + int64 created_at = 4 [ + (gogoproto.jsontag) = "created_at", + (gogoproto.moretags) = "yaml:\"created_at\"" + ]; + int64 closed_on = 5 [ + (gogoproto.jsontag) = "closed_on", + (gogoproto.moretags) = "yaml:\"closed_on\"" + ]; +} diff --git a/proto/node/akash/market/v1/order.proto b/proto/node/akash/market/v1/order.proto new file mode 100644 index 00000000..ad6452cf --- /dev/null +++ b/proto/node/akash/market/v1/order.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; +package akash.market.v1; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; + +option go_package = "pkg.akt.dev/go/node/market/v1"; + +// OrderID stores owner and all other seq numbers +message OrderID { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = "yaml:\"dseq\"" + ]; + uint32 gseq = 3 [ + (gogoproto.customname) = "GSeq", + (gogoproto.jsontag) = "gseq", + (gogoproto.moretags) = "yaml:\"gseq\"" + ]; + uint32 oseq = 4 [ + (gogoproto.customname) = "OSeq", + (gogoproto.jsontag) = "oseq", + (gogoproto.moretags) = "yaml:\"oseq\"" + ]; +} diff --git a/proto/node/akash/market/v1beta2/bid.proto b/proto/node/akash/market/v1beta2/bid.proto deleted file mode 100644 index 0cdab374..00000000 --- a/proto/node/akash/market/v1beta2/bid.proto +++ /dev/null @@ -1,109 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta2; - -import "gogoproto/gogo.proto"; -import "akash/market/v1beta2/order.proto"; -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta2"; - -// MsgCreateBid defines an SDK message for creating Bid -message MsgCreateBid { - option (gogoproto.equal) = false; - - OrderID order = 1 [ - (gogoproto.customname) = "Order", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "order", - (gogoproto.moretags) = "yaml:\"order\"" - ]; - string provider = 2 [(gogoproto.jsontag) = "provider", (gogoproto.moretags) = "yaml:\"provider\""]; - cosmos.base.v1beta1.DecCoin price = 3 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "price", (gogoproto.moretags) = "yaml:\"price\""]; - - cosmos.base.v1beta1.Coin deposit = 4 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "deposit", (gogoproto.moretags) = "yaml:\"deposit\""]; -} - -// MsgCreateBidResponse defines the Msg/CreateBid response type. 
-message MsgCreateBidResponse {} - -// MsgCloseBid defines an SDK message for closing bid -message MsgCloseBid { - option (gogoproto.equal) = false; - - BidID bid_id = 1 [ - (gogoproto.customname) = "BidID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgCloseBidResponse defines the Msg/CloseBid response type. -message MsgCloseBidResponse {} - -// BidID stores owner and all other seq numbers -// A successful bid becomes a Lease(ID). -message BidID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - uint64 dseq = 2 - [(gogoproto.customname) = "DSeq", (gogoproto.jsontag) = "dseq", (gogoproto.moretags) = "yaml:\"dseq\""]; - uint32 gseq = 3 - [(gogoproto.customname) = "GSeq", (gogoproto.jsontag) = "gseq", (gogoproto.moretags) = "yaml:\"gseq\""]; - uint32 oseq = 4 - [(gogoproto.customname) = "OSeq", (gogoproto.jsontag) = "oseq", (gogoproto.moretags) = "yaml:\"oseq\""]; - string provider = 5 [(gogoproto.jsontag) = "provider", (gogoproto.moretags) = "yaml:\"provider\""]; -} - -// Bid stores BidID, state of bid and price -message Bid { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - BidID bid_id = 1 [ - (gogoproto.customname) = "BidID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - - // State is an enum which refers to state of bid - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. So declaring dummy state - invalid = 0 [(gogoproto.enumvalue_customname) = "BidStateInvalid"]; - // BidOpen denotes state for bid open - open = 1 [(gogoproto.enumvalue_customname) = "BidOpen"]; - // BidMatched denotes state for bid open - active = 2 [(gogoproto.enumvalue_customname) = "BidActive"]; - // BidLost denotes state for bid lost - lost = 3 [(gogoproto.enumvalue_customname) = "BidLost"]; - // BidClosed denotes state for bid closed - closed = 4 [(gogoproto.enumvalue_customname) = "BidClosed"]; - } - - State state = 2 [(gogoproto.jsontag) = "state", (gogoproto.moretags) = "yaml:\"state\""]; - cosmos.base.v1beta1.DecCoin price = 3 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "price", (gogoproto.moretags) = "yaml:\"price\""]; - int64 created_at = 4; -} - -// BidFilters defines flags for bid list filter -message BidFilters { - option (gogoproto.equal) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - uint64 dseq = 2 - [(gogoproto.customname) = "DSeq", (gogoproto.jsontag) = "dseq", (gogoproto.moretags) = "yaml:\"dseq\""]; - uint32 gseq = 3 - [(gogoproto.customname) = "GSeq", (gogoproto.jsontag) = "gseq", (gogoproto.moretags) = "yaml:\"gseq\""]; - uint32 oseq = 4 - [(gogoproto.customname) = "OSeq", (gogoproto.jsontag) = "oseq", (gogoproto.moretags) = "yaml:\"oseq\""]; - string provider = 5 [(gogoproto.jsontag) = "provider", (gogoproto.moretags) = "yaml:\"provider\""]; - string state = 6 [(gogoproto.jsontag) = "state", (gogoproto.moretags) = "yaml:\"state\""]; -} diff --git a/proto/node/akash/market/v1beta2/genesis.proto b/proto/node/akash/market/v1beta2/genesis.proto deleted file mode 100644 index 686ebff3..00000000 --- a/proto/node/akash/market/v1beta2/genesis.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta2; - -import "gogoproto/gogo.proto"; -import 
"akash/market/v1beta2/order.proto"; -import "akash/market/v1beta2/lease.proto"; -import "akash/market/v1beta2/params.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta2"; - -// GenesisState defines the basic genesis state used by market module -message GenesisState { - repeated Order orders = 1 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "orders", (gogoproto.moretags) = "yaml:\"orders\""]; - - repeated Lease leases = 2 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "leases", (gogoproto.moretags) = "yaml:\"leases\""]; - - Params params = 3 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "params", - (gogoproto.moretags) = "yaml:\"params\"" - ]; -} diff --git a/proto/node/akash/market/v1beta2/lease.proto b/proto/node/akash/market/v1beta2/lease.proto deleted file mode 100644 index 9aab0588..00000000 --- a/proto/node/akash/market/v1beta2/lease.proto +++ /dev/null @@ -1,117 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta2; - -import "gogoproto/gogo.proto"; -import "cosmos/base/v1beta1/coin.proto"; -import "akash/market/v1beta2/bid.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta2"; - -// LeaseID stores bid details of lease -message LeaseID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - uint64 dseq = 2 - [(gogoproto.customname) = "DSeq", (gogoproto.jsontag) = "dseq", (gogoproto.moretags) = "yaml:\"dseq\""]; - uint32 gseq = 3 - [(gogoproto.customname) = "GSeq", (gogoproto.jsontag) = "gseq", (gogoproto.moretags) = "yaml:\"gseq\""]; - uint32 oseq = 4 - [(gogoproto.customname) = "OSeq", (gogoproto.jsontag) = "oseq", (gogoproto.moretags) = "yaml:\"oseq\""]; - string provider = 5 [(gogoproto.jsontag) = "provider", (gogoproto.moretags) = "yaml:\"provider\""]; -} - -// Lease stores LeaseID, state of lease and price -message Lease { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - LeaseID lease_id = 1 [ - (gogoproto.customname) = "LeaseID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - - // State is an enum which refers to state of lease - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. 
So declaring dummy state - invalid = 0 [(gogoproto.enumvalue_customname) = "LeaseStateInvalid"]; - // LeaseActive denotes state for lease active - active = 1 [(gogoproto.enumvalue_customname) = "LeaseActive"]; - // LeaseInsufficientFunds denotes state for lease insufficient_funds - insufficient_funds = 2 [(gogoproto.enumvalue_customname) = "LeaseInsufficientFunds"]; - // LeaseClosed denotes state for lease closed - closed = 3 [(gogoproto.enumvalue_customname) = "LeaseClosed"]; - } - - State state = 2 [(gogoproto.jsontag) = "state", (gogoproto.moretags) = "yaml:\"state\""]; - cosmos.base.v1beta1.DecCoin price = 3 - [(gogoproto.nullable) = false, (gogoproto.jsontag) = "price", (gogoproto.moretags) = "yaml:\"price\""]; - int64 created_at = 4; - int64 closed_on = 5; -} - -// LeaseFilters defines flags for lease list filter -message LeaseFilters { - option (gogoproto.equal) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - uint64 dseq = 2 - [(gogoproto.customname) = "DSeq", (gogoproto.jsontag) = "dseq", (gogoproto.moretags) = "yaml:\"dseq\""]; - uint32 gseq = 3 - [(gogoproto.customname) = "GSeq", (gogoproto.jsontag) = "gseq", (gogoproto.moretags) = "yaml:\"gseq\""]; - uint32 oseq = 4 - [(gogoproto.customname) = "OSeq", (gogoproto.jsontag) = "oseq", (gogoproto.moretags) = "yaml:\"oseq\""]; - string provider = 5 [(gogoproto.jsontag) = "provider", (gogoproto.moretags) = "yaml:\"provider\""]; - string state = 6 [(gogoproto.jsontag) = "state", (gogoproto.moretags) = "yaml:\"state\""]; -} - -// MsgCreateLease is sent to create a lease -message MsgCreateLease { - option (gogoproto.equal) = false; - - BidID bid_id = 1 [ - (gogoproto.customname) = "BidID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgCreateLeaseResponse is the response from creating a lease -message MsgCreateLeaseResponse {} - -// MsgWithdrawLease defines an SDK message for closing bid -message MsgWithdrawLease { - option (gogoproto.equal) = false; - - LeaseID bid_id = 1 [ - (gogoproto.customname) = "LeaseID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. -message MsgWithdrawLeaseResponse {} - - -// MsgCloseLease defines an SDK message for closing order -message MsgCloseLease { - option (gogoproto.equal) = false; - - LeaseID lease_id = 1 [ - (gogoproto.customname) = "LeaseID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgCloseLeaseResponse defines the Msg/CloseLease response type. 
-message MsgCloseLeaseResponse {} diff --git a/proto/node/akash/market/v1beta2/order.proto b/proto/node/akash/market/v1beta2/order.proto deleted file mode 100644 index 7822234c..00000000 --- a/proto/node/akash/market/v1beta2/order.proto +++ /dev/null @@ -1,74 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta2; - -import "gogoproto/gogo.proto"; -import "akash/deployment/v1beta2/groupspec.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta2"; - -// OrderID stores owner and all other seq numbers -message OrderID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - uint64 dseq = 2 - [(gogoproto.customname) = "DSeq", (gogoproto.jsontag) = "dseq", (gogoproto.moretags) = "yaml:\"dseq\""]; - uint32 gseq = 3 - [(gogoproto.customname) = "GSeq", (gogoproto.jsontag) = "gseq", (gogoproto.moretags) = "yaml:\"gseq\""]; - uint32 oseq = 4 - [(gogoproto.customname) = "OSeq", (gogoproto.jsontag) = "oseq", (gogoproto.moretags) = "yaml:\"oseq\""]; -} - -// Order stores orderID, state of order and other details -message Order { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - OrderID order_id = 1 [ - (gogoproto.customname) = "OrderID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - - // State is an enum which refers to state of order - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. So declaring dummy state - invalid = 0 [(gogoproto.enumvalue_customname) = "OrderStateInvalid"]; - // OrderOpen denotes state for order open - open = 1 [(gogoproto.enumvalue_customname) = "OrderOpen"]; - // OrderMatched denotes state for order matched - active = 2 [(gogoproto.enumvalue_customname) = "OrderActive"]; - // OrderClosed denotes state for order lost - closed = 3 [(gogoproto.enumvalue_customname) = "OrderClosed"]; - } - - State state = 2 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; - akash.deployment.v1beta2.GroupSpec spec = 3 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "spec", - (gogoproto.moretags) = "yaml:\"spec\"" - ]; - - int64 created_at = 4; -} - -// OrderFilters defines flags for order list filter -message OrderFilters { - option (gogoproto.equal) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - uint64 dseq = 2 - [(gogoproto.customname) = "DSeq", (gogoproto.jsontag) = "dseq", (gogoproto.moretags) = "yaml:\"dseq\""]; - uint32 gseq = 3 - [(gogoproto.customname) = "GSeq", (gogoproto.jsontag) = "gseq", (gogoproto.moretags) = "yaml:\"gseq\""]; - uint32 oseq = 4 - [(gogoproto.customname) = "OSeq", (gogoproto.jsontag) = "oseq", (gogoproto.moretags) = "yaml:\"oseq\""]; - string state = 5 [(gogoproto.jsontag) = "state", (gogoproto.moretags) = "yaml:\"state\""]; -} diff --git a/proto/node/akash/market/v1beta2/params.proto b/proto/node/akash/market/v1beta2/params.proto deleted file mode 100644 index 2953b972..00000000 --- a/proto/node/akash/market/v1beta2/params.proto +++ /dev/null @@ -1,21 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta2; -import "gogoproto/gogo.proto"; -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta2"; - -// Params is the params for the x/market module -message Params { - 
cosmos.base.v1beta1.Coin bid_min_deposit = 1 [ - (gogoproto.customname) = "BidMinDeposit", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "bid_min_deposit", - (gogoproto.moretags) = "yaml:\"bid_min_deposit\"" - ]; - uint32 order_max_bids = 2 [ - (gogoproto.customname) = "OrderMaxBids", - (gogoproto.jsontag) = "order_max_bids", - (gogoproto.moretags) = "yaml:\"order_max_bids\"" - ]; -} diff --git a/proto/node/akash/market/v1beta2/query.proto b/proto/node/akash/market/v1beta2/query.proto deleted file mode 100644 index fbacfc93..00000000 --- a/proto/node/akash/market/v1beta2/query.proto +++ /dev/null @@ -1,123 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta2; - -import "gogoproto/gogo.proto"; -import "google/api/annotations.proto"; -import "cosmos/base/query/v1beta1/pagination.proto"; -import "akash/market/v1beta2/order.proto"; -import "akash/market/v1beta2/bid.proto"; -import "akash/market/v1beta2/lease.proto"; -import "akash/escrow/v1beta2/types.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta2"; - -// Query defines the gRPC querier service -service Query { - // Orders queries orders with filters - rpc Orders(QueryOrdersRequest) returns (QueryOrdersResponse) { - option (google.api.http).get = "/akash/market/v1beta2/orders/list"; - } - - // Order queries order details - rpc Order(QueryOrderRequest) returns (QueryOrderResponse) { - option (google.api.http).get = "/akash/market/v1beta2/orders/info"; - } - - // Bids queries bids with filters - rpc Bids(QueryBidsRequest) returns (QueryBidsResponse) { - option (google.api.http).get = "/akash/market/v1beta2/bids/list"; - } - - // Bid queries bid details - rpc Bid(QueryBidRequest) returns (QueryBidResponse) { - option (google.api.http).get = "/akash/market/v1beta2/bids/info"; - } - - // Leases queries leases with filters - rpc Leases(QueryLeasesRequest) returns (QueryLeasesResponse) { - option (google.api.http).get = "/akash/market/v1beta2/leases/list"; - } - - // Lease queries lease details - rpc Lease(QueryLeaseRequest) returns (QueryLeaseResponse) { - option (google.api.http).get = "/akash/market/v1beta2/leases/info"; - } -} - -// QueryOrdersRequest is request type for the Query/Orders RPC method -message QueryOrdersRequest { - OrderFilters filters = 1 [(gogoproto.nullable) = false]; - - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryOrdersResponse is response type for the Query/Orders RPC method -message QueryOrdersResponse { - repeated Order orders = 1 [(gogoproto.nullable) = false, (gogoproto.castrepeated) = "Orders"]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryOrderRequest is request type for the Query/Order RPC method -message QueryOrderRequest { - OrderID id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"]; -} - -// QueryOrderResponse is response type for the Query/Order RPC method -message QueryOrderResponse { - Order order = 1 [(gogoproto.nullable) = false]; -} - -// QueryBidsRequest is request type for the Query/Bids RPC method -message QueryBidsRequest { - BidFilters filters = 1 [(gogoproto.nullable) = false]; - - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryBidsResponse is response type for the Query/Bids RPC method -message QueryBidsResponse { - repeated QueryBidResponse bids = 1 [(gogoproto.nullable) = false]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryBidRequest is request type for the Query/Bid RPC method -message QueryBidRequest { - BidID id = 1 
[(gogoproto.nullable) = false, (gogoproto.customname) = "ID"]; -} - -// QueryBidResponse is response type for the Query/Bid RPC method -message QueryBidResponse { - Bid bid = 1 [(gogoproto.nullable) = false]; - akash.escrow.v1beta2.Account escrow_account = 2 [ - (gogoproto.nullable) = false - ]; -} - -// QueryLeasesRequest is request type for the Query/Leases RPC method -message QueryLeasesRequest { - LeaseFilters filters = 1 [(gogoproto.nullable) = false]; - - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryLeasesResponse is response type for the Query/Leases RPC method -message QueryLeasesResponse { - repeated QueryLeaseResponse leases = 1 [(gogoproto.nullable) = false]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryLeaseRequest is request type for the Query/Lease RPC method -message QueryLeaseRequest { - LeaseID id = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "ID"]; -} - -// QueryLeaseResponse is response type for the Query/Lease RPC method -message QueryLeaseResponse { - Lease lease = 1 [(gogoproto.nullable) = false]; - akash.escrow.v1beta2.FractionalPayment escrow_payment = 2 [ - (gogoproto.nullable) = false - ]; -} diff --git a/proto/node/akash/market/v1beta2/service.proto b/proto/node/akash/market/v1beta2/service.proto deleted file mode 100644 index b995f19c..00000000 --- a/proto/node/akash/market/v1beta2/service.proto +++ /dev/null @@ -1,24 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta2; - -import "akash/market/v1beta2/bid.proto"; -import "akash/market/v1beta2/lease.proto"; -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta2"; - -// Msg defines the market Msg service -service Msg { - // CreateBid defines a method to create a bid given proper inputs. - rpc CreateBid(MsgCreateBid) returns (MsgCreateBidResponse); - - // CloseBid defines a method to close a bid given proper inputs. - rpc CloseBid(MsgCloseBid) returns (MsgCloseBidResponse); - - // WithdrawLease withdraws accrued funds from the lease payment - rpc WithdrawLease(MsgWithdrawLease) returns (MsgWithdrawLeaseResponse); - - // CreateLease creates a new lease - rpc CreateLease(MsgCreateLease) returns (MsgCreateLeaseResponse); - - // CloseLease defines a method to close an order given proper inputs. 
- rpc CloseLease(MsgCloseLease) returns (MsgCloseLeaseResponse); -} diff --git a/proto/node/akash/market/v1beta3/bid.proto b/proto/node/akash/market/v1beta3/bid.proto deleted file mode 100644 index 9026c985..00000000 --- a/proto/node/akash/market/v1beta3/bid.proto +++ /dev/null @@ -1,164 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta3; - -import "gogoproto/gogo.proto"; -import "cosmos/base/v1beta1/coin.proto"; -import "akash/market/v1beta3/order.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta3"; - -// MsgCreateBid defines an SDK message for creating Bid -message MsgCreateBid { - option (gogoproto.equal) = false; - - OrderID order = 1 [ - (gogoproto.customname) = "Order", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "order", - (gogoproto.moretags) = "yaml:\"order\"" - ]; - string provider = 2 [ - (gogoproto.jsontag) = "provider", - (gogoproto.moretags) = "yaml:\"provider\"" - ]; - cosmos.base.v1beta1.DecCoin price = 3 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "price", - (gogoproto.moretags) = "yaml:\"price\"" - ]; - cosmos.base.v1beta1.Coin deposit = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "deposit", - (gogoproto.moretags) = "yaml:\"deposit\"" - ]; -} - -// MsgCreateBidResponse defines the Msg/CreateBid response type. -message MsgCreateBidResponse {} - -// MsgCloseBid defines an SDK message for closing bid -message MsgCloseBid { - option (gogoproto.equal) = false; - - BidID bid_id = 1 [ - (gogoproto.customname) = "BidID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgCloseBidResponse defines the Msg/CloseBid response type. -message MsgCloseBidResponse {} - -// BidID stores owner and all other seq numbers -// A successful bid becomes a Lease(ID). -message BidID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = "yaml:\"dseq\"" - ]; - uint32 gseq = 3 [ - (gogoproto.customname) = "GSeq", - (gogoproto.jsontag) = "gseq", - (gogoproto.moretags) = "yaml:\"gseq\""]; - uint32 oseq = 4 [ - (gogoproto.customname) = "OSeq", - (gogoproto.jsontag) = "oseq", - (gogoproto.moretags) = "yaml:\"oseq\""]; - string provider = 5 [ - (gogoproto.jsontag) = "provider", - (gogoproto.moretags) = "yaml:\"provider\"" - ]; -} - -// Bid stores BidID, state of bid and price -message Bid { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - BidID bid_id = 1 [ - (gogoproto.customname) = "BidID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - - // State is an enum which refers to state of bid - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. 
So declaring dummy state - invalid = 0 [ - (gogoproto.enumvalue_customname) = "BidStateInvalid" - ]; - // BidOpen denotes state for bid open - open = 1 [ - (gogoproto.enumvalue_customname) = "BidOpen" - ]; - // BidMatched denotes state for bid open - active = 2 [ - (gogoproto.enumvalue_customname) = "BidActive" - ]; - // BidLost denotes state for bid lost - lost = 3 [ - (gogoproto.enumvalue_customname) = "BidLost" - ]; - // BidClosed denotes state for bid closed - closed = 4 [ - (gogoproto.enumvalue_customname) = "BidClosed" - ]; - } - - State state = 2 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; - cosmos.base.v1beta1.DecCoin price = 3 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "price", - (gogoproto.moretags) = "yaml:\"price\"" - ]; - int64 created_at = 4; -} - -// BidFilters defines flags for bid list filter -message BidFilters { - option (gogoproto.equal) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = "yaml:\"dseq\"" - ]; - uint32 gseq = 3 [ - (gogoproto.customname) = "GSeq", - (gogoproto.jsontag) = "gseq", - (gogoproto.moretags) = "yaml:\"gseq\"" - ]; - uint32 oseq = 4 [ - (gogoproto.customname) = "OSeq", - (gogoproto.jsontag) = "oseq", - (gogoproto.moretags) = "yaml:\"oseq\"" - ]; - string provider = 5 [ - (gogoproto.jsontag) = "provider", - (gogoproto.moretags) = "yaml:\"provider\"" - ]; - string state = 6 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; -} diff --git a/proto/node/akash/market/v1beta3/genesis.proto b/proto/node/akash/market/v1beta3/genesis.proto deleted file mode 100644 index f3309210..00000000 --- a/proto/node/akash/market/v1beta3/genesis.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/market/v1beta3/order.proto"; -import "akash/market/v1beta3/lease.proto"; -import "akash/market/v1beta3/bid.proto"; -import "akash/market/v1beta3/params.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta3"; - -// GenesisState defines the basic genesis state used by market module -message GenesisState { - Params params = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "params", - (gogoproto.moretags) = "yaml:\"params\"" - ]; - repeated Order orders = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "orders", - (gogoproto.moretags) = "yaml:\"orders\"" - ]; - repeated Lease leases = 3 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "leases", - (gogoproto.moretags) = "yaml:\"leases\"" - ]; - repeated Bid bids = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "bids", - (gogoproto.moretags) = "yaml:\"bids\"" - ]; -} diff --git a/proto/node/akash/market/v1beta3/lease.proto b/proto/node/akash/market/v1beta3/lease.proto deleted file mode 100644 index 023d28ff..00000000 --- a/proto/node/akash/market/v1beta3/lease.proto +++ /dev/null @@ -1,164 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta3; - -import "gogoproto/gogo.proto"; -import "cosmos/base/v1beta1/coin.proto"; -import "akash/market/v1beta3/bid.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta3"; - -// LeaseID stores bid details of lease -message LeaseID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [ - 
(gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = "yaml:\"dseq\"" - ]; - uint32 gseq = 3 [ - (gogoproto.customname) = "GSeq", - (gogoproto.jsontag) = "gseq", - (gogoproto.moretags) = "yaml:\"gseq\"" - ]; - uint32 oseq = 4 [ - (gogoproto.customname) = "OSeq", - (gogoproto.jsontag) = "oseq", - (gogoproto.moretags) = "yaml:\"oseq\"" - ]; - string provider = 5 [ - (gogoproto.jsontag) = "provider", - (gogoproto.moretags) = "yaml:\"provider\"" - ]; -} - -// Lease stores LeaseID, state of lease and price -message Lease { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - LeaseID lease_id = 1 [ - (gogoproto.customname) = "LeaseID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - - // State is an enum which refers to state of lease - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. So declaring dummy state - invalid = 0 [ - (gogoproto.enumvalue_customname) = "LeaseStateInvalid" - ]; - // LeaseActive denotes state for lease active - active = 1 [ - (gogoproto.enumvalue_customname) = "LeaseActive" - ]; - // LeaseInsufficientFunds denotes state for lease insufficient_funds - insufficient_funds = 2 [ - (gogoproto.enumvalue_customname) = "LeaseInsufficientFunds" - ]; - // LeaseClosed denotes state for lease closed - closed = 3 [ - (gogoproto.enumvalue_customname) = "LeaseClosed" - ]; - } - - State state = 2 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; - cosmos.base.v1beta1.DecCoin price = 3 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "price", - (gogoproto.moretags) = "yaml:\"price\"" - ]; - int64 created_at = 4; - int64 closed_on = 5; -} - -// LeaseFilters defines flags for lease list filter -message LeaseFilters { - option (gogoproto.equal) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = "yaml:\"dseq\"" - ]; - uint32 gseq = 3 [ - (gogoproto.customname) = "GSeq", - (gogoproto.jsontag) = "gseq", - (gogoproto.moretags) = "yaml:\"gseq\"" - ]; - uint32 oseq = 4 [ - (gogoproto.customname) = "OSeq", - (gogoproto.jsontag) = "oseq", - (gogoproto.moretags) = "yaml:\"oseq\"" - ]; - string provider = 5 [ - (gogoproto.jsontag) = "provider", - (gogoproto.moretags) = "yaml:\"provider\"" - ]; - string state = 6 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; -} - -// MsgCreateLease is sent to create a lease -message MsgCreateLease { - option (gogoproto.equal) = false; - - BidID bid_id = 1 [ - (gogoproto.customname) = "BidID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgCreateLeaseResponse is the response from creating a lease -message MsgCreateLeaseResponse {} - -// MsgWithdrawLease defines an SDK message for closing bid -message MsgWithdrawLease { - option (gogoproto.equal) = false; - - LeaseID bid_id = 1 [ - (gogoproto.customname) = "LeaseID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. 
-message MsgWithdrawLeaseResponse {} - - -// MsgCloseLease defines an SDK message for closing order -message MsgCloseLease { - option (gogoproto.equal) = false; - - LeaseID lease_id = 1 [ - (gogoproto.customname) = "LeaseID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; -} - -// MsgCloseLeaseResponse defines the Msg/CloseLease response type. -message MsgCloseLeaseResponse {} diff --git a/proto/node/akash/market/v1beta3/order.proto b/proto/node/akash/market/v1beta3/order.proto deleted file mode 100644 index e019bf0e..00000000 --- a/proto/node/akash/market/v1beta3/order.proto +++ /dev/null @@ -1,101 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/deployment/v1beta3/groupspec.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta3"; - -// OrderID stores owner and all other seq numbers -message OrderID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = "yaml:\"dseq\"" - ]; - uint32 gseq = 3 [ - (gogoproto.customname) = "GSeq", - (gogoproto.jsontag) = "gseq", - (gogoproto.moretags) = "yaml:\"gseq\"" - ]; - uint32 oseq = 4 [ - (gogoproto.customname) = "OSeq", - (gogoproto.jsontag) = "oseq", - (gogoproto.moretags) = "yaml:\"oseq\"" - ]; -} - -// Order stores orderID, state of order and other details -message Order { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - OrderID order_id = 1 [ - (gogoproto.customname) = "OrderID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = "yaml:\"id\"" - ]; - - // State is an enum which refers to state of order - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. 
So declaring dummy state - invalid = 0 [(gogoproto.enumvalue_customname) = "OrderStateInvalid"]; - // OrderOpen denotes state for order open - open = 1 [(gogoproto.enumvalue_customname) = "OrderOpen"]; - // OrderMatched denotes state for order matched - active = 2 [(gogoproto.enumvalue_customname) = "OrderActive"]; - // OrderClosed denotes state for order lost - closed = 3 [(gogoproto.enumvalue_customname) = "OrderClosed"]; - } - - State state = 2 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; - akash.deployment.v1beta3.GroupSpec spec = 3 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "spec", - (gogoproto.moretags) = "yaml:\"spec\"" - ]; - - int64 created_at = 4; -} - -// OrderFilters defines flags for order list filter -message OrderFilters { - option (gogoproto.equal) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = "yaml:\"owner\"" - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = "yaml:\"dseq\"" - ]; - uint32 gseq = 3 [ - (gogoproto.customname) = "GSeq", - (gogoproto.jsontag) = "gseq", - (gogoproto.moretags) = "yaml:\"gseq\"" - ]; - uint32 oseq = 4 [ - (gogoproto.customname) = "OSeq", - (gogoproto.jsontag) = "oseq", - (gogoproto.moretags) = "yaml:\"oseq\"" - ]; - string state = 5 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = "yaml:\"state\"" - ]; -} diff --git a/proto/node/akash/market/v1beta3/params.proto b/proto/node/akash/market/v1beta3/params.proto deleted file mode 100644 index d0e2bf34..00000000 --- a/proto/node/akash/market/v1beta3/params.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta3; - -import "gogoproto/gogo.proto"; -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta3"; - -// Params is the params for the x/market module -message Params { - cosmos.base.v1beta1.Coin bid_min_deposit = 1 [ - (gogoproto.customname) = "BidMinDeposit", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "bid_min_deposit", - (gogoproto.moretags) = "yaml:\"bid_min_deposit\"" - ]; - uint32 order_max_bids = 2 [ - (gogoproto.customname) = "OrderMaxBids", - (gogoproto.jsontag) = "order_max_bids", - (gogoproto.moretags) = "yaml:\"order_max_bids\"" - ]; -} diff --git a/proto/node/akash/market/v1beta3/query.proto b/proto/node/akash/market/v1beta3/query.proto deleted file mode 100644 index a42b973b..00000000 --- a/proto/node/akash/market/v1beta3/query.proto +++ /dev/null @@ -1,147 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta3; - -import "gogoproto/gogo.proto"; -import "google/api/annotations.proto"; -import "cosmos/base/query/v1beta1/pagination.proto"; -import "akash/market/v1beta3/order.proto"; -import "akash/market/v1beta3/bid.proto"; -import "akash/market/v1beta3/lease.proto"; -import "akash/escrow/v1beta3/types.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta3"; - -// Query defines the gRPC querier service -service Query { - // Orders queries orders with filters - rpc Orders(QueryOrdersRequest) returns (QueryOrdersResponse) { - option (google.api.http).get = "/akash/market/v1beta3/orders/list"; - } - - // Order queries order details - rpc Order(QueryOrderRequest) returns (QueryOrderResponse) { - option (google.api.http).get = "/akash/market/v1beta3/orders/info"; - } - - // Bids queries bids with filters - rpc Bids(QueryBidsRequest) returns (QueryBidsResponse) { - option 
(google.api.http).get = "/akash/market/v1beta3/bids/list"; - } - - // Bid queries bid details - rpc Bid(QueryBidRequest) returns (QueryBidResponse) { - option (google.api.http).get = "/akash/market/v1beta3/bids/info"; - } - - // Leases queries leases with filters - rpc Leases(QueryLeasesRequest) returns (QueryLeasesResponse) { - option (google.api.http).get = "/akash/market/v1beta3/leases/list"; - } - - // Lease queries lease details - rpc Lease(QueryLeaseRequest) returns (QueryLeaseResponse) { - option (google.api.http).get = "/akash/market/v1beta3/leases/info"; - } -} - -// QueryOrdersRequest is request type for the Query/Orders RPC method -message QueryOrdersRequest { - OrderFilters filters = 1 [ - (gogoproto.nullable) = false - ]; - - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryOrdersResponse is response type for the Query/Orders RPC method -message QueryOrdersResponse { - repeated Order orders = 1 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Orders" - ]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryOrderRequest is request type for the Query/Order RPC method -message QueryOrderRequest { - OrderID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID" - ]; -} - -// QueryOrderResponse is response type for the Query/Order RPC method -message QueryOrderResponse { - Order order = 1 [ - (gogoproto.nullable) = false - ]; -} - -// QueryBidsRequest is request type for the Query/Bids RPC method -message QueryBidsRequest { - BidFilters filters = 1 [ - (gogoproto.nullable) = false - ]; - - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryBidsResponse is response type for the Query/Bids RPC method -message QueryBidsResponse { - repeated QueryBidResponse bids = 1 [ - (gogoproto.nullable) = false - ]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryBidRequest is request type for the Query/Bid RPC method -message QueryBidRequest { - BidID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID" - ]; -} - -// QueryBidResponse is response type for the Query/Bid RPC method -message QueryBidResponse { - Bid bid = 1 [(gogoproto.nullable) = false]; - akash.escrow.v1beta3.Account escrow_account = 2 [ - (gogoproto.nullable) = false - ]; -} - -// QueryLeasesRequest is request type for the Query/Leases RPC method -message QueryLeasesRequest { - LeaseFilters filters = 1 [ - (gogoproto.nullable) = false - ]; - - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryLeasesResponse is response type for the Query/Leases RPC method -message QueryLeasesResponse { - repeated QueryLeaseResponse leases = 1 [ - (gogoproto.nullable) = false - ]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryLeaseRequest is request type for the Query/Lease RPC method -message QueryLeaseRequest { - LeaseID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID" - ]; -} - -// QueryLeaseResponse is response type for the Query/Lease RPC method -message QueryLeaseResponse { - Lease lease = 1 [(gogoproto.nullable) = false]; - akash.escrow.v1beta3.FractionalPayment escrow_payment = 2 [ - (gogoproto.nullable) = false - ]; -} diff --git a/proto/node/akash/market/v1beta3/service.proto b/proto/node/akash/market/v1beta3/service.proto deleted file mode 100644 index 80fd62a1..00000000 --- a/proto/node/akash/market/v1beta3/service.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta3; - -import 
"akash/market/v1beta3/bid.proto"; -import "akash/market/v1beta3/lease.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta3"; - -// Msg defines the market Msg service -service Msg { - // CreateBid defines a method to create a bid given proper inputs. - rpc CreateBid(MsgCreateBid) returns (MsgCreateBidResponse); - - // CloseBid defines a method to close a bid given proper inputs. - rpc CloseBid(MsgCloseBid) returns (MsgCloseBidResponse); - - // WithdrawLease withdraws accrued funds from the lease payment - rpc WithdrawLease(MsgWithdrawLease) returns (MsgWithdrawLeaseResponse); - - // CreateLease creates a new lease - rpc CreateLease(MsgCreateLease) returns (MsgCreateLeaseResponse); - - // CloseLease defines a method to close an order given proper inputs. - rpc CloseLease(MsgCloseLease) returns (MsgCloseLeaseResponse); -} diff --git a/proto/node/akash/market/v1beta4/bid.proto b/proto/node/akash/market/v1beta4/bid.proto deleted file mode 100644 index 0af39505..00000000 --- a/proto/node/akash/market/v1beta4/bid.proto +++ /dev/null @@ -1,199 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta4; - -import "gogoproto/gogo.proto"; -import "cosmos/base/v1beta1/coin.proto"; - -import "akash/base/v1beta3/resources.proto"; -import "akash/market/v1beta4/order.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta4"; - -// ResourceOffer describes resources that provider is offering -// for deployment -message ResourceOffer { - option (gogoproto.equal) = true; - akash.base.v1beta3.Resources resources = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "resources", - (gogoproto.moretags) = 'yaml:"resources"' - ]; - uint32 count = 2 [ - (gogoproto.jsontag) = "count", - (gogoproto.moretags) = 'yaml:"count"' - ]; -} - -// MsgCreateBid defines an SDK message for creating Bid -message MsgCreateBid { - option (gogoproto.equal) = false; - - OrderID order = 1 [ - (gogoproto.customname) = "Order", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "order", - (gogoproto.moretags) = 'yaml:"order"' - ]; - string provider = 2 [ - (gogoproto.jsontag) = "provider", - (gogoproto.moretags) = 'yaml:"provider"' - ]; - cosmos.base.v1beta1.DecCoin price = 3 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "price", - (gogoproto.moretags) = 'yaml:"price"' - ]; - cosmos.base.v1beta1.Coin deposit = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "deposit", - (gogoproto.moretags) = 'yaml:"deposit"' - ]; - repeated ResourceOffer resources_offer = 5 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "ResourcesOffer", - (gogoproto.customname) = "ResourcesOffer", - (gogoproto.jsontag) = "resources_offer", - (gogoproto.moretags) = 'yaml:"resources_offer"' - ]; -} - -// MsgCreateBidResponse defines the Msg/CreateBid response type. -message MsgCreateBidResponse {} - -// MsgCloseBid defines an SDK message for closing bid -message MsgCloseBid { - option (gogoproto.equal) = false; - - BidID bid_id = 1 [ - (gogoproto.customname) = "BidID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = 'yaml:"id"' - ]; -} - -// MsgCloseBidResponse defines the Msg/CloseBid response type. -message MsgCloseBidResponse {} - -// BidID stores owner and all other seq numbers -// A successful bid becomes a Lease(ID). 
-message BidID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = 'yaml:"owner"' - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = 'yaml:"dseq"' - ]; - uint32 gseq = 3 [ - (gogoproto.customname) = "GSeq", - (gogoproto.jsontag) = "gseq", - (gogoproto.moretags) = 'yaml:"gseq"' - ]; - uint32 oseq = 4 [ - (gogoproto.customname) = "OSeq", - (gogoproto.jsontag) = "oseq", - (gogoproto.moretags) = 'yaml:"oseq"' - ]; - string provider = 5 [ - (gogoproto.jsontag) = "provider", - (gogoproto.moretags) = 'yaml:"provider"' - ]; -} - -// Bid stores BidID, state of bid and price -message Bid { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - BidID bid_id = 1 [ - (gogoproto.customname) = "BidID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = 'yaml:"id"' - ]; - - // State is an enum which refers to state of bid - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. So declaring dummy state - invalid = 0 [ - (gogoproto.enumvalue_customname) = "BidStateInvalid" - ]; - // BidOpen denotes state for bid open - open = 1 [ - (gogoproto.enumvalue_customname) = "BidOpen" - ]; - // BidMatched denotes state for bid open - active = 2 [ - (gogoproto.enumvalue_customname) = "BidActive" - ]; - // BidLost denotes state for bid lost - lost = 3 [ - (gogoproto.enumvalue_customname) = "BidLost" - ]; - // BidClosed denotes state for bid closed - closed = 4 [ - (gogoproto.enumvalue_customname) = "BidClosed" - ]; - } - - State state = 2 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = 'yaml:"state"' - ]; - - cosmos.base.v1beta1.DecCoin price = 3 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "price", - (gogoproto.moretags) = 'yaml:"price"' - ]; - int64 created_at = 4; - - repeated ResourceOffer resources_offer = 5 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "ResourcesOffer", - (gogoproto.customname) = "ResourcesOffer", - (gogoproto.jsontag) = "resources_offer", - (gogoproto.moretags) = 'yaml:"resources_offer"' - ]; -} - -// BidFilters defines flags for bid list filter -message BidFilters { - option (gogoproto.equal) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = 'yaml:"owner"' - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = 'yaml:"dseq"' - ]; - uint32 gseq = 3 [ - (gogoproto.customname) = "GSeq", - (gogoproto.jsontag) = "gseq", - (gogoproto.moretags) = 'yaml:"gseq"' - ]; - uint32 oseq = 4 [ - (gogoproto.customname) = "OSeq", - (gogoproto.jsontag) = "oseq", - (gogoproto.moretags) = 'yaml:"oseq"' - ]; - string provider = 5 [ - (gogoproto.jsontag) = "provider", - (gogoproto.moretags) = 'yaml:"provider"' - ]; - string state = 6 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = 'yaml:"state"' - ]; -} diff --git a/proto/node/akash/market/v1beta4/genesis.proto b/proto/node/akash/market/v1beta4/genesis.proto deleted file mode 100644 index e22dd504..00000000 --- a/proto/node/akash/market/v1beta4/genesis.proto +++ /dev/null @@ -1,34 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta4; - -import "gogoproto/gogo.proto"; -import "akash/market/v1beta4/order.proto"; -import "akash/market/v1beta4/lease.proto"; -import "akash/market/v1beta4/bid.proto"; -import 
"akash/market/v1beta4/params.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta4"; - -// GenesisState defines the basic genesis state used by market module -message GenesisState { - Params params = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "params", - (gogoproto.moretags) = 'yaml:"params"' - ]; - repeated Order orders = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "orders", - (gogoproto.moretags) = 'yaml:"orders"' - ]; - repeated Lease leases = 3 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "leases", - (gogoproto.moretags) = 'yaml:"leases"' - ]; - repeated Bid bids = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "bids", - (gogoproto.moretags) = 'yaml:"bids"' - ]; -} diff --git a/proto/node/akash/market/v1beta4/lease.proto b/proto/node/akash/market/v1beta4/lease.proto deleted file mode 100644 index 6e89feb1..00000000 --- a/proto/node/akash/market/v1beta4/lease.proto +++ /dev/null @@ -1,164 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta4; - -import "gogoproto/gogo.proto"; -import "cosmos/base/v1beta1/coin.proto"; -import "akash/market/v1beta4/bid.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta4"; - -// LeaseID stores bid details of lease -message LeaseID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = 'yaml:"owner"' - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = 'yaml:"dseq"' - ]; - uint32 gseq = 3 [ - (gogoproto.customname) = "GSeq", - (gogoproto.jsontag) = "gseq", - (gogoproto.moretags) = 'yaml:"gseq"' - ]; - uint32 oseq = 4 [ - (gogoproto.customname) = "OSeq", - (gogoproto.jsontag) = "oseq", - (gogoproto.moretags) = 'yaml:"oseq"' - ]; - string provider = 5 [ - (gogoproto.jsontag) = "provider", - (gogoproto.moretags) = 'yaml:"provider"' - ]; -} - -// Lease stores LeaseID, state of lease and price -message Lease { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - LeaseID lease_id = 1 [ - (gogoproto.customname) = "LeaseID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = 'yaml:"id"' - ]; - - // State is an enum which refers to state of lease - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. 
So declaring dummy state - invalid = 0 [ - (gogoproto.enumvalue_customname) = "LeaseStateInvalid" - ]; - // LeaseActive denotes state for lease active - active = 1 [ - (gogoproto.enumvalue_customname) = "LeaseActive" - ]; - // LeaseInsufficientFunds denotes state for lease insufficient_funds - insufficient_funds = 2 [ - (gogoproto.enumvalue_customname) = "LeaseInsufficientFunds" - ]; - // LeaseClosed denotes state for lease closed - closed = 3 [ - (gogoproto.enumvalue_customname) = "LeaseClosed" - ]; - } - - State state = 2 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = 'yaml:"state"' - ]; - cosmos.base.v1beta1.DecCoin price = 3 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "price", - (gogoproto.moretags) = 'yaml:"price"' - ]; - int64 created_at = 4; - int64 closed_on = 5; -} - -// LeaseFilters defines flags for lease list filter -message LeaseFilters { - option (gogoproto.equal) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = 'yaml:"owner"' - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = 'yaml:"dseq"' - ]; - uint32 gseq = 3 [ - (gogoproto.customname) = "GSeq", - (gogoproto.jsontag) = "gseq", - (gogoproto.moretags) = 'yaml:"gseq"' - ]; - uint32 oseq = 4 [ - (gogoproto.customname) = "OSeq", - (gogoproto.jsontag) = "oseq", - (gogoproto.moretags) = 'yaml:"oseq"' - ]; - string provider = 5 [ - (gogoproto.jsontag) = "provider", - (gogoproto.moretags) = 'yaml:"provider"' - ]; - string state = 6 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = 'yaml:"state"' - ]; -} - -// MsgCreateLease is sent to create a lease -message MsgCreateLease { - option (gogoproto.equal) = false; - - BidID bid_id = 1 [ - (gogoproto.customname) = "BidID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = 'yaml:"id"' - ]; -} - -// MsgCreateLeaseResponse is the response from creating a lease -message MsgCreateLeaseResponse {} - -// MsgWithdrawLease defines an SDK message for closing bid -message MsgWithdrawLease { - option (gogoproto.equal) = false; - - LeaseID bid_id = 1 [ - (gogoproto.customname) = "LeaseID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = 'yaml:"id"' - ]; -} - -// MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. -message MsgWithdrawLeaseResponse {} - - -// MsgCloseLease defines an SDK message for closing order -message MsgCloseLease { - option (gogoproto.equal) = false; - - LeaseID lease_id = 1 [ - (gogoproto.customname) = "LeaseID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = 'yaml:"id"' - ]; -} - -// MsgCloseLeaseResponse defines the Msg/CloseLease response type. 
-message MsgCloseLeaseResponse {} diff --git a/proto/node/akash/market/v1beta4/order.proto b/proto/node/akash/market/v1beta4/order.proto deleted file mode 100644 index 0b393806..00000000 --- a/proto/node/akash/market/v1beta4/order.proto +++ /dev/null @@ -1,109 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta4; - -import "gogoproto/gogo.proto"; -import "akash/deployment/v1beta3/groupspec.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta4"; - -// OrderID stores owner and all other seq numbers -message OrderID { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = 'yaml:"owner"' - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = 'yaml:"dseq"' - ]; - uint32 gseq = 3 [ - (gogoproto.customname) = "GSeq", - (gogoproto.jsontag) = "gseq", - (gogoproto.moretags) = 'yaml:"gseq"' - ]; - uint32 oseq = 4 [ - (gogoproto.customname) = "OSeq", - (gogoproto.jsontag) = "oseq", - (gogoproto.moretags) = 'yaml:"oseq"' - ]; -} - -// Order stores orderID, state of order and other details -message Order { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - OrderID order_id = 1 [ - (gogoproto.customname) = "OrderID", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "id", - (gogoproto.moretags) = 'yaml:"id"' - ]; - - // State is an enum which refers to state of order - enum State { - option (gogoproto.goproto_enum_prefix) = false; - - // Prefix should start with 0 in enum. So declaring dummy state - invalid = 0 [ - (gogoproto.enumvalue_customname) = "OrderStateInvalid" - ]; - // OrderOpen denotes state for order open - open = 1 [ - (gogoproto.enumvalue_customname) = "OrderOpen" - ]; - // OrderMatched denotes state for order matched - active = 2 [ - (gogoproto.enumvalue_customname) = "OrderActive" - ]; - // OrderClosed denotes state for order lost - closed = 3 [ - (gogoproto.enumvalue_customname) = "OrderClosed" - ]; - } - - State state = 2 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = 'yaml:"state"' - ]; - akash.deployment.v1beta3.GroupSpec spec = 3 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "spec", - (gogoproto.moretags) = 'yaml:"spec"' - ]; - - int64 created_at = 4; -} - -// OrderFilters defines flags for order list filter -message OrderFilters { - option (gogoproto.equal) = false; - - string owner = 1 [ - (gogoproto.jsontag) = "owner", - (gogoproto.moretags) = 'yaml:"owner"' - ]; - uint64 dseq = 2 [ - (gogoproto.customname) = "DSeq", - (gogoproto.jsontag) = "dseq", - (gogoproto.moretags) = 'yaml:"dseq"' - ]; - uint32 gseq = 3 [ - (gogoproto.customname) = "GSeq", - (gogoproto.jsontag) = "gseq", - (gogoproto.moretags) = 'yaml:"gseq"' - ]; - uint32 oseq = 4 [ - (gogoproto.customname) = "OSeq", - (gogoproto.jsontag) = "oseq", - (gogoproto.moretags) = 'yaml:"oseq"' - ]; - string state = 5 [ - (gogoproto.jsontag) = "state", - (gogoproto.moretags) = 'yaml:"state"' - ]; -} diff --git a/proto/node/akash/market/v1beta4/params.proto b/proto/node/akash/market/v1beta4/params.proto deleted file mode 100644 index 3dff6706..00000000 --- a/proto/node/akash/market/v1beta4/params.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta4; - -import "gogoproto/gogo.proto"; -import "cosmos/base/v1beta1/coin.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta4"; - -// Params is the 
params for the x/market module -message Params { - cosmos.base.v1beta1.Coin bid_min_deposit = 1 [ - (gogoproto.customname) = "BidMinDeposit", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "bid_min_deposit", - (gogoproto.moretags) = 'yaml:"bid_min_deposit"' - ]; - uint32 order_max_bids = 2 [ - (gogoproto.customname) = "OrderMaxBids", - (gogoproto.jsontag) = "order_max_bids", - (gogoproto.moretags) = 'yaml:"order_max_bids"' - ]; -} diff --git a/proto/node/akash/market/v1beta4/query.proto b/proto/node/akash/market/v1beta4/query.proto deleted file mode 100644 index 7260e013..00000000 --- a/proto/node/akash/market/v1beta4/query.proto +++ /dev/null @@ -1,151 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta4; - -import "gogoproto/gogo.proto"; -import "google/api/annotations.proto"; -import "cosmos/base/query/v1beta1/pagination.proto"; - -import "akash/escrow/v1beta3/types.proto"; - -import "akash/market/v1beta4/order.proto"; -import "akash/market/v1beta4/bid.proto"; -import "akash/market/v1beta4/lease.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta4"; - -// Query defines the gRPC querier service -service Query { - // Orders queries orders with filters - rpc Orders(QueryOrdersRequest) returns (QueryOrdersResponse) { - option (google.api.http).get = "/akash/market/v1beta4/orders/list"; - } - - // Order queries order details - rpc Order(QueryOrderRequest) returns (QueryOrderResponse) { - option (google.api.http).get = "/akash/market/v1beta4/orders/info"; - } - - // Bids queries bids with filters - rpc Bids(QueryBidsRequest) returns (QueryBidsResponse) { - option (google.api.http).get = "/akash/market/v1beta4/bids/list"; - } - - // Bid queries bid details - rpc Bid(QueryBidRequest) returns (QueryBidResponse) { - option (google.api.http).get = "/akash/market/v1beta4/bids/info"; - } - - // Leases queries leases with filters - rpc Leases(QueryLeasesRequest) returns (QueryLeasesResponse) { - option (google.api.http).get = "/akash/market/v1beta4/leases/list"; - } - - // Lease queries lease details - rpc Lease(QueryLeaseRequest) returns (QueryLeaseResponse) { - option (google.api.http).get = "/akash/market/v1beta4/leases/info"; - } -} - -// QueryOrdersRequest is request type for the Query/Orders RPC method -message QueryOrdersRequest { - OrderFilters filters = 1 [ - (gogoproto.nullable) = false - ]; - - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryOrdersResponse is response type for the Query/Orders RPC method -message QueryOrdersResponse { - repeated Order orders = 1 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Orders" - ]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryOrderRequest is request type for the Query/Order RPC method -message QueryOrderRequest { - OrderID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID" - ]; -} - -// QueryOrderResponse is response type for the Query/Order RPC method -message QueryOrderResponse { - Order order = 1 [ - (gogoproto.nullable) = false - ]; -} - -// QueryBidsRequest is request type for the Query/Bids RPC method -message QueryBidsRequest { - BidFilters filters = 1 [ - (gogoproto.nullable) = false - ]; - - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryBidsResponse is response type for the Query/Bids RPC method -message QueryBidsResponse { - repeated QueryBidResponse bids = 1 [ - (gogoproto.nullable) = false - ]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryBidRequest is 
request type for the Query/Bid RPC method -message QueryBidRequest { - BidID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID" - ]; -} - -// QueryBidResponse is response type for the Query/Bid RPC method -message QueryBidResponse { - Bid bid = 1 [(gogoproto.nullable) = false]; - akash.escrow.v1beta3.Account escrow_account = 2 [ - (gogoproto.nullable) = false - ]; -} - -// QueryLeasesRequest is request type for the Query/Leases RPC method -message QueryLeasesRequest { - LeaseFilters filters = 1 [ - (gogoproto.nullable) = false - ]; - - cosmos.base.query.v1beta1.PageRequest pagination = 2; -} - -// QueryLeasesResponse is response type for the Query/Leases RPC method -message QueryLeasesResponse { - repeated QueryLeaseResponse leases = 1 [ - (gogoproto.nullable) = false - ]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryLeaseRequest is request type for the Query/Lease RPC method -message QueryLeaseRequest { - LeaseID id = 1 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "ID" - ]; -} - -// QueryLeaseResponse is response type for the Query/Lease RPC method -message QueryLeaseResponse { - Lease lease = 1 [ - (gogoproto.nullable) = false - ]; - akash.escrow.v1beta3.FractionalPayment escrow_payment = 2 [ - (gogoproto.nullable) = false - ]; -} diff --git a/proto/node/akash/market/v1beta4/service.proto b/proto/node/akash/market/v1beta4/service.proto deleted file mode 100644 index 5177a431..00000000 --- a/proto/node/akash/market/v1beta4/service.proto +++ /dev/null @@ -1,25 +0,0 @@ -syntax = "proto3"; -package akash.market.v1beta4; - -import "akash/market/v1beta4/bid.proto"; -import "akash/market/v1beta4/lease.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta4"; - -// Msg defines the market Msg service -service Msg { - // CreateBid defines a method to create a bid given proper inputs. - rpc CreateBid(MsgCreateBid) returns (MsgCreateBidResponse); - - // CloseBid defines a method to close a bid given proper inputs. - rpc CloseBid(MsgCloseBid) returns (MsgCloseBidResponse); - - // WithdrawLease withdraws accrued funds from the lease payment - rpc WithdrawLease(MsgWithdrawLease) returns (MsgWithdrawLeaseResponse); - - // CreateLease creates a new lease - rpc CreateLease(MsgCreateLease) returns (MsgCreateLeaseResponse); - - // CloseLease defines a method to close an order given proper inputs. - rpc CloseLease(MsgCloseLease) returns (MsgCloseLeaseResponse); -} diff --git a/proto/node/akash/market/v1beta5/bid.proto b/proto/node/akash/market/v1beta5/bid.proto new file mode 100644 index 00000000..5b9fcd1a --- /dev/null +++ b/proto/node/akash/market/v1beta5/bid.proto @@ -0,0 +1,73 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "gogoproto/gogo.proto"; +import "cosmos/base/v1beta1/coin.proto"; + +import "akash/market/v1beta5/resourcesoffer.proto"; +import "akash/market/v1/bid.proto"; + +option go_package = "pkg.akt.dev/go/node/market/v1beta5"; + +// Bid stores BidID, state of bid and price +message Bid { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + // BidState is an enum which refers to state of bid + enum State { + option (gogoproto.goproto_enum_prefix) = false; + + // Prefix should start with 0 in enum. 
So declaring dummy state + invalid = 0 [ + (gogoproto.enumvalue_customname) = "BidStateInvalid" + ]; + // BidOpen denotes state for bid open + open = 1 [ + (gogoproto.enumvalue_customname) = "BidOpen" + ]; + // BidMatched denotes state for bid open + active = 2 [ + (gogoproto.enumvalue_customname) = "BidActive" + ]; + // BidLost denotes state for bid lost + lost = 3 [ + (gogoproto.enumvalue_customname) = "BidLost" + ]; + // BidClosed denotes state for bid closed + closed = 4 [ + (gogoproto.enumvalue_customname) = "BidClosed" + ]; + } + + akash.market.v1.BidID id = 1 [ + (gogoproto.customname) = "ID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + + State state = 2 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; + + cosmos.base.v1beta1.DecCoin price = 3 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "price", + (gogoproto.moretags) = "yaml:\"price\"" + ]; + + int64 created_at = 4 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"created_at\"" + ]; + + repeated ResourceOffer resources_offer = 5 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "ResourcesOffer", + (gogoproto.customname) = "ResourcesOffer", + (gogoproto.jsontag) = "resources_offer", + (gogoproto.moretags) = "yaml:\"resources_offer\"" + ]; +} diff --git a/proto/node/akash/market/v1beta5/bidmsg.proto b/proto/node/akash/market/v1beta5/bidmsg.proto new file mode 100644 index 00000000..783321b7 --- /dev/null +++ b/proto/node/akash/market/v1beta5/bidmsg.proto @@ -0,0 +1,69 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; +import "cosmos/msg/v1/msg.proto"; + +import "cosmos/base/v1beta1/coin.proto"; + +import "akash/market/v1beta5/resourcesoffer.proto"; + +import "akash/market/v1/bid.proto"; +import "akash/market/v1/order.proto"; + +option go_package = "pkg.akt.dev/go/node/market/v1beta5"; + +// MsgCreateBid defines an SDK message for creating Bid +message MsgCreateBid { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "provider"; + + akash.market.v1.OrderID order_id = 1 [ + (gogoproto.customname) = "OrderID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "order_id", + (gogoproto.moretags) = "yaml:\"order_id\"" + ]; + string provider = 2 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "provider", + (gogoproto.moretags) = 'yaml:"provider"' + ]; + cosmos.base.v1beta1.DecCoin price = 3 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "price", + (gogoproto.moretags) = "yaml:\"price\"" + ]; + cosmos.base.v1beta1.Coin deposit = 4 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "deposit", + (gogoproto.moretags) = "yaml:\"deposit\"" + ]; + repeated ResourceOffer resources_offer = 5 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "ResourcesOffer", + (gogoproto.customname) = "ResourcesOffer", + (gogoproto.jsontag) = "resources_offer", + (gogoproto.moretags) = "yaml:\"resources_offer\"" + ]; +} + +// MsgCreateBidResponse defines the Msg/CreateBid response type. 
+message MsgCreateBidResponse {} + +// MsgCloseBid defines an SDK message for closing bid +message MsgCloseBid { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "id.provider"; + + akash.market.v1.BidID id = 1 [ + (gogoproto.customname) = "ID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// MsgCloseBidResponse defines the Msg/CloseBid response type. +message MsgCloseBidResponse {} diff --git a/proto/node/akash/market/v1beta5/filters.proto b/proto/node/akash/market/v1beta5/filters.proto new file mode 100644 index 00000000..acf4f481 --- /dev/null +++ b/proto/node/akash/market/v1beta5/filters.proto @@ -0,0 +1,72 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; + +option go_package = "pkg.akt.dev/go/node/market/v1beta5"; + +// BidFilters defines flags for bid list filter +message BidFilters { + option (gogoproto.equal) = false; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = "yaml:\"dseq\"" + ]; + uint32 gseq = 3 [ + (gogoproto.customname) = "GSeq", + (gogoproto.jsontag) = "gseq", + (gogoproto.moretags) = "yaml:\"gseq\"" + ]; + uint32 oseq = 4 [ + (gogoproto.customname) = "OSeq", + (gogoproto.jsontag) = "oseq", + (gogoproto.moretags) = "yaml:\"oseq\"" + ]; + string provider = 5 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "provider", + (gogoproto.moretags) = "yaml:\"provider\"" + ]; + string state = 6 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; +} + +// OrderFilters defines flags for order list filter +message OrderFilters { + option (gogoproto.equal) = false; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = "yaml:\"dseq\"" + ]; + uint32 gseq = 3 [ + (gogoproto.customname) = "GSeq", + (gogoproto.jsontag) = "gseq", + (gogoproto.moretags) = "yaml:\"gseq\"" + ]; + uint32 oseq = 4 [ + (gogoproto.customname) = "OSeq", + (gogoproto.jsontag) = "oseq", + (gogoproto.moretags) = "yaml:\"oseq\"" + ]; + string state = 5 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; +} diff --git a/proto/node/akash/market/v1beta5/genesis.proto b/proto/node/akash/market/v1beta5/genesis.proto new file mode 100644 index 00000000..46077d27 --- /dev/null +++ b/proto/node/akash/market/v1beta5/genesis.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "gogoproto/gogo.proto"; + +import "akash/market/v1/lease.proto"; + +import "akash/market/v1beta5/params.proto"; +import "akash/market/v1beta5/bid.proto"; +import "akash/market/v1beta5/order.proto"; + +option go_package = "pkg.akt.dev/go/node/market/v1beta5"; + +// GenesisState defines the basic genesis state used by market module +message GenesisState { + Params params = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "params", + (gogoproto.moretags) = 'yaml:"params"' + ]; + repeated Order orders = 2 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Orders", + (gogoproto.jsontag) = "orders", + (gogoproto.moretags) = "yaml:\"orders\"" + 
]; + repeated akash.market.v1.Lease leases = 3 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "pkg.akt.dev/go/node/market/v1.Leases", + (gogoproto.jsontag) = "leases", + (gogoproto.moretags) = "yaml:\"leases\"" + ]; + repeated Bid bids = 4 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Bids", + (gogoproto.jsontag) = "bids", + (gogoproto.moretags) = "yaml:\"bids\"" + ]; +} diff --git a/proto/node/akash/market/v1beta5/leasemsg.proto b/proto/node/akash/market/v1beta5/leasemsg.proto new file mode 100644 index 00000000..78477914 --- /dev/null +++ b/proto/node/akash/market/v1beta5/leasemsg.proto @@ -0,0 +1,60 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "gogoproto/gogo.proto"; + +import "cosmos/msg/v1/msg.proto"; + +import "akash/market/v1/bid.proto"; +import "akash/market/v1/lease.proto"; + +option go_package = "pkg.akt.dev/go/node/market/v1beta5"; + +// MsgCreateLease is sent to create a lease +message MsgCreateLease { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "bid_id.owner"; + + akash.market.v1.BidID bid_id = 1 [ + (gogoproto.customname) = "BidID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// MsgCreateLeaseResponse is the response from creating a lease +message MsgCreateLeaseResponse {} + +// MsgWithdrawLease defines an SDK message for withdrawing lease funds +message MsgWithdrawLease { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "bid_id.provider"; + + akash.market.v1.LeaseID bid_id = 1 [ + (gogoproto.customname) = "ID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. +message MsgWithdrawLeaseResponse {} + + +// MsgCloseLease defines an SDK message for closing order +message MsgCloseLease { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "lease_id.owner"; + + akash.market.v1.LeaseID lease_id = 1 [ + (gogoproto.customname) = "ID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// MsgCloseLeaseResponse defines the Msg/CloseLease response type. +message MsgCloseLeaseResponse {} diff --git a/proto/node/akash/market/v1beta5/order.proto b/proto/node/akash/market/v1beta5/order.proto new file mode 100644 index 00000000..d25a9f95 --- /dev/null +++ b/proto/node/akash/market/v1beta5/order.proto @@ -0,0 +1,58 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "gogoproto/gogo.proto"; + +import "akash/deployment/v1beta4/groupspec.proto"; + +import "akash/market/v1/order.proto"; + +option go_package = "pkg.akt.dev/go/node/market/v1beta5"; + +// Order stores orderID, state of order and other details +message Order { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + // State is an enum which refers to state of order + enum State { + option (gogoproto.goproto_enum_prefix) = false; + + // Prefix should start with 0 in enum. 
So declaring dummy state + invalid = 0 [ + (gogoproto.enumvalue_customname) = "OrderStateInvalid" + ]; + // OrderOpen denotes state for order open + open = 1 [ + (gogoproto.enumvalue_customname) = "OrderOpen" + ]; + // OrderMatched denotes state for order matched + active = 2 [ + (gogoproto.enumvalue_customname) = "OrderActive" + ]; + // OrderClosed denotes state for order lost + closed = 3 [ + (gogoproto.enumvalue_customname) = "OrderClosed" + ]; + } + + akash.market.v1.OrderID id = 1 [ + (gogoproto.customname) = "ID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + + State state = 2 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; + + akash.deployment.v1beta4.GroupSpec spec = 3 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "spec", + (gogoproto.moretags) = "yaml:\"spec\"" + ]; + + int64 created_at = 4; +} diff --git a/proto/node/akash/market/v1beta5/params.proto b/proto/node/akash/market/v1beta5/params.proto new file mode 100644 index 00000000..266fda49 --- /dev/null +++ b/proto/node/akash/market/v1beta5/params.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "gogoproto/gogo.proto"; +import "cosmos/base/v1beta1/coin.proto"; + +option go_package = "pkg.akt.dev/go/node/market/v1beta5"; + +// Params is the params for the x/market module +message Params { + cosmos.base.v1beta1.Coin bid_min_deposit = 1 [ + (gogoproto.customname) = "BidMinDeposit", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "bid_min_deposit", + (gogoproto.moretags) = "yaml:\"bid_min_deposit\"" + ]; + uint32 order_max_bids = 2 [ + (gogoproto.customname) = "OrderMaxBids", + (gogoproto.jsontag) = "order_max_bids", + (gogoproto.moretags) = "yaml:\"order_max_bids\"" + ]; +} diff --git a/proto/node/akash/market/v1beta5/paramsmsg.proto b/proto/node/akash/market/v1beta5/paramsmsg.proto new file mode 100644 index 00000000..bfab76ec --- /dev/null +++ b/proto/node/akash/market/v1beta5/paramsmsg.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "gogoproto/gogo.proto"; + +import "cosmos/msg/v1/msg.proto"; +import "cosmos_proto/cosmos.proto"; + +import "akash/market/v1beta5/params.proto"; + +option go_package = "pkg.akt.dev/go/node/market/v1beta5"; + +// MsgUpdateParams is the Msg/UpdateParams request type. +// +// Since: akash v1.0.0 +message MsgUpdateParams { + option (cosmos.msg.v1.signer) = "authority"; + + // authority is the address of the governance account. + string authority = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString" + ]; + + // params defines the x/deployment parameters to update. + // + // NOTE: All parameters must be supplied. + Params params = 2 [ + (gogoproto.nullable) = false + ]; +} + +// MsgUpdateParamsResponse defines the response structure for executing a +// MsgUpdateParams message. 
+// +// Since: akash v1.0.0 +message MsgUpdateParamsResponse {} diff --git a/proto/node/akash/market/v1beta5/query.proto b/proto/node/akash/market/v1beta5/query.proto new file mode 100644 index 00000000..b4217307 --- /dev/null +++ b/proto/node/akash/market/v1beta5/query.proto @@ -0,0 +1,179 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "gogoproto/gogo.proto"; +import "amino/amino.proto"; +import "google/api/annotations.proto"; + +import "cosmos/base/query/v1beta1/pagination.proto"; + +import "akash/escrow/v1/account.proto"; +import "akash/escrow/v1/fractional_payment.proto"; + +import "akash/market/v1/order.proto"; +import "akash/market/v1/bid.proto"; +import "akash/market/v1/lease.proto"; +import "akash/market/v1/filters.proto"; + +import "akash/market/v1beta5/order.proto"; +import "akash/market/v1beta5/bid.proto"; +import "akash/market/v1beta5/filters.proto"; +import "akash/market/v1beta5/params.proto"; + +option go_package = "pkg.akt.dev/go/node/market/v1beta5"; + +// Query defines the gRPC querier service +service Query { + // Orders queries orders with filters + rpc Orders(QueryOrdersRequest) returns (QueryOrdersResponse) { + option (google.api.http).get = "/akash/market/v1beta5/orders/list"; + } + + // Order queries order details + rpc Order(QueryOrderRequest) returns (QueryOrderResponse) { + option (google.api.http).get = "/akash/market/v1beta5/orders/info"; + } + + // Bids queries bids with filters + rpc Bids(QueryBidsRequest) returns (QueryBidsResponse) { + option (google.api.http).get = "/akash/market/v1beta5/bids/list"; + } + + // Bid queries bid details + rpc Bid(QueryBidRequest) returns (QueryBidResponse) { + option (google.api.http).get = "/akash/market/v1beta5/bids/info"; + } + + // Leases queries leases with filters + rpc Leases(QueryLeasesRequest) returns (QueryLeasesResponse) { + option (google.api.http).get = "/akash/market/v1beta5/leases/list"; + } + + // Lease queries lease details + rpc Lease(QueryLeaseRequest) returns (QueryLeaseResponse) { + option (google.api.http).get = "/akash/market/v1beta5/leases/info"; + } + + // Params returns the total set of minting parameters. 
+ rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { + option (google.api.http).get = "/akash/market/v1beta5/params"; + } +} + +// QueryOrdersRequest is request type for the Query/Orders RPC method +message QueryOrdersRequest { + akash.market.v1beta5.OrderFilters filters = 1 [ + (gogoproto.nullable) = false + ]; + + cosmos.base.query.v1beta1.PageRequest pagination = 2; +} + +// QueryOrdersResponse is response type for the Query/Orders RPC method +message QueryOrdersResponse { + repeated Order orders = 1 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Orders" + ]; + + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryOrderRequest is request type for the Query/Order RPC method +message QueryOrderRequest { + akash.market.v1.OrderID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID" + ]; +} + +// QueryOrderResponse is response type for the Query/Order RPC method +message QueryOrderResponse { + Order order = 1 [ + (gogoproto.nullable) = false + ]; +} + +// QueryBidsRequest is request type for the Query/Bids RPC method +message QueryBidsRequest { + akash.market.v1beta5.BidFilters filters = 1 [ + (gogoproto.nullable) = false + ]; + + cosmos.base.query.v1beta1.PageRequest pagination = 2; +} + +// QueryBidsResponse is response type for the Query/Bids RPC method +message QueryBidsResponse { + repeated QueryBidResponse bids = 1 [ + (gogoproto.nullable) = false + ]; + + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryBidRequest is request type for the Query/Bid RPC method +message QueryBidRequest { + akash.market.v1.BidID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID" + ]; +} + +// QueryBidResponse is response type for the Query/Bid RPC method +message QueryBidResponse { + Bid bid = 1 [ + (gogoproto.nullable) = false + ]; + akash.escrow.v1.Account escrow_account = 2 [ + (gogoproto.nullable) = false + ]; +} + +// QueryLeasesRequest is request type for the Query/Leases RPC method +message QueryLeasesRequest { + akash.market.v1.LeaseFilters filters = 1 [ + (gogoproto.nullable) = false + ]; + + cosmos.base.query.v1beta1.PageRequest pagination = 2; +} + +// QueryLeasesResponse is response type for the Query/Leases RPC method +message QueryLeasesResponse { + repeated QueryLeaseResponse leases = 1 [ + (gogoproto.nullable) = false + ]; + + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryLeaseRequest is request type for the Query/Lease RPC method +message QueryLeaseRequest { + akash.market.v1.LeaseID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID" + ]; +} + +// QueryLeaseResponse is response type for the Query/Lease RPC method +message QueryLeaseResponse { + akash.market.v1.Lease lease = 1 [ + (gogoproto.nullable) = false + ]; + akash.escrow.v1.FractionalPayment escrow_payment = 2 [ + (gogoproto.nullable) = false + ]; +} + +// QueryParamsRequest is the request type for the Query/Params RPC method. +message QueryParamsRequest {} + +// QueryParamsResponse is the response type for the Query/Params RPC method. +message QueryParamsResponse { + // params defines the parameters of the module. 
+ Params params = 1 [ + (gogoproto.nullable) = false, + (amino.dont_omitempty) = true + ]; +} diff --git a/proto/node/akash/market/v1beta5/resourcesoffer.proto b/proto/node/akash/market/v1beta5/resourcesoffer.proto new file mode 100644 index 00000000..e0f1243c --- /dev/null +++ b/proto/node/akash/market/v1beta5/resourcesoffer.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "gogoproto/gogo.proto"; + +import "akash/base/resources/v1beta4/resources.proto"; + +option go_package = "pkg.akt.dev/go/node/market/v1beta5"; + +// ResourceOffer describes resources that provider is offering +// for deployment +message ResourceOffer { + option (gogoproto.equal) = true; + akash.base.resources.v1beta4.Resources resources = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "resources", + (gogoproto.moretags) = "yaml:\"resources\"" + ]; + uint32 count = 2 [ + (gogoproto.jsontag) = "count", + (gogoproto.moretags) = "yaml:\"count\"" + ]; +} diff --git a/proto/node/akash/market/v1beta5/service.proto b/proto/node/akash/market/v1beta5/service.proto new file mode 100644 index 00000000..c545bc67 --- /dev/null +++ b/proto/node/akash/market/v1beta5/service.proto @@ -0,0 +1,34 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "akash/market/v1beta5/bidmsg.proto"; +import "akash/market/v1beta5/leasemsg.proto"; +import "akash/market/v1beta5/paramsmsg.proto"; +import "cosmos/msg/v1/msg.proto"; + +option go_package = "pkg.akt.dev/go/node/market/v1beta5"; + +// Msg defines the market Msg service +service Msg { + option (cosmos.msg.v1.service) = true; + // CreateBid defines a method to create a bid given proper inputs. + rpc CreateBid(MsgCreateBid) returns (MsgCreateBidResponse); + + // CloseBid defines a method to close a bid given proper inputs. + rpc CloseBid(MsgCloseBid) returns (MsgCloseBidResponse); + + // WithdrawLease withdraws accrued funds from the lease payment + rpc WithdrawLease(MsgWithdrawLease) returns (MsgWithdrawLeaseResponse); + + // CreateLease creates a new lease + rpc CreateLease(MsgCreateLease) returns (MsgCreateLeaseResponse); + + // CloseLease defines a method to close an order given proper inputs. + rpc CloseLease(MsgCloseLease) returns (MsgCloseLeaseResponse); + + // UpdateParams defines a governance operation for updating the x/market module + // parameters. The authority is hard-coded to the x/gov module account. 
+ // + // Since: akash v1.0.0 + rpc UpdateParams(MsgUpdateParams) returns (MsgUpdateParamsResponse); +} diff --git a/proto/node/akash/provider/v1beta1/provider.proto b/proto/node/akash/provider/v1beta1/provider.proto deleted file mode 100644 index 7b82effe..00000000 --- a/proto/node/akash/provider/v1beta1/provider.proto +++ /dev/null @@ -1,120 +0,0 @@ -syntax = "proto3"; -package akash.provider.v1beta1; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta1/attribute.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/provider/v1beta1"; - -// Msg defines the provider Msg service -service Msg { - // CreateProvider defines a method that creates a provider given the proper inputs - rpc CreateProvider(MsgCreateProvider) returns (MsgCreateProviderResponse); - - // UpdateProvider defines a method that updates a provider given the proper inputs - rpc UpdateProvider(MsgUpdateProvider) returns (MsgUpdateProviderResponse); - - // DeleteProvider defines a method that deletes a provider given the proper inputs - rpc DeleteProvider(MsgDeleteProvider) returns (MsgDeleteProviderResponse); -} - -// ProviderInfo -message ProviderInfo { - string email = 1 [ - (gogoproto.customname) = "EMail", - (gogoproto.jsontag) = "email", - (gogoproto.moretags) = "yaml:\"email\"" - ]; - string website = 2 [ - (gogoproto.jsontag) = "website", - (gogoproto.moretags) = "yaml:\"website\"" - ]; -} - -// MsgCreateProvider defines an SDK message for creating a provider -message MsgCreateProvider { - option (gogoproto.equal) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - string host_uri = 2 [ - (gogoproto.customname) = "HostURI", - (gogoproto.jsontag) = "host_uri", - (gogoproto.moretags) = "yaml:\"host_uri\"" - ]; - repeated akash.base.v1beta1.Attribute attributes = 3 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta1.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; - - ProviderInfo info = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "info", - (gogoproto.moretags) = "yaml:\"info\"" - ]; -} - -// MsgCreateProviderResponse defines the Msg/CreateProvider response type. -message MsgCreateProviderResponse {} - -// MsgUpdateProvider defines an SDK message for updating a provider -message MsgUpdateProvider { - option (gogoproto.equal) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - string host_uri = 2 [ - (gogoproto.customname) = "HostURI", - (gogoproto.jsontag) = "host_uri", - (gogoproto.moretags) = "yaml:\"host_uri\"" - ]; - repeated akash.base.v1beta1.Attribute attributes = 3 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta1.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; - ProviderInfo info = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "info", - (gogoproto.moretags) = "yaml:\"info\"" - ]; -} - -// MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. -message MsgUpdateProviderResponse {} - -// MsgDeleteProvider defines an SDK message for deleting a provider -message MsgDeleteProvider { - option (gogoproto.equal) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; -} - -// MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. 
-message MsgDeleteProviderResponse {} - -// Provider stores owner and host details -message Provider { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - string host_uri = 2 [ - (gogoproto.customname) = "HostURI", - (gogoproto.jsontag) = "host_uri", - (gogoproto.moretags) = "yaml:\"host_uri\"" - ]; - repeated akash.base.v1beta1.Attribute attributes = 3 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta1.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; - - ProviderInfo info = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "info", - (gogoproto.moretags) = "yaml:\"info\"" - ]; -} diff --git a/proto/node/akash/provider/v1beta2/genesis.proto b/proto/node/akash/provider/v1beta2/genesis.proto deleted file mode 100644 index c2718c8d..00000000 --- a/proto/node/akash/provider/v1beta2/genesis.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; -package akash.provider.v1beta2; - -import "gogoproto/gogo.proto"; -import "akash/provider/v1beta2/provider.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/provider/v1beta2"; - -// GenesisState defines the basic genesis state used by provider module -message GenesisState { - repeated Provider providers = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "providers", - (gogoproto.moretags) = "yaml:\"providers\"" - ]; -} diff --git a/proto/node/akash/provider/v1beta2/provider.proto b/proto/node/akash/provider/v1beta2/provider.proto deleted file mode 100644 index 01344ab7..00000000 --- a/proto/node/akash/provider/v1beta2/provider.proto +++ /dev/null @@ -1,120 +0,0 @@ -syntax = "proto3"; -package akash.provider.v1beta2; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta2/attribute.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/provider/v1beta2"; - -// Msg defines the provider Msg service -service Msg { - // CreateProvider defines a method that creates a provider given the proper inputs - rpc CreateProvider(MsgCreateProvider) returns (MsgCreateProviderResponse); - - // UpdateProvider defines a method that updates a provider given the proper inputs - rpc UpdateProvider(MsgUpdateProvider) returns (MsgUpdateProviderResponse); - - // DeleteProvider defines a method that deletes a provider given the proper inputs - rpc DeleteProvider(MsgDeleteProvider) returns (MsgDeleteProviderResponse); -} - -// ProviderInfo -message ProviderInfo { - string email = 1 [ - (gogoproto.customname) = "EMail", - (gogoproto.jsontag) = "email", - (gogoproto.moretags) = "yaml:\"email\"" - ]; - string website = 2 [ - (gogoproto.jsontag) = "website", - (gogoproto.moretags) = "yaml:\"website\"" - ]; -} - -// MsgCreateProvider defines an SDK message for creating a provider -message MsgCreateProvider { - option (gogoproto.equal) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - string host_uri = 2 [ - (gogoproto.customname) = "HostURI", - (gogoproto.jsontag) = "host_uri", - (gogoproto.moretags) = "yaml:\"host_uri\"" - ]; - repeated akash.base.v1beta2.Attribute attributes = 3 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta2.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; - - 
ProviderInfo info = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "info", - (gogoproto.moretags) = "yaml:\"info\"" - ]; -} - -// MsgCreateProviderResponse defines the Msg/CreateProvider response type. -message MsgCreateProviderResponse {} - -// MsgUpdateProvider defines an SDK message for updating a provider -message MsgUpdateProvider { - option (gogoproto.equal) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - string host_uri = 2 [ - (gogoproto.customname) = "HostURI", - (gogoproto.jsontag) = "host_uri", - (gogoproto.moretags) = "yaml:\"host_uri\"" - ]; - repeated akash.base.v1beta2.Attribute attributes = 3 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta2.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; - ProviderInfo info = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "info", - (gogoproto.moretags) = "yaml:\"info\"" - ]; -} - -// MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. -message MsgUpdateProviderResponse {} - -// MsgDeleteProvider defines an SDK message for deleting a provider -message MsgDeleteProvider { - option (gogoproto.equal) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; -} - -// MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. -message MsgDeleteProviderResponse {} - -// Provider stores owner and host details -message Provider { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - string host_uri = 2 [ - (gogoproto.customname) = "HostURI", - (gogoproto.jsontag) = "host_uri", - (gogoproto.moretags) = "yaml:\"host_uri\"" - ]; - repeated akash.base.v1beta2.Attribute attributes = 3 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta2.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; - - ProviderInfo info = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "info", - (gogoproto.moretags) = "yaml:\"info\"" - ]; -} diff --git a/proto/node/akash/provider/v1beta2/query.proto b/proto/node/akash/provider/v1beta2/query.proto deleted file mode 100644 index 5b4cc1a8..00000000 --- a/proto/node/akash/provider/v1beta2/query.proto +++ /dev/null @@ -1,44 +0,0 @@ -syntax = "proto3"; -package akash.provider.v1beta2; - -import "gogoproto/gogo.proto"; -import "google/api/annotations.proto"; -import "cosmos/base/query/v1beta1/pagination.proto"; -import "akash/provider/v1beta2/provider.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/provider/v1beta2"; - -// Query defines the gRPC querier service -service Query { - // Providers queries providers - rpc Providers(QueryProvidersRequest) returns (QueryProvidersResponse) { - option (google.api.http).get = "/akash/provider/v1beta2/providers"; - } - - // Provider queries provider details - rpc Provider(QueryProviderRequest) returns (QueryProviderResponse) { - option (google.api.http).get = "/akash/provider/v1beta2/providers/{owner}"; - } -} - -// QueryProvidersRequest is request type for the Query/Providers RPC method -message QueryProvidersRequest { - cosmos.base.query.v1beta1.PageRequest pagination = 1; -} - -// QueryProvidersResponse is response type for the 
Query/Providers RPC method -message QueryProvidersResponse { - repeated Provider providers = 1 [(gogoproto.nullable) = false, (gogoproto.castrepeated) = "Providers"]; - - cosmos.base.query.v1beta1.PageResponse pagination = 2; -} - -// QueryProviderRequest is request type for the Query/Provider RPC method -message QueryProviderRequest { - string owner = 1; -} - -// QueryProviderResponse is response type for the Query/Provider RPC method -message QueryProviderResponse { - Provider provider = 1 [(gogoproto.nullable) = false]; -} diff --git a/proto/node/akash/provider/v1beta3/genesis.proto b/proto/node/akash/provider/v1beta3/genesis.proto deleted file mode 100644 index 703f215e..00000000 --- a/proto/node/akash/provider/v1beta3/genesis.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; -package akash.provider.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/provider/v1beta3/provider.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/provider/v1beta3"; - -// GenesisState defines the basic genesis state used by provider module -message GenesisState { - repeated Provider providers = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "providers", - (gogoproto.moretags) = "yaml:\"providers\"" - ]; -} diff --git a/proto/node/akash/provider/v1beta3/provider.proto b/proto/node/akash/provider/v1beta3/provider.proto deleted file mode 100644 index 693a42f6..00000000 --- a/proto/node/akash/provider/v1beta3/provider.proto +++ /dev/null @@ -1,120 +0,0 @@ -syntax = "proto3"; -package akash.provider.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/base/v1beta3/attribute.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/provider/v1beta3"; - -// Msg defines the provider Msg service -service Msg { - // CreateProvider defines a method that creates a provider given the proper inputs - rpc CreateProvider(MsgCreateProvider) returns (MsgCreateProviderResponse); - - // UpdateProvider defines a method that updates a provider given the proper inputs - rpc UpdateProvider(MsgUpdateProvider) returns (MsgUpdateProviderResponse); - - // DeleteProvider defines a method that deletes a provider given the proper inputs - rpc DeleteProvider(MsgDeleteProvider) returns (MsgDeleteProviderResponse); -} - -// ProviderInfo -message ProviderInfo { - string email = 1 [ - (gogoproto.customname) = "EMail", - (gogoproto.jsontag) = "email", - (gogoproto.moretags) = "yaml:\"email\"" - ]; - string website = 2 [ - (gogoproto.jsontag) = "website", - (gogoproto.moretags) = "yaml:\"website\"" - ]; -} - -// MsgCreateProvider defines an SDK message for creating a provider -message MsgCreateProvider { - option (gogoproto.equal) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - string host_uri = 2 [ - (gogoproto.customname) = "HostURI", - (gogoproto.jsontag) = "host_uri", - (gogoproto.moretags) = "yaml:\"host_uri\"" - ]; - repeated akash.base.v1beta3.Attribute attributes = 3 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta3.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; - - ProviderInfo info = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "info", - (gogoproto.moretags) = "yaml:\"info\"" - ]; -} - -// MsgCreateProviderResponse defines the Msg/CreateProvider response type. 
-message MsgCreateProviderResponse {} - -// MsgUpdateProvider defines an SDK message for updating a provider -message MsgUpdateProvider { - option (gogoproto.equal) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - string host_uri = 2 [ - (gogoproto.customname) = "HostURI", - (gogoproto.jsontag) = "host_uri", - (gogoproto.moretags) = "yaml:\"host_uri\"" - ]; - repeated akash.base.v1beta3.Attribute attributes = 3 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta3.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; - ProviderInfo info = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "info", - (gogoproto.moretags) = "yaml:\"info\"" - ]; -} - -// MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. -message MsgUpdateProviderResponse {} - -// MsgDeleteProvider defines an SDK message for deleting a provider -message MsgDeleteProvider { - option (gogoproto.equal) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; -} - -// MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. -message MsgDeleteProviderResponse {} - -// Provider stores owner and host details -message Provider { - option (gogoproto.equal) = false; - option (gogoproto.goproto_stringer) = false; - - string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; - string host_uri = 2 [ - (gogoproto.customname) = "HostURI", - (gogoproto.jsontag) = "host_uri", - (gogoproto.moretags) = "yaml:\"host_uri\"" - ]; - repeated akash.base.v1beta3.Attribute attributes = 3 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta3.Attributes", - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "attributes", - (gogoproto.moretags) = "yaml:\"attributes\"" - ]; - - ProviderInfo info = 4 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "info", - (gogoproto.moretags) = "yaml:\"info\"" - ]; -} diff --git a/proto/node/akash/provider/v1beta3/query.proto b/proto/node/akash/provider/v1beta3/query.proto deleted file mode 100644 index 87a2e423..00000000 --- a/proto/node/akash/provider/v1beta3/query.proto +++ /dev/null @@ -1,49 +0,0 @@ -syntax = "proto3"; -package akash.provider.v1beta3; - -import "gogoproto/gogo.proto"; -import "google/api/annotations.proto"; -import "cosmos/base/query/v1beta1/pagination.proto"; -import "akash/provider/v1beta3/provider.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/provider/v1beta3"; - -// Query defines the gRPC querier service -service Query { - // Providers queries providers - rpc Providers(QueryProvidersRequest) returns (QueryProvidersResponse) { - option (google.api.http).get = "/akash/provider/v1beta3/providers"; - } - - // Provider queries provider details - rpc Provider(QueryProviderRequest) returns (QueryProviderResponse) { - option (google.api.http).get = "/akash/provider/v1beta3/providers/{owner}"; - } -} - -// QueryProvidersRequest is request type for the Query/Providers RPC method -message QueryProvidersRequest { - cosmos.base.query.v1beta1.PageRequest pagination = 1; -} - -// QueryProvidersResponse is response type for the Query/Providers RPC method -message QueryProvidersResponse { - repeated Provider providers = 1 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Providers" - ]; - - cosmos.base.query.v1beta1.PageResponse 
pagination = 2; -} - -// QueryProviderRequest is request type for the Query/Provider RPC method -message QueryProviderRequest { - string owner = 1; -} - -// QueryProviderResponse is response type for the Query/Provider RPC method -message QueryProviderResponse { - Provider provider = 1 [ - (gogoproto.nullable) = false - ]; -} diff --git a/proto/node/akash/provider/v1beta4/event.proto b/proto/node/akash/provider/v1beta4/event.proto new file mode 100644 index 00000000..5ff9c7fe --- /dev/null +++ b/proto/node/akash/provider/v1beta4/event.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; +package akash.provider.v1beta4; + +import "gogoproto/gogo.proto"; + +import "cosmos_proto/cosmos.proto"; + +option go_package = "pkg.akt.dev/go/node/provider/v1beta4"; + +// EventProviderCreated defines an SDK message for provider created event +message EventProviderCreated { + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; +} + +// EventProviderUpdated defines an SDK message for provider updated event +message EventProviderUpdated { + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; +} + +// EventProviderDeleted defines an SDK message for provider deleted event +message EventProviderDeleted { + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; +} diff --git a/proto/node/akash/provider/v1beta4/genesis.proto b/proto/node/akash/provider/v1beta4/genesis.proto new file mode 100644 index 00000000..35455f67 --- /dev/null +++ b/proto/node/akash/provider/v1beta4/genesis.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; +package akash.provider.v1beta4; + +import "gogoproto/gogo.proto"; +import "akash/provider/v1beta4/provider.proto"; + +option go_package = "pkg.akt.dev/go/node/provider/v1beta4"; + +// GenesisState defines the basic genesis state used by provider module +message GenesisState { + repeated Provider providers = 1 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Providers", + (gogoproto.jsontag) = "providers", + (gogoproto.moretags) = "yaml:\"providers\"" + ]; +} diff --git a/proto/node/akash/provider/v1beta4/msg.proto b/proto/node/akash/provider/v1beta4/msg.proto new file mode 100644 index 00000000..f629ad76 --- /dev/null +++ b/proto/node/akash/provider/v1beta4/msg.proto @@ -0,0 +1,90 @@ +syntax = "proto3"; +package akash.provider.v1beta4; + +import "gogoproto/gogo.proto"; +import "cosmos/msg/v1/msg.proto"; +import "cosmos_proto/cosmos.proto"; + +import "akash/base/attributes/v1/attribute.proto"; + +import "akash/provider/v1beta4/provider.proto"; + +option go_package = "pkg.akt.dev/go/node/provider/v1beta4"; + +// MsgCreateProvider defines an SDK message for creating a provider +message MsgCreateProvider { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "owner"; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + string host_uri = 2 [ + (gogoproto.customname) = "HostURI", + (gogoproto.jsontag) = "host_uri", + (gogoproto.moretags) = "yaml:\"host_uri\"" + ]; + repeated akash.base.attributes.v1.Attribute attributes = 3 [ + (gogoproto.castrepeated) = "pkg.akt.dev/go/node/types/attributes/v1.Attributes", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes", + 
(gogoproto.moretags) = "yaml:\"attributes\"" + ]; + + akash.provider.v1beta4.Info info = 4 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "info", + (gogoproto.moretags) = "yaml:\"info\"" + ]; +} + +// MsgCreateProviderResponse defines the Msg/CreateProvider response type. +message MsgCreateProviderResponse {} + +// MsgUpdateProvider defines an SDK message for updating a provider +message MsgUpdateProvider { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "owner"; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + string host_uri = 2 [ + (gogoproto.customname) = "HostURI", + (gogoproto.jsontag) = "host_uri", + (gogoproto.moretags) = "yaml:\"host_uri\"" + ]; + repeated akash.base.attributes.v1.Attribute attributes = 3 [ + (gogoproto.castrepeated) = "pkg.akt.dev/go/node/types/attributes/v1.Attributes", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes", + (gogoproto.moretags) = "yaml:\"attributes\"" + ]; + akash.provider.v1beta4.Info info = 4 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "info", + (gogoproto.moretags) = "yaml:\"info\"" + ]; +} + +// MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. +message MsgUpdateProviderResponse {} + +// MsgDeleteProvider defines an SDK message for deleting a provider +message MsgDeleteProvider { + option (gogoproto.equal) = false; + option (cosmos.msg.v1.signer) = "owner"; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; +} + +// MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. +message MsgDeleteProviderResponse {} diff --git a/proto/node/akash/provider/v1beta4/provider.proto b/proto/node/akash/provider/v1beta4/provider.proto new file mode 100644 index 00000000..ed341725 --- /dev/null +++ b/proto/node/akash/provider/v1beta4/provider.proto @@ -0,0 +1,51 @@ +syntax = "proto3"; +package akash.provider.v1beta4; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; + +import "akash/base/attributes/v1/attribute.proto"; + +option go_package = "pkg.akt.dev/go/node/provider/v1beta4"; + +// Info +message Info { + string email = 1 [ + (gogoproto.customname) = "EMail", + (gogoproto.jsontag) = "email", + (gogoproto.moretags) = "yaml:\"email\"" + ]; + string website = 2 [ + (gogoproto.jsontag) = "website", + (gogoproto.moretags) = "yaml:\"website\"" + ]; +} + +// Provider stores owner and host details +message Provider { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + string owner = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString", + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + string host_uri = 2 [ + (gogoproto.customname) = "HostURI", + (gogoproto.jsontag) = "host_uri", + (gogoproto.moretags) = "yaml:\"host_uri\"" + ]; + repeated akash.base.attributes.v1.Attribute attributes = 3 [ + (gogoproto.castrepeated) = "pkg.akt.dev/go/node/types/attributes/v1.Attributes", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes", + (gogoproto.moretags) = "yaml:\"attributes\"" + ]; + + Info info = 4 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "info", + (gogoproto.moretags) = "yaml:\"info\"" + ]; +} diff --git a/proto/node/akash/provider/v1beta4/query.proto b/proto/node/akash/provider/v1beta4/query.proto new file mode 100644 index 00000000..ed8e609e 
--- /dev/null +++ b/proto/node/akash/provider/v1beta4/query.proto @@ -0,0 +1,50 @@ +syntax = "proto3"; +package akash.provider.v1beta4; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "cosmos/base/query/v1beta1/pagination.proto"; + +import "akash/provider/v1beta4/provider.proto"; + +option go_package = "pkg.akt.dev/go/node/provider/v1beta4"; + +// Query defines the gRPC querier service +service Query { + // Providers queries providers + rpc Providers(QueryProvidersRequest) returns (QueryProvidersResponse) { + option (google.api.http).get = "/akash/provider/v1beta4/providers"; + } + + // Provider queries provider details + rpc Provider(QueryProviderRequest) returns (QueryProviderResponse) { + option (google.api.http).get = "/akash/provider/v1beta4/providers/{owner}"; + } +} + +// QueryProvidersRequest is request type for the Query/Providers RPC method +message QueryProvidersRequest { + cosmos.base.query.v1beta1.PageRequest pagination = 1; +} + +// QueryProvidersResponse is response type for the Query/Providers RPC method +message QueryProvidersResponse { + repeated Provider providers = 1 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Providers" + ]; + + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryProviderRequest is request type for the Query/Provider RPC method +message QueryProviderRequest { + string owner = 1; +} + +// QueryProviderResponse is response type for the Query/Provider RPC method +message QueryProviderResponse { + Provider provider = 1 [ + (gogoproto.nullable) = false + ]; +} diff --git a/proto/node/akash/provider/v1beta4/service.proto b/proto/node/akash/provider/v1beta4/service.proto new file mode 100644 index 00000000..8f0bca86 --- /dev/null +++ b/proto/node/akash/provider/v1beta4/service.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; +package akash.provider.v1beta4; + +import "cosmos/msg/v1/msg.proto"; + +import "akash/provider/v1beta4/msg.proto"; + +option go_package = "pkg.akt.dev/go/node/provider/v1beta4"; + +// Msg defines the provider Msg service +service Msg { + option (cosmos.msg.v1.service) = true; + + // CreateProvider defines a method that creates a provider given the proper inputs + rpc CreateProvider(MsgCreateProvider) returns (MsgCreateProviderResponse); + + // UpdateProvider defines a method that updates a provider given the proper inputs + rpc UpdateProvider(MsgUpdateProvider) returns (MsgUpdateProviderResponse); + + // DeleteProvider defines a method that deletes a provider given the proper inputs + rpc DeleteProvider(MsgDeleteProvider) returns (MsgDeleteProviderResponse); +} diff --git a/proto/node/akash/staking/v1beta3/genesis.proto b/proto/node/akash/staking/v1beta3/genesis.proto index 40b46921..ed814ec7 100644 --- a/proto/node/akash/staking/v1beta3/genesis.proto +++ b/proto/node/akash/staking/v1beta3/genesis.proto @@ -4,7 +4,7 @@ package akash.staking.v1beta3; import "gogoproto/gogo.proto"; import "akash/staking/v1beta3/params.proto"; -option go_package = "github.com/akash-network/akash-api/go/node/staking/v1beta3"; +option go_package = "pkg.akt.dev/go/node/staking/v1beta3"; // GenesisState stores slice of genesis deployment instance message GenesisState { diff --git a/proto/node/akash/staking/v1beta3/params.proto b/proto/node/akash/staking/v1beta3/params.proto index 9df24863..032ed383 100644 --- a/proto/node/akash/staking/v1beta3/params.proto +++ b/proto/node/akash/staking/v1beta3/params.proto @@ -2,7 +2,7 @@ syntax = "proto3"; package akash.staking.v1beta3; import 
"gogoproto/gogo.proto"; -option go_package = "github.com/akash-network/akash-api/go/node/staking/v1beta3"; +option go_package = "pkg.akt.dev/go/node/staking/v1beta3"; // Params extends the parameters for the x/staking module message Params { diff --git a/proto/node/akash/staking/v1beta3/paramsmsg.proto b/proto/node/akash/staking/v1beta3/paramsmsg.proto new file mode 100644 index 00000000..1ee244bb --- /dev/null +++ b/proto/node/akash/staking/v1beta3/paramsmsg.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; +package akash.staking.v1beta3; + +import "gogoproto/gogo.proto"; + +import "amino/amino.proto"; + +import "cosmos/msg/v1/msg.proto"; +import "cosmos_proto/cosmos.proto"; + +import "akash/staking/v1beta3/params.proto"; + +option go_package = "pkg.akt.dev/go/node/staking/v1beta3"; + +// MsgUpdateParams is the Msg/UpdateParams request type. +// +// Since: akash v1.0.0 +message MsgUpdateParams { + option (cosmos.msg.v1.signer) = "authority"; + option (amino.name) = "akash/x/staking/MsgUpdateParams"; + // authority is the address of the governance account. + string authority = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString" + ]; + + // params defines the x/deployment parameters to update. + // + // NOTE: All parameters must be supplied. + Params params = 2 [ + (gogoproto.nullable) = false, + (amino.dont_omitempty) = true + ]; +} + +// MsgUpdateParamsResponse defines the response structure for executing a +// MsgUpdateParams message. +// +// Since: akash v1.0.0 +message MsgUpdateParamsResponse {} diff --git a/proto/node/akash/staking/v1beta3/query.proto b/proto/node/akash/staking/v1beta3/query.proto new file mode 100644 index 00000000..dc1c9abd --- /dev/null +++ b/proto/node/akash/staking/v1beta3/query.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; +package akash.staking.v1beta3; + +import "google/api/annotations.proto"; +import "gogoproto/gogo.proto"; +import "amino/amino.proto"; + +import "akash/staking/v1beta3/params.proto"; + +option go_package = "pkg.akt.dev/go/node/staking/v1beta3"; + +// Query defines the gRPC querier service +service Query { + // Params returns the total set of minting parameters. + rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { + option (google.api.http).get = "/akash/staking/v1beta3/params"; + } +} + +// QueryParamsRequest is the request type for the Query/Params RPC method. +message QueryParamsRequest {} + +// QueryParamsResponse is the response type for the Query/Params RPC method. +message QueryParamsResponse { + // params defines the parameters of the module. + Params params = 1 [ + (gogoproto.nullable) = false, + (amino.dont_omitempty) = true + ]; +} diff --git a/proto/node/akash/staking/v1beta3/service.proto b/proto/node/akash/staking/v1beta3/service.proto new file mode 100644 index 00000000..00834332 --- /dev/null +++ b/proto/node/akash/staking/v1beta3/service.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; +package akash.staking.v1beta3; + +import "akash/staking/v1beta3/paramsmsg.proto"; +import "cosmos/msg/v1/msg.proto"; + +option go_package = "pkg.akt.dev/go/node/staking/v1beta3"; + +// Msg defines the market Msg service +service Msg { + option (cosmos.msg.v1.service) = true; + // UpdateParams defines a governance operation for updating the x/market module + // parameters. The authority is hard-coded to the x/gov module account. 
+ // + // Since: akash v1.0.0 + rpc UpdateParams(MsgUpdateParams) returns (MsgUpdateParamsResponse); +} diff --git a/proto/node/akash/take/v1/genesis.proto b/proto/node/akash/take/v1/genesis.proto new file mode 100644 index 00000000..9e01a192 --- /dev/null +++ b/proto/node/akash/take/v1/genesis.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package akash.take.v1; + +import "gogoproto/gogo.proto"; +import "akash/take/v1/params.proto"; + +option go_package = "pkg.akt.dev/go/node/take/v1"; + +// GenesisState stores slice of genesis deployment instance +message GenesisState { + Params params = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "params", + (gogoproto.moretags) = "yaml:\"params\"" + ]; +} diff --git a/proto/node/akash/take/v1/params.proto b/proto/node/akash/take/v1/params.proto new file mode 100644 index 00000000..8a4b7dfe --- /dev/null +++ b/proto/node/akash/take/v1/params.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; + +package akash.take.v1; + +import "gogoproto/gogo.proto"; + +option go_package = "pkg.akt.dev/go/node/take/v1"; + +// DenomTakeRate describes take rate for specified denom +message DenomTakeRate { + string denom = 1 [ + (gogoproto.customname) = "Denom", + (gogoproto.jsontag) = "denom", + (gogoproto.moretags) = "yaml:\"denom\"" + ]; + uint32 rate = 2 [ + (gogoproto.customname) = "Rate", + (gogoproto.jsontag) = "rate", + (gogoproto.moretags) = "yaml:\"rate\"" + ]; +} + +// Params defines the parameters for the x/take package +message Params { + // denom -> % take rate + repeated DenomTakeRate denom_take_rates = 1 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "DenomTakeRates", + (gogoproto.customname) = "DenomTakeRates", + (gogoproto.jsontag) = "denom_take_rates", + (gogoproto.moretags) = "yaml:\"denom_take_rates\"" + ]; + + uint32 default_take_rate = 2 [ + (gogoproto.customname) = "DefaultTakeRate", + (gogoproto.jsontag) = "default_take_rate", + (gogoproto.moretags) = "yaml:\"default_take_rate\"" + ]; +} diff --git a/proto/node/akash/take/v1/paramsmsg.proto b/proto/node/akash/take/v1/paramsmsg.proto new file mode 100644 index 00000000..c2606b64 --- /dev/null +++ b/proto/node/akash/take/v1/paramsmsg.proto @@ -0,0 +1,36 @@ +syntax = "proto3"; +package akash.take.v1; + +import "gogoproto/gogo.proto"; + +import "cosmos/msg/v1/msg.proto"; +import "cosmos_proto/cosmos.proto"; + +import "akash/take/v1/params.proto"; + +option go_package = "pkg.akt.dev/go/node/take/v1"; + +// MsgUpdateParams is the Msg/UpdateParams request type. +// +// Since: akash v1.0.0 +message MsgUpdateParams { + option (cosmos.msg.v1.signer) = "authority"; + + // authority is the address of the governance account. + string authority = 1 [ + (cosmos_proto.scalar) = "cosmos.AddressString" + ]; + + // params defines the x/deployment parameters to update. + // + // NOTE: All parameters must be supplied. + Params params = 2 [ + (gogoproto.nullable) = false + ]; +} + +// MsgUpdateParamsResponse defines the response structure for executing a +// MsgUpdateParams message. 
+// +// Since: akash v1.0.0 +message MsgUpdateParamsResponse {} diff --git a/proto/node/akash/take/v1/query.proto b/proto/node/akash/take/v1/query.proto new file mode 100644 index 00000000..0489a1dd --- /dev/null +++ b/proto/node/akash/take/v1/query.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; +package akash.take.v1; + +import "google/api/annotations.proto"; +import "gogoproto/gogo.proto"; +import "amino/amino.proto"; + +import "akash/take/v1/params.proto"; + +option go_package = "pkg.akt.dev/go/node/take/v1"; + +// QueryParamsRequest is the request type for the Query/Params RPC method. +message QueryParamsRequest {} + +// QueryParamsResponse is the response type for the Query/Params RPC method. +message QueryParamsResponse { + // params defines the parameters of the module. + Params params = 1 [ + (gogoproto.nullable) = false, + (amino.dont_omitempty) = true + ]; +} + +// Query defines the gRPC querier service +service Query { + // Params returns the total set of take parameters. + rpc Params(QueryParamsRequest) returns (QueryParamsResponse) { + option (google.api.http).get = "/akash/take/v1/params"; + } +} diff --git a/proto/node/akash/take/v1/service.proto b/proto/node/akash/take/v1/service.proto new file mode 100644 index 00000000..a9aab2f7 --- /dev/null +++ b/proto/node/akash/take/v1/service.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; +package akash.take.v1; + +import "cosmos/msg/v1/msg.proto"; + +import "akash/take/v1/paramsmsg.proto"; + +option go_package = "pkg.akt.dev/go/node/take/v1"; + +// Msg defines the take Msg service +service Msg { + option (cosmos.msg.v1.service) = true; + // UpdateParams defines a governance operation for updating the x/take module + // parameters. The authority is hard-coded to the x/gov module account. + // + // Since: akash v1.0.0 + rpc UpdateParams(MsgUpdateParams) returns (MsgUpdateParamsResponse); +} diff --git a/proto/node/akash/take/v1beta3/genesis.proto b/proto/node/akash/take/v1beta3/genesis.proto deleted file mode 100644 index 32905155..00000000 --- a/proto/node/akash/take/v1beta3/genesis.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; -package akash.take.v1beta3; - -import "gogoproto/gogo.proto"; -import "akash/take/v1beta3/params.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/take/v1beta3"; - -// GenesisState stores slice of genesis deployment instance -message GenesisState { - Params params = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "params", - (gogoproto.moretags) = "yaml:\"params\"" - ]; -} diff --git a/proto/node/akash/take/v1beta3/params.proto b/proto/node/akash/take/v1beta3/params.proto deleted file mode 100644 index ab18adf3..00000000 --- a/proto/node/akash/take/v1beta3/params.proto +++ /dev/null @@ -1,39 +0,0 @@ -syntax = "proto3"; - -package akash.take.v1beta3; - -import "gogoproto/gogo.proto"; - -option go_package = "github.com/akash-network/akash-api/go/node/take/v1beta3"; - -// DenomTakeRate describes take rate for specified denom -message DenomTakeRate { - string denom = 1 [ - (gogoproto.customname) = "Denom", - (gogoproto.jsontag) = "denom", - (gogoproto.moretags) = "yaml:\"denom\"" - ]; - uint32 rate = 2 [ - (gogoproto.customname) = "Rate", - (gogoproto.jsontag) = "rate", - (gogoproto.moretags) = "yaml:\"rate\"" - ]; -} - -// Params defines the parameters for the x/take package -message Params { - // denom -> % take rate - repeated DenomTakeRate denom_take_rates = 1 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "DenomTakeRates", -
(gogoproto.customname) = "DenomTakeRates", - (gogoproto.jsontag) = "denom_take_rates", - (gogoproto.moretags) = "yaml:\"denom_take_rates\"" - ]; - - uint32 default_take_rate = 2 [ - (gogoproto.customname) = "DefaultTakeRate", - (gogoproto.jsontag) = "default_take_rate", - (gogoproto.moretags) = "yaml:\"default_take_rate\"" - ]; -} diff --git a/proto/node/akash/take/v1beta3/query.proto b/proto/node/akash/take/v1beta3/query.proto deleted file mode 100644 index e5952aae..00000000 --- a/proto/node/akash/take/v1beta3/query.proto +++ /dev/null @@ -1,8 +0,0 @@ -syntax = "proto3"; -package akash.take.v1beta3; - -option go_package = "github.com/akash-network/akash-api/go/node/take/v1beta3"; - -// Query defines the gRPC querier service -service Query { -} diff --git a/proto/node/buf.gen.doc.yaml b/proto/node/buf.gen.doc.yaml new file mode 100644 index 00000000..1c39a39c --- /dev/null +++ b/proto/node/buf.gen.doc.yaml @@ -0,0 +1,6 @@ +version: v1 +plugins: + - name: doc + strategy: all + out: ./docs/proto + opt: ./docs/protodoc-markdown.tmpl,node.md diff --git a/proto/node/buf.lock b/proto/node/buf.lock index c91b5810..d6a46546 100644 --- a/proto/node/buf.lock +++ b/proto/node/buf.lock @@ -1,2 +1,23 @@ # Generated by buf. DO NOT EDIT. version: v1 +deps: + - remote: buf.build + owner: cosmos + repository: cosmos-proto + commit: 04467658e59e44bbb22fe568206e1f70 + digest: shake256:73a640bd60e0c523b0f8237ff34eab67c45a38b64bbbde1d80224819d272dbf316ac183526bd245f994af6608b025f5130483d0133c5edd385531326b5990466 + - remote: buf.build + owner: cosmos + repository: cosmos-sdk + commit: 954f7b05f38440fc8250134b15adec47 + digest: shake256:2ab4404fd04a7d1d52df0e2d0f2d477a3d83ffd88d876957bf3fedfd702c8e52833d65b3ce1d89a3c5adf2aab512616b0e4f51d8463f07eda9a8a3317ee3ac54 + - remote: buf.build + owner: cosmos + repository: gogo-proto + commit: 88ef6483f90f478fb938c37dde52ece3 + digest: shake256:89c45df2aa11e0cff97b0d695436713db3d993d76792e9f8dc1ae90e6ab9a9bec55503d48ceedd6b86069ab07d3041b32001b2bfe0227fa725dd515ff381e5ba + - remote: buf.build + owner: googleapis + repository: googleapis + commit: 74015a8aeb8445aa9e3e1454cb54bc35 + digest: shake256:7149cf5e9955c692d381e557830555d4e93f205a0f1b8e2dfdae46d029369aa3fc1980e35df0d310f7cc3b622f93e19ad276769a283a967dd3065ddfd3a40e13 diff --git a/proto/node/buf.yaml b/proto/node/buf.yaml index 40bce3e7..5175c1af 100644 --- a/proto/node/buf.yaml +++ b/proto/node/buf.yaml @@ -1,11 +1,11 @@ # This module represents buf.build/akash-network/node version: v1 name: buf.build/akash-network/node -#deps: -# - buf.build/cosmos/cosmos-proto -# - buf.build/cosmos/cosmos-sdk -# - buf.build/cosmos/gogo-proto -# - buf.build/googleapis/googleapis +deps: + - buf.build/cosmos/gogo-proto + - buf.build/cosmos/cosmos-sdk:v0.47.0 + - buf.build/cosmos/cosmos-proto + - buf.build/googleapis/googleapis breaking: use: - FILE @@ -23,21 +23,3 @@ lint: - ENUM_VALUE_UPPER_SNAKE_CASE - ENUM_ZERO_VALUE_SUFFIX - RPC_REQUEST_STANDARD_NAME - ignore: - - akash/audit/v1beta1/ - - akash/audit/v1beta2/ - - akash/base/v1beta1 - - akash/base/v1beta2 - - akash/cert/v1beta1 - - akash/cert/v1beta2 - - akash/escrow/v1beta1 - - akash/escrow/v1beta2 - - akash/inflation/v1beta1 - - akash/inflation/v1beta2 - - akash/market/v1beta1 - - akash/market/v1beta2 - - akash/market/v1beta3 - - akash/deployment/v1beta1 - - akash/deployment/v1beta2 - - akash/provider/v1beta1 - - akash/provider/v1beta2 diff --git a/proto/provider/akash/inventory/v1/cluster.proto b/proto/provider/akash/inventory/v1/cluster.proto index 7aca9c42..23273c47 
100644 --- a/proto/provider/akash/inventory/v1/cluster.proto +++ b/proto/provider/akash/inventory/v1/cluster.proto @@ -6,7 +6,7 @@ import "gogoproto/gogo.proto"; import "akash/inventory/v1/node.proto"; import "akash/inventory/v1/storage.proto"; -option go_package = "github.com/akash-network/akash-api/go/inventory/v1"; +option go_package = "pkg.akt.dev/go/inventory/v1"; // Cluster reports inventory across entire cluster message Cluster { diff --git a/proto/provider/akash/inventory/v1/cpu.proto b/proto/provider/akash/inventory/v1/cpu.proto index 46c82234..a269e67a 100644 --- a/proto/provider/akash/inventory/v1/cpu.proto +++ b/proto/provider/akash/inventory/v1/cpu.proto @@ -5,7 +5,7 @@ import "gogoproto/gogo.proto"; import "akash/inventory/v1/resourcepair.proto"; -option go_package = "github.com/akash-network/akash-api/go/inventory/v1"; +option go_package = "pkg.akt.dev/go/inventory/v1"; // leave it commented for now. having it as true used to cause issues with GRPC reflection. // it seems to work for now diff --git a/proto/provider/akash/inventory/v1/gpu.proto b/proto/provider/akash/inventory/v1/gpu.proto index 7f35edfb..cb5b54e8 100644 --- a/proto/provider/akash/inventory/v1/gpu.proto +++ b/proto/provider/akash/inventory/v1/gpu.proto @@ -5,7 +5,7 @@ package akash.inventory.v1; import "gogoproto/gogo.proto"; import "akash/inventory/v1/resourcepair.proto"; -option go_package = "github.com/akash-network/akash-api/go/inventory/v1"; +option go_package = "pkg.akt.dev/go/inventory/v1"; // GPUInfo reports GPU details message GPUInfo { diff --git a/proto/provider/akash/inventory/v1/memory.proto b/proto/provider/akash/inventory/v1/memory.proto index 287dcf26..6ada5c93 100644 --- a/proto/provider/akash/inventory/v1/memory.proto +++ b/proto/provider/akash/inventory/v1/memory.proto @@ -5,7 +5,7 @@ package akash.inventory.v1; import "gogoproto/gogo.proto"; import "akash/inventory/v1/resourcepair.proto"; -option go_package = "github.com/akash-network/akash-api/go/inventory/v1"; +option go_package = "pkg.akt.dev/go/inventory/v1"; // MemoryInfo reports Memory details message MemoryInfo { diff --git a/proto/provider/akash/inventory/v1/node.proto b/proto/provider/akash/inventory/v1/node.proto index 6a796560..ca24fb7b 100644 --- a/proto/provider/akash/inventory/v1/node.proto +++ b/proto/provider/akash/inventory/v1/node.proto @@ -4,7 +4,7 @@ package akash.inventory.v1; import "gogoproto/gogo.proto"; import "akash/inventory/v1/resources.proto"; -option go_package = "github.com/akash-network/akash-api/go/inventory/v1"; +option go_package = "pkg.akt.dev/go/inventory/v1"; // NodeCapabilities extended list of node capabilities message NodeCapabilities { diff --git a/proto/provider/akash/inventory/v1/resourcepair.proto b/proto/provider/akash/inventory/v1/resourcepair.proto index 9f036d8f..ef337550 100644 --- a/proto/provider/akash/inventory/v1/resourcepair.proto +++ b/proto/provider/akash/inventory/v1/resourcepair.proto @@ -3,10 +3,10 @@ syntax = "proto3"; package akash.inventory.v1; import "gogoproto/gogo.proto"; -import "akash/base/v1beta3/attribute.proto"; -import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "akash/base/attributes/v1/attribute.proto"; +import "k8s/io/apimachinery/pkg/api/resource/generated.proto"; -option go_package = "github.com/akash-network/akash-api/go/inventory/v1"; +option go_package = "pkg.akt.dev/go/inventory/v1"; // ResourcePair to extents resource.Quantity to provide total and available units of the resource message ResourcePair { @@ -24,9 +24,9 @@ message ResourcePair 
{ (gogoproto.jsontag) = "allocated", (gogoproto.moretags) = "yaml:\"allocated\"" ]; - repeated akash.base.v1beta3.Attribute attributes = 3 [ + repeated akash.base.attributes.v1.Attribute attributes = 3 [ (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/v1beta3.Attributes", + (gogoproto.castrepeated) = "pkg.akt.dev/go/node/types/attributes/v1.Attributes", (gogoproto.jsontag) = "attributes,omitempty", (gogoproto.moretags) = "yaml:\"attributes,omitempty\"" ]; diff --git a/proto/provider/akash/inventory/v1/resources.proto b/proto/provider/akash/inventory/v1/resources.proto index 9e30fd16..837e1273 100644 --- a/proto/provider/akash/inventory/v1/resources.proto +++ b/proto/provider/akash/inventory/v1/resources.proto @@ -7,7 +7,7 @@ import "akash/inventory/v1/gpu.proto"; import "akash/inventory/v1/memory.proto"; import "akash/inventory/v1/resourcepair.proto"; -option go_package = "github.com/akash-network/akash-api/go/inventory/v1"; +option go_package = "pkg.akt.dev/go/inventory/v1"; // NodeResources reports node inventory details message NodeResources { diff --git a/proto/provider/akash/inventory/v1/service.proto b/proto/provider/akash/inventory/v1/service.proto index 3e319209..d45cd0c6 100644 --- a/proto/provider/akash/inventory/v1/service.proto +++ b/proto/provider/akash/inventory/v1/service.proto @@ -6,7 +6,7 @@ import "google/api/annotations.proto"; import "akash/inventory/v1/node.proto"; import "akash/inventory/v1/cluster.proto"; -option go_package = "github.com/akash-network/akash-api/go/inventory/v1"; +option go_package = "pkg.akt.dev/go/inventory/v1"; // NodeRPC defines the RPC server of node diff --git a/proto/provider/akash/inventory/v1/storage.proto b/proto/provider/akash/inventory/v1/storage.proto index de39e9ce..ae50fe1e 100644 --- a/proto/provider/akash/inventory/v1/storage.proto +++ b/proto/provider/akash/inventory/v1/storage.proto @@ -5,7 +5,7 @@ package akash.inventory.v1; import "gogoproto/gogo.proto"; import "akash/inventory/v1/resourcepair.proto"; -option go_package = "github.com/akash-network/akash-api/go/inventory/v1"; +option go_package = "pkg.akt.dev/go/inventory/v1"; // StorageInfo reports Storage details message StorageInfo { diff --git a/proto/provider/akash/manifest/v2beta1/group.proto b/proto/provider/akash/manifest/v2beta1/group.proto deleted file mode 100644 index 009a346a..00000000 --- a/proto/provider/akash/manifest/v2beta1/group.proto +++ /dev/null @@ -1,26 +0,0 @@ -syntax = "proto3"; - -package akash.manifest.v2beta1; - -import "gogoproto/gogo.proto"; -import "akash/manifest/v2beta1/service.proto"; - -option (gogoproto.goproto_stringer_all) = false; -option (gogoproto.stringer_all) = true; -option go_package = "github.com/akash-network/akash-api/go/manifest/v2beta1"; - -// Group store name and list of services -message Group { - // getters must be implemented as value receiver - // due to GetName collision - option (gogoproto.goproto_getters) = false; - string name = 1 [ - (gogoproto.jsontag) = "name", - (gogoproto.moretags) = "yaml:\"name\"" - ]; - repeated Service services = 2 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "services", - (gogoproto.moretags) = "yaml:\"services\"" - ]; -} diff --git a/proto/provider/akash/manifest/v2beta1/httpoptions.proto b/proto/provider/akash/manifest/v2beta1/httpoptions.proto deleted file mode 100644 index 1b090eb3..00000000 --- a/proto/provider/akash/manifest/v2beta1/httpoptions.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package 
akash.manifest.v2beta1; - -import "gogoproto/gogo.proto"; - -option (gogoproto.goproto_stringer_all) = false; -option (gogoproto.stringer_all) = true; -option go_package = "github.com/akash-network/akash-api/go/manifest/v2beta1"; - -// ServiceExposeHTTPOptions -message ServiceExposeHTTPOptions { - uint32 max_body_size = 1 [ - (gogoproto.jsontag) = "maxBodySize", - (gogoproto.moretags) = "yaml:\"maxBodySize\"" - ]; - uint32 read_timeout = 2 [ - (gogoproto.jsontag) = "readTimeout", - (gogoproto.moretags) = "yaml:\"readTimeout\"" - ]; - uint32 send_timeout = 3 [ - (gogoproto.jsontag) = "sendTimeout", - (gogoproto.moretags) = "yaml:\"sendTimeout\"" - ]; - uint32 next_tries = 4 [ - (gogoproto.jsontag) = "nextTries", - (gogoproto.moretags) = "yaml:\"nextTries\"" - ]; - uint32 next_timeout = 5 [ - (gogoproto.jsontag) = "nextTimeout", - (gogoproto.moretags) = "yaml:\"nextTimeout\"" - ]; - repeated string next_cases = 6 [ - (gogoproto.jsontag) = "nextCases", - (gogoproto.moretags) = "yaml:\"nextCases\"" - ]; -} diff --git a/proto/provider/akash/manifest/v2beta1/service.proto b/proto/provider/akash/manifest/v2beta1/service.proto deleted file mode 100644 index f1a50c31..00000000 --- a/proto/provider/akash/manifest/v2beta1/service.proto +++ /dev/null @@ -1,80 +0,0 @@ -syntax = "proto3"; - -package akash.manifest.v2beta1; - -import "gogoproto/gogo.proto"; -import "akash/manifest/v2beta1/serviceexpose.proto"; -import "akash/base/v1beta2/resourceunits.proto"; - -option (gogoproto.goproto_stringer_all) = false; -option (gogoproto.stringer_all) = true; -option go_package = "github.com/akash-network/akash-api/go/manifest/v2beta1"; - -// StorageParams -message StorageParams { - string name = 1 [ - (gogoproto.jsontag) = "name", - (gogoproto.moretags) = "yaml:\"name\"" - ]; - string mount = 2 [ - (gogoproto.jsontag) = "mount", - (gogoproto.moretags) = "yaml:\"mount\"" - ]; - bool read_only = 3 [ - (gogoproto.jsontag) = "readOnly", - (gogoproto.moretags) = "yaml:\"readOnly\"" - ]; -} - -// ServiceParams -message ServiceParams { - repeated StorageParams storage = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "storage", - (gogoproto.moretags) = "yaml:\"storage\"" - ]; -} - -// Service stores name, image, args, env, unit, count and expose list of service -message Service { - string name = 1 [ - (gogoproto.jsontag) = "name", - (gogoproto.moretags) = "yaml:\"name\"" - ]; - string image = 2 [ - (gogoproto.jsontag) = "image", - (gogoproto.moretags) = "yaml:\"image\"" - ]; - repeated string command = 3 [ - (gogoproto.jsontag) = "command", - (gogoproto.moretags) = "yaml:\"command\"" - ]; - repeated string args = 4 [ - (gogoproto.jsontag) = "args", - (gogoproto.moretags) = "yaml:\"args\"" - ]; - repeated string env = 5 [ - (gogoproto.nullable) = true, - (gogoproto.jsontag) = "env", - (gogoproto.moretags) = "yaml:\"env\"" - ]; - akash.base.v1beta2.ResourceUnits resources = 6 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "resources", - (gogoproto.moretags) = "yaml:\"resources\"" - ]; - uint32 count = 7 [ - (gogoproto.jsontag) = "count", - (gogoproto.moretags) = "yaml:\"count\"" - ]; - repeated ServiceExpose expose = 8 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "expose", - (gogoproto.moretags) = "yaml:\"expose\"" - ]; - ServiceParams params = 9 [ - (gogoproto.nullable) = true, - (gogoproto.jsontag) = "params", - (gogoproto.moretags) = "yaml:\"params\"" - ]; -} diff --git a/proto/provider/akash/manifest/v2beta1/serviceexpose.proto 
b/proto/provider/akash/manifest/v2beta1/serviceexpose.proto deleted file mode 100644 index 46d1d818..00000000 --- a/proto/provider/akash/manifest/v2beta1/serviceexpose.proto +++ /dev/null @@ -1,69 +0,0 @@ -syntax = "proto3"; - -package akash.manifest.v2beta1; - -import "gogoproto/gogo.proto"; -import "akash/manifest/v2beta1/httpoptions.proto"; - -option (gogoproto.goproto_stringer_all) = false; -option (gogoproto.stringer_all) = true; -option go_package = "github.com/akash-network/akash-api/go/manifest/v2beta1"; - -//// State is an enum which refers to state of deployment -//enum ServiceProtocol { -// option (gogoproto.goproto_enum_prefix) = false; -// // invalid describes unsupported protocol -// invalid = 0 [(gogoproto.enumvalue_customname) = "InvalidProtocol"]; -// // tcp -// tcp = 1 [(gogoproto.enumvalue_customname) = "TCP"]; -// // udp -// udp = 2 [(gogoproto.enumvalue_customname) = "UDP"]; -//} - -// ServiceExpose stores exposed ports and hosts details -message ServiceExpose { - // port on the container - uint32 port = 1 [ - (gogoproto.jsontag) = "port", - (gogoproto.moretags) = "yaml:\"port\"" - ]; - // port on the service definition - uint32 external_port = 2 [ - (gogoproto.jsontag) = "externalPort", - (gogoproto.moretags) = "yaml:\"externalPort\"" - ]; - string proto = 3 [ - (gogoproto.casttype) = "ServiceProtocol", - (gogoproto.jsontag) = "proto", - (gogoproto.moretags) = "yaml:\"proto\"" - ]; - string service = 4 [ - (gogoproto.jsontag) = "service", - (gogoproto.moretags) = "yaml:\"service\"" - ]; - bool global = 5 [ - (gogoproto.jsontag) = "global", - (gogoproto.moretags) = "yaml:\"global\"" - ]; - repeated string hosts = 6 [ - (gogoproto.jsontag) = "hosts", - (gogoproto.moretags) = "yaml:\"hosts\"" - ]; - ServiceExposeHTTPOptions http_options = 7 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "HTTPOptions", - (gogoproto.jsontag) = "httpOptions", - (gogoproto.moretags) = "yaml:\"httpOptions\"" - ]; - // The name of the IP address associated with this, if any - string ip = 8 [ - (gogoproto.customname) = "IP", - (gogoproto.jsontag) = "ip", - (gogoproto.moretags) = "yaml:\"ip\"" - ]; - // The sequence number of the associated endpoint in the on-chain data - uint32 endpoint_sequence_number = 9 [ - (gogoproto.jsontag) = "endpointSequenceNumber", - (gogoproto.moretags) = "yaml:\"endpointSequenceNumber\"" - ]; -} diff --git a/proto/provider/akash/manifest/v2beta2/group.proto b/proto/provider/akash/manifest/v2beta2/group.proto deleted file mode 100644 index 9285eea2..00000000 --- a/proto/provider/akash/manifest/v2beta2/group.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package akash.manifest.v2beta2; - -import "gogoproto/gogo.proto"; -import "akash/manifest/v2beta2/service.proto"; - -option (gogoproto.goproto_stringer_all) = false; -option (gogoproto.stringer_all) = true; -option go_package = "github.com/akash-network/akash-api/go/manifest/v2beta2"; - -// Group store name and list of services -message Group { - // getters must be implemented as value receiver - // due to GetName collision - option (gogoproto.goproto_getters) = false; - string name = 1 [ - (gogoproto.jsontag) = "name", - (gogoproto.moretags) = "yaml:\"name\"" - ]; - repeated Service services = 2 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "Services", - (gogoproto.jsontag) = "services", - (gogoproto.moretags) = "yaml:\"services\"" - ]; -} diff --git a/proto/provider/akash/manifest/v2beta2/httpoptions.proto b/proto/provider/akash/manifest/v2beta2/httpoptions.proto deleted 
file mode 100644 index 919080fb..00000000 --- a/proto/provider/akash/manifest/v2beta2/httpoptions.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package akash.manifest.v2beta2; - -import "gogoproto/gogo.proto"; - -option (gogoproto.goproto_stringer_all) = false; -option (gogoproto.stringer_all) = true; -option go_package = "github.com/akash-network/akash-api/go/manifest/v2beta2"; - -// ServiceExposeHTTPOptions -message ServiceExposeHTTPOptions { - uint32 max_body_size = 1 [ - (gogoproto.jsontag) = "maxBodySize", - (gogoproto.moretags) = "yaml:\"maxBodySize\"" - ]; - uint32 read_timeout = 2 [ - (gogoproto.jsontag) = "readTimeout", - (gogoproto.moretags) = "yaml:\"readTimeout\"" - ]; - uint32 send_timeout = 3 [ - (gogoproto.jsontag) = "sendTimeout", - (gogoproto.moretags) = "yaml:\"sendTimeout\"" - ]; - uint32 next_tries = 4 [ - (gogoproto.jsontag) = "nextTries", - (gogoproto.moretags) = "yaml:\"nextTries\"" - ]; - uint32 next_timeout = 5 [ - (gogoproto.jsontag) = "nextTimeout", - (gogoproto.moretags) = "yaml:\"nextTimeout\"" - ]; - repeated string next_cases = 6 [ - (gogoproto.jsontag) = "nextCases", - (gogoproto.moretags) = "yaml:\"nextCases\"" - ]; -} diff --git a/proto/provider/akash/manifest/v2beta2/service.proto b/proto/provider/akash/manifest/v2beta2/service.proto deleted file mode 100644 index d3ce1f19..00000000 --- a/proto/provider/akash/manifest/v2beta2/service.proto +++ /dev/null @@ -1,106 +0,0 @@ -syntax = "proto3"; - -package akash.manifest.v2beta2; - -import "gogoproto/gogo.proto"; -import "akash/manifest/v2beta2/serviceexpose.proto"; -import "akash/base/v1beta3/resources.proto"; - -option (gogoproto.goproto_stringer_all) = false; -option (gogoproto.stringer_all) = true; -option go_package = "github.com/akash-network/akash-api/go/manifest/v2beta2"; - -// StorageParams -message StorageParams { - string name = 1 [ - (gogoproto.jsontag) = "name", - (gogoproto.moretags) = "yaml:\"name\"" - ]; - string mount = 2 [ - (gogoproto.jsontag) = "mount", - (gogoproto.moretags) = "yaml:\"mount\"" - ]; - bool read_only = 3 [ - (gogoproto.jsontag) = "readOnly", - (gogoproto.moretags) = "yaml:\"readOnly\"" - ]; -} - -// ServiceParams -message ServiceParams { - repeated StorageParams storage = 1 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "storage", - (gogoproto.moretags) = "yaml:\"storage\"" - ]; -} - -// Credentials to fetch image from registry -message ServiceImageCredentials { - string host = 1 [ - (gogoproto.jsontag) = "host", - (gogoproto.moretags) = "yaml:\"host\"" - ]; - string email = 2 [ - (gogoproto.jsontag) = "email", - (gogoproto.moretags) = "yaml:\"email\"" - ]; - string username = 3 [ - (gogoproto.jsontag) = "username", - (gogoproto.moretags) = "yaml:\"username\"" - ]; - string password = 4 [ - (gogoproto.jsontag) = "password", - (gogoproto.moretags) = "yaml:\"password\"" - ]; -} - -// Service stores name, image, args, env, unit, count and expose list of service -message Service { - string name = 1 [ - (gogoproto.jsontag) = "name", - (gogoproto.moretags) = "yaml:\"name\"" - ]; - string image = 2 [ - (gogoproto.jsontag) = "image", - (gogoproto.moretags) = "yaml:\"image\"" - ]; - repeated string command = 3 [ - (gogoproto.jsontag) = "command", - (gogoproto.moretags) = "yaml:\"command\"" - ]; - repeated string args = 4 [ - (gogoproto.jsontag) = "args", - (gogoproto.moretags) = "yaml:\"args\"" - ]; - repeated string env = 5 [ - (gogoproto.nullable) = true, - (gogoproto.jsontag) = "env", - (gogoproto.moretags) = "yaml:\"env\"" - ]; - 
akash.base.v1beta3.Resources resources = 6 [ - (gogoproto.nullable) = false, - (gogoproto.jsontag) = "resources", - (gogoproto.moretags) = "yaml:\"resources\"" - ]; - uint32 count = 7 [ - (gogoproto.jsontag) = "count", - (gogoproto.moretags) = "yaml:\"count\"" - ]; - repeated ServiceExpose expose = 8 [ - (gogoproto.nullable) = false, - (gogoproto.castrepeated) = "ServiceExposes", - (gogoproto.jsontag) = "expose", - (gogoproto.moretags) = "yaml:\"expose\"" - ]; - ServiceParams params = 9 [ - (gogoproto.nullable) = true, - (gogoproto.jsontag) = "params", - (gogoproto.moretags) = "yaml:\"params\"" - ]; - ServiceImageCredentials credentials = 10 [ - (gogoproto.nullable) = true, - (gogoproto.jsontag) = "credentials", - (gogoproto.moretags) = "yaml:\"credentials\"" - ]; -} diff --git a/proto/provider/akash/manifest/v2beta2/serviceexpose.proto b/proto/provider/akash/manifest/v2beta2/serviceexpose.proto deleted file mode 100644 index 40956f6c..00000000 --- a/proto/provider/akash/manifest/v2beta2/serviceexpose.proto +++ /dev/null @@ -1,71 +0,0 @@ -syntax = "proto3"; - -package akash.manifest.v2beta2; - -import "gogoproto/gogo.proto"; -import "akash/manifest/v2beta2/httpoptions.proto"; - -option (gogoproto.goproto_stringer_all) = false; -option (gogoproto.stringer_all) = true; -option go_package = "github.com/akash-network/akash-api/go/manifest/v2beta2"; - -//// State is an enum which refers to state of deployment -//enum ServiceProtocol { -// option (gogoproto.goproto_enum_prefix) = false; -// // invalid describes unsupported protocol -// invalid = 0 [(gogoproto.enumvalue_customname) = "InvalidProtocol"]; -// // tcp -// tcp = 1 [(gogoproto.enumvalue_customname) = "TCP"]; -// // udp -// udp = 2 [(gogoproto.enumvalue_customname) = "UDP"]; -//} - -// ServiceExpose stores exposed ports and hosts details -message ServiceExpose { - option (gogoproto.goproto_getters) = false; - - // port on the container - uint32 port = 1 [ - (gogoproto.jsontag) = "port", - (gogoproto.moretags) = "yaml:\"port\"" - ]; - // port on the service definition - uint32 external_port = 2 [ - (gogoproto.jsontag) = "externalPort", - (gogoproto.moretags) = "yaml:\"externalPort\"" - ]; - string proto = 3 [ - (gogoproto.casttype) = "ServiceProtocol", - (gogoproto.jsontag) = "proto", - (gogoproto.moretags) = "yaml:\"proto\"" - ]; - string service = 4 [ - (gogoproto.jsontag) = "service", - (gogoproto.moretags) = "yaml:\"service\"" - ]; - bool global = 5 [ - (gogoproto.jsontag) = "global", - (gogoproto.moretags) = "yaml:\"global\"" - ]; - repeated string hosts = 6 [ - (gogoproto.jsontag) = "hosts", - (gogoproto.moretags) = "yaml:\"hosts\"" - ]; - ServiceExposeHTTPOptions http_options = 7 [ - (gogoproto.nullable) = false, - (gogoproto.customname) = "HTTPOptions", - (gogoproto.jsontag) = "httpOptions", - (gogoproto.moretags) = "yaml:\"httpOptions\"" - ]; - // The name of the IP address associated with this, if any - string ip = 8 [ - (gogoproto.customname) = "IP", - (gogoproto.jsontag) = "ip", - (gogoproto.moretags) = "yaml:\"ip\"" - ]; - // The sequence number of the associated endpoint in the on-chain data - uint32 endpoint_sequence_number = 9 [ - (gogoproto.jsontag) = "endpointSequenceNumber", - (gogoproto.moretags) = "yaml:\"endpointSequenceNumber\"" - ]; -} diff --git a/proto/provider/akash/manifest/v2beta3/group.proto b/proto/provider/akash/manifest/v2beta3/group.proto new file mode 100644 index 00000000..ec341853 --- /dev/null +++ b/proto/provider/akash/manifest/v2beta3/group.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package 
akash.manifest.v2beta3; + +import "gogoproto/gogo.proto"; +import "akash/manifest/v2beta3/service.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option go_package = "pkg.akt.dev/go/manifest/v2beta3"; + +// Group store name and list of services +message Group { + // getters must be implemented as value receiver + // due to GetName collision + option (gogoproto.goproto_getters) = false; + string name = 1 [ + (gogoproto.jsontag) = "name", + (gogoproto.moretags) = "yaml:\"name\"" + ]; + repeated Service services = 2 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Services", + (gogoproto.jsontag) = "services", + (gogoproto.moretags) = "yaml:\"services\"" + ]; +} diff --git a/proto/provider/akash/manifest/v2beta3/httpoptions.proto b/proto/provider/akash/manifest/v2beta3/httpoptions.proto new file mode 100644 index 00000000..4ebaeb70 --- /dev/null +++ b/proto/provider/akash/manifest/v2beta3/httpoptions.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +package akash.manifest.v2beta3; + +import "gogoproto/gogo.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option go_package = "pkg.akt.dev/go/manifest/v2beta3"; + +// ServiceExposeHTTPOptions +message ServiceExposeHTTPOptions { + uint32 max_body_size = 1 [ + (gogoproto.jsontag) = "maxBodySize", + (gogoproto.moretags) = "yaml:\"maxBodySize\"" + ]; + uint32 read_timeout = 2 [ + (gogoproto.jsontag) = "readTimeout", + (gogoproto.moretags) = "yaml:\"readTimeout\"" + ]; + uint32 send_timeout = 3 [ + (gogoproto.jsontag) = "sendTimeout", + (gogoproto.moretags) = "yaml:\"sendTimeout\"" + ]; + uint32 next_tries = 4 [ + (gogoproto.jsontag) = "nextTries", + (gogoproto.moretags) = "yaml:\"nextTries\"" + ]; + uint32 next_timeout = 5 [ + (gogoproto.jsontag) = "nextTimeout", + (gogoproto.moretags) = "yaml:\"nextTimeout\"" + ]; + repeated string next_cases = 6 [ + (gogoproto.nullable) = true, + (gogoproto.jsontag) = "nextCases,omitempty", + (gogoproto.moretags) = "yaml:\"nextCases,omitempty\"" + ]; +} diff --git a/proto/provider/akash/manifest/v2beta3/service.proto b/proto/provider/akash/manifest/v2beta3/service.proto new file mode 100644 index 00000000..ee31a162 --- /dev/null +++ b/proto/provider/akash/manifest/v2beta3/service.proto @@ -0,0 +1,112 @@ +syntax = "proto3"; + +package akash.manifest.v2beta3; + +import "gogoproto/gogo.proto"; + +import "akash/manifest/v2beta3/serviceexpose.proto"; +import "akash/base/resources/v1beta4/resources.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option go_package = "pkg.akt.dev/go/manifest/v2beta3"; + +// StorageParams +message StorageParams { + string name = 1 [ + (gogoproto.jsontag) = "name", + (gogoproto.moretags) = "yaml:\"name\"" + ]; + string mount = 2 [ + (gogoproto.jsontag) = "mount", + (gogoproto.moretags) = "yaml:\"mount\"" + ]; + bool read_only = 3 [ + (gogoproto.jsontag) = "readOnly", + (gogoproto.moretags) = "yaml:\"readOnly\"" + ]; +} + +// ServiceParams +message ServiceParams { + repeated StorageParams storage = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "storage", + (gogoproto.moretags) = "yaml:\"storage\"" + ]; + ImageCredentials credentials = 10 [ + (gogoproto.nullable) = true, + (gogoproto.jsontag) = "credentials,omitempty", + (gogoproto.moretags) = "yaml:\"credentials,omitempty\"" + ]; +} + +// Credentials to fetch image from registry +message ImageCredentials { + string host = 1 [ + (gogoproto.jsontag) = "host", + 
(gogoproto.moretags) = "yaml:\"host\"" + ]; + string email = 2 [ + (gogoproto.jsontag) = "email", + (gogoproto.moretags) = "yaml:\"email\"" + ]; + string username = 3 [ + (gogoproto.jsontag) = "username", + (gogoproto.moretags) = "yaml:\"username\"" + ]; + string password = 4 [ + (gogoproto.jsontag) = "password", + (gogoproto.moretags) = "yaml:\"password\"" + ]; +} + +// Service stores name, image, args, env, unit, count and expose list of service +message Service { + string name = 1 [ + (gogoproto.jsontag) = "name", + (gogoproto.moretags) = "yaml:\"name\"" + ]; + string image = 2 [ + (gogoproto.jsontag) = "image", + (gogoproto.moretags) = "yaml:\"image\"" + ]; + repeated string command = 3 [ + (gogoproto.jsontag) = "command", + (gogoproto.moretags) = "yaml:\"command\"" + ]; + repeated string args = 4 [ + (gogoproto.jsontag) = "args", + (gogoproto.moretags) = "yaml:\"args\"" + ]; + repeated string env = 5 [ + (gogoproto.nullable) = true, + (gogoproto.jsontag) = "env", + (gogoproto.moretags) = "yaml:\"env\"" + ]; + akash.base.resources.v1beta4.Resources resources = 6 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "resources", + (gogoproto.moretags) = "yaml:\"resources\"" + ]; + uint32 count = 7 [ + (gogoproto.jsontag) = "count", + (gogoproto.moretags) = "yaml:\"count\"" + ]; + repeated ServiceExpose expose = 8 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "ServiceExposes", + (gogoproto.jsontag) = "expose", + (gogoproto.moretags) = "yaml:\"expose\"" + ]; + ServiceParams params = 9 [ + (gogoproto.nullable) = true, + (gogoproto.jsontag) = "params,omitempty", + (gogoproto.moretags) = "yaml:\"params,omitempty\"" + ]; + ImageCredentials credentials = 10 [ + (gogoproto.nullable) = true, + (gogoproto.jsontag) = "credentials", + (gogoproto.moretags) = "yaml:\"credentials\"" + ]; +} diff --git a/proto/provider/akash/manifest/v2beta3/serviceexpose.proto b/proto/provider/akash/manifest/v2beta3/serviceexpose.proto new file mode 100644 index 00000000..2559fa00 --- /dev/null +++ b/proto/provider/akash/manifest/v2beta3/serviceexpose.proto @@ -0,0 +1,60 @@ +syntax = "proto3"; + +package akash.manifest.v2beta3; + +import "gogoproto/gogo.proto"; +import "akash/manifest/v2beta3/httpoptions.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option go_package = "pkg.akt.dev/go/manifest/v2beta3"; + +// ServiceExpose stores exposed ports and hosts details +message ServiceExpose { + option (gogoproto.goproto_getters) = false; + + // port on the container + uint32 port = 1 [ + (gogoproto.jsontag) = "port", + (gogoproto.moretags) = "yaml:\"port\"" + ]; + // port on the service definition + uint32 external_port = 2 [ + (gogoproto.jsontag) = "externalPort", + (gogoproto.moretags) = "yaml:\"externalPort\"" + ]; + string proto = 3 [ + (gogoproto.casttype) = "ServiceProtocol", + (gogoproto.jsontag) = "proto", + (gogoproto.moretags) = "yaml:\"proto\"" + ]; + string service = 4 [ + (gogoproto.jsontag) = "service", + (gogoproto.moretags) = "yaml:\"service\"" + ]; + bool global = 5 [ + (gogoproto.jsontag) = "global", + (gogoproto.moretags) = "yaml:\"global\"" + ]; + repeated string hosts = 6 [ + (gogoproto.jsontag) = "hosts", + (gogoproto.moretags) = "yaml:\"hosts\"" + ]; + ServiceExposeHTTPOptions http_options = 7 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "HTTPOptions", + (gogoproto.jsontag) = "httpOptions", + (gogoproto.moretags) = "yaml:\"httpOptions\"" + ]; + // The name of the IP address associated with this, if any + string 
ip = 8 [ + (gogoproto.customname) = "IP", + (gogoproto.jsontag) = "ip", + (gogoproto.moretags) = "yaml:\"ip\"" + ]; + // The sequence number of the associated endpoint in the on-chain data + uint32 endpoint_sequence_number = 9 [ + (gogoproto.jsontag) = "endpointSequenceNumber", + (gogoproto.moretags) = "yaml:\"endpointSequenceNumber\"" + ]; +} diff --git a/proto/provider/akash/provider/lease/v1/service.proto b/proto/provider/akash/provider/lease/v1/service.proto index f7f26f8d..ba369a14 100644 --- a/proto/provider/akash/provider/lease/v1/service.proto +++ b/proto/provider/akash/provider/lease/v1/service.proto @@ -1,11 +1,12 @@ syntax = "proto3"; package akash.provider.lease.v1; -import "akash/manifest/v2beta2/group.proto"; -import "akash/market/v1beta4/lease.proto"; import "gogoproto/gogo.proto"; -option go_package = "github.com/akash-network/akash-api/go/provider/lease/v1"; +import "akash/manifest/v2beta3/group.proto"; +import "akash/market/v1/lease.proto"; + +option go_package = "pkg.akt.dev/go/provider/lease/v1"; // LeaseServiceStatus message LeaseServiceStatus { @@ -88,14 +89,14 @@ message ServiceStatus { // SendManifestRequest is request type for the SendManifest Providers RPC method message SendManifestRequest { - akash.market.v1beta4.LeaseID lease_id = 1 [ + akash.market.v1.LeaseID lease_id = 1 [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "LeaseID", (gogoproto.moretags) = "yaml:\"LeaseID\"" ]; - repeated akash.manifest.v2beta2.Group manifest = 2 [ - (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/manifest/v2beta2.Manifest", + repeated akash.manifest.v2beta3.Group manifest = 2 [ + (gogoproto.castrepeated) = "pkg.akt.dev/go/manifest/v2beta3.Manifest", (gogoproto.nullable) = false, (gogoproto.customname) = "Manifest", (gogoproto.jsontag) = "manifest", @@ -108,7 +109,7 @@ message SendManifestResponse {} // ServiceLogsRequest message ServiceLogsRequest { - akash.market.v1beta4.LeaseID lease_id = 1 [ + akash.market.v1.LeaseID lease_id = 1 [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "LeaseID", (gogoproto.moretags) = "yaml:\"LeaseID\"" @@ -141,7 +142,7 @@ message ServiceLogsResponse { // ShellRequest message ShellRequest { - akash.market.v1beta4.LeaseID lease_id = 1 [ + akash.market.v1.LeaseID lease_id = 1 [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "LeaseID", (gogoproto.moretags) = "yaml:\"LeaseID\"" @@ -150,7 +151,7 @@ message ShellRequest { // ServiceStatusRequest message ServiceStatusRequest { - akash.market.v1beta4.LeaseID lease_id = 1 [ + akash.market.v1.LeaseID lease_id = 1 [ (gogoproto.nullable) = false, (gogoproto.jsontag) = "LeaseID", (gogoproto.moretags) = "yaml:\"LeaseID\"" diff --git a/proto/provider/akash/provider/v1/service.proto b/proto/provider/akash/provider/v1/service.proto index 1abb2985..b42d6b24 100644 --- a/proto/provider/akash/provider/v1/service.proto +++ b/proto/provider/akash/provider/v1/service.proto @@ -3,9 +3,10 @@ package akash.provider.v1; import "google/protobuf/empty.proto"; import "google/api/annotations.proto"; + import "akash/provider/v1/status.proto"; -option go_package = "github.com/akash-network/akash-api/go/provider/v1"; +option go_package = "pkg.akt.dev/go/provider/v1"; // ProviderRPC defines the RPC server for provider service ProviderRPC { diff --git a/proto/provider/akash/provider/v1/status.proto b/proto/provider/akash/provider/v1/status.proto index d0a304e7..397cabb7 100644 --- a/proto/provider/akash/provider/v1/status.proto +++ b/proto/provider/akash/provider/v1/status.proto @@ -1,12 +1,11 @@ 
syntax = "proto3"; package akash.provider.v1; -import "google/protobuf/timestamp.proto"; import "gogoproto/gogo.proto"; import "akash/inventory/v1/cluster.proto"; -import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s/io/apimachinery/pkg/api/resource/generated.proto"; -option go_package = "github.com/akash-network/akash-api/go/provider/v1"; +option go_package = "pkg.akt.dev/go/provider/v1"; // ResourceMetrics message ResourcesMetric { @@ -177,11 +176,11 @@ message Status { (gogoproto.moretags) = "yaml:\"public_hostnames\"" ]; - google.protobuf.Timestamp timestamp = 6 [ - (gogoproto.nullable) = false, - (gogoproto.stdtime) = true, - (gogoproto.customname) = "Timestamp", - (gogoproto.jsontag) = "timestamp", - (gogoproto.moretags) = "yaml:\"timestamp\"" - ]; +// google.protobuf.Timestamp timestamp = 6 [ +// (gogoproto.nullable) = false, +// (gogoproto.stdtime) = true, +// (gogoproto.customname) = "Timestamp", +// (gogoproto.jsontag) = "timestamp", +// (gogoproto.moretags) = "yaml:\"timestamp\"" +// ]; } diff --git a/proto/provider/buf.gen.doc.yaml b/proto/provider/buf.gen.doc.yaml new file mode 100644 index 00000000..eeb85a3c --- /dev/null +++ b/proto/provider/buf.gen.doc.yaml @@ -0,0 +1,6 @@ +version: v1 +plugins: + - name: doc + strategy: all + out: ./docs/proto + opt: ./docs/protodoc-markdown.tmpl,provider.md diff --git a/proto/provider/buf.lock b/proto/provider/buf.lock index c91b5810..78a35732 100644 --- a/proto/provider/buf.lock +++ b/proto/provider/buf.lock @@ -1,2 +1,28 @@ # Generated by buf. DO NOT EDIT. version: v1 +deps: + - remote: buf.build + owner: cosmos + repository: cosmos-proto + commit: 04467658e59e44bbb22fe568206e1f70 + digest: shake256:73a640bd60e0c523b0f8237ff34eab67c45a38b64bbbde1d80224819d272dbf316ac183526bd245f994af6608b025f5130483d0133c5edd385531326b5990466 + - remote: buf.build + owner: cosmos + repository: cosmos-sdk + commit: 954f7b05f38440fc8250134b15adec47 + digest: shake256:2ab4404fd04a7d1d52df0e2d0f2d477a3d83ffd88d876957bf3fedfd702c8e52833d65b3ce1d89a3c5adf2aab512616b0e4f51d8463f07eda9a8a3317ee3ac54 + - remote: buf.build + owner: cosmos + repository: gogo-proto + commit: 88ef6483f90f478fb938c37dde52ece3 + digest: shake256:89c45df2aa11e0cff97b0d695436713db3d993d76792e9f8dc1ae90e6ab9a9bec55503d48ceedd6b86069ab07d3041b32001b2bfe0227fa725dd515ff381e5ba + - remote: buf.build + owner: googleapis + repository: googleapis + commit: 74015a8aeb8445aa9e3e1454cb54bc35 + digest: shake256:7149cf5e9955c692d381e557830555d4e93f205a0f1b8e2dfdae46d029369aa3fc1980e35df0d310f7cc3b622f93e19ad276769a283a967dd3065ddfd3a40e13 + - remote: buf.build + owner: protocolbuffers + repository: wellknowntypes + commit: 3186086b2a8e44d9acdeeef2423c5de7 + digest: shake256:3b9dc2f56d9ed2e4001f95b701985fd803f7e2559b19b6a18d5f4e792cfdde320e765638de69fff037edc202b0006532d7ff19eab9465526b5ec628e4a5e5a1a diff --git a/proto/provider/buf.yaml b/proto/provider/buf.yaml index 3c1f7e5b..05bf8c9e 100644 --- a/proto/provider/buf.yaml +++ b/proto/provider/buf.yaml @@ -3,10 +3,11 @@ version: v1 name: buf.build/akash-network/provider deps: - buf.build/akash-network/node -# - buf.build/cosmos/cosmos-proto -# - buf.build/cosmos/cosmos-sdk -# - buf.build/cosmos/gogo-proto -# - buf.build/googleapis/googleapis + - buf.build/cosmos/gogo-proto + - buf.build/cosmos/cosmos-sdk:v0.47.0 + - buf.build/cosmos/cosmos-proto + - buf.build/googleapis/googleapis + - buf.build/protocolbuffers/wellknowntypes lint: allow_comment_ignores: true use: diff --git a/script/protoc-gen-swagger.sh 
b/script/protoc-gen-swagger.sh deleted file mode 100755 index 76e64376..00000000 --- a/script/protoc-gen-swagger.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash - -set -eo pipefail - -workdir=./.cache/tmp/swagger-gen - -function cleanup { - rm -rf "$workdir" -} - -# clean swagger files -#trap cleanup EXIT - -proto_files=$(find ./proto -type f \( -name 'service.proto' -o -name "query.proto" \) -print0 | xargs -0 -n1 | sort | uniq) - -for file in $proto_files; do - buf generate --template buf.gen.swagger.yaml "$file" -done - -mkdir -p ./docs/swagger-ui - -# combine swagger files -# uses nodejs package `swagger-combine`. -# all the individual swagger files need to be configured in `config.json` for merging -swagger-combine \ - ./docs/config.yaml \ - -o ./docs/swagger-ui/swagger.yaml \ - --continueOnConflictingPaths=true \ - --includeDefinitions=true diff --git a/script/protocgen-legacy.sh b/script/protocgen-legacy.sh deleted file mode 100755 index 47640598..00000000 --- a/script/protocgen-legacy.sh +++ /dev/null @@ -1,155 +0,0 @@ -#!/usr/bin/env bash - -set -eo pipefail - -PATH=$(pwd)/.cache/bin/legacy:$PATH -export PATH=$PATH - -function cleanup { - # put absolute path - rm -rf "${AKASH_ROOT}/github.com" - rm -rf "$AKASH_DEVCACHE_TS_TMP_GRPC_JS" - rm -rf "${AKASH_DEVCACHE_TS_TMP_PATCHES}" -} - -trap cleanup EXIT ERR - -mkdir -p "${AKASH_DEVCACHE_TS_TMP_GRPC_JS}" -mkdir -p "${AKASH_DEVCACHE_TS_TMP_PATCHES}" - -script/ts-patches.sh preserve - -ts_generated="${AKASH_TS_ROOT}/src/generated" -rm -rf "$ts_generated" -mkdir -p "$ts_generated" - -proto_dirs=$(find ./proto/node -path -prune -o -name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq) -#shellcheck disable=SC2046 -for dir in $proto_dirs; do - .cache/bin/protoc \ - -I "proto/node" \ - -I ".cache/include/google/protobuf" \ - -I "vendor/github.com/cosmos/cosmos-sdk/proto" \ - -I "vendor/github.com/cosmos/cosmos-sdk/third_party/proto" \ - --gocosmos_out=plugins=interfacetype+grpc,Mgoogle/protobuf/any.proto=github.com/cosmos/cosmos-sdk/codec/types:. \ - $(find "${dir}" -maxdepth 1 -name '*.proto') - - # command to generate gRPC gateway (*.pb.gw.go in respective modules) files - .cache/bin/protoc \ - -I "proto/node" \ - -I ".cache/include" \ - -I "vendor/github.com/cosmos/cosmos-sdk/proto" \ - -I "vendor/github.com/cosmos/cosmos-sdk/third_party/proto" \ - --grpc-gateway_out=logtostderr=true:. 
\ - $(find "${dir}" -maxdepth 1 -name '*.proto') - - .cache/bin/protoc \ - -I ".cache/include/google/protobuf" \ - -I "proto/node" \ - -I "vendor/github.com/cosmos/cosmos-sdk/proto" \ - -I "vendor/github.com/cosmos/cosmos-sdk/third_party/proto" \ - --plugin="${AKASH_TS_NODE_BIN}/protoc-gen-ts_proto" \ - --ts_proto_out="${AKASH_TS_ROOT}/src/generated" \ - --ts_proto_opt="esModuleInterop=true,forceLong=long,outputTypeRegistry=true,useExactTypes=false,outputIndex=true" \ - $(find "${dir}" -maxdepth 1 -name '*.proto') - - .cache/bin/protoc \ - -I ".cache/include/google/protobuf" \ - -I "proto/node" \ - -I "vendor/github.com/cosmos/cosmos-sdk/proto" \ - -I "vendor/github.com/cosmos/cosmos-sdk/third_party/proto" \ - --plugin="${AKASH_TS_NODE_BIN}/protoc-gen-ts_proto" \ - --ts_proto_out="$AKASH_DEVCACHE_TS_TMP_GRPC_JS" \ - --ts_proto_opt="esModuleInterop=true,forceLong=long,outputTypeRegistry=true,useExactTypes=false,outputServices=grpc-js" \ - $(find "${dir}" -maxdepth 1 -name '*.proto') -done - -proto_dirs=$(find ./proto/provider -path -prune -o -name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq) -#shellcheck disable=SC2046 -for dir in $proto_dirs; do - .cache/bin/protoc \ - -I "proto/provider" \ - -I "proto/node" \ - -I "vendor/github.com/cosmos/cosmos-sdk/proto" \ - -I "vendor/github.com/cosmos/cosmos-sdk/third_party/proto" \ - -I "vendor" \ - --gocosmos_out=plugins=interfacetype+grpc,Mgoogle/protobuf/any.proto=github.com/cosmos/cosmos-sdk/codec/types:. \ - $(find "${dir}" -maxdepth 1 -name '*.proto') - - # command to generate gRPC gateway (*.pb.gw.go in respective modules) files - .cache/bin/protoc \ - -I "proto/provider" \ - -I "proto/node" \ - -I ".cache/include" \ - -I "vendor/github.com/cosmos/cosmos-sdk/proto" \ - -I "vendor/github.com/cosmos/cosmos-sdk/third_party/proto" \ - --grpc-gateway_out=logtostderr=true:. 
\ - $(find "${dir}" -maxdepth 1 -name '*.proto') - - .cache/bin/protoc \ - -I "proto/provider" \ - -I ".cache/include" \ - -I "proto/node" \ - -I "vendor/github.com/cosmos/cosmos-sdk/proto" \ - -I "vendor/github.com/cosmos/cosmos-sdk/third_party/proto" \ - --plugin="${AKASH_TS_NODE_BIN}/protoc-gen-ts_proto" \ - --ts_proto_out="${AKASH_TS_ROOT}/src/generated" \ - --ts_proto_opt="esModuleInterop=true,forceLong=long,outputTypeRegistry=true,useExactTypes=false,outputIndex=true" \ - $(find "${dir}" -maxdepth 1 -name '*.proto') - - .cache/bin/protoc \ - -I "proto/provider" \ - -I ".cache/include" \ - -I "proto/node" \ - -I "vendor/github.com/cosmos/cosmos-sdk/proto" \ - -I "vendor/github.com/cosmos/cosmos-sdk/third_party/proto" \ - --plugin="${AKASH_TS_NODE_BIN}/protoc-gen-ts_proto" \ - --ts_proto_out="$AKASH_DEVCACHE_TS_TMP_GRPC_JS" \ - --ts_proto_opt="esModuleInterop=true,forceLong=long,outputTypeRegistry=true,useExactTypes=false,outputServices=grpc-js" \ - $(find "${dir}" -maxdepth 1 -name '*.proto') -done - -# merge generated grpc-js services to the main generated directory -ts_grpc_js_services=$(find "$AKASH_DEVCACHE_TS_TMP_GRPC_JS" -name 'service.ts') - -for file in $ts_grpc_js_services; do - dest_path=$(dirname "${file/$AKASH_DEVCACHE_TS_TMP_GRPC_JS/$AKASH_TS_ROOT\/src\/generated}") - dest_file="${dest_path}/service.grpc-js.ts" - - mv "$file" "$dest_file" - - path_from_gen_dir=${dest_file#"${AKASH_TS_ROOT}/src/generated/"} - index_file_name_base=${path_from_gen_dir%/service.grpc-js.ts} - index_file_name="index.${index_file_name_base//\//.}.grpc-js.ts" - index_file_path="${AKASH_TS_ROOT}/src/generated/$index_file_name" - export_statement="export * from \"./${path_from_gen_dir%.ts}\";" - - echo "$export_statement" > "$index_file_path" -done - -# move proto files to the right places -cp -rv github.com/akash-network/akash-api/* ./ - -# shellcheck disable=SC2046 -.cache/bin/protoc \ - -I "proto/node" \ - -I "vendor/github.com/cosmos/cosmos-sdk/proto" \ - -I "vendor/github.com/cosmos/cosmos-sdk/third_party/proto" \ - --doc_out=./docs/proto \ - --doc_opt=./docs/protodoc-markdown.tmpl,node.md \ - $(find "./proto/node" -maxdepth 4 -name '*.proto') - -# shellcheck disable=SC2046 -.cache/bin/protoc \ - -I "proto/provider" \ - -I "proto/node" \ - -I "vendor/github.com/cosmos/cosmos-sdk/proto" \ - -I "vendor/github.com/cosmos/cosmos-sdk/third_party/proto" \ - -I "vendor" \ - --doc_out=./docs/proto \ - --doc_opt=./docs/protodoc-markdown.tmpl,provider.md \ - $(find "./proto/provider" -maxdepth 4 -name '*.proto') - -script/ts-patches.sh restore - -npm run format --prefix "$AKASH_TS_ROOT" diff --git a/script/protocgen.sh b/script/protocgen.sh index 7c05ab6e..adeee591 100755 --- a/script/protocgen.sh +++ b/script/protocgen.sh @@ -1,23 +1,203 @@ #!/usr/bin/env bash -set -eo pipefail +set -e +set -o pipefail + +if [[ $# -lt 1 ]]; then + echo "invalid number of parameters" + exit 1 +fi PATH=$(pwd)/.cache/bin:$PATH export PATH=$PATH -function cleanup { - rm -rf github.com +function gen() { + proto_dirs=$(find ./proto -path -prune -o -name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq) + + for dir in $proto_dirs; do + while IFS= read -r -d '' file; do + buf generate --template "$1" "$file" + done < <(find "${dir}" -maxdepth 1 -name '*.proto' -print0) + done +} + +function gen_go() { + if [[ $# -ne 2 ]]; then + echo "invalid number of parameters" + exit 1 + fi + + IFS=/ read -r pkg_domain _ <<<"$1" + + function cleanup_go() { + rm -rf "$pkg_domain" + } + + trap cleanup_go EXIT ERR + + gen 
buf.gen.go.yaml + + set -x + # shellcheck disable=SC2086 + cp -r ./$1/* ./$2/ } -trap cleanup EXIT +function gen_pulsar() { + gen buf.gen.pulsar.yaml +} + +function ts_patches() { + local generated_dir + local tmp_dir + + generated_dir="$AKASH_TS_ROOT/src/generated" + tmp_dir="$AKASH_DEVCACHE_TMP_TS_PATCHES" -proto_dirs=$(find ./proto -path -prune -o -name '*.proto' -print0 | xargs -0 -n1 dirname | sort | uniq) -#shellcheck disable=SC2046 -for dir in $proto_dirs; do - while IFS= read -r -d '' file; do - buf generate --template buf.gen.gogo.yaml "$file" - done < <(find "${dir}" -maxdepth 1 -name '*.proto' -print0) -done + if [ ! -d "$generated_dir" ]; then + return 0 + fi + + function cleanup() { + rm -rf "$tmp_dir" + } + + case $1 in + preserve) + echo "Preserving TypeScript patches..." + + find "$generated_dir" -type f -name "*.original.ts" | while read -r src_file; do + local gen_dir + local file + local tmp_file + + src_file=${src_file//.original/} + gen_dir=$(dirname "$src_file") + gen_dir=${gen_dir//$generated_dir\//} + file=$(basename "$src_file" .original.ts) + mkdir -p "$tmp_dir/$gen_dir" + tmp_file="$tmp_dir/$gen_dir/$file" + + echo "Preserving $src_file to $tmp_file" + cp "$src_file" "$tmp_file" + done + + ;; + restore) + trap cleanup EXIT ERR + + echo "Restoring TypeScript patches..." + + find "$tmp_dir" -type f -name "*.ts" | while read -r src_file; do + local original_file_path + local renamed_original_file_path + + original_file_path=${src_file/$tmp_dir\//} + renamed_original_file_path="${original_file_path/.ts/.original.ts}" + + echo "Restoring $original_file_path to $generated_dir/$original_file_path" + + mv "$generated_dir/$original_file_path" "$generated_dir/$renamed_original_file_path" + mv "$tmp_dir/$original_file_path" "$generated_dir/$original_file_path" + done + ;; + *) + echo "Invalid argument. Use 'preserve' or 'restore'." 
+ exit 1 + ;; + esac +} + +function gen_ts() { + function cleanup_ts { + rm -rf "${AKASH_DEVCACHE_TMP_TS_GRPC_JS}" + rm -rf "${AKASH_DEVCACHE_TMP_TS_PATCHES}" + } + + trap cleanup_ts EXIT ERR + + mkdir -p "${AKASH_DEVCACHE_TMP_TS_GRPC_JS}" + mkdir -p "${AKASH_DEVCACHE_TMP_TS_PATCHES}" + + ts_patches preserve + + gen buf.gen.ts.yaml + + local ts_grpc_js_services + + # merge generated grpc-js services to the main generated directory + ts_grpc_js_services=$(find "$AKASH_DEVCACHE_TMP_TS_GRPC_JS" -name 'service.ts') + + for file in $ts_grpc_js_services; do + dest_path=$(dirname "${file/$AKASH_DEVCACHE_TMP_TS_GRPC_JS/$AKASH_TS_ROOT\/src\/generated}") + dest_file="${dest_path}/service.grpc-js.ts" + + mv "$file" "$dest_file" + + path_from_gen_dir=${dest_file#"${AKASH_TS_ROOT}/src/generated/"} + index_file_name_base=${path_from_gen_dir%/service.grpc-js.ts} + index_file_name="index.${index_file_name_base//\//.}.grpc-js.ts" + index_file_path="${AKASH_TS_ROOT}/src/generated/$index_file_name" + export_statement="export * from \"./${path_from_gen_dir%.ts}\";" + + echo "$export_statement" >"$index_file_path" + done + + ts_patches restore + + npm run format --prefix "$AKASH_TS_ROOT" +} + +function gen_doc() { + local workdir + + workdir=$AKASH_DEVCACHE_TMP/swagger-gen + + function cleanup { + rm -rf "$workdir" + } + + # clean swagger files + trap cleanup EXIT ERR + + buf generate --template ./proto/node/buf.gen.doc.yaml ./proto/node 2>/dev/null + buf generate --template ./proto/provider/buf.gen.doc.yaml ./proto/provider 2>/dev/null + + proto_files=$(find ./proto -type f \( -name 'service.proto' -o -name "query.proto" \) -print0 | xargs -0 -n1 | sort | uniq) + for file in $proto_files; do + buf generate --template buf.gen.swagger.yaml "$file" + done + + mkdir -p ./docs/swagger-ui + + # combine swagger files + # uses nodejs package `swagger-combine`. 
+ # all the individual swagger files need to be configured in `config.json` for merging + swagger-combine \ + ./docs/config.yaml \ + -o ./docs/swagger-ui/swagger.yaml \ + -f yaml \ + --continueOnConflictingPaths=true \ + --includeDefinitions=true +} -# move proto files to the right places -cp -rv github.com/akash-network/akash-api/* ./ +case $1 in + go) + shift + gen_go "$@" + ;; + pulsar) + shift + gen_pulsar "$@" + ;; + ts) + shift + gen_ts + ;; + doc) + gen_doc + ;; + *) + echo "Invalid argument" + exit 1 + ;; +esac diff --git a/script/semver.sh b/script/semver.sh index 731ccaeb..768cec25 100755 --- a/script/semver.sh +++ b/script/semver.sh @@ -1,256 +1,131 @@ #!/usr/bin/env bash +# SPDX-License-Identifier: Apache-2.0 set -o errexit -o nounset -o pipefail -SEMVER_REGEX="^[v|V]?(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$" +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" -SEMVER_REGEX_LEGACY="^[v|V]?(0|[1-9][0-9]*)\\.(0|[1-9][0-9]*)(\\.0|[1-9][0-9]*)?(\\-[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?(\\+[0-9A-Za-z-]+(\\.[0-9A-Za-z-]+)*)?$" +source "$SCRIPT_DIR/semver_funcs.sh" PROG=semver -PROG_VERSION=2.2.0 +PROG_VERSION="3.4.0" USAGE="\ Usage: - $PROG bump (major|minor|patch|release|prerel |build ) + $PROG bump major + $PROG bump minor + $PROG bump patch + $PROG bump prerel|prerelease [] + $PROG bump build + $PROG bump release + $PROG get major + $PROG get minor + $PROG get patch + $PROG get prerel|prerelease + $PROG get build + $PROG get release $PROG compare - $PROG get (major|minor|patch|release|prerel|build) + $PROG diff + $PROG validate $PROG --help $PROG --version -Arguments: - A version must match the following regex pattern: - \"${SEMVER_REGEX}\". - In english, the version must match X.Y.Z(-PRERELEASE)(+BUILD) - where X, Y and Z are positive integers, PRERELEASE is an optional - string composed of alphanumeric characters and hyphens and - BUILD is also an optional string composed of alphanumeric - characters and hyphens. - See definition. - String that must be composed of alphanumeric characters and hyphens. - String that must be composed of alphanumeric characters and hyphens. -Options: - -v, --version Print the version of this tool. - -h, --help Print this help message. -Commands: - bump Bump by one of major, minor, patch, prerel, build - or a forced potentially conflicting version. The bumped version is - shown to stdout. - compare Compare with , output to stdout the - following values: -1 if is newer, 0 if equal, 1 if - older. - get Extract given part of , where part is one of major, minor, - patch, prerel, build. 
- validate Check version string is valid" - - -function error { - echo -e "$1" >&2 - exit 1 -} - -function usage-help { - error "$USAGE" -} - -function usage-version { - echo -e "${PROG}: $PROG_VERSION" - exit 0 -} - -function validate-version { - local version=$1 - if [[ "$version" =~ $SEMVER_REGEX ]]; then - # if a second argument is passed, store the result in var named by $2 - if [[ "$#" -eq "2" ]]; then - local major=${BASH_REMATCH[1]} - local minor=${BASH_REMATCH[2]} - local patch=${BASH_REMATCH[3]} - local prere=${BASH_REMATCH[4]} - local build=${BASH_REMATCH[6]} - eval "$2=(\"${major}\" \"${minor}\" \"${patch}\" \"${prere}\" \"${build}\")" - else - echo "$version" - fi - elif [[ "$version" =~ $SEMVER_REGEX_LEGACY ]]; then - # if a second argument is passed, store the result in var named by $2 - if [[ "$#" -eq "2" ]]; then - local major=${BASH_REMATCH[1]} - local minor=${BASH_REMATCH[2]} - local patch=0 - local prere=${BASH_REMATCH[4]} - local build=${BASH_REMATCH[6]} - eval "$2=(\"${major}\" \"${minor}\" \"${patch}\" \"${prere}\" \"${build}\")" - else - echo "$version" - fi - else - error "version $version does not match the semver scheme 'X.Y.Z(-PRERELEASE)(+BUILD)'. See help for more information." - fi -} - -function compare-version { - validate-version "$1" V - validate-version "$2" V_ - - # MAJOR, MINOR and PATCH should compare numerically - for i in 0 1 2; do - local diff=$((${V[$i]} - ${V_[$i]})) - if [[ ${diff} -lt 0 ]]; then - echo -1; - return 0 - elif [[ ${diff} -gt 0 ]]; then - echo 1; - return 0 - fi - done - # PREREL should compare with the ASCII order. - if [[ -z "${V[3]}" ]] && [[ -n "${V_[3]}" ]]; then - echo -1; - return 0; - elif [[ -n "${V[3]}" ]] && [[ -z "${V_[3]}" ]]; then - echo 1; - return 0; - elif [[ -n "${V[3]}" ]] && [[ -n "${V_[3]}" ]]; then - if [[ "${V[3]}" > "${V_[3]}" ]]; then - echo 1; - return 0; - elif [[ "${V[3]}" < "${V_[3]}" ]]; then - echo -1; - return 0; - fi - fi - - echo 0 -} - -function command-bump { - local new; - local version; - local sub_version; - local command; - - case $# in - 2) - case $1 in - major | minor | patch | release) - command=$1; - version=$2 ;; - *) - usage-help ;; - esac ;; - 3) - case $1 in - prerel | build) - command=$1; - sub_version=$2 version=$3 ;; - *) - usage-help ;; - esac ;; - *) - usage-help ;; - esac - - validate-version "$version" parts - # shellcheck disable=SC2154 - local major="${parts[0]}" - local minor="${parts[1]}" - local patch="${parts[2]}" - local prere="${parts[3]}" - local build="${parts[4]}" +Arguments: + A version must match the following regular expression: + \"${SEMVER_REGEX}\" + In English: + -- The version must match X.Y.Z[-PRERELEASE][+BUILD] + where X, Y and Z are non-negative integers. + -- PRERELEASE is a dot separated sequence of non-negative integers and/or + identifiers composed of alphanumeric characters and hyphens (with + at least one non-digit). Numeric identifiers must not have leading + zeros. A hyphen (\"-\") introduces this optional part. + -- BUILD is a dot separated sequence of identifiers composed of alphanumeric + characters and hyphens. A plus (\"+\") introduces this optional part. 
- case "$command" in - major) - new="$((major + 1)).0.0" ;; - minor) - new="${major}.$((minor + 1)).0" ;; - patch) - new="${major}.${minor}.$((patch + 1))" ;; - release) - new="${major}.${minor}.${patch}" ;; - prerel) - new=$(validate-version "${major}.${minor}.${patch}-${sub_version}") ;; - build) - new=$(validate-version "${major}.${minor}.${patch}${prere}+${sub_version}") ;; - *) - usage-help ;; - esac + See definition. - echo "$new" - exit 0 -} + A string as defined by PRERELEASE above. Or, it can be a PRERELEASE + prototype string followed by a dot. -function command-compare { - local v; - local v_; + A string as defined by BUILD above. - case $# in - 2) - v=$(validate-version "$1"); - v_=$(validate-version "$2") ;; - *) - usage-help ;; - esac +Options: + -v, --version Print the version of this tool. + -h, --help Print this help message. - compare-version "$v" "$v_" - exit 0 +Commands: + bump Bump by one of major, minor, patch; zeroing or removing + subsequent parts. \"bump prerel\" (or its synonym \"bump prerelease\") + sets the PRERELEASE part and removes any BUILD part. A trailing dot + in the argument introduces an incrementing numeric field + which is added or bumped. If no argument is provided, an + incrementing numeric field is introduced/bumped. \"bump build\" sets + the BUILD part. \"bump release\" removes any PRERELEASE or BUILD parts. + The bumped version is written to stdout. + + get Extract given part of , where part is one of major, minor, + patch, prerel (alternatively: prerelease), build, or release. + + compare Compare with , output to stdout the + following values: -1 if is newer, 0 if equal, 1 if + older. The BUILD part is not used in comparisons. + + diff Compare with , output to stdout the + difference between two versions by the release type (MAJOR, MINOR, + PATCH, PRERELEASE, BUILD). + + validate Validate if follows the SEMVER pattern (see + definition). Print 'valid' to stdout if the version is valid, otherwise + print 'invalid'. 
+ +See also: + https://semver.org -- Semantic Versioning 2.0.0" + +function usage_help { + error "$USAGE" } -# shellcheck disable=SC2034 -function command-get { - local part version - - if [[ "$#" -ne "2" ]] || [[ -z "$1" ]] || [[ -z "$2" ]]; then - usage-help - fi - - part="$1" - version="$2" - - validate-version "$version" parts - local major="${parts[0]}" - local minor="${parts[1]}" - local patch="${parts[2]}" - local prerel="${parts[3]:1}" - local build="${parts[4]:1}" - - case "$part" in - "major-minor") - echo "$major.$minor" - ;; - major | minor | patch | release | prerel | build) - echo "${!part}" ;; - *) - usage-help ;; - esac - - exit 0 +function usage_version { + echo -e "${PROG}: $PROG_VERSION" + exit 0 } case $# in - 0) - echo "Unknown command: $*"; - usage-help ;; + 0) + echo "Unknown command: $*" + usage_help + ;; esac case $1 in - --help | -h) - echo -e "$USAGE"; - exit 0 ;; - --version | -v) - usage-version ;; - bump) - shift; - command-bump "$@" ;; - get) - shift; - command-get "$@" ;; - compare) - shift; - command-compare "$@" ;; - validate) - shift; - validate-version "$@" V ;; - *) - echo "Unknown arguments: $*"; - usage-help ;; + --help | -h) + echo -e "$USAGE" + exit 0 + ;; + --version | -v) usage_version ;; + bump) + shift + command_bump "$@" + ;; + get) + shift + command_get "$@" + ;; + compare) + shift + command_compare "$@" + ;; + diff) + shift + command_diff "$@" + ;; + validate) + shift + command_validate "$@" + ;; + *) + echo "Unknown arguments: $*" + usage_help + ;; esac diff --git a/script/semver_funcs.sh b/script/semver_funcs.sh new file mode 100755 index 00000000..bd4ece5a --- /dev/null +++ b/script/semver_funcs.sh @@ -0,0 +1,417 @@ +#!/usr/bin/env bash + +NAT='0|[1-9][0-9]*' +ALPHANUM='[0-9]*[A-Za-z-][0-9A-Za-z-]*' +IDENT="$NAT|$ALPHANUM" +FIELD='[0-9A-Za-z-]+' + +SEMVER_REGEX_STR="\ +[vV]?\ +($NAT)\\.($NAT)\\.($NAT)\ +(\\-(${IDENT})(\\.(${IDENT}))*)?\ +(\\+${FIELD}(\\.${FIELD})*)?$" + +SEMVER_REGEX_LEGACY="\ +[vV]?\ +($NAT)\\.($NAT)(\\.($NAT))?\ +(\\-(${IDENT})(\\.(${IDENT}))*)?\ +(\\+${FIELD}(\\.${FIELD})*)?$" + +SEMVER_REGEX="^$SEMVER_REGEX_STR" + +function error { + echo -e "$1" >&2 + exit 1 +} + +# normalize the "part" keywords to a canonical string. At present, +# only "prerelease" is normalized to "prerel". + +function normalize_part { + if [ "$1" == "prerelease" ]; then + echo "prerel" + else + echo "$1" + fi +} + +function validate_version { + local version=$1 + if [[ "$version" =~ $SEMVER_REGEX ]]; then + # if a second argument is passed, store the result in var named by $2 + if [ "$#" -eq "2" ]; then + local major=${BASH_REMATCH[1]} + local minor=${BASH_REMATCH[2]} + local patch=${BASH_REMATCH[3]} + local prere=${BASH_REMATCH[4]} + local build=${BASH_REMATCH[8]} + eval "$2=(\"$major\" \"$minor\" \"$patch\" \"$prere\" \"$build\")" + else + echo "$version" + fi + elif [[ "$version" =~ $SEMVER_REGEX_LEGACY ]]; then + # if a second argument is passed, store the result in var named by $2 + if [[ "$#" -eq "2" ]]; then + local major=${BASH_REMATCH[1]} + local minor=${BASH_REMATCH[2]} + local patch=0 + local prere=${BASH_REMATCH[4]} + local build=${BASH_REMATCH[6]} + eval "$2=(\"${major}\" \"${minor}\" \"${patch}\" \"${prere}\" \"${build}\")" + else + echo "$version" + fi + else + error "version $version does not match the semver scheme 'X.Y.Z(-PRERELEASE)(+BUILD)'. See help for more information." 
+ fi +} + +function is_nat { + [[ "$1" =~ ^($NAT)$ ]] +} + +function is_null { + [ -z "$1" ] +} + +function order_nat { + [ "$1" -lt "$2" ] && { + echo -1 + return + } + [ "$1" -gt "$2" ] && { + echo 1 + return + } + echo 0 +} + +function order_string { + [[ $1 < $2 ]] && { + echo -1 + return + } + [[ $1 > $2 ]] && { + echo 1 + return + } + echo 0 +} + +# given two (named) arrays containing NAT and/or ALPHANUM fields, compare them +# one by one according to semver 2.0.0 spec. Return -1, 0, 1 if left array ($1) +# is less-than, equal, or greater-than the right array ($2). The longer array +# is considered greater-than the shorter if the shorter is a prefix of the longer. +# +function compare_fields { + local l="$1[@]" + local r="$2[@]" + local leftfield=("${!l}") + local rightfield=("${!r}") + local left + local right + + local i=$((-1)) + local order=$((0)) + + while true; do + # shellcheck disable=SC2086 + [ $order -ne 0 ] && { + echo "$order" + return + } + + : $((i++)) + left="${leftfield[$i]}" + right="${rightfield[$i]}" + + is_null "$left" && is_null "$right" && { + echo 0 + return + } + is_null "$left" && { + echo -1 + return + } + is_null "$right" && { + echo 1 + return + } + + is_nat "$left" && is_nat "$right" && { + order=$(order_nat "$left" "$right") + continue + } + is_nat "$left" && { + echo -1 + return + } + is_nat "$right" && { + echo 1 + return + } + { + order=$(order_string "$left" "$right") + continue + } + done +} + +# shellcheck disable=SC2206 # checked by "validate"; ok to expand prerel id's into array +function compare_version { + local order + validate_version "$1" V + validate_version "$2" V_ + + # compare major, minor, patch + + local left=("${V[0]}" "${V[1]}" "${V[2]}") + local right=("${V_[0]}" "${V_[1]}" "${V_[2]}") + + order=$(compare_fields left right) + [ "$order" -ne 0 ] && { + echo "$order" + return + } + + # compare pre-release ids when M.m.p are equal + + local prerel="${V[3]:1}" + local prerel_="${V_[3]:1}" + local left=(${prerel//./ }) + local right=(${prerel_//./ }) + + # if left and right have no pre-release part, then left equals right + # if only one of left/right has pre-release part, that one is less than simple M.m.p + + [ -z "$prerel" ] && [ -z "$prerel_" ] && { + echo 0 + return + } + [ -z "$prerel" ] && { + echo 1 + return + } + [ -z "$prerel_" ] && { + echo -1 + return + } + + # otherwise, compare the pre-release id's + + compare_fields left right +} + +# render_prerel -- return a prerel field with a trailing numeric string +# usage: render_prerel numeric [prefix-string] +# +function render_prerel { + if [ -z "$2" ]; then + echo "${1}" + else + echo "${2}${1}" + fi +} + +# extract_prerel -- extract prefix and trailing numeric portions of a pre-release part +# usage: extract_prerel prerel prerel_parts +# The prefix and trailing numeric parts are returned in "prerel_parts". 
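A worked precedence check against compare_fields/compare_version above; the outputs assume the functions are invoked through script/semver.sh:

$ ./script/semver.sh compare 1.0.0-alpha 1.0.0-alpha.1     # -1  (a shorter identifier list that is a prefix of the longer sorts lower)
$ ./script/semver.sh compare 1.0.0-alpha.9 1.0.0-alpha.10  # -1  (numeric identifiers compare numerically, not lexically)
$ ./script/semver.sh compare 1.0.0-rc.1+b7 1.0.0-rc.1      # 0   (the BUILD part is ignored in comparisons)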
+# +PREFIX_ALPHANUM='[.0-9A-Za-z-]*[.A-Za-z-]' +DIGITS='[0-9][0-9]*' +EXTRACT_REGEX="^(${PREFIX_ALPHANUM})*(${DIGITS})$" + +function extract_prerel { + local prefix + local numeric + + if [[ "$1" =~ $EXTRACT_REGEX ]]; then # found prefix and trailing numeric parts + prefix="${BASH_REMATCH[1]}" + numeric="${BASH_REMATCH[2]}" + else # no numeric part + prefix="${1}" + numeric= + fi + + eval "$2=(\"$prefix\" \"$numeric\")" +} + +# bump_prerel -- return the new pre-release part based on previous pre-release part +# and prototype for bump +# usage: bump_prerel proto previous +# +function bump_prerel { + local proto + local prev_prefix + local prev_numeric + + # case one: no trailing dot in prototype => simply replace previous with proto + if [[ ! ("$1" =~ \.$) ]]; then + echo "$1" + return + fi + + proto="${1%.}" # discard trailing dot marker from prototype + + extract_prerel "${2#-}" prerel_parts # extract parts of previous pre-release + # shellcheck disable=SC2154 + prev_prefix="${prerel_parts[0]}" + prev_numeric="${prerel_parts[1]}" + + # case two: bump or append numeric to previous pre-release part + if [ "$proto" == "+" ]; then # dummy "+" indicates no prototype argument provided + if [ -n "$prev_numeric" ]; then + : $((++prev_numeric)) # previous pre-release is already numbered, bump it + render_prerel "$prev_numeric" "$prev_prefix" + else + render_prerel 1 "$prev_prefix" # append starting number + fi + return + fi + + # case three: set, bump, or append using prototype prefix + if [ "$prev_prefix" != "$proto" ]; then + render_prerel 1 "$proto" # proto not same pre-release; set and start at '1' + elif [ -n "$prev_numeric" ]; then + : $((++prev_numeric)) # pre-release is numbered; bump it + render_prerel "$prev_numeric" "$prev_prefix" + else + render_prerel 1 "$prev_prefix" # start pre-release at number '1' + fi +} + +function command_bump { + local new + local version + local sub_version + local command + + command="$(normalize_part "$1")" + + case $# in + 2) case "$command" in + major | minor | patch | prerel | release) + sub_version="+." 
+ version=$2 + ;; + *) usage_help ;; + esac ;; + 3) case "$command" in + prerel | build) sub_version=$2 version=$3 ;; + *) usage_help ;; + esac ;; + *) usage_help ;; + esac + + validate_version "$version" parts + # shellcheck disable=SC2154 + local major="${parts[0]}" + local minor="${parts[1]}" + local patch="${parts[2]}" + local prere="${parts[3]}" + local build="${parts[4]}" + + case "$command" in + major) new="$((major + 1)).0.0" ;; + minor) new="${major}.$((minor + 1)).0" ;; + patch) new="${major}.${minor}.$((patch + 1))" ;; + release) new="${major}.${minor}.${patch}" ;; + prerel) new=$(validate_version "${major}.${minor}.${patch}-$(bump_prerel "$sub_version" "$prere")") ;; + build) new=$(validate_version "${major}.${minor}.${patch}${prere}+${sub_version}") ;; + *) usage_help ;; + esac + + echo "$new" + exit 0 +} + +function command_compare { + local v + local v_ + + case $# in + 2) + v=$(validate_version "$1") + v_=$(validate_version "$2") + ;; + *) usage_help ;; + esac + + set +u # need unset array element to evaluate to null + compare_version "$v" "$v_" + exit 0 +} + +function command_diff { + validate_version "$1" v1_parts + # shellcheck disable=SC2154 + local v1_major="${v1_parts[0]}" + local v1_minor="${v1_parts[1]}" + local v1_patch="${v1_parts[2]}" + local v1_prere="${v1_parts[3]}" + local v1_build="${v1_parts[4]}" + + validate_version "$2" v2_parts + # shellcheck disable=SC2154 + local v2_major="${v2_parts[0]}" + local v2_minor="${v2_parts[1]}" + local v2_patch="${v2_parts[2]}" + local v2_prere="${v2_parts[3]}" + local v2_build="${v2_parts[4]}" + + if [ "${v1_major}" != "${v2_major}" ]; then + echo "major" + elif [ "${v1_minor}" != "${v2_minor}" ]; then + echo "minor" + elif [ "${v1_patch}" != "${v2_patch}" ]; then + echo "patch" + elif [ "${v1_prere}" != "${v2_prere}" ]; then + echo "prerelease" + elif [ "${v1_build}" != "${v2_build}" ]; then + echo "build" + fi +} + +# shellcheck disable=SC2034 +function command_get { + local part version + + if [[ "$#" -ne "2" ]] || [[ -z "$1" ]] || [[ -z "$2" ]]; then + usage_help + exit 0 + fi + + part="$1" + version="$2" + + validate_version "$version" parts + local major="${parts[0]}" + local minor="${parts[1]}" + local patch="${parts[2]}" + local prerel="${parts[3]:1}" + local build="${parts[4]:1}" + local release="${major}.${minor}.${patch}" + + part="$(normalize_part "$part")" + + case "$part" in + major | minor | patch | release | prerel | build) echo "${!part}" ;; + *) usage_help ;; + esac + + exit 0 +} + +function command_validate { + if [[ "$#" -ne "1" ]]; then + usage_help + fi + + if [[ "$1" =~ $SEMVER_REGEX ]]; then + echo "valid" + else + echo "invalid" + fi + + exit 0 +} diff --git a/script/shellcheck.sh b/script/shellcheck.sh index 1b560d3b..64614c77 100644 --- a/script/shellcheck.sh +++ b/script/shellcheck.sh @@ -2,7 +2,14 @@ unset FAILED -FILES=$(find /shellcheck/ -type f -name "*.sh" ! -path "/shellcheck/vendor/*" ! -path "/shellcheck/.git/*" ! -path "/shellcheck/ts/.husky/*") +FILES=$(\ + find /shellcheck/ -type f \ + -name "*.sh" \ + ! -path "/shellcheck/go/vendor/*" \ + ! -path "/shellcheck/.git/*" \ + ! -path "/shellcheck/ts/.husky/*" \ + ! 
-path "/shellcheck/ts/node_modules/*"\ + ) for file in $FILES; do name="$(basename "$file")"; diff --git a/script/tools.sh b/script/tools.sh index 7126c582..5756dffc 100755 --- a/script/tools.sh +++ b/script/tools.sh @@ -1,114 +1,258 @@ #!/usr/bin/env bash -set -o pipefail +set -eo pipefail SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)" SEMVER=$SCRIPT_DIR/semver.sh -gomod="$SCRIPT_DIR/../go.mod" +source "$SCRIPT_DIR/semver_funcs.sh" + +GO_ROOT=$(realpath "$SCRIPT_DIR/../go") + +gomod="$GO_ROOT/go.mod" function get_gotoolchain() { - local gotoolchain - local goversion + local gotoolchain + local goversion + local local_goversion + + set +o pipefail + gotoolchain=$(grep -E '^toolchain go[0-9]{1,}.[0-9]{1,}.[0-9]{1,}$' "$gomod" | cut -d ' ' -f 2 | tr -d '\n') + goversion=$(grep -E '^go [0-9]{1,}.[0-9]{1,}(.[0-9]{1,})?$' "$gomod" | cut -d ' ' -f 2 | tr -d '\n') - gotoolchain=$(grep -E '^toolchain go[0-9]{1,}.[0-9]{1,}.[0-9]{1,}$' < "$gomod" | cut -d ' ' -f 2 | tr -d '\n') + set -o pipefail - if [[ ${gotoolchain} == "" ]]; then - # determine go toolchain from go version in go.mod - if which go > /dev/null 2>&1 ; then - goversion=$(GOTOOLCHAIN=local go version | cut -d ' ' -f 3 | sed 's/go*//' | tr -d '\n') - fi + if [[ ${gotoolchain} == "" ]]; then + # determine go toolchain from go version in go.mod + if which go >/dev/null 2>&1; then + local_goversion=$(GOTOOLCHAIN=local go version | cut -d ' ' -f 3 | sed 's/go*//' | tr -d '\n') + if [[ $($SEMVER compare "v$local_goversion" v"$goversion") -ge 0 ]]; then + goversion=$local_goversion + else + local_goversion= + fi + fi - if [[ $goversion != "" ]] && [[ $($SEMVER compare "v$goversion" v1.21.0) -ge 0 ]]; then - gotoolchain=go${goversion} - else - gotoolchain=go$(grep -E '^go [0-9]{1,}.[0-9]{1,}$' < "$gomod" | cut -d ' ' -f 2 | tr -d '\n').0 - fi - fi + if [[ "$local_goversion" == "" ]]; then + goversion=$(curl -s "https://go.dev/dl/?mode=json&include=all" \ + | jq -r --arg regexp "^go$goversion" '.[] | select(.stable == true) | select(.version | match($regexp)) | .version' \ + | head -n 1 \ + | sed -e s/^go//) + fi - echo -n "$gotoolchain" + if [[ $goversion != "" ]] && [[ $($SEMVER compare "v$goversion" v1.21.0) -ge 0 ]]; then + gotoolchain=go${goversion} + else + gotoolchain=go$(grep -E '^go [0-9]{1,}.[0-9]{1,}$' <"$gomod" | cut -d ' ' -f 2 | tr -d '\n').0 + fi + fi + + echo -n "$gotoolchain" } replace_paths() { - local file="${1}" - local cversion="${2}" - local nversion="${3}" - local sedcmd=sed + local file="${1}" + local cimport="${2}" + local nimport="${3}" + local sedcmd=sed - if [[ "$OSTYPE" == "darwin"* ]]; then - sedcmd=gsed - fi + if [[ "$OSTYPE" == "darwin"* ]]; then + sedcmd=gsed + fi - $sedcmd -ri "s/github.com\/akash-network\/node\/(v${cversion})?/github.com\/akash-network\/node\/v${nversion}\//g" "${file}" + $sedcmd -ri "s~$cimport~$nimport~" "${file}" } function replace_import_path() { - local next_major_version=$1 - local import_path_to_replace - import_path_to_replace=$(go list -m) - - local version_to_replace - version_to_replace=$(echo "$import_path_to_replace" | sed -n 's/.*v\([0-9]*\).*/\1/p') - - echo "$version_to_replace" - echo Current import paths are "$version_to_replace", replacing with "$next_major_version" - - # list all folders containing Go modules. -# local modules -# modules=$(go list -tags e2e ./... | sed "s/g.*v${version_to_replace}\///") - - while IFS= read -r line; do - modules_to_upgrade_manually+=("$line") - done < <(find . 
-name go.mod -exec grep -l "github.com/akash-network/node" {} \; | grep -v "^./go.mod$" | sed 's|/go.mod||' | sed 's|^./||') - - echo "Replacing import paths in all files" - - declare -a files - - while IFS= read -r line; do - files+=("$line") - done < <(find . -type f -not \(-path "./install.sh" -or -path "./upgrades/software/*" -or -path "./upgrades/heightpatches/*" -or -path "./.cache/*" -or -path "./dist/*" -or -path "./.git*" -or -name "*.md" -or -path "./.idea/*" \)) - -# echo "Updating all files" - - for file in "${files[@]}"; do - if test -f "$file"; then - # skip files that need manual upgrading - for excluded_file in "${modules_to_upgrade_manually[@]}"; do - if [[ "$file" == *"$excluded_file"* ]]; then - continue 2 - fi - done - replace_paths "$file" "$version_to_replace" "$next_major_version" - fi - done - -# exit 0 - -# echo "Updating go.mod and vendoring" - # go.mod -# replace_paths "go.mod" -# go mod tidy >/dev/null -# go mod vendor >/dev/null - - # ensure that generated files are updated. - # N.B.: This must be run after go mod vendor. -# echo "running make proto-gen" -# make proto-gen >/dev/null -# -# echo "Run go mod vendor after proto-gen to avoid vendoring issues" -# go mod vendor >/dev/null -# -# echo "running make run-querygen" -# make run-querygen >/dev/null + local next_major_version=$1 + local curr_module_name + local curr_version + local new_module_name + + curr_module_name=$( + cd go + go list -m + ) + curr_version=$(echo "$curr_module_name" | sed -n 's/.*v\([0-9]*\).*/\1/p') + new_module_name=${curr_module_name%/"v$curr_version"}/$next_major_version + + echo "current import paths are $curr_module_name, replacing with $new_module_name" + + declare -a modules_to_upgrade_manually + + modules_to_upgrade_manually+=("./go/go.mod") + + echo "preparing files to replace" + + declare -a files + + while IFS= read -r line; do + files+=("$line") + done < <(find . -type f -not \( \ + -path "./install.sh" \ + -or -path "./upgrades/*" \ + -or -path "./.cache/*" \ + -or -path "./dist/*" \ + -or -path "./.git*" \ + -or -name "*.md" \ + -or -path "./.idea/*" \)) + + echo "updating all files" + + for file in "${files[@]}"; do + if test -f "$file"; then + # skip files that need manual upgrading + for excluded_file in "${modules_to_upgrade_manually[@]}"; do + if [[ "$file" == *"$excluded_file"* ]]; then + continue 2 + fi + done + + replace_paths "$file" "\"$curr_module_name" "\"$new_module_name" + fi + done + + echo "updating go.mod" + for retract in $( + cd go + go mod edit --json | jq -cr '.Retract | if . 
!= null then .[] else empty end' + ); do + local low + local high + + low=$(jq -r '.Low' <<<"$retract") + high=$(jq -r '.High' <<<"$retract") + echo " dropping retract: [$low, $high]" + go mod edit -dropretract=["$low","$high"] + done + + replace_paths "./go/go.mod" "$curr_module_name" "$new_module_name" +} + +function run_gotest() { + declare -a modules + + modules=("$1") + + if [ -z "$AKASH_ROOT" ]; then + echo "AKASH_ROOT environment variable is not set" + exit 1 + fi + + if [ -z "$GO111MODULE" ]; then + echo "GO111MODULE environment variable is not set" + exit 1 + fi + + # shellcheck disable=SC2068 + for module in ${modules[@]}; do + pushd "$(pwd)" + echo "running tests in $module" + cd "$module" + # shellcheck disable=SC2086 + go test ${2} ${3} + popd + done +} + +function run_golint() { + declare -a modules + + modules=("$1") + dirs="$2" + + # shellcheck disable=SC2068 + for module in ${modules[@]}; do + pushd "$(pwd)" + echo "running lint on $module" + cd "$module" + # shellcheck disable=SC2086 + golangci-lint run --issues-exit-code=0 --timeout=5m "$dirs" + popd + done +} + +function run_gocoverage() { + declare -a modules + + modules=("$1") + + if [ -z "$AKASH_ROOT" ]; then + echo "AKASH_ROOT environment variable is not set" + exit 1 + fi + + if [ -z "$GO111MODULE" ]; then + echo "GO111MODULE environment variable is not set" + exit 1 + fi + + # shellcheck disable=SC2068 + for module in ${modules[@]}; do + pushd "$(pwd)" + cd "$module" + local coverpkgs + + # shellcheck disable=SC2086 + coverpkgs=$(go list ${3} | grep -v mock | paste -sd, -) + + local coverprofile + coverprofile="$AKASH_ROOT/coverage-$(echo "$module" | tr '/' '-').txt" + + # shellcheck disable=SC2086 + go test ${2} -coverprofile="$coverprofile" \ + -covermode=count \ + -coverpkg="$coverpkgs" \ + ${3} + + popd + done +} + +function run_bump_module() { + local cmd + local prefix + local mod_tag + + cmd="$1" + prefix="$2" + mod_tag="$(git describe --abbrev=0 --tags --match "$prefix/v*")" + + if [[ "$mod_tag" =~ $SEMVER_REGEX_STR ]]; then + local nversion + local oversion + + oversion=${BASH_REMATCH[0]} + + nversion=v$($SEMVER bump "$cmd" "$oversion") + git tag -a "$prefix/$nversion" -m "$prefix/$nversion" + else + error "unable to find any tag for module $prefix" + fi } case "$1" in -gotoolchain) - get_gotoolchain - ;; -replace-import-path) - shift - replace_import_path "$@" - ;; + gotoolchain) + get_gotoolchain + ;; + replace-import-path) + shift + replace_import_path "$@" + ;; + gotest) + shift + run_gotest "$@" + ;; + golint) + shift + run_golint "$@" + ;; + gocoverage) + shift + run_gocoverage "$@" + ;; + bump) + shift + run_bump_module "$@" + ;; esac diff --git a/script/ts-patches.sh b/script/ts-patches.sh deleted file mode 100755 index 6ba4e575..00000000 --- a/script/ts-patches.sh +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env bash - -set -eo pipefail - -generated_dir="$AKASH_TS_ROOT/src/generated" -tmp_dir="$AKASH_DEVCACHE_TS_TMP_PATCHES" - -if [ ! -d "$generated_dir" ]; then - echo "Directory $generated_dir does not exist. Skipping..." - exit 0 -fi - -preserve_patches() { - echo "Preserving TypeScript patches..." 
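Illustrative invocations of the new tools.sh subcommands defined above; the module paths and flags are examples only, and gotest/gocoverage additionally expect AKASH_ROOT and GO111MODULE to be exported, as direnv does in this repository:

$ ./script/tools.sh gotest ./go "-v -race" ./...   # cd ./go && go test -v -race ./...
$ ./script/tools.sh golint ./go ./...              # run golangci-lint against ./... inside ./go
$ ./script/tools.sh bump patch go                  # find the latest go/vX.Y.Z tag, bump patch, create the next annotated go/v* tag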
- - find "$generated_dir" -type f -name "*.original.ts" | while read -r src_file; do - src_file=${src_file//.original/} - gen_dir=$(dirname "$src_file") - gen_dir=${gen_dir//$generated_dir\//} - file=$(basename "$src_file" .original.ts) - mkdir -p "$tmp_dir/$gen_dir" - tmp_file="$tmp_dir/$gen_dir/$file" - - echo "Preserving $src_file to $tmp_file" - cp "$src_file" "$tmp_file" - done -} - -function cleanup { - rm -rf "$tmp_dir" -} - -restore_patches() { - trap cleanup EXIT ERR - - echo "Restoring TypeScript patches..." - find "$tmp_dir" -type f -name "*.ts" | while read -r src_file; do - original_file_path=${src_file/$tmp_dir\//} - renamed_original_file_path="${original_file_path/.ts/.original.ts}" - echo "Restoring $original_file_path to $generated_dir/$original_file_path" - - mv "$generated_dir/$original_file_path" "$generated_dir/$renamed_original_file_path" - mv "$tmp_dir/$original_file_path" "$generated_dir/$original_file_path" - done -} - -case $1 in -preserve) - preserve_patches - ;; -restore) - restore_patches - ;; -*) - echo "Invalid argument. Use 'preserve' or 'restore'." - exit 1 - ;; -esac diff --git a/tools.go b/tools.go deleted file mode 100644 index 62b9f5de..00000000 --- a/tools.go +++ /dev/null @@ -1,17 +0,0 @@ -//go:build tools -// +build tools - -package api - -import ( - _ "github.com/99designs/keyring" - _ "github.com/grpc-ecosystem/grpc-gateway/runtime" - _ "google.golang.org/grpc" - - _ "github.com/btcsuite/btcd/chaincfg/chainhash" - - _ "github.com/pseudomuto/protoc-gen-doc" - - // TODO https://github.com/akash-network/support/issues/77 - _ "github.com/regen-network/cosmos-proto/protoc-gen-gocosmos" -) diff --git a/ts/package-lock.json b/ts/package-lock.json index 8dc8b2e0..7a771e0e 100644 --- a/ts/package-lock.json +++ b/ts/package-lock.json @@ -28,7 +28,7 @@ "semantic-release-commit-filter": "^1.0.2", "sort-json": "^2.0.1", "ts-jest": "^29.1.2", - "ts-proto": "^1.169.1", + "ts-proto": "^2.2.0", "typescript": "^5.4.2" }, "peerDependencies": { @@ -651,6 +651,12 @@ "integrity": "sha512-0hYQ8SB4Db5zvZB4axdMHGwEaQjkZzFjQiN9LVYvIFB2nSUHW9tYpxWriPrWDASIxiaXax83REcLxuSdnGPZtw==", "dev": true }, + "node_modules/@bufbuild/protobuf": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@bufbuild/protobuf/-/protobuf-2.0.0.tgz", + "integrity": "sha512-sw2JhwJyvyL0zlhG61aDzOVryEfJg2PDZFSV7i7IdC7nAE41WuXCru3QWLGiP87At0BMzKOoKO/FqEGoKygGZQ==", + "dev": true + }, "node_modules/@colors/colors": { "version": "1.5.0", "resolved": "https://registry.npmjs.org/@colors/colors/-/colors-1.5.0.tgz", @@ -741,12 +747,12 @@ } }, "node_modules/@grpc/grpc-js": { - "version": "1.10.6", - "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.10.6.tgz", - "integrity": "sha512-xP58G7wDQ4TCmN/cMUHh00DS7SRDv/+lC+xFLrTkMIN8h55X5NhZMLYbvy7dSELP15qlI6hPhNCRWVMtZMwqLA==", + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@grpc/grpc-js/-/grpc-js-1.11.1.tgz", + "integrity": "sha512-gyt/WayZrVPH2w/UTLansS7F9Nwld472JxxaETamrM8HNlsa+jSLNyKAZmhxI2Me4c3mQHFiS1wWHDY1g1Kthw==", "peer": true, "dependencies": { - "@grpc/proto-loader": "^0.7.10", + "@grpc/proto-loader": "^0.7.13", "@js-sdsl/ordered-map": "^4.4.2" }, "engines": { @@ -754,14 +760,14 @@ } }, "node_modules/@grpc/proto-loader": { - "version": "0.7.12", - "resolved": "https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.7.12.tgz", - "integrity": "sha512-DCVwMxqYzpUCiDMl7hQ384FqP4T3DbNpXU8pt681l3UWCip1WUiD5JrkImUwCB9a7f2cq4CUTmi5r/xIMRPY1Q==", + "version": "0.7.13", + "resolved": 
"https://registry.npmjs.org/@grpc/proto-loader/-/proto-loader-0.7.13.tgz", + "integrity": "sha512-AiXO/bfe9bmxBjxxtYxFAXGZvMaN5s8kO+jBHAJCON8rJoB5YS/D6X7ZNc6XQkuHNmyl4CYaMI1fJ/Gn27RGGw==", "peer": true, "dependencies": { "lodash.camelcase": "^4.3.0", "long": "^5.0.0", - "protobufjs": "^7.2.4", + "protobufjs": "^7.2.5", "yargs": "^17.7.2" }, "bin": { @@ -1699,27 +1705,32 @@ "node_modules/@protobufjs/aspromise": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/aspromise/-/aspromise-1.1.2.tgz", - "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==" + "integrity": "sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==", + "peer": true }, "node_modules/@protobufjs/base64": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/base64/-/base64-1.1.2.tgz", - "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==" + "integrity": "sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==", + "peer": true }, "node_modules/@protobufjs/codegen": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/@protobufjs/codegen/-/codegen-2.0.4.tgz", - "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==" + "integrity": "sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==", + "peer": true }, "node_modules/@protobufjs/eventemitter": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/eventemitter/-/eventemitter-1.1.0.tgz", - "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==" + "integrity": "sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==", + "peer": true }, "node_modules/@protobufjs/fetch": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/fetch/-/fetch-1.1.0.tgz", "integrity": "sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==", + "peer": true, "dependencies": { "@protobufjs/aspromise": "^1.1.1", "@protobufjs/inquire": "^1.1.0" @@ -1728,27 +1739,32 @@ "node_modules/@protobufjs/float": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/@protobufjs/float/-/float-1.0.2.tgz", - "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==" + "integrity": "sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==", + "peer": true }, "node_modules/@protobufjs/inquire": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/inquire/-/inquire-1.1.0.tgz", - "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==" + "integrity": "sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==", + "peer": true }, "node_modules/@protobufjs/path": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/@protobufjs/path/-/path-1.1.2.tgz", - "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==" + "integrity": "sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==", + "peer": true }, "node_modules/@protobufjs/pool": { "version": "1.1.0", "resolved": 
"https://registry.npmjs.org/@protobufjs/pool/-/pool-1.1.0.tgz", - "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==" + "integrity": "sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==", + "peer": true }, "node_modules/@protobufjs/utf8": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/@protobufjs/utf8/-/utf8-1.1.0.tgz", - "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==" + "integrity": "sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==", + "peer": true }, "node_modules/@semantic-release/commit-analyzer": { "version": "12.0.0", @@ -2765,12 +2781,12 @@ } }, "node_modules/braces": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", - "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", "dev": true, "dependencies": { - "fill-range": "^7.0.1" + "fill-range": "^7.1.1" }, "engines": { "node": ">=8" @@ -4284,9 +4300,9 @@ } }, "node_modules/fill-range": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", - "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", "dev": true, "dependencies": { "to-regex-range": "^5.0.1" @@ -6768,7 +6784,8 @@ "node_modules/long": { "version": "5.2.3", "resolved": "https://registry.npmjs.org/long/-/long-5.2.3.tgz", - "integrity": "sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==" + "integrity": "sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==", + "peer": true }, "node_modules/lru-cache": { "version": "6.0.0", @@ -10698,6 +10715,7 @@ "resolved": "https://registry.npmjs.org/protobufjs/-/protobufjs-7.2.6.tgz", "integrity": "sha512-dgJaEDDL6x8ASUZ1YqWciTRrdOuYNzoOf27oHNfdyvKqHr5i0FV7FSLU+aIeFjyFgVxrpTOtQUi0BLLBymZaBw==", "hasInstallScript": true, + "peer": true, "dependencies": { "@protobufjs/aspromise": "^1.1.2", "@protobufjs/base64": "^1.1.2", @@ -12294,28 +12312,27 @@ } }, "node_modules/ts-proto": { - "version": "1.169.1", - "resolved": "https://registry.npmjs.org/ts-proto/-/ts-proto-1.169.1.tgz", - "integrity": "sha512-MHdllDrtFCabxvIyUqze7/4vSh55SEgwirpthGVUGt3pMqIpmmrDyBv0vDk/RCjBxm0/LIWVMnXlOjBxYaE1rA==", + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/ts-proto/-/ts-proto-2.2.0.tgz", + "integrity": "sha512-xzmnyrarUjPnY+Py4RyTh3lYmL9w5t/oTtRTo2rKF8laAAahpGZ/ELxkXFEZns5JVbgkYke3C17HN5iNvZOs4g==", "dev": true, "dependencies": { + "@bufbuild/protobuf": "^2.0.0", "case-anything": "^2.1.13", - "protobufjs": "^7.2.4", "ts-poet": "^6.7.0", - "ts-proto-descriptors": "1.15.0" + "ts-proto-descriptors": "2.0.0" }, "bin": { "protoc-gen-ts_proto": "protoc-gen-ts_proto" } }, "node_modules/ts-proto-descriptors": { - "version": "1.15.0", - "resolved": "https://registry.npmjs.org/ts-proto-descriptors/-/ts-proto-descriptors-1.15.0.tgz", - 
"integrity": "sha512-TYyJ7+H+7Jsqawdv+mfsEpZPTIj9siDHS6EMCzG/z3b/PZiphsX+mWtqFfFVe5/N0Th6V3elK9lQqjnrgTOfrg==", + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/ts-proto-descriptors/-/ts-proto-descriptors-2.0.0.tgz", + "integrity": "sha512-wHcTH3xIv11jxgkX5OyCSFfw27agpInAd6yh89hKG6zqIXnjW9SYqSER2CVQxdPj4czeOhGagNvZBEbJPy7qkw==", "dev": true, "dependencies": { - "long": "^5.2.3", - "protobufjs": "^7.2.4" + "@bufbuild/protobuf": "^2.0.0" } }, "node_modules/tslib": { diff --git a/ts/package.json b/ts/package.json index 565f94f3..b9a42de1 100644 --- a/ts/package.json +++ b/ts/package.json @@ -60,10 +60,89 @@ "semantic-release-commit-filter": "^1.0.2", "sort-json": "^2.0.1", "ts-jest": "^29.1.2", - "ts-proto": "^1.169.1", + "ts-proto": "^2.2.0", "typescript": "^5.4.2" }, "peerDependencies": { "@grpc/grpc-js": "^1.10.6" + }, + "exports": { + "./": "./dist/index.js", + "./typeRegistry": "./dist/generated/typeRegistry.js", + "./akash/deployment/v1beta3/query": "./dist/generated/akash/deployment/v1beta3/query.js", + "./deprecated/akash/cert/v1beta1": "./dist/deprecated/index.akash.cert.v1beta1.js", + "./deprecated/akash/market/v1beta1": "./dist/deprecated/index.akash.market.v1beta1.js", + "./v1beta1": "./dist/index.v1beta1.js", + "./v1beta2": "./dist/index.v1beta2.js", + "./v1beta3": "./dist/index.v1beta3.js", + "./v1beta4": "./dist/index.v1beta4.js", + "./akash/audit": "./dist/generated/index.akash.audit.js", + "./akash/audit/v1": "./dist/generated/index.akash.audit.v1.js", + "./akash/audit/v1/grpc-js": "./dist/generated/index.akash.audit.v1.grpc-js.js", + "./akash/base/attributes": "./dist/generated/index.akash.base.attributes.js", + "./akash/base/attributes/v1": "./dist/generated/index.akash.base.attributes.v1.js", + "./akash/base": "./dist/generated/index.akash.base.js", + "./akash/base/resources": "./dist/generated/index.akash.base.resources.js", + "./akash/base/resources/v1beta4": "./dist/generated/index.akash.base.resources.v1beta4.js", + "./akash/cert": "./dist/generated/index.akash.cert.js", + "./akash/cert/v1": "./dist/generated/index.akash.cert.v1.js", + "./akash/cert/v1/grpc-js": "./dist/generated/index.akash.cert.v1.grpc-js.js", + "./akash": "./dist/generated/index.akash.js", + "./akash/deployment": "./dist/generated/index.akash.deployment.js", + "./akash/deployment/v1": "./dist/generated/index.akash.deployment.v1.js", + "./akash/deployment/v1beta4": "./dist/generated/index.akash.deployment.v1beta4.js", + "./akash/deployment/v1beta4/grpc-js": "./dist/generated/index.akash.deployment.v1beta4.grpc-js.js", + "./akash/discovery": "./dist/generated/index.akash.discovery.js", + "./akash/discovery/v1": "./dist/generated/index.akash.discovery.v1.js", + "./akash/escrow": "./dist/generated/index.akash.escrow.js", + "./akash/escrow/v1": "./dist/generated/index.akash.escrow.v1.js", + "./akash/gov": "./dist/generated/index.akash.gov.js", + "./akash/gov/v1beta3": "./dist/generated/index.akash.gov.v1beta3.js", + "./akash/inflation": "./dist/generated/index.akash.inflation.js", + "./akash/inflation/v1beta2": "./dist/generated/index.akash.inflation.v1beta2.js", + "./akash/inflation/v1beta3": "./dist/generated/index.akash.inflation.v1beta3.js", + "./akash/inventory": "./dist/generated/index.akash.inventory.js", + "./akash/inventory/v1": "./dist/generated/index.akash.inventory.v1.js", + "./akash/inventory/v1/grpc-js": "./dist/generated/index.akash.inventory.v1.grpc-js.js", + "./akash/manifest": "./dist/generated/index.akash.manifest.js", + "./akash/manifest/v2beta3": 
"./dist/generated/index.akash.manifest.v2beta3.js", + "./akash/manifest/v2beta3/grpc-js": "./dist/generated/index.akash.manifest.v2beta3.grpc-js.js", + "./akash/market": "./dist/generated/index.akash.market.js", + "./akash/market/v1": "./dist/generated/index.akash.market.v1.js", + "./akash/market/v1beta5": "./dist/generated/index.akash.market.v1beta5.js", + "./akash/market/v1beta5/grpc-js": "./dist/generated/index.akash.market.v1beta5.grpc-js.js", + "./akash/provider": "./dist/generated/index.akash.provider.js", + "./akash/provider/lease": "./dist/generated/index.akash.provider.lease.js", + "./akash/provider/lease/v1": "./dist/generated/index.akash.provider.lease.v1.js", + "./akash/provider/lease/v1/grpc-js": "./dist/generated/index.akash.provider.lease.v1.grpc-js.js", + "./akash/provider/v1": "./dist/generated/index.akash.provider.v1.js", + "./akash/provider/v1/grpc-js": "./dist/generated/index.akash.provider.v1.grpc-js.js", + "./akash/provider/v1beta4": "./dist/generated/index.akash.provider.v1beta4.js", + "./akash/provider/v1beta4/grpc-js": "./dist/generated/index.akash.provider.v1beta4.grpc-js.js", + "./akash/staking": "./dist/generated/index.akash.staking.js", + "./akash/staking/v1beta3": "./dist/generated/index.akash.staking.v1beta3.js", + "./akash/staking/v1beta3/grpc-js": "./dist/generated/index.akash.staking.v1beta3.grpc-js.js", + "./akash/take": "./dist/generated/index.akash.take.js", + "./akash/take/v1": "./dist/generated/index.akash.take.v1.js", + "./akash/take/v1/grpc-js": "./dist/generated/index.akash.take.v1.grpc-js.js", + "./amino": "./dist/generated/index.amino.js", + "./cosmos/base": "./dist/generated/index.cosmos.base.js", + "./cosmos/base/query": "./dist/generated/index.cosmos.base.query.js", + "./cosmos/base/query/v1beta1": "./dist/generated/index.cosmos.base.query.v1beta1.js", + "./cosmos/base/v1beta1": "./dist/generated/index.cosmos.base.v1beta1.js", + "./cosmos": "./dist/generated/index.cosmos.js", + "./cosmos/msg": "./dist/generated/index.cosmos.msg.js", + "./cosmos/msg/v1": "./dist/generated/index.cosmos.msg.v1.js", + "./cosmos_proto": "./dist/generated/index.cosmos_proto.js", + "./gogoproto": "./dist/generated/index.gogoproto.js", + "./google/api": "./dist/generated/index.google.api.js", + "./google": "./dist/generated/index.google.js", + "./google/protobuf": "./dist/generated/index.google.protobuf.js", + "./k8s": "./dist/generated/index.k8s.js", + "./k8s/io/apimachinery": "./dist/generated/index.k8s.io.apimachinery.js", + "./k8s/io/apimachinery/pkg/api": "./dist/generated/index.k8s.io.apimachinery.pkg.api.js", + "./k8s/io/apimachinery/pkg/api/resource": "./dist/generated/index.k8s.io.apimachinery.pkg.api.resource.js", + "./k8s/io/apimachinery/pkg": "./dist/generated/index.k8s.io.apimachinery.pkg.js", + "./k8s/io": "./dist/generated/index.k8s.io.js" } -} +} \ No newline at end of file diff --git a/ts/src/deprecated/akash/base/v1beta1/attribute.ts b/ts/src/deprecated/akash/base/v1beta1/attribute.ts deleted file mode 100644 index 228688af..00000000 --- a/ts/src/deprecated/akash/base/v1beta1/attribute.ts +++ /dev/null @@ -1,312 +0,0 @@ -/* eslint-disable */ -import { messageTypeRegistry } from "../../../typeRegistry"; -import Long from "long"; -import * as _m0 from "protobufjs/minimal"; - -export const protobufPackage = "akash.base.v1beta1"; - -/** Attribute represents key value pair */ -export interface Attribute { - $type: "akash.base.v1beta1.Attribute"; - key: string; - value: string; -} - -/** - * SignedBy represents validation accounts that tenant expects 
signatures for provider attributes - * AllOf has precedence i.e. if there is at least one entry AnyOf is ignored regardless to how many - * entries there - * this behaviour to be discussed - */ -export interface SignedBy { - $type: "akash.base.v1beta1.SignedBy"; - /** all_of all keys in this list must have signed attributes */ - allOf: string[]; - /** any_of at least of of the keys from the list must have signed attributes */ - anyOf: string[]; -} - -/** PlacementRequirements */ -export interface PlacementRequirements { - $type: "akash.base.v1beta1.PlacementRequirements"; - /** SignedBy list of keys that tenants expect to have signatures from */ - signedBy: SignedBy | undefined; - /** Attribute list of attributes tenant expects from the provider */ - attributes: Attribute[]; -} - -function createBaseAttribute(): Attribute { - return { $type: "akash.base.v1beta1.Attribute", key: "", value: "" }; -} - -export const Attribute = { - $type: "akash.base.v1beta1.Attribute" as const, - - encode( - message: Attribute, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.key !== "") { - writer.uint32(10).string(message.key); - } - if (message.value !== "") { - writer.uint32(18).string(message.value); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Attribute { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseAttribute(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.key = reader.string(); - break; - case 2: - message.value = reader.string(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): Attribute { - return { - $type: Attribute.$type, - key: isSet(object.key) ? String(object.key) : "", - value: isSet(object.value) ? String(object.value) : "", - }; - }, - - toJSON(message: Attribute): unknown { - const obj: any = {}; - message.key !== undefined && (obj.key = message.key); - message.value !== undefined && (obj.value = message.value); - return obj; - }, - - fromPartial, I>>( - object: I, - ): Attribute { - const message = createBaseAttribute(); - message.key = object.key ?? ""; - message.value = object.value ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(Attribute.$type, Attribute); - -function createBaseSignedBy(): SignedBy { - return { $type: "akash.base.v1beta1.SignedBy", allOf: [], anyOf: [] }; -} - -export const SignedBy = { - $type: "akash.base.v1beta1.SignedBy" as const, - - encode( - message: SignedBy, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.allOf) { - writer.uint32(10).string(v!); - } - for (const v of message.anyOf) { - writer.uint32(18).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SignedBy { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseSignedBy(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.allOf.push(reader.string()); - break; - case 2: - message.anyOf.push(reader.string()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): SignedBy { - return { - $type: SignedBy.$type, - allOf: Array.isArray(object?.allOf) - ? 
object.allOf.map((e: any) => String(e)) - : [], - anyOf: Array.isArray(object?.anyOf) - ? object.anyOf.map((e: any) => String(e)) - : [], - }; - }, - - toJSON(message: SignedBy): unknown { - const obj: any = {}; - if (message.allOf) { - obj.allOf = message.allOf.map((e) => e); - } else { - obj.allOf = []; - } - if (message.anyOf) { - obj.anyOf = message.anyOf.map((e) => e); - } else { - obj.anyOf = []; - } - return obj; - }, - - fromPartial, I>>(object: I): SignedBy { - const message = createBaseSignedBy(); - message.allOf = object.allOf?.map((e) => e) || []; - message.anyOf = object.anyOf?.map((e) => e) || []; - return message; - }, -}; - -messageTypeRegistry.set(SignedBy.$type, SignedBy); - -function createBasePlacementRequirements(): PlacementRequirements { - return { - $type: "akash.base.v1beta1.PlacementRequirements", - signedBy: undefined, - attributes: [], - }; -} - -export const PlacementRequirements = { - $type: "akash.base.v1beta1.PlacementRequirements" as const, - - encode( - message: PlacementRequirements, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.signedBy !== undefined) { - SignedBy.encode(message.signedBy, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): PlacementRequirements { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBasePlacementRequirements(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.signedBy = SignedBy.decode(reader, reader.uint32()); - break; - case 2: - message.attributes.push(Attribute.decode(reader, reader.uint32())); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): PlacementRequirements { - return { - $type: PlacementRequirements.$type, - signedBy: isSet(object.signedBy) - ? SignedBy.fromJSON(object.signedBy) - : undefined, - attributes: Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: PlacementRequirements): unknown { - const obj: any = {}; - message.signedBy !== undefined && - (obj.signedBy = message.signedBy - ? SignedBy.toJSON(message.signedBy) - : undefined); - if (message.attributes) { - obj.attributes = message.attributes.map((e) => - e ? Attribute.toJSON(e) : undefined, - ); - } else { - obj.attributes = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): PlacementRequirements { - const message = createBasePlacementRequirements(); - message.signedBy = - object.signedBy !== undefined && object.signedBy !== null - ? SignedBy.fromPartial(object.signedBy) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(PlacementRequirements.$type, PlacementRequirements); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin - ? 
P - : P & { [K in keyof P]: Exact } & Record< - Exclude | "$type">, - never - >; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/deprecated/akash/base/v1beta1/endpoint.ts b/ts/src/deprecated/akash/base/v1beta1/endpoint.ts deleted file mode 100644 index 70fee9a7..00000000 --- a/ts/src/deprecated/akash/base/v1beta1/endpoint.ts +++ /dev/null @@ -1,144 +0,0 @@ -/* eslint-disable */ -import { messageTypeRegistry } from "../../../typeRegistry"; -import Long from "long"; -import * as _m0 from "protobufjs/minimal"; - -export const protobufPackage = "akash.base.v1beta1"; - -/** Endpoint describes a publicly accessible IP service */ -export interface Endpoint { - $type: "akash.base.v1beta1.Endpoint"; - kind: Endpoint_Kind; -} - -/** This describes how the endpoint is implemented when the lease is deployed */ -export enum Endpoint_Kind { - /** SHARED_HTTP - Describes an endpoint that becomes a Kubernetes Ingress */ - SHARED_HTTP = 0, - /** RANDOM_PORT - Describes an endpoint that becomes a Kubernetes NodePort */ - RANDOM_PORT = 1, - UNRECOGNIZED = -1, -} - -export function endpoint_KindFromJSON(object: any): Endpoint_Kind { - switch (object) { - case 0: - case "SHARED_HTTP": - return Endpoint_Kind.SHARED_HTTP; - case 1: - case "RANDOM_PORT": - return Endpoint_Kind.RANDOM_PORT; - case -1: - case "UNRECOGNIZED": - default: - return Endpoint_Kind.UNRECOGNIZED; - } -} - -export function endpoint_KindToJSON(object: Endpoint_Kind): string { - switch (object) { - case Endpoint_Kind.SHARED_HTTP: - return "SHARED_HTTP"; - case Endpoint_Kind.RANDOM_PORT: - return "RANDOM_PORT"; - case Endpoint_Kind.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -function createBaseEndpoint(): Endpoint { - return { $type: "akash.base.v1beta1.Endpoint", kind: 0 }; -} - -export const Endpoint = { - $type: "akash.base.v1beta1.Endpoint" as const, - - encode( - message: Endpoint, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.kind !== 0) { - writer.uint32(8).int32(message.kind); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Endpoint { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseEndpoint(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.kind = reader.int32() as any; - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): Endpoint { - return { - $type: Endpoint.$type, - kind: isSet(object.kind) ? endpoint_KindFromJSON(object.kind) : 0, - }; - }, - - toJSON(message: Endpoint): unknown { - const obj: any = {}; - message.kind !== undefined && - (obj.kind = endpoint_KindToJSON(message.kind)); - return obj; - }, - - fromPartial, I>>(object: I): Endpoint { - const message = createBaseEndpoint(); - message.kind = object.kind ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(Endpoint.$type, Endpoint); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin - ? P - : P & { [K in keyof P]: Exact } & Record< - Exclude | "$type">, - never - >; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/deprecated/akash/base/v1beta1/resource.ts b/ts/src/deprecated/akash/base/v1beta1/resource.ts deleted file mode 100644 index fa845d24..00000000 --- a/ts/src/deprecated/akash/base/v1beta1/resource.ts +++ /dev/null @@ -1,451 +0,0 @@ -/* eslint-disable */ -import { messageTypeRegistry } from "../../../typeRegistry"; -import Long from "long"; -import { ResourceValue } from "./resourcevalue"; -import { Attribute } from "./attribute"; -import { Endpoint } from "./endpoint"; -import * as _m0 from "protobufjs/minimal"; - -export const protobufPackage = "akash.base.v1beta1"; - -/** CPU stores resource units and cpu config attributes */ -export interface CPU { - $type: "akash.base.v1beta1.CPU"; - units: ResourceValue | undefined; - attributes: Attribute[]; -} - -/** Memory stores resource quantity and memory attributes */ -export interface Memory { - $type: "akash.base.v1beta1.Memory"; - quantity: ResourceValue | undefined; - attributes: Attribute[]; -} - -/** Storage stores resource quantity and storage attributes */ -export interface Storage { - $type: "akash.base.v1beta1.Storage"; - quantity: ResourceValue | undefined; - attributes: Attribute[]; -} - -/** - * ResourceUnits describes all available resources types for deployment/node etc - * if field is nil resource is not present in the given data-structure - */ -export interface ResourceUnits { - $type: "akash.base.v1beta1.ResourceUnits"; - cpu: CPU | undefined; - memory: Memory | undefined; - storage: Storage | undefined; - endpoints: Endpoint[]; -} - -function createBaseCPU(): CPU { - return { $type: "akash.base.v1beta1.CPU", units: undefined, attributes: [] }; -} - -export const CPU = { - $type: "akash.base.v1beta1.CPU" as const, - - encode(message: CPU, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.units !== undefined) { - ResourceValue.encode(message.units, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CPU { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCPU(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.units = ResourceValue.decode(reader, reader.uint32()); - break; - case 2: - message.attributes.push(Attribute.decode(reader, reader.uint32())); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): CPU { - return { - $type: CPU.$type, - units: isSet(object.units) - ? ResourceValue.fromJSON(object.units) - : undefined, - attributes: Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: CPU): unknown { - const obj: any = {}; - message.units !== undefined && - (obj.units = message.units - ? 
ResourceValue.toJSON(message.units) - : undefined); - if (message.attributes) { - obj.attributes = message.attributes.map((e) => - e ? Attribute.toJSON(e) : undefined, - ); - } else { - obj.attributes = []; - } - return obj; - }, - - fromPartial, I>>(object: I): CPU { - const message = createBaseCPU(); - message.units = - object.units !== undefined && object.units !== null - ? ResourceValue.fromPartial(object.units) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(CPU.$type, CPU); - -function createBaseMemory(): Memory { - return { - $type: "akash.base.v1beta1.Memory", - quantity: undefined, - attributes: [], - }; -} - -export const Memory = { - $type: "akash.base.v1beta1.Memory" as const, - - encode( - message: Memory, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.quantity !== undefined) { - ResourceValue.encode(message.quantity, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Memory { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMemory(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.quantity = ResourceValue.decode(reader, reader.uint32()); - break; - case 2: - message.attributes.push(Attribute.decode(reader, reader.uint32())); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): Memory { - return { - $type: Memory.$type, - quantity: isSet(object.quantity) - ? ResourceValue.fromJSON(object.quantity) - : undefined, - attributes: Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: Memory): unknown { - const obj: any = {}; - message.quantity !== undefined && - (obj.quantity = message.quantity - ? ResourceValue.toJSON(message.quantity) - : undefined); - if (message.attributes) { - obj.attributes = message.attributes.map((e) => - e ? Attribute.toJSON(e) : undefined, - ); - } else { - obj.attributes = []; - } - return obj; - }, - - fromPartial, I>>(object: I): Memory { - const message = createBaseMemory(); - message.quantity = - object.quantity !== undefined && object.quantity !== null - ? ResourceValue.fromPartial(object.quantity) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(Memory.$type, Memory); - -function createBaseStorage(): Storage { - return { - $type: "akash.base.v1beta1.Storage", - quantity: undefined, - attributes: [], - }; -} - -export const Storage = { - $type: "akash.base.v1beta1.Storage" as const, - - encode( - message: Storage, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.quantity !== undefined) { - ResourceValue.encode(message.quantity, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Storage { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseStorage(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.quantity = ResourceValue.decode(reader, reader.uint32()); - break; - case 2: - message.attributes.push(Attribute.decode(reader, reader.uint32())); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): Storage { - return { - $type: Storage.$type, - quantity: isSet(object.quantity) - ? ResourceValue.fromJSON(object.quantity) - : undefined, - attributes: Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: Storage): unknown { - const obj: any = {}; - message.quantity !== undefined && - (obj.quantity = message.quantity - ? ResourceValue.toJSON(message.quantity) - : undefined); - if (message.attributes) { - obj.attributes = message.attributes.map((e) => - e ? Attribute.toJSON(e) : undefined, - ); - } else { - obj.attributes = []; - } - return obj; - }, - - fromPartial, I>>(object: I): Storage { - const message = createBaseStorage(); - message.quantity = - object.quantity !== undefined && object.quantity !== null - ? ResourceValue.fromPartial(object.quantity) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(Storage.$type, Storage); - -function createBaseResourceUnits(): ResourceUnits { - return { - $type: "akash.base.v1beta1.ResourceUnits", - cpu: undefined, - memory: undefined, - storage: undefined, - endpoints: [], - }; -} - -export const ResourceUnits = { - $type: "akash.base.v1beta1.ResourceUnits" as const, - - encode( - message: ResourceUnits, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.cpu !== undefined) { - CPU.encode(message.cpu, writer.uint32(10).fork()).ldelim(); - } - if (message.memory !== undefined) { - Memory.encode(message.memory, writer.uint32(18).fork()).ldelim(); - } - if (message.storage !== undefined) { - Storage.encode(message.storage, writer.uint32(26).fork()).ldelim(); - } - for (const v of message.endpoints) { - Endpoint.encode(v!, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ResourceUnits { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseResourceUnits(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.cpu = CPU.decode(reader, reader.uint32()); - break; - case 2: - message.memory = Memory.decode(reader, reader.uint32()); - break; - case 3: - message.storage = Storage.decode(reader, reader.uint32()); - break; - case 4: - message.endpoints.push(Endpoint.decode(reader, reader.uint32())); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): ResourceUnits { - return { - $type: ResourceUnits.$type, - cpu: isSet(object.cpu) ? CPU.fromJSON(object.cpu) : undefined, - memory: isSet(object.memory) ? Memory.fromJSON(object.memory) : undefined, - storage: isSet(object.storage) - ? Storage.fromJSON(object.storage) - : undefined, - endpoints: Array.isArray(object?.endpoints) - ? 
object.endpoints.map((e: any) => Endpoint.fromJSON(e)) - : [], - }; - }, - - toJSON(message: ResourceUnits): unknown { - const obj: any = {}; - message.cpu !== undefined && - (obj.cpu = message.cpu ? CPU.toJSON(message.cpu) : undefined); - message.memory !== undefined && - (obj.memory = message.memory ? Memory.toJSON(message.memory) : undefined); - message.storage !== undefined && - (obj.storage = message.storage - ? Storage.toJSON(message.storage) - : undefined); - if (message.endpoints) { - obj.endpoints = message.endpoints.map((e) => - e ? Endpoint.toJSON(e) : undefined, - ); - } else { - obj.endpoints = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): ResourceUnits { - const message = createBaseResourceUnits(); - message.cpu = - object.cpu !== undefined && object.cpu !== null - ? CPU.fromPartial(object.cpu) - : undefined; - message.memory = - object.memory !== undefined && object.memory !== null - ? Memory.fromPartial(object.memory) - : undefined; - message.storage = - object.storage !== undefined && object.storage !== null - ? Storage.fromPartial(object.storage) - : undefined; - message.endpoints = - object.endpoints?.map((e) => Endpoint.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(ResourceUnits.$type, ResourceUnits); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin - ? P - : P & { [K in keyof P]: Exact } & Record< - Exclude | "$type">, - never - >; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/deprecated/akash/base/v1beta1/resourcevalue.ts b/ts/src/deprecated/akash/base/v1beta1/resourcevalue.ts deleted file mode 100644 index 056dbabb..00000000 --- a/ts/src/deprecated/akash/base/v1beta1/resourcevalue.ts +++ /dev/null @@ -1,146 +0,0 @@ -/* eslint-disable */ -import { messageTypeRegistry } from "../../../typeRegistry"; -import Long from "long"; -import * as _m0 from "protobufjs/minimal"; - -export const protobufPackage = "akash.base.v1beta1"; - -/** Unit stores cpu, memory and storage metrics */ -export interface ResourceValue { - $type: "akash.base.v1beta1.ResourceValue"; - val: Uint8Array; -} - -function createBaseResourceValue(): ResourceValue { - return { $type: "akash.base.v1beta1.ResourceValue", val: new Uint8Array() }; -} - -export const ResourceValue = { - $type: "akash.base.v1beta1.ResourceValue" as const, - - encode( - message: ResourceValue, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.val.length !== 0) { - writer.uint32(10).bytes(message.val); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ResourceValue { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseResourceValue(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.val = reader.bytes(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): ResourceValue { - return { - $type: ResourceValue.$type, - val: isSet(object.val) ? bytesFromBase64(object.val) : new Uint8Array(), - }; - }, - - toJSON(message: ResourceValue): unknown { - const obj: any = {}; - message.val !== undefined && - (obj.val = base64FromBytes( - message.val !== undefined ? message.val : new Uint8Array(), - )); - return obj; - }, - - fromPartial, I>>( - object: I, - ): ResourceValue { - const message = createBaseResourceValue(); - message.val = object.val ?? new Uint8Array(); - return message; - }, -}; - -messageTypeRegistry.set(ResourceValue.$type, ResourceValue); - -declare var self: any | undefined; -declare var window: any | undefined; -declare var global: any | undefined; -var globalThis: any = (() => { - if (typeof globalThis !== "undefined") return globalThis; - if (typeof self !== "undefined") return self; - if (typeof window !== "undefined") return window; - if (typeof global !== "undefined") return global; - throw "Unable to locate global object"; -})(); - -const atob: (b64: string) => string = - globalThis.atob || - ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary")); -function bytesFromBase64(b64: string): Uint8Array { - const bin = atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; -} - -const btoa: (bin: string) => string = - globalThis.btoa || - ((bin) => globalThis.Buffer.from(bin, "binary").toString("base64")); -function base64FromBytes(arr: Uint8Array): string { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(String.fromCharCode(byte)); - }); - return btoa(bin.join("")); -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin - ? 
P - : P & { [K in keyof P]: Exact } & Record< - Exclude | "$type">, - never - >; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/deprecated/akash/cert/v1beta1/cert.ts b/ts/src/deprecated/akash/cert/v1beta1/cert.ts deleted file mode 100644 index bb5e6f8f..00000000 --- a/ts/src/deprecated/akash/cert/v1beta1/cert.ts +++ /dev/null @@ -1,739 +0,0 @@ -/* eslint-disable */ -import { messageTypeRegistry } from "../../../typeRegistry"; -import Long from "long"; -import _m0 from "protobufjs/minimal"; - -export const protobufPackage = "akash.cert.v1beta1"; - -/** CertificateID stores owner and sequence number */ -export interface CertificateID { - $type: "akash.cert.v1beta1.CertificateID"; - owner: string; - serial: string; -} - -/** Certificate stores state, certificate and it's public key */ -export interface Certificate { - $type: "akash.cert.v1beta1.Certificate"; - state: Certificate_State; - cert: Uint8Array; - pubkey: Uint8Array; -} - -/** State is an enum which refers to state of deployment */ -export enum Certificate_State { - /** invalid - Prefix should start with 0 in enum. So declaring dummy state */ - invalid = 0, - /** valid - CertificateValid denotes state for deployment active */ - valid = 1, - /** revoked - CertificateRevoked denotes state for deployment closed */ - revoked = 2, - UNRECOGNIZED = -1, -} - -export function certificate_StateFromJSON(object: any): Certificate_State { - switch (object) { - case 0: - case "invalid": - return Certificate_State.invalid; - case 1: - case "valid": - return Certificate_State.valid; - case 2: - case "revoked": - return Certificate_State.revoked; - case -1: - case "UNRECOGNIZED": - default: - return Certificate_State.UNRECOGNIZED; - } -} - -export function certificate_StateToJSON(object: Certificate_State): string { - switch (object) { - case Certificate_State.invalid: - return "invalid"; - case Certificate_State.valid: - return "valid"; - case Certificate_State.revoked: - return "revoked"; - default: - return "UNKNOWN"; - } -} - -/** CertificateFilter defines filters used to filter certificates */ -export interface CertificateFilter { - $type: "akash.cert.v1beta1.CertificateFilter"; - owner: string; - serial: string; - state: string; -} - -/** MsgCreateCertificate defines an SDK message for creating certificate */ -export interface MsgCreateCertificate { - $type: "akash.cert.v1beta1.MsgCreateCertificate"; - owner: string; - cert: Uint8Array; - pubkey: Uint8Array; -} - -/** MsgCreateCertificateResponse defines the Msg/CreateCertificate response type. */ -export interface MsgCreateCertificateResponse { - $type: "akash.cert.v1beta1.MsgCreateCertificateResponse"; -} - -/** MsgRevokeCertificate defines an SDK message for revoking certificate */ -export interface MsgRevokeCertificate { - $type: "akash.cert.v1beta1.MsgRevokeCertificate"; - id?: CertificateID; -} - -/** MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type. 
*/ -export interface MsgRevokeCertificateResponse { - $type: "akash.cert.v1beta1.MsgRevokeCertificateResponse"; -} - -function createBaseCertificateID(): CertificateID { - return { $type: "akash.cert.v1beta1.CertificateID", owner: "", serial: "" }; -} - -export const CertificateID = { - $type: "akash.cert.v1beta1.CertificateID" as const, - - encode( - message: CertificateID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.serial !== "") { - writer.uint32(18).string(message.serial); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CertificateID { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCertificateID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.owner = reader.string(); - break; - case 2: - message.serial = reader.string(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): CertificateID { - return { - $type: CertificateID.$type, - owner: isSet(object.owner) ? String(object.owner) : "", - serial: isSet(object.serial) ? String(object.serial) : "", - }; - }, - - toJSON(message: CertificateID): unknown { - const obj: any = {}; - message.owner !== undefined && (obj.owner = message.owner); - message.serial !== undefined && (obj.serial = message.serial); - return obj; - }, - - fromPartial, I>>( - object: I, - ): CertificateID { - const message = createBaseCertificateID(); - message.owner = object.owner ?? ""; - message.serial = object.serial ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(CertificateID.$type, CertificateID); - -function createBaseCertificate(): Certificate { - return { - $type: "akash.cert.v1beta1.Certificate", - state: 0, - cert: new Uint8Array(), - pubkey: new Uint8Array(), - }; -} - -export const Certificate = { - $type: "akash.cert.v1beta1.Certificate" as const, - - encode( - message: Certificate, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.cert.length !== 0) { - writer.uint32(26).bytes(message.cert); - } - if (message.pubkey.length !== 0) { - writer.uint32(34).bytes(message.pubkey); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Certificate { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCertificate(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 2: - message.state = reader.int32() as any; - break; - case 3: - message.cert = reader.bytes(); - break; - case 4: - message.pubkey = reader.bytes(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): Certificate { - return { - $type: Certificate.$type, - state: isSet(object.state) ? certificate_StateFromJSON(object.state) : 0, - cert: isSet(object.cert) - ? bytesFromBase64(object.cert) - : new Uint8Array(), - pubkey: isSet(object.pubkey) - ? 
bytesFromBase64(object.pubkey) - : new Uint8Array(), - }; - }, - - toJSON(message: Certificate): unknown { - const obj: any = {}; - message.state !== undefined && - (obj.state = certificate_StateToJSON(message.state)); - message.cert !== undefined && - (obj.cert = base64FromBytes( - message.cert !== undefined ? message.cert : new Uint8Array(), - )); - message.pubkey !== undefined && - (obj.pubkey = base64FromBytes( - message.pubkey !== undefined ? message.pubkey : new Uint8Array(), - )); - return obj; - }, - - fromPartial, I>>( - object: I, - ): Certificate { - const message = createBaseCertificate(); - message.state = object.state ?? 0; - message.cert = object.cert ?? new Uint8Array(); - message.pubkey = object.pubkey ?? new Uint8Array(); - return message; - }, -}; - -messageTypeRegistry.set(Certificate.$type, Certificate); - -function createBaseCertificateFilter(): CertificateFilter { - return { - $type: "akash.cert.v1beta1.CertificateFilter", - owner: "", - serial: "", - state: "", - }; -} - -export const CertificateFilter = { - $type: "akash.cert.v1beta1.CertificateFilter" as const, - - encode( - message: CertificateFilter, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.serial !== "") { - writer.uint32(18).string(message.serial); - } - if (message.state !== "") { - writer.uint32(26).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CertificateFilter { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCertificateFilter(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.owner = reader.string(); - break; - case 2: - message.serial = reader.string(); - break; - case 3: - message.state = reader.string(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): CertificateFilter { - return { - $type: CertificateFilter.$type, - owner: isSet(object.owner) ? String(object.owner) : "", - serial: isSet(object.serial) ? String(object.serial) : "", - state: isSet(object.state) ? String(object.state) : "", - }; - }, - - toJSON(message: CertificateFilter): unknown { - const obj: any = {}; - message.owner !== undefined && (obj.owner = message.owner); - message.serial !== undefined && (obj.serial = message.serial); - message.state !== undefined && (obj.state = message.state); - return obj; - }, - - fromPartial, I>>( - object: I, - ): CertificateFilter { - const message = createBaseCertificateFilter(); - message.owner = object.owner ?? ""; - message.serial = object.serial ?? ""; - message.state = object.state ?? 
""; - return message; - }, -}; - -messageTypeRegistry.set(CertificateFilter.$type, CertificateFilter); - -function createBaseMsgCreateCertificate(): MsgCreateCertificate { - return { - $type: "akash.cert.v1beta1.MsgCreateCertificate", - owner: "", - cert: new Uint8Array(), - pubkey: new Uint8Array(), - }; -} - -export const MsgCreateCertificate = { - $type: "akash.cert.v1beta1.MsgCreateCertificate" as const, - - encode( - message: MsgCreateCertificate, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.cert.length !== 0) { - writer.uint32(18).bytes(message.cert); - } - if (message.pubkey.length !== 0) { - writer.uint32(26).bytes(message.pubkey); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateCertificate { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateCertificate(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.owner = reader.string(); - break; - case 2: - message.cert = reader.bytes(); - break; - case 3: - message.pubkey = reader.bytes(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): MsgCreateCertificate { - return { - $type: MsgCreateCertificate.$type, - owner: isSet(object.owner) ? String(object.owner) : "", - cert: isSet(object.cert) - ? bytesFromBase64(object.cert) - : new Uint8Array(), - pubkey: isSet(object.pubkey) - ? bytesFromBase64(object.pubkey) - : new Uint8Array(), - }; - }, - - toJSON(message: MsgCreateCertificate): unknown { - const obj: any = {}; - message.owner !== undefined && (obj.owner = message.owner); - message.cert !== undefined && - (obj.cert = base64FromBytes( - message.cert !== undefined ? message.cert : new Uint8Array(), - )); - message.pubkey !== undefined && - (obj.pubkey = base64FromBytes( - message.pubkey !== undefined ? message.pubkey : new Uint8Array(), - )); - return obj; - }, - - fromPartial, I>>( - object: I, - ): MsgCreateCertificate { - const message = createBaseMsgCreateCertificate(); - message.owner = object.owner ?? ""; - message.cert = object.cert ?? new Uint8Array(); - message.pubkey = object.pubkey ?? new Uint8Array(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateCertificate.$type, MsgCreateCertificate); - -function createBaseMsgCreateCertificateResponse(): MsgCreateCertificateResponse { - return { $type: "akash.cert.v1beta1.MsgCreateCertificateResponse" }; -} - -export const MsgCreateCertificateResponse = { - $type: "akash.cert.v1beta1.MsgCreateCertificateResponse" as const, - - encode( - _: MsgCreateCertificateResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateCertificateResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCreateCertificateResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(_: any): MsgCreateCertificateResponse { - return { - $type: MsgCreateCertificateResponse.$type, - }; - }, - - toJSON(_: MsgCreateCertificateResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): MsgCreateCertificateResponse { - const message = createBaseMsgCreateCertificateResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgCreateCertificateResponse.$type, - MsgCreateCertificateResponse, -); - -function createBaseMsgRevokeCertificate(): MsgRevokeCertificate { - return { $type: "akash.cert.v1beta1.MsgRevokeCertificate", id: undefined }; -} - -export const MsgRevokeCertificate = { - $type: "akash.cert.v1beta1.MsgRevokeCertificate" as const, - - encode( - message: MsgRevokeCertificate, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - CertificateID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgRevokeCertificate { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgRevokeCertificate(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.id = CertificateID.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): MsgRevokeCertificate { - return { - $type: MsgRevokeCertificate.$type, - id: isSet(object.id) ? CertificateID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgRevokeCertificate): unknown { - const obj: any = {}; - message.id !== undefined && - (obj.id = message.id ? CertificateID.toJSON(message.id) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): MsgRevokeCertificate { - const message = createBaseMsgRevokeCertificate(); - message.id = - object.id !== undefined && object.id !== null - ? CertificateID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgRevokeCertificate.$type, MsgRevokeCertificate); - -function createBaseMsgRevokeCertificateResponse(): MsgRevokeCertificateResponse { - return { $type: "akash.cert.v1beta1.MsgRevokeCertificateResponse" }; -} - -export const MsgRevokeCertificateResponse = { - $type: "akash.cert.v1beta1.MsgRevokeCertificateResponse" as const, - - encode( - _: MsgRevokeCertificateResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgRevokeCertificateResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgRevokeCertificateResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(_: any): MsgRevokeCertificateResponse { - return { - $type: MsgRevokeCertificateResponse.$type, - }; - }, - - toJSON(_: MsgRevokeCertificateResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): MsgRevokeCertificateResponse { - const message = createBaseMsgRevokeCertificateResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgRevokeCertificateResponse.$type, - MsgRevokeCertificateResponse, -); - -/** Msg defines the provider Msg service */ -export interface Msg { - /** CreateCertificate defines a method to create new certificate given proper inputs. */ - CreateCertificate( - request: MsgCreateCertificate, - ): Promise; - /** RevokeCertificate defines a method to revoke the certificate */ - RevokeCertificate( - request: MsgRevokeCertificate, - ): Promise; -} - -export class MsgClientImpl implements Msg { - private readonly rpc: Rpc; - constructor(rpc: Rpc) { - this.rpc = rpc; - this.CreateCertificate = this.CreateCertificate.bind(this); - this.RevokeCertificate = this.RevokeCertificate.bind(this); - } - CreateCertificate( - request: MsgCreateCertificate, - ): Promise { - const data = MsgCreateCertificate.encode(request).finish(); - const promise = this.rpc.request( - "akash.cert.v1beta1.Msg", - "CreateCertificate", - data, - ); - return promise.then((data) => - MsgCreateCertificateResponse.decode(new _m0.Reader(data)), - ); - } - - RevokeCertificate( - request: MsgRevokeCertificate, - ): Promise { - const data = MsgRevokeCertificate.encode(request).finish(); - const promise = this.rpc.request( - "akash.cert.v1beta1.Msg", - "RevokeCertificate", - data, - ); - return promise.then((data) => - MsgRevokeCertificateResponse.decode(new _m0.Reader(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -declare var self: any | undefined; -declare var window: any | undefined; -declare var global: any | undefined; -var globalThis: any = (() => { - if (typeof globalThis !== "undefined") return globalThis; - if (typeof self !== "undefined") return self; - if (typeof window !== "undefined") return window; - if (typeof global !== "undefined") return global; - throw "Unable to locate global object"; -})(); - -const atob: (b64: string) => string = - globalThis.atob || - ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary")); -function bytesFromBase64(b64: string): Uint8Array { - const bin = atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; -} - -const btoa: (bin: string) => string = - globalThis.btoa || - ((bin) => globalThis.Buffer.from(bin, "binary").toString("base64")); -function base64FromBytes(arr: Uint8Array): string { - const bin: string[] = []; - for (const byte of arr) { - bin.push(String.fromCharCode(byte)); - } - return btoa(bin.join("")); -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin - ? P - : P & { [K in keyof P]: Exact } & Record< - Exclude | "$type">, - never - >; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/deprecated/akash/cert/v1beta1/genesis.ts b/ts/src/deprecated/akash/cert/v1beta1/genesis.ts deleted file mode 100644 index eac1048f..00000000 --- a/ts/src/deprecated/akash/cert/v1beta1/genesis.ts +++ /dev/null @@ -1,211 +0,0 @@ -/* eslint-disable */ -import { messageTypeRegistry } from "../../../typeRegistry"; -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Certificate } from "../../../akash/cert/v1beta1/cert"; - -export const protobufPackage = "akash.cert.v1beta1"; - -/** GenesisCertificate defines certificate entry at genesis */ -export interface GenesisCertificate { - $type: "akash.cert.v1beta1.GenesisCertificate"; - owner: string; - certificate?: Certificate; -} - -/** GenesisState defines the basic genesis state used by cert module */ -export interface GenesisState { - $type: "akash.cert.v1beta1.GenesisState"; - certificates: GenesisCertificate[]; -} - -function createBaseGenesisCertificate(): GenesisCertificate { - return { - $type: "akash.cert.v1beta1.GenesisCertificate", - owner: "", - certificate: undefined, - }; -} - -export const GenesisCertificate = { - $type: "akash.cert.v1beta1.GenesisCertificate" as const, - - encode( - message: GenesisCertificate, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.certificate !== undefined) { - Certificate.encode( - message.certificate, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisCertificate { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisCertificate(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.owner = reader.string(); - break; - case 2: - message.certificate = Certificate.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): GenesisCertificate { - return { - $type: GenesisCertificate.$type, - owner: isSet(object.owner) ? String(object.owner) : "", - certificate: isSet(object.certificate) - ? Certificate.fromJSON(object.certificate) - : undefined, - }; - }, - - toJSON(message: GenesisCertificate): unknown { - const obj: any = {}; - message.owner !== undefined && (obj.owner = message.owner); - message.certificate !== undefined && - (obj.certificate = message.certificate - ? Certificate.toJSON(message.certificate) - : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): GenesisCertificate { - const message = createBaseGenesisCertificate(); - message.owner = object.owner ?? ""; - message.certificate = - object.certificate !== undefined && object.certificate !== null - ? 
Certificate.fromPartial(object.certificate) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(GenesisCertificate.$type, GenesisCertificate); - -function createBaseGenesisState(): GenesisState { - return { $type: "akash.cert.v1beta1.GenesisState", certificates: [] }; -} - -export const GenesisState = { - $type: "akash.cert.v1beta1.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.certificates) { - GenesisCertificate.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.certificates.push( - GenesisCertificate.decode(reader, reader.uint32()), - ); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - certificates: Array.isArray(object?.certificates) - ? object.certificates.map((e: any) => GenesisCertificate.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.certificates) { - obj.certificates = message.certificates.map((e) => - e ? GenesisCertificate.toJSON(e) : undefined, - ); - } else { - obj.certificates = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): GenesisState { - const message = createBaseGenesisState(); - message.certificates = - object.certificates?.map((e) => GenesisCertificate.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin - ? 
P - : P & { [K in keyof P]: Exact } & Record< - Exclude | "$type">, - never - >; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/deprecated/akash/cert/v1beta1/query.ts b/ts/src/deprecated/akash/cert/v1beta1/query.ts deleted file mode 100644 index b032ecd8..00000000 --- a/ts/src/deprecated/akash/cert/v1beta1/query.ts +++ /dev/null @@ -1,390 +0,0 @@ -/* eslint-disable */ -import { messageTypeRegistry } from "../../../typeRegistry"; -import _m0 from "protobufjs/minimal"; - -import { - Certificate, - CertificateFilter, -} from "../../../akash/cert/v1beta1/cert"; -import Long from "long"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; - -export const protobufPackage = "akash.cert.v1beta1"; - -export interface CertificateResponse { - $type: "akash.cert.v1beta1.CertificateResponse"; - certificate?: Certificate; - serial: string; -} - -/** QueryDeploymentsRequest is request type for the Query/Deployments RPC method */ -export interface QueryCertificatesRequest { - $type: "akash.cert.v1beta1.QueryCertificatesRequest"; - filter?: CertificateFilter; - pagination?: PageRequest; -} - -/** QueryCertificatesResponse is response type for the Query/Certificates RPC method */ -export interface QueryCertificatesResponse { - $type: "akash.cert.v1beta1.QueryCertificatesResponse"; - certificates: CertificateResponse[]; - pagination?: PageResponse; -} - -function createBaseCertificateResponse(): CertificateResponse { - return { - $type: "akash.cert.v1beta1.CertificateResponse", - certificate: undefined, - serial: "", - }; -} - -export const CertificateResponse = { - $type: "akash.cert.v1beta1.CertificateResponse" as const, - - encode( - message: CertificateResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.certificate !== undefined) { - Certificate.encode( - message.certificate, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.serial !== "") { - writer.uint32(18).string(message.serial); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CertificateResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCertificateResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.certificate = Certificate.decode(reader, reader.uint32()); - break; - case 2: - message.serial = reader.string(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): CertificateResponse { - return { - $type: CertificateResponse.$type, - certificate: isSet(object.certificate) - ? Certificate.fromJSON(object.certificate) - : undefined, - serial: isSet(object.serial) ? String(object.serial) : "", - }; - }, - - toJSON(message: CertificateResponse): unknown { - const obj: any = {}; - message.certificate !== undefined && - (obj.certificate = message.certificate - ? Certificate.toJSON(message.certificate) - : undefined); - message.serial !== undefined && (obj.serial = message.serial); - return obj; - }, - - fromPartial, I>>( - object: I, - ): CertificateResponse { - const message = createBaseCertificateResponse(); - message.certificate = - object.certificate !== undefined && object.certificate !== null - ? 
Certificate.fromPartial(object.certificate) - : undefined; - message.serial = object.serial ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(CertificateResponse.$type, CertificateResponse); - -function createBaseQueryCertificatesRequest(): QueryCertificatesRequest { - return { - $type: "akash.cert.v1beta1.QueryCertificatesRequest", - filter: undefined, - pagination: undefined, - }; -} - -export const QueryCertificatesRequest = { - $type: "akash.cert.v1beta1.QueryCertificatesRequest" as const, - - encode( - message: QueryCertificatesRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.filter !== undefined) { - CertificateFilter.encode( - message.filter, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryCertificatesRequest { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryCertificatesRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.filter = CertificateFilter.decode(reader, reader.uint32()); - break; - case 2: - message.pagination = PageRequest.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): QueryCertificatesRequest { - return { - $type: QueryCertificatesRequest.$type, - filter: isSet(object.filter) - ? CertificateFilter.fromJSON(object.filter) - : undefined, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryCertificatesRequest): unknown { - const obj: any = {}; - message.filter !== undefined && - (obj.filter = message.filter - ? CertificateFilter.toJSON(message.filter) - : undefined); - message.pagination !== undefined && - (obj.pagination = message.pagination - ? PageRequest.toJSON(message.pagination) - : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): QueryCertificatesRequest { - const message = createBaseQueryCertificatesRequest(); - message.filter = - object.filter !== undefined && object.filter !== null - ? CertificateFilter.fromPartial(object.filter) - : undefined; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? 
PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - QueryCertificatesRequest.$type, - QueryCertificatesRequest, -); - -function createBaseQueryCertificatesResponse(): QueryCertificatesResponse { - return { - $type: "akash.cert.v1beta1.QueryCertificatesResponse", - certificates: [], - pagination: undefined, - }; -} - -export const QueryCertificatesResponse = { - $type: "akash.cert.v1beta1.QueryCertificatesResponse" as const, - - encode( - message: QueryCertificatesResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.certificates) { - CertificateResponse.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryCertificatesResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryCertificatesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.certificates.push( - CertificateResponse.decode(reader, reader.uint32()), - ); - break; - case 2: - message.pagination = PageResponse.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): QueryCertificatesResponse { - return { - $type: QueryCertificatesResponse.$type, - certificates: Array.isArray(object?.certificates) - ? object.certificates.map((e: any) => CertificateResponse.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryCertificatesResponse): unknown { - const obj: any = {}; - if (message.certificates) { - obj.certificates = message.certificates.map((e) => - e ? CertificateResponse.toJSON(e) : undefined, - ); - } else { - obj.certificates = []; - } - message.pagination !== undefined && - (obj.pagination = message.pagination - ? PageResponse.toJSON(message.pagination) - : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): QueryCertificatesResponse { - const message = createBaseQueryCertificatesResponse(); - message.certificates = - object.certificates?.map((e) => CertificateResponse.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? 
PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - QueryCertificatesResponse.$type, - QueryCertificatesResponse, -); - -/** Query defines the gRPC querier service */ -export interface Query { - /** Certificates queries certificates */ - Certificates( - request: QueryCertificatesRequest, - ): Promise; -} - -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - constructor(rpc: Rpc) { - this.rpc = rpc; - this.Certificates = this.Certificates.bind(this); - } - Certificates( - request: QueryCertificatesRequest, - ): Promise { - const data = QueryCertificatesRequest.encode(request).finish(); - const promise = this.rpc.request( - "akash.cert.v1beta1.Query", - "Certificates", - data, - ); - return promise.then((data) => - QueryCertificatesResponse.decode(new _m0.Reader(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin - ? P - : P & { [K in keyof P]: Exact } & Record< - Exclude | "$type">, - never - >; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/deprecated/akash/deployment/v1beta1/authz.ts b/ts/src/deprecated/akash/deployment/v1beta1/authz.ts deleted file mode 100644 index 84e74f76..00000000 --- a/ts/src/deprecated/akash/deployment/v1beta1/authz.ts +++ /dev/null @@ -1,134 +0,0 @@ -/* eslint-disable */ -import { messageTypeRegistry } from "../../../typeRegistry"; -import Long from "long"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import * as _m0 from "protobufjs/minimal"; - -export const protobufPackage = "akash.deployment.v1beta1"; - -/** - * DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from - * the granter's account for a deployment. - */ -export interface DepositDeploymentAuthorization { - $type: "akash.deployment.v1beta1.DepositDeploymentAuthorization"; - /** - * SpendLimit is the amount the grantee is authorized to spend from the granter's account for - * the purpose of deployment. - */ - spendLimit: Coin | undefined; -} - -function createBaseDepositDeploymentAuthorization(): DepositDeploymentAuthorization { - return { - $type: "akash.deployment.v1beta1.DepositDeploymentAuthorization", - spendLimit: undefined, - }; -} - -export const DepositDeploymentAuthorization = { - $type: "akash.deployment.v1beta1.DepositDeploymentAuthorization" as const, - - encode( - message: DepositDeploymentAuthorization, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.spendLimit !== undefined) { - Coin.encode(message.spendLimit, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): DepositDeploymentAuthorization { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseDepositDeploymentAuthorization(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.spendLimit = Coin.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): DepositDeploymentAuthorization { - return { - $type: DepositDeploymentAuthorization.$type, - spendLimit: isSet(object.spendLimit) - ? Coin.fromJSON(object.spendLimit) - : undefined, - }; - }, - - toJSON(message: DepositDeploymentAuthorization): unknown { - const obj: any = {}; - message.spendLimit !== undefined && - (obj.spendLimit = message.spendLimit - ? Coin.toJSON(message.spendLimit) - : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): DepositDeploymentAuthorization { - const message = createBaseDepositDeploymentAuthorization(); - message.spendLimit = - object.spendLimit !== undefined && object.spendLimit !== null - ? Coin.fromPartial(object.spendLimit) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - DepositDeploymentAuthorization.$type, - DepositDeploymentAuthorization, -); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin - ? P - : P & { [K in keyof P]: Exact } & Record< - Exclude | "$type">, - never - >; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/deprecated/akash/deployment/v1beta1/deployment.ts b/ts/src/deprecated/akash/deployment/v1beta1/deployment.ts deleted file mode 100644 index bcbb7171..00000000 --- a/ts/src/deprecated/akash/deployment/v1beta1/deployment.ts +++ /dev/null @@ -1,1210 +0,0 @@ -/* eslint-disable */ -import { messageTypeRegistry } from "../../../typeRegistry"; -import Long from "long"; -import { - GroupSpec, - MsgCloseGroupResponse, - MsgPauseGroupResponse, - MsgStartGroupResponse, - MsgCloseGroup, - MsgPauseGroup, - MsgStartGroup, -} from "./group"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import * as _m0 from "protobufjs/minimal"; - -export const protobufPackage = "akash.deployment.v1beta1"; - -/** MsgCreateDeployment defines an SDK message for creating deployment */ -export interface MsgCreateDeployment { - $type: "akash.deployment.v1beta1.MsgCreateDeployment"; - id: DeploymentID | undefined; - groups: GroupSpec[]; - version: Uint8Array; - deposit: Coin | undefined; -} - -/** MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. */ -export interface MsgCreateDeploymentResponse { - $type: "akash.deployment.v1beta1.MsgCreateDeploymentResponse"; -} - -/** MsgDepositDeployment deposits more funds into the deposit account */ -export interface MsgDepositDeployment { - $type: "akash.deployment.v1beta1.MsgDepositDeployment"; - id: DeploymentID | undefined; - amount: Coin | undefined; -} - -/** MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. 
*/ -export interface MsgDepositDeploymentResponse { - $type: "akash.deployment.v1beta1.MsgDepositDeploymentResponse"; -} - -/** MsgUpdateDeployment defines an SDK message for updating deployment */ -export interface MsgUpdateDeployment { - $type: "akash.deployment.v1beta1.MsgUpdateDeployment"; - id: DeploymentID | undefined; - groups: GroupSpec[]; - version: Uint8Array; -} - -/** MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. */ -export interface MsgUpdateDeploymentResponse { - $type: "akash.deployment.v1beta1.MsgUpdateDeploymentResponse"; -} - -/** MsgCloseDeployment defines an SDK message for closing deployment */ -export interface MsgCloseDeployment { - $type: "akash.deployment.v1beta1.MsgCloseDeployment"; - id: DeploymentID | undefined; -} - -/** MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. */ -export interface MsgCloseDeploymentResponse { - $type: "akash.deployment.v1beta1.MsgCloseDeploymentResponse"; -} - -/** DeploymentID stores owner and sequence number */ -export interface DeploymentID { - $type: "akash.deployment.v1beta1.DeploymentID"; - owner: string; - dseq: Long; -} - -/** Deployment stores deploymentID, state and version details */ -export interface Deployment { - $type: "akash.deployment.v1beta1.Deployment"; - deploymentId: DeploymentID | undefined; - state: Deployment_State; - version: Uint8Array; - createdAt: Long; -} - -/** State is an enum which refers to state of deployment */ -export enum Deployment_State { - /** invalid - Prefix should start with 0 in enum. So declaring dummy state */ - invalid = 0, - /** active - DeploymentActive denotes state for deployment active */ - active = 1, - /** closed - DeploymentClosed denotes state for deployment closed */ - closed = 2, - UNRECOGNIZED = -1, -} - -export function deployment_StateFromJSON(object: any): Deployment_State { - switch (object) { - case 0: - case "invalid": - return Deployment_State.invalid; - case 1: - case "active": - return Deployment_State.active; - case 2: - case "closed": - return Deployment_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Deployment_State.UNRECOGNIZED; - } -} - -export function deployment_StateToJSON(object: Deployment_State): string { - switch (object) { - case Deployment_State.invalid: - return "invalid"; - case Deployment_State.active: - return "active"; - case Deployment_State.closed: - return "closed"; - case Deployment_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** DeploymentFilters defines filters used to filter deployments */ -export interface DeploymentFilters { - $type: "akash.deployment.v1beta1.DeploymentFilters"; - owner: string; - dseq: Long; - state: string; -} - -function createBaseMsgCreateDeployment(): MsgCreateDeployment { - return { - $type: "akash.deployment.v1beta1.MsgCreateDeployment", - id: undefined, - groups: [], - version: new Uint8Array(), - deposit: undefined, - }; -} - -export const MsgCreateDeployment = { - $type: "akash.deployment.v1beta1.MsgCreateDeployment" as const, - - encode( - message: MsgCreateDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.groups) { - GroupSpec.encode(v!, writer.uint32(18).fork()).ldelim(); - } - if (message.version.length !== 0) { - writer.uint32(26).bytes(message.version); - } - if (message.deposit !== undefined) { - Coin.encode(message.deposit, 
writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateDeployment { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.id = DeploymentID.decode(reader, reader.uint32()); - break; - case 2: - message.groups.push(GroupSpec.decode(reader, reader.uint32())); - break; - case 3: - message.version = reader.bytes(); - break; - case 4: - message.deposit = Coin.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): MsgCreateDeployment { - return { - $type: MsgCreateDeployment.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - groups: Array.isArray(object?.groups) - ? object.groups.map((e: any) => GroupSpec.fromJSON(e)) - : [], - version: isSet(object.version) - ? bytesFromBase64(object.version) - : new Uint8Array(), - deposit: isSet(object.deposit) - ? Coin.fromJSON(object.deposit) - : undefined, - }; - }, - - toJSON(message: MsgCreateDeployment): unknown { - const obj: any = {}; - message.id !== undefined && - (obj.id = message.id ? DeploymentID.toJSON(message.id) : undefined); - if (message.groups) { - obj.groups = message.groups.map((e) => - e ? GroupSpec.toJSON(e) : undefined, - ); - } else { - obj.groups = []; - } - message.version !== undefined && - (obj.version = base64FromBytes( - message.version !== undefined ? message.version : new Uint8Array(), - )); - message.deposit !== undefined && - (obj.deposit = message.deposit - ? Coin.toJSON(message.deposit) - : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): MsgCreateDeployment { - const message = createBaseMsgCreateDeployment(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - message.groups = object.groups?.map((e) => GroupSpec.fromPartial(e)) || []; - message.version = object.version ?? new Uint8Array(); - message.deposit = - object.deposit !== undefined && object.deposit !== null - ? Coin.fromPartial(object.deposit) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateDeployment.$type, MsgCreateDeployment); - -function createBaseMsgCreateDeploymentResponse(): MsgCreateDeploymentResponse { - return { $type: "akash.deployment.v1beta1.MsgCreateDeploymentResponse" }; -} - -export const MsgCreateDeploymentResponse = { - $type: "akash.deployment.v1beta1.MsgCreateDeploymentResponse" as const, - - encode( - _: MsgCreateDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateDeploymentResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCreateDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(_: any): MsgCreateDeploymentResponse { - return { - $type: MsgCreateDeploymentResponse.$type, - }; - }, - - toJSON(_: MsgCreateDeploymentResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): MsgCreateDeploymentResponse { - const message = createBaseMsgCreateDeploymentResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgCreateDeploymentResponse.$type, - MsgCreateDeploymentResponse, -); - -function createBaseMsgDepositDeployment(): MsgDepositDeployment { - return { - $type: "akash.deployment.v1beta1.MsgDepositDeployment", - id: undefined, - amount: undefined, - }; -} - -export const MsgDepositDeployment = { - $type: "akash.deployment.v1beta1.MsgDepositDeployment" as const, - - encode( - message: MsgDepositDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - if (message.amount !== undefined) { - Coin.encode(message.amount, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDepositDeployment { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDepositDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.id = DeploymentID.decode(reader, reader.uint32()); - break; - case 2: - message.amount = Coin.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): MsgDepositDeployment { - return { - $type: MsgDepositDeployment.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - amount: isSet(object.amount) ? Coin.fromJSON(object.amount) : undefined, - }; - }, - - toJSON(message: MsgDepositDeployment): unknown { - const obj: any = {}; - message.id !== undefined && - (obj.id = message.id ? DeploymentID.toJSON(message.id) : undefined); - message.amount !== undefined && - (obj.amount = message.amount ? Coin.toJSON(message.amount) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): MsgDepositDeployment { - const message = createBaseMsgDepositDeployment(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - message.amount = - object.amount !== undefined && object.amount !== null - ? 
Coin.fromPartial(object.amount) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgDepositDeployment.$type, MsgDepositDeployment); - -function createBaseMsgDepositDeploymentResponse(): MsgDepositDeploymentResponse { - return { $type: "akash.deployment.v1beta1.MsgDepositDeploymentResponse" }; -} - -export const MsgDepositDeploymentResponse = { - $type: "akash.deployment.v1beta1.MsgDepositDeploymentResponse" as const, - - encode( - _: MsgDepositDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDepositDeploymentResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDepositDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(_: any): MsgDepositDeploymentResponse { - return { - $type: MsgDepositDeploymentResponse.$type, - }; - }, - - toJSON(_: MsgDepositDeploymentResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): MsgDepositDeploymentResponse { - const message = createBaseMsgDepositDeploymentResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgDepositDeploymentResponse.$type, - MsgDepositDeploymentResponse, -); - -function createBaseMsgUpdateDeployment(): MsgUpdateDeployment { - return { - $type: "akash.deployment.v1beta1.MsgUpdateDeployment", - id: undefined, - groups: [], - version: new Uint8Array(), - }; -} - -export const MsgUpdateDeployment = { - $type: "akash.deployment.v1beta1.MsgUpdateDeployment" as const, - - encode( - message: MsgUpdateDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.groups) { - GroupSpec.encode(v!, writer.uint32(18).fork()).ldelim(); - } - if (message.version.length !== 0) { - writer.uint32(26).bytes(message.version); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgUpdateDeployment { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgUpdateDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.id = DeploymentID.decode(reader, reader.uint32()); - break; - case 2: - message.groups.push(GroupSpec.decode(reader, reader.uint32())); - break; - case 3: - message.version = reader.bytes(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): MsgUpdateDeployment { - return { - $type: MsgUpdateDeployment.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - groups: Array.isArray(object?.groups) - ? object.groups.map((e: any) => GroupSpec.fromJSON(e)) - : [], - version: isSet(object.version) - ? bytesFromBase64(object.version) - : new Uint8Array(), - }; - }, - - toJSON(message: MsgUpdateDeployment): unknown { - const obj: any = {}; - message.id !== undefined && - (obj.id = message.id ? DeploymentID.toJSON(message.id) : undefined); - if (message.groups) { - obj.groups = message.groups.map((e) => - e ? 
GroupSpec.toJSON(e) : undefined, - ); - } else { - obj.groups = []; - } - message.version !== undefined && - (obj.version = base64FromBytes( - message.version !== undefined ? message.version : new Uint8Array(), - )); - return obj; - }, - - fromPartial, I>>( - object: I, - ): MsgUpdateDeployment { - const message = createBaseMsgUpdateDeployment(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - message.groups = object.groups?.map((e) => GroupSpec.fromPartial(e)) || []; - message.version = object.version ?? new Uint8Array(); - return message; - }, -}; - -messageTypeRegistry.set(MsgUpdateDeployment.$type, MsgUpdateDeployment); - -function createBaseMsgUpdateDeploymentResponse(): MsgUpdateDeploymentResponse { - return { $type: "akash.deployment.v1beta1.MsgUpdateDeploymentResponse" }; -} - -export const MsgUpdateDeploymentResponse = { - $type: "akash.deployment.v1beta1.MsgUpdateDeploymentResponse" as const, - - encode( - _: MsgUpdateDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgUpdateDeploymentResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgUpdateDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(_: any): MsgUpdateDeploymentResponse { - return { - $type: MsgUpdateDeploymentResponse.$type, - }; - }, - - toJSON(_: MsgUpdateDeploymentResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): MsgUpdateDeploymentResponse { - const message = createBaseMsgUpdateDeploymentResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgUpdateDeploymentResponse.$type, - MsgUpdateDeploymentResponse, -); - -function createBaseMsgCloseDeployment(): MsgCloseDeployment { - return { - $type: "akash.deployment.v1beta1.MsgCloseDeployment", - id: undefined, - }; -} - -export const MsgCloseDeployment = { - $type: "akash.deployment.v1beta1.MsgCloseDeployment" as const, - - encode( - message: MsgCloseDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseDeployment { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.id = DeploymentID.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): MsgCloseDeployment { - return { - $type: MsgCloseDeployment.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgCloseDeployment): unknown { - const obj: any = {}; - message.id !== undefined && - (obj.id = message.id ? 
DeploymentID.toJSON(message.id) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): MsgCloseDeployment { - const message = createBaseMsgCloseDeployment(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseDeployment.$type, MsgCloseDeployment); - -function createBaseMsgCloseDeploymentResponse(): MsgCloseDeploymentResponse { - return { $type: "akash.deployment.v1beta1.MsgCloseDeploymentResponse" }; -} - -export const MsgCloseDeploymentResponse = { - $type: "akash.deployment.v1beta1.MsgCloseDeploymentResponse" as const, - - encode( - _: MsgCloseDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCloseDeploymentResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(_: any): MsgCloseDeploymentResponse { - return { - $type: MsgCloseDeploymentResponse.$type, - }; - }, - - toJSON(_: MsgCloseDeploymentResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): MsgCloseDeploymentResponse { - const message = createBaseMsgCloseDeploymentResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgCloseDeploymentResponse.$type, - MsgCloseDeploymentResponse, -); - -function createBaseDeploymentID(): DeploymentID { - return { - $type: "akash.deployment.v1beta1.DeploymentID", - owner: "", - dseq: Long.UZERO, - }; -} - -export const DeploymentID = { - $type: "akash.deployment.v1beta1.DeploymentID" as const, - - encode( - message: DeploymentID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.isZero()) { - writer.uint32(16).uint64(message.dseq); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DeploymentID { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDeploymentID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.owner = reader.string(); - break; - case 2: - message.dseq = reader.uint64() as Long; - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): DeploymentID { - return { - $type: DeploymentID.$type, - owner: isSet(object.owner) ? String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - }; - }, - - toJSON(message: DeploymentID): unknown { - const obj: any = {}; - message.owner !== undefined && (obj.owner = message.owner); - message.dseq !== undefined && - (obj.dseq = (message.dseq || Long.UZERO).toString()); - return obj; - }, - - fromPartial, I>>( - object: I, - ): DeploymentID { - const message = createBaseDeploymentID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? 
Long.fromValue(object.dseq) - : Long.UZERO; - return message; - }, -}; - -messageTypeRegistry.set(DeploymentID.$type, DeploymentID); - -function createBaseDeployment(): Deployment { - return { - $type: "akash.deployment.v1beta1.Deployment", - deploymentId: undefined, - state: 0, - version: new Uint8Array(), - createdAt: Long.ZERO, - }; -} - -export const Deployment = { - $type: "akash.deployment.v1beta1.Deployment" as const, - - encode( - message: Deployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.deploymentId !== undefined) { - DeploymentID.encode( - message.deploymentId, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.version.length !== 0) { - writer.uint32(26).bytes(message.version); - } - if (!message.createdAt.isZero()) { - writer.uint32(32).int64(message.createdAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Deployment { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.deploymentId = DeploymentID.decode(reader, reader.uint32()); - break; - case 2: - message.state = reader.int32() as any; - break; - case 3: - message.version = reader.bytes(); - break; - case 4: - message.createdAt = reader.int64() as Long; - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): Deployment { - return { - $type: Deployment.$type, - deploymentId: isSet(object.deploymentId) - ? DeploymentID.fromJSON(object.deploymentId) - : undefined, - state: isSet(object.state) ? deployment_StateFromJSON(object.state) : 0, - version: isSet(object.version) - ? bytesFromBase64(object.version) - : new Uint8Array(), - createdAt: isSet(object.createdAt) - ? Long.fromValue(object.createdAt) - : Long.ZERO, - }; - }, - - toJSON(message: Deployment): unknown { - const obj: any = {}; - message.deploymentId !== undefined && - (obj.deploymentId = message.deploymentId - ? DeploymentID.toJSON(message.deploymentId) - : undefined); - message.state !== undefined && - (obj.state = deployment_StateToJSON(message.state)); - message.version !== undefined && - (obj.version = base64FromBytes( - message.version !== undefined ? message.version : new Uint8Array(), - )); - message.createdAt !== undefined && - (obj.createdAt = (message.createdAt || Long.ZERO).toString()); - return obj; - }, - - fromPartial, I>>( - object: I, - ): Deployment { - const message = createBaseDeployment(); - message.deploymentId = - object.deploymentId !== undefined && object.deploymentId !== null - ? DeploymentID.fromPartial(object.deploymentId) - : undefined; - message.state = object.state ?? 0; - message.version = object.version ?? new Uint8Array(); - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? 
Long.fromValue(object.createdAt)
-        : Long.ZERO;
-    return message;
-  },
-};
-
-messageTypeRegistry.set(Deployment.$type, Deployment);
-
-function createBaseDeploymentFilters(): DeploymentFilters {
-  return {
-    $type: "akash.deployment.v1beta1.DeploymentFilters",
-    owner: "",
-    dseq: Long.UZERO,
-    state: "",
-  };
-}
-
-export const DeploymentFilters = {
-  $type: "akash.deployment.v1beta1.DeploymentFilters" as const,
-
-  encode(
-    message: DeploymentFilters,
-    writer: _m0.Writer = _m0.Writer.create(),
-  ): _m0.Writer {
-    if (message.owner !== "") {
-      writer.uint32(10).string(message.owner);
-    }
-    if (!message.dseq.isZero()) {
-      writer.uint32(16).uint64(message.dseq);
-    }
-    if (message.state !== "") {
-      writer.uint32(26).string(message.state);
-    }
-    return writer;
-  },
-
-  decode(input: _m0.Reader | Uint8Array, length?: number): DeploymentFilters {
-    const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input);
-    let end = length === undefined ? reader.len : reader.pos + length;
-    const message = createBaseDeploymentFilters();
-    while (reader.pos < end) {
-      const tag = reader.uint32();
-      switch (tag >>> 3) {
-        case 1:
-          message.owner = reader.string();
-          break;
-        case 2:
-          message.dseq = reader.uint64() as Long;
-          break;
-        case 3:
-          message.state = reader.string();
-          break;
-        default:
-          reader.skipType(tag & 7);
-          break;
-      }
-    }
-    return message;
-  },
-
-  fromJSON(object: any): DeploymentFilters {
-    return {
-      $type: DeploymentFilters.$type,
-      owner: isSet(object.owner) ? String(object.owner) : "",
-      dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO,
-      state: isSet(object.state) ? String(object.state) : "",
-    };
-  },
-
-  toJSON(message: DeploymentFilters): unknown {
-    const obj: any = {};
-    message.owner !== undefined && (obj.owner = message.owner);
-    message.dseq !== undefined &&
-      (obj.dseq = (message.dseq || Long.UZERO).toString());
-    message.state !== undefined && (obj.state = message.state);
-    return obj;
-  },
-
-  fromPartial<I extends Exact<DeepPartial<DeploymentFilters>, I>>(
-    object: I,
-  ): DeploymentFilters {
-    const message = createBaseDeploymentFilters();
-    message.owner = object.owner ?? "";
-    message.dseq =
-      object.dseq !== undefined && object.dseq !== null
-        ? Long.fromValue(object.dseq)
-        : Long.UZERO;
-    message.state = object.state ?? "";
-    return message;
-  },
-};
-
-messageTypeRegistry.set(DeploymentFilters.$type, DeploymentFilters);
-
-/** Msg defines the deployment Msg service. */
-export interface Msg {
-  /** CreateDeployment defines a method to create new deployment given proper inputs. */
-  CreateDeployment(
-    request: MsgCreateDeployment,
-  ): Promise<MsgCreateDeploymentResponse>;
-  /** DepositDeployment deposits more funds into the deployment account */
-  DepositDeployment(
-    request: MsgDepositDeployment,
-  ): Promise<MsgDepositDeploymentResponse>;
-  /** UpdateDeployment defines a method to update a deployment given proper inputs. */
-  UpdateDeployment(
-    request: MsgUpdateDeployment,
-  ): Promise<MsgUpdateDeploymentResponse>;
-  /** CloseDeployment defines a method to close a deployment given proper inputs. */
-  CloseDeployment(
-    request: MsgCloseDeployment,
-  ): Promise<MsgCloseDeploymentResponse>;
-  /** CloseGroup defines a method to close a group of a deployment given proper inputs. */
-  CloseGroup(request: MsgCloseGroup): Promise<MsgCloseGroupResponse>;
-  /** PauseGroup defines a method to close a group of a deployment given proper inputs. */
-  PauseGroup(request: MsgPauseGroup): Promise<MsgPauseGroupResponse>;
-  /** StartGroup defines a method to close a group of a deployment given proper inputs. */
-  StartGroup(request: MsgStartGroup): Promise<MsgStartGroupResponse>;
-}
-
-export class MsgClientImpl implements Msg {
-  private readonly rpc: Rpc;
-  constructor(rpc: Rpc) {
-    this.rpc = rpc;
-    this.CreateDeployment = this.CreateDeployment.bind(this);
-    this.DepositDeployment = this.DepositDeployment.bind(this);
-    this.UpdateDeployment = this.UpdateDeployment.bind(this);
-    this.CloseDeployment = this.CloseDeployment.bind(this);
-    this.CloseGroup = this.CloseGroup.bind(this);
-    this.PauseGroup = this.PauseGroup.bind(this);
-    this.StartGroup = this.StartGroup.bind(this);
-  }
-  CreateDeployment(
-    request: MsgCreateDeployment,
-  ): Promise<MsgCreateDeploymentResponse> {
-    const data = MsgCreateDeployment.encode(request).finish();
-    const promise = this.rpc.request(
-      "akash.deployment.v1beta1.Msg",
-      "CreateDeployment",
-      data,
-    );
-    return promise.then((data) =>
-      MsgCreateDeploymentResponse.decode(new _m0.Reader(data)),
-    );
-  }
-
-  DepositDeployment(
-    request: MsgDepositDeployment,
-  ): Promise<MsgDepositDeploymentResponse> {
-    const data = MsgDepositDeployment.encode(request).finish();
-    const promise = this.rpc.request(
-      "akash.deployment.v1beta1.Msg",
-      "DepositDeployment",
-      data,
-    );
-    return promise.then((data) =>
-      MsgDepositDeploymentResponse.decode(new _m0.Reader(data)),
-    );
-  }
-
-  UpdateDeployment(
-    request: MsgUpdateDeployment,
-  ): Promise<MsgUpdateDeploymentResponse> {
-    const data = MsgUpdateDeployment.encode(request).finish();
-    const promise = this.rpc.request(
-      "akash.deployment.v1beta1.Msg",
-      "UpdateDeployment",
-      data,
-    );
-    return promise.then((data) =>
-      MsgUpdateDeploymentResponse.decode(new _m0.Reader(data)),
-    );
-  }
-
-  CloseDeployment(
-    request: MsgCloseDeployment,
-  ): Promise<MsgCloseDeploymentResponse> {
-    const data = MsgCloseDeployment.encode(request).finish();
-    const promise = this.rpc.request(
-      "akash.deployment.v1beta1.Msg",
-      "CloseDeployment",
-      data,
-    );
-    return promise.then((data) =>
-      MsgCloseDeploymentResponse.decode(new _m0.Reader(data)),
-    );
-  }
-
-  CloseGroup(request: MsgCloseGroup): Promise<MsgCloseGroupResponse> {
-    const data = MsgCloseGroup.encode(request).finish();
-    const promise = this.rpc.request(
-      "akash.deployment.v1beta1.Msg",
-      "CloseGroup",
-      data,
-    );
-    return promise.then((data) =>
-      MsgCloseGroupResponse.decode(new _m0.Reader(data)),
-    );
-  }
-
-  PauseGroup(request: MsgPauseGroup): Promise<MsgPauseGroupResponse> {
-    const data = MsgPauseGroup.encode(request).finish();
-    const promise = this.rpc.request(
-      "akash.deployment.v1beta1.Msg",
-      "PauseGroup",
-      data,
-    );
-    return promise.then((data) =>
-      MsgPauseGroupResponse.decode(new _m0.Reader(data)),
-    );
-  }
-
-  StartGroup(request: MsgStartGroup): Promise<MsgStartGroupResponse> {
-    const data = MsgStartGroup.encode(request).finish();
-    const promise = this.rpc.request(
-      "akash.deployment.v1beta1.Msg",
-      "StartGroup",
-      data,
-    );
-    return promise.then((data) =>
-      MsgStartGroupResponse.decode(new _m0.Reader(data)),
-    );
-  }
-}
-
-interface Rpc {
-  request(
-    service: string,
-    method: string,
-    data: Uint8Array,
-  ): Promise<Uint8Array>;
-}
-
-declare var self: any | undefined;
-declare var window: any | undefined;
-declare var global: any | undefined;
-var globalThis: any = (() => {
-  if (typeof globalThis !== "undefined") return globalThis;
-  if (typeof self !== "undefined") return self;
-  if (typeof window !== "undefined") return window;
-  if (typeof global !== "undefined") return global;
-  throw "Unable to locate global object";
-})();
-
-const atob: (b64: string) => string =
-  globalThis.atob ||
-  ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary"));
-function bytesFromBase64(b64: string): Uint8Array {
-  const bin = atob(b64);
-  const arr = new Uint8Array(bin.length);
-  for (let i = 0; i < bin.length; ++i) {
-    arr[i] = bin.charCodeAt(i);
-  }
-  return arr;
-}
-
-const btoa: (bin: string) => string =
-  globalThis.btoa ||
-  ((bin) => globalThis.Buffer.from(bin, "binary").toString("base64"));
-function base64FromBytes(arr: Uint8Array): string {
-  const bin: string[] = [];
-  arr.forEach((byte) => {
-    bin.push(String.fromCharCode(byte));
-  });
-  return btoa(bin.join(""));
-}
-
-type Builtin =
-  | Date
-  | Function
-  | Uint8Array
-  | string
-  | number
-  | boolean
-  | undefined;
-
-export type DeepPartial<T> = T extends Builtin
-  ? T
-  : T extends Long
-  ? string | number | Long
-  : T extends Array<infer U>
-  ? Array<DeepPartial<U>>
-  : T extends ReadonlyArray<infer U>
-  ? ReadonlyArray<DeepPartial<U>>
-  : T extends {}
-  ? { [K in Exclude<keyof T, "$type">]?: DeepPartial<T[K]> }
-  : Partial<T>;
-
-type KeysOfUnion<T> = T extends T ? keyof T : never;
-export type Exact<P, I extends P> = P extends Builtin
-  ? P
-  : P & { [K in keyof P]: Exact<P[K], I[K]> } & Record<
-      Exclude<keyof I, KeysOfUnion<P> | "$type">,
-      never
-    >;
-
-if (_m0.util.Long !== Long) {
-  _m0.util.Long = Long as any;
-  _m0.configure();
-}
-
-function isSet(value: any): boolean {
-  return value !== null && value !== undefined;
-}
diff --git a/ts/src/deprecated/akash/deployment/v1beta1/genesis.ts b/ts/src/deprecated/akash/deployment/v1beta1/genesis.ts
deleted file mode 100644
index 361e1b7b..00000000
--- a/ts/src/deprecated/akash/deployment/v1beta1/genesis.ts
+++ /dev/null
@@ -1,234 +0,0 @@
-/* eslint-disable */
-import { messageTypeRegistry } from "../../../typeRegistry";
-import Long from "long";
-import { Deployment } from "./deployment";
-import { Group } from "./group";
-import { Params } from "./params";
-import * as _m0 from "protobufjs/minimal";
-
-export const protobufPackage = "akash.deployment.v1beta1";
-
-/** GenesisDeployment defines the basic genesis state used by deployment module */
-export interface GenesisDeployment {
-  $type: "akash.deployment.v1beta1.GenesisDeployment";
-  deployment: Deployment | undefined;
-  groups: Group[];
-}
-
-/** GenesisState stores slice of genesis deployment instance */
-export interface GenesisState {
-  $type: "akash.deployment.v1beta1.GenesisState";
-  deployments: GenesisDeployment[];
-  params: Params | undefined;
-}
-
-function createBaseGenesisDeployment(): GenesisDeployment {
-  return {
-    $type: "akash.deployment.v1beta1.GenesisDeployment",
-    deployment: undefined,
-    groups: [],
-  };
-}
-
-export const GenesisDeployment = {
-  $type: "akash.deployment.v1beta1.GenesisDeployment" as const,
-
-  encode(
-    message: GenesisDeployment,
-    writer: _m0.Writer = _m0.Writer.create(),
-  ): _m0.Writer {
-    if (message.deployment !== undefined) {
-      Deployment.encode(message.deployment, writer.uint32(10).fork()).ldelim();
-    }
-    for (const v of message.groups) {
-      Group.encode(v!, writer.uint32(18).fork()).ldelim();
-    }
-    return writer;
-  },
-
-  decode(input: _m0.Reader | Uint8Array, length?: number): GenesisDeployment {
-    const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input);
-    let end = length === undefined ? reader.len : reader.pos + length;
-    const message = createBaseGenesisDeployment();
-    while (reader.pos < end) {
-      const tag = reader.uint32();
-      switch (tag >>> 3) {
-        case 1:
-          message.deployment = Deployment.decode(reader, reader.uint32());
-          break;
-        case 2:
-          message.groups.push(Group.decode(reader, reader.uint32()));
-          break;
-        default:
-          reader.skipType(tag & 7);
-          break;
-      }
-    }
-    return message;
-  },
-
-  fromJSON(object: any): GenesisDeployment {
-    return {
-      $type: GenesisDeployment.$type,
-      deployment: isSet(object.deployment)
-        ?
Deployment.fromJSON(object.deployment) - : undefined, - groups: Array.isArray(object?.groups) - ? object.groups.map((e: any) => Group.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GenesisDeployment): unknown { - const obj: any = {}; - message.deployment !== undefined && - (obj.deployment = message.deployment - ? Deployment.toJSON(message.deployment) - : undefined); - if (message.groups) { - obj.groups = message.groups.map((e) => (e ? Group.toJSON(e) : undefined)); - } else { - obj.groups = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): GenesisDeployment { - const message = createBaseGenesisDeployment(); - message.deployment = - object.deployment !== undefined && object.deployment !== null - ? Deployment.fromPartial(object.deployment) - : undefined; - message.groups = object.groups?.map((e) => Group.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GenesisDeployment.$type, GenesisDeployment); - -function createBaseGenesisState(): GenesisState { - return { - $type: "akash.deployment.v1beta1.GenesisState", - deployments: [], - params: undefined, - }; -} - -export const GenesisState = { - $type: "akash.deployment.v1beta1.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.deployments) { - GenesisDeployment.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.params !== undefined) { - Params.encode(message.params, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.deployments.push( - GenesisDeployment.decode(reader, reader.uint32()), - ); - break; - case 2: - message.params = Params.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - deployments: Array.isArray(object?.deployments) - ? object.deployments.map((e: any) => GenesisDeployment.fromJSON(e)) - : [], - params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.deployments) { - obj.deployments = message.deployments.map((e) => - e ? GenesisDeployment.toJSON(e) : undefined, - ); - } else { - obj.deployments = []; - } - message.params !== undefined && - (obj.params = message.params ? Params.toJSON(message.params) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): GenesisState { - const message = createBaseGenesisState(); - message.deployments = - object.deployments?.map((e) => GenesisDeployment.fromPartial(e)) || []; - message.params = - object.params !== undefined && object.params !== null - ? Params.fromPartial(object.params) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin - ? P - : P & { [K in keyof P]: Exact } & Record< - Exclude | "$type">, - never - >; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/deprecated/akash/deployment/v1beta1/group.ts b/ts/src/deprecated/akash/deployment/v1beta1/group.ts deleted file mode 100644 index 6757fda4..00000000 --- a/ts/src/deprecated/akash/deployment/v1beta1/group.ts +++ /dev/null @@ -1,912 +0,0 @@ -/* eslint-disable */ -import { messageTypeRegistry } from "../../../typeRegistry"; -import Long from "long"; -import { PlacementRequirements } from "../../base/v1beta1/attribute"; -import { ResourceUnits } from "../../base/v1beta1/resource"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import * as _m0 from "protobufjs/minimal"; - -export const protobufPackage = "akash.deployment.v1beta1"; - -/** MsgCloseGroup defines SDK message to close a single Group within a Deployment. */ -export interface MsgCloseGroup { - $type: "akash.deployment.v1beta1.MsgCloseGroup"; - id: GroupID | undefined; -} - -/** MsgCloseGroupResponse defines the Msg/CloseGroup response type. */ -export interface MsgCloseGroupResponse { - $type: "akash.deployment.v1beta1.MsgCloseGroupResponse"; -} - -/** MsgPauseGroup defines SDK message to close a single Group within a Deployment. */ -export interface MsgPauseGroup { - $type: "akash.deployment.v1beta1.MsgPauseGroup"; - id: GroupID | undefined; -} - -/** MsgPauseGroupResponse defines the Msg/PauseGroup response type. */ -export interface MsgPauseGroupResponse { - $type: "akash.deployment.v1beta1.MsgPauseGroupResponse"; -} - -/** MsgStartGroup defines SDK message to close a single Group within a Deployment. */ -export interface MsgStartGroup { - $type: "akash.deployment.v1beta1.MsgStartGroup"; - id: GroupID | undefined; -} - -/** MsgStartGroupResponse defines the Msg/StartGroup response type. */ -export interface MsgStartGroupResponse { - $type: "akash.deployment.v1beta1.MsgStartGroupResponse"; -} - -/** GroupID stores owner, deployment sequence number and group sequence number */ -export interface GroupID { - $type: "akash.deployment.v1beta1.GroupID"; - owner: string; - dseq: Long; - gseq: number; -} - -/** GroupSpec stores group specifications */ -export interface GroupSpec { - $type: "akash.deployment.v1beta1.GroupSpec"; - name: string; - requirements: PlacementRequirements | undefined; - resources: Resource[]; -} - -/** Group stores group id, state and specifications of group */ -export interface Group { - $type: "akash.deployment.v1beta1.Group"; - groupId: GroupID | undefined; - state: Group_State; - groupSpec: GroupSpec | undefined; - createdAt: Long; -} - -/** State is an enum which refers to state of group */ -export enum Group_State { - /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ - invalid = 0, - /** open - GroupOpen denotes state for group open */ - open = 1, - /** paused - GroupOrdered denotes state for group ordered */ - paused = 2, - /** insufficient_funds - GroupInsufficientFunds denotes state for group insufficient_funds */ - insufficient_funds = 3, - /** closed - GroupClosed denotes state for group closed */ - closed = 4, - UNRECOGNIZED = -1, -} - -export function group_StateFromJSON(object: any): Group_State { - switch (object) { - case 0: - case "invalid": - return Group_State.invalid; - case 1: - case "open": - return Group_State.open; - case 2: - case "paused": - return Group_State.paused; - case 3: - case "insufficient_funds": - return Group_State.insufficient_funds; - case 4: - case "closed": - return Group_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Group_State.UNRECOGNIZED; - } -} - -export function group_StateToJSON(object: Group_State): string { - switch (object) { - case Group_State.invalid: - return "invalid"; - case Group_State.open: - return "open"; - case Group_State.paused: - return "paused"; - case Group_State.insufficient_funds: - return "insufficient_funds"; - case Group_State.closed: - return "closed"; - case Group_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** Resource stores unit, total count and price of resource */ -export interface Resource { - $type: "akash.deployment.v1beta1.Resource"; - resources: ResourceUnits | undefined; - count: number; - price: Coin | undefined; -} - -function createBaseMsgCloseGroup(): MsgCloseGroup { - return { $type: "akash.deployment.v1beta1.MsgCloseGroup", id: undefined }; -} - -export const MsgCloseGroup = { - $type: "akash.deployment.v1beta1.MsgCloseGroup" as const, - - encode( - message: MsgCloseGroup, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseGroup { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.id = GroupID.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): MsgCloseGroup { - return { - $type: MsgCloseGroup.$type, - id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgCloseGroup): unknown { - const obj: any = {}; - message.id !== undefined && - (obj.id = message.id ? GroupID.toJSON(message.id) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): MsgCloseGroup { - const message = createBaseMsgCloseGroup(); - message.id = - object.id !== undefined && object.id !== null - ? 
GroupID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseGroup.$type, MsgCloseGroup); - -function createBaseMsgCloseGroupResponse(): MsgCloseGroupResponse { - return { $type: "akash.deployment.v1beta1.MsgCloseGroupResponse" }; -} - -export const MsgCloseGroupResponse = { - $type: "akash.deployment.v1beta1.MsgCloseGroupResponse" as const, - - encode( - _: MsgCloseGroupResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCloseGroupResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseGroupResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(_: any): MsgCloseGroupResponse { - return { - $type: MsgCloseGroupResponse.$type, - }; - }, - - toJSON(_: MsgCloseGroupResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): MsgCloseGroupResponse { - const message = createBaseMsgCloseGroupResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseGroupResponse.$type, MsgCloseGroupResponse); - -function createBaseMsgPauseGroup(): MsgPauseGroup { - return { $type: "akash.deployment.v1beta1.MsgPauseGroup", id: undefined }; -} - -export const MsgPauseGroup = { - $type: "akash.deployment.v1beta1.MsgPauseGroup" as const, - - encode( - message: MsgPauseGroup, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgPauseGroup { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgPauseGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.id = GroupID.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): MsgPauseGroup { - return { - $type: MsgPauseGroup.$type, - id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgPauseGroup): unknown { - const obj: any = {}; - message.id !== undefined && - (obj.id = message.id ? GroupID.toJSON(message.id) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): MsgPauseGroup { - const message = createBaseMsgPauseGroup(); - message.id = - object.id !== undefined && object.id !== null - ? GroupID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgPauseGroup.$type, MsgPauseGroup); - -function createBaseMsgPauseGroupResponse(): MsgPauseGroupResponse { - return { $type: "akash.deployment.v1beta1.MsgPauseGroupResponse" }; -} - -export const MsgPauseGroupResponse = { - $type: "akash.deployment.v1beta1.MsgPauseGroupResponse" as const, - - encode( - _: MsgPauseGroupResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgPauseGroupResponse { - const reader = input instanceof _m0.Reader ? 
input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgPauseGroupResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(_: any): MsgPauseGroupResponse { - return { - $type: MsgPauseGroupResponse.$type, - }; - }, - - toJSON(_: MsgPauseGroupResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): MsgPauseGroupResponse { - const message = createBaseMsgPauseGroupResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgPauseGroupResponse.$type, MsgPauseGroupResponse); - -function createBaseMsgStartGroup(): MsgStartGroup { - return { $type: "akash.deployment.v1beta1.MsgStartGroup", id: undefined }; -} - -export const MsgStartGroup = { - $type: "akash.deployment.v1beta1.MsgStartGroup" as const, - - encode( - message: MsgStartGroup, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgStartGroup { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgStartGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.id = GroupID.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): MsgStartGroup { - return { - $type: MsgStartGroup.$type, - id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgStartGroup): unknown { - const obj: any = {}; - message.id !== undefined && - (obj.id = message.id ? GroupID.toJSON(message.id) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): MsgStartGroup { - const message = createBaseMsgStartGroup(); - message.id = - object.id !== undefined && object.id !== null - ? GroupID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgStartGroup.$type, MsgStartGroup); - -function createBaseMsgStartGroupResponse(): MsgStartGroupResponse { - return { $type: "akash.deployment.v1beta1.MsgStartGroupResponse" }; -} - -export const MsgStartGroupResponse = { - $type: "akash.deployment.v1beta1.MsgStartGroupResponse" as const, - - encode( - _: MsgStartGroupResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgStartGroupResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgStartGroupResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(_: any): MsgStartGroupResponse { - return { - $type: MsgStartGroupResponse.$type, - }; - }, - - toJSON(_: MsgStartGroupResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): MsgStartGroupResponse { - const message = createBaseMsgStartGroupResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgStartGroupResponse.$type, MsgStartGroupResponse); - -function createBaseGroupID(): GroupID { - return { - $type: "akash.deployment.v1beta1.GroupID", - owner: "", - dseq: Long.UZERO, - gseq: 0, - }; -} - -export const GroupID = { - $type: "akash.deployment.v1beta1.GroupID" as const, - - encode( - message: GroupID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.isZero()) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GroupID { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGroupID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.owner = reader.string(); - break; - case 2: - message.dseq = reader.uint64() as Long; - break; - case 3: - message.gseq = reader.uint32(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): GroupID { - return { - $type: GroupID.$type, - owner: isSet(object.owner) ? String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? Number(object.gseq) : 0, - }; - }, - - toJSON(message: GroupID): unknown { - const obj: any = {}; - message.owner !== undefined && (obj.owner = message.owner); - message.dseq !== undefined && - (obj.dseq = (message.dseq || Long.UZERO).toString()); - message.gseq !== undefined && (obj.gseq = Math.round(message.gseq)); - return obj; - }, - - fromPartial, I>>(object: I): GroupID { - const message = createBaseGroupID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 
0; - return message; - }, -}; - -messageTypeRegistry.set(GroupID.$type, GroupID); - -function createBaseGroupSpec(): GroupSpec { - return { - $type: "akash.deployment.v1beta1.GroupSpec", - name: "", - requirements: undefined, - resources: [], - }; -} - -export const GroupSpec = { - $type: "akash.deployment.v1beta1.GroupSpec" as const, - - encode( - message: GroupSpec, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.requirements !== undefined) { - PlacementRequirements.encode( - message.requirements, - writer.uint32(18).fork(), - ).ldelim(); - } - for (const v of message.resources) { - Resource.encode(v!, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GroupSpec { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGroupSpec(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.name = reader.string(); - break; - case 2: - message.requirements = PlacementRequirements.decode( - reader, - reader.uint32(), - ); - break; - case 3: - message.resources.push(Resource.decode(reader, reader.uint32())); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): GroupSpec { - return { - $type: GroupSpec.$type, - name: isSet(object.name) ? String(object.name) : "", - requirements: isSet(object.requirements) - ? PlacementRequirements.fromJSON(object.requirements) - : undefined, - resources: Array.isArray(object?.resources) - ? object.resources.map((e: any) => Resource.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GroupSpec): unknown { - const obj: any = {}; - message.name !== undefined && (obj.name = message.name); - message.requirements !== undefined && - (obj.requirements = message.requirements - ? PlacementRequirements.toJSON(message.requirements) - : undefined); - if (message.resources) { - obj.resources = message.resources.map((e) => - e ? Resource.toJSON(e) : undefined, - ); - } else { - obj.resources = []; - } - return obj; - }, - - fromPartial, I>>( - object: I, - ): GroupSpec { - const message = createBaseGroupSpec(); - message.name = object.name ?? ""; - message.requirements = - object.requirements !== undefined && object.requirements !== null - ? 
PlacementRequirements.fromPartial(object.requirements) - : undefined; - message.resources = - object.resources?.map((e) => Resource.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GroupSpec.$type, GroupSpec); - -function createBaseGroup(): Group { - return { - $type: "akash.deployment.v1beta1.Group", - groupId: undefined, - state: 0, - groupSpec: undefined, - createdAt: Long.ZERO, - }; -} - -export const Group = { - $type: "akash.deployment.v1beta1.Group" as const, - - encode(message: Group, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.groupId !== undefined) { - GroupID.encode(message.groupId, writer.uint32(10).fork()).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.groupSpec !== undefined) { - GroupSpec.encode(message.groupSpec, writer.uint32(26).fork()).ldelim(); - } - if (!message.createdAt.isZero()) { - writer.uint32(32).int64(message.createdAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Group { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.groupId = GroupID.decode(reader, reader.uint32()); - break; - case 2: - message.state = reader.int32() as any; - break; - case 3: - message.groupSpec = GroupSpec.decode(reader, reader.uint32()); - break; - case 4: - message.createdAt = reader.int64() as Long; - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): Group { - return { - $type: Group.$type, - groupId: isSet(object.groupId) - ? GroupID.fromJSON(object.groupId) - : undefined, - state: isSet(object.state) ? group_StateFromJSON(object.state) : 0, - groupSpec: isSet(object.groupSpec) - ? GroupSpec.fromJSON(object.groupSpec) - : undefined, - createdAt: isSet(object.createdAt) - ? Long.fromValue(object.createdAt) - : Long.ZERO, - }; - }, - - toJSON(message: Group): unknown { - const obj: any = {}; - message.groupId !== undefined && - (obj.groupId = message.groupId - ? GroupID.toJSON(message.groupId) - : undefined); - message.state !== undefined && - (obj.state = group_StateToJSON(message.state)); - message.groupSpec !== undefined && - (obj.groupSpec = message.groupSpec - ? GroupSpec.toJSON(message.groupSpec) - : undefined); - message.createdAt !== undefined && - (obj.createdAt = (message.createdAt || Long.ZERO).toString()); - return obj; - }, - - fromPartial, I>>(object: I): Group { - const message = createBaseGroup(); - message.groupId = - object.groupId !== undefined && object.groupId !== null - ? GroupID.fromPartial(object.groupId) - : undefined; - message.state = object.state ?? 0; - message.groupSpec = - object.groupSpec !== undefined && object.groupSpec !== null - ? GroupSpec.fromPartial(object.groupSpec) - : undefined; - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? 
Long.fromValue(object.createdAt) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Group.$type, Group); - -function createBaseResource(): Resource { - return { - $type: "akash.deployment.v1beta1.Resource", - resources: undefined, - count: 0, - price: undefined, - }; -} - -export const Resource = { - $type: "akash.deployment.v1beta1.Resource" as const, - - encode( - message: Resource, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.resources !== undefined) { - ResourceUnits.encode( - message.resources, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.count !== 0) { - writer.uint32(16).uint32(message.count); - } - if (message.price !== undefined) { - Coin.encode(message.price, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Resource { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseResource(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.resources = ResourceUnits.decode(reader, reader.uint32()); - break; - case 2: - message.count = reader.uint32(); - break; - case 3: - message.price = Coin.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): Resource { - return { - $type: Resource.$type, - resources: isSet(object.resources) - ? ResourceUnits.fromJSON(object.resources) - : undefined, - count: isSet(object.count) ? Number(object.count) : 0, - price: isSet(object.price) ? Coin.fromJSON(object.price) : undefined, - }; - }, - - toJSON(message: Resource): unknown { - const obj: any = {}; - message.resources !== undefined && - (obj.resources = message.resources - ? ResourceUnits.toJSON(message.resources) - : undefined); - message.count !== undefined && (obj.count = Math.round(message.count)); - message.price !== undefined && - (obj.price = message.price ? Coin.toJSON(message.price) : undefined); - return obj; - }, - - fromPartial, I>>(object: I): Resource { - const message = createBaseResource(); - message.resources = - object.resources !== undefined && object.resources !== null - ? ResourceUnits.fromPartial(object.resources) - : undefined; - message.count = object.count ?? 0; - message.price = - object.price !== undefined && object.price !== null - ? Coin.fromPartial(object.price) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(Resource.$type, Resource); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin - ? 
P - : P & { [K in keyof P]: Exact } & Record< - Exclude | "$type">, - never - >; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/deprecated/akash/deployment/v1beta1/params.ts b/ts/src/deprecated/akash/deployment/v1beta1/params.ts deleted file mode 100644 index 20aacff8..00000000 --- a/ts/src/deprecated/akash/deployment/v1beta1/params.ts +++ /dev/null @@ -1,123 +0,0 @@ -/* eslint-disable */ -import { messageTypeRegistry } from "../../../typeRegistry"; -import Long from "long"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import * as _m0 from "protobufjs/minimal"; - -export const protobufPackage = "akash.deployment.v1beta1"; - -/** Params defines the parameters for the x/deployment package */ -export interface Params { - $type: "akash.deployment.v1beta1.Params"; - deploymentMinDeposit: Coin | undefined; -} - -function createBaseParams(): Params { - return { - $type: "akash.deployment.v1beta1.Params", - deploymentMinDeposit: undefined, - }; -} - -export const Params = { - $type: "akash.deployment.v1beta1.Params" as const, - - encode( - message: Params, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.deploymentMinDeposit !== undefined) { - Coin.encode( - message.deploymentMinDeposit, - writer.uint32(10).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Params { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseParams(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.deploymentMinDeposit = Coin.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): Params { - return { - $type: Params.$type, - deploymentMinDeposit: isSet(object.deploymentMinDeposit) - ? Coin.fromJSON(object.deploymentMinDeposit) - : undefined, - }; - }, - - toJSON(message: Params): unknown { - const obj: any = {}; - message.deploymentMinDeposit !== undefined && - (obj.deploymentMinDeposit = message.deploymentMinDeposit - ? Coin.toJSON(message.deploymentMinDeposit) - : undefined); - return obj; - }, - - fromPartial, I>>(object: I): Params { - const message = createBaseParams(); - message.deploymentMinDeposit = - object.deploymentMinDeposit !== undefined && - object.deploymentMinDeposit !== null - ? Coin.fromPartial(object.deploymentMinDeposit) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(Params.$type, Params); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin - ? 
P - : P & { [K in keyof P]: Exact } & Record< - Exclude | "$type">, - never - >; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/deprecated/akash/deployment/v1beta1/query.ts b/ts/src/deprecated/akash/deployment/v1beta1/query.ts deleted file mode 100644 index d5e3a2e5..00000000 --- a/ts/src/deprecated/akash/deployment/v1beta1/query.ts +++ /dev/null @@ -1,662 +0,0 @@ -/* eslint-disable */ -import { messageTypeRegistry } from "../../../typeRegistry"; -import Long from "long"; -import { DeploymentFilters, DeploymentID, Deployment } from "./deployment"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; -import * as _m0 from "protobufjs/minimal"; -import { Group, GroupID } from "./group"; -import { Account } from "../../../../generated/akash/escrow/v1beta1/types"; - -export const protobufPackage = "akash.deployment.v1beta1"; - -/** QueryDeploymentsRequest is request type for the Query/Deployments RPC method */ -export interface QueryDeploymentsRequest { - $type: "akash.deployment.v1beta1.QueryDeploymentsRequest"; - filters: DeploymentFilters | undefined; - pagination: PageRequest | undefined; -} - -/** QueryDeploymentsResponse is response type for the Query/Deployments RPC method */ -export interface QueryDeploymentsResponse { - $type: "akash.deployment.v1beta1.QueryDeploymentsResponse"; - deployments: QueryDeploymentResponse[]; - pagination: PageResponse | undefined; -} - -/** QueryDeploymentRequest is request type for the Query/Deployment RPC method */ -export interface QueryDeploymentRequest { - $type: "akash.deployment.v1beta1.QueryDeploymentRequest"; - id: DeploymentID | undefined; -} - -/** QueryDeploymentResponse is response type for the Query/Deployment RPC method */ -export interface QueryDeploymentResponse { - $type: "akash.deployment.v1beta1.QueryDeploymentResponse"; - deployment: Deployment | undefined; - groups: Group[]; - escrowAccount: Account | undefined; -} - -/** QueryGroupRequest is request type for the Query/Group RPC method */ -export interface QueryGroupRequest { - $type: "akash.deployment.v1beta1.QueryGroupRequest"; - id: GroupID | undefined; -} - -/** QueryGroupResponse is response type for the Query/Group RPC method */ -export interface QueryGroupResponse { - $type: "akash.deployment.v1beta1.QueryGroupResponse"; - group: Group | undefined; -} - -function createBaseQueryDeploymentsRequest(): QueryDeploymentsRequest { - return { - $type: "akash.deployment.v1beta1.QueryDeploymentsRequest", - filters: undefined, - pagination: undefined, - }; -} - -export const QueryDeploymentsRequest = { - $type: "akash.deployment.v1beta1.QueryDeploymentsRequest" as const, - - encode( - message: QueryDeploymentsRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.filters !== undefined) { - DeploymentFilters.encode( - message.filters, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryDeploymentsRequest { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryDeploymentsRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.filters = DeploymentFilters.decode(reader, reader.uint32()); - break; - case 2: - message.pagination = PageRequest.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): QueryDeploymentsRequest { - return { - $type: QueryDeploymentsRequest.$type, - filters: isSet(object.filters) - ? DeploymentFilters.fromJSON(object.filters) - : undefined, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryDeploymentsRequest): unknown { - const obj: any = {}; - message.filters !== undefined && - (obj.filters = message.filters - ? DeploymentFilters.toJSON(message.filters) - : undefined); - message.pagination !== undefined && - (obj.pagination = message.pagination - ? PageRequest.toJSON(message.pagination) - : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): QueryDeploymentsRequest { - const message = createBaseQueryDeploymentsRequest(); - message.filters = - object.filters !== undefined && object.filters !== null - ? DeploymentFilters.fromPartial(object.filters) - : undefined; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryDeploymentsRequest.$type, QueryDeploymentsRequest); - -function createBaseQueryDeploymentsResponse(): QueryDeploymentsResponse { - return { - $type: "akash.deployment.v1beta1.QueryDeploymentsResponse", - deployments: [], - pagination: undefined, - }; -} - -export const QueryDeploymentsResponse = { - $type: "akash.deployment.v1beta1.QueryDeploymentsResponse" as const, - - encode( - message: QueryDeploymentsResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.deployments) { - QueryDeploymentResponse.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryDeploymentsResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryDeploymentsResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.deployments.push( - QueryDeploymentResponse.decode(reader, reader.uint32()), - ); - break; - case 2: - message.pagination = PageResponse.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): QueryDeploymentsResponse { - return { - $type: QueryDeploymentsResponse.$type, - deployments: Array.isArray(object?.deployments) - ? object.deployments.map((e: any) => - QueryDeploymentResponse.fromJSON(e), - ) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryDeploymentsResponse): unknown { - const obj: any = {}; - if (message.deployments) { - obj.deployments = message.deployments.map((e) => - e ? 
QueryDeploymentResponse.toJSON(e) : undefined, - ); - } else { - obj.deployments = []; - } - message.pagination !== undefined && - (obj.pagination = message.pagination - ? PageResponse.toJSON(message.pagination) - : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): QueryDeploymentsResponse { - const message = createBaseQueryDeploymentsResponse(); - message.deployments = - object.deployments?.map((e) => QueryDeploymentResponse.fromPartial(e)) || - []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - QueryDeploymentsResponse.$type, - QueryDeploymentsResponse, -); - -function createBaseQueryDeploymentRequest(): QueryDeploymentRequest { - return { - $type: "akash.deployment.v1beta1.QueryDeploymentRequest", - id: undefined, - }; -} - -export const QueryDeploymentRequest = { - $type: "akash.deployment.v1beta1.QueryDeploymentRequest" as const, - - encode( - message: QueryDeploymentRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryDeploymentRequest { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryDeploymentRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.id = DeploymentID.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): QueryDeploymentRequest { - return { - $type: QueryDeploymentRequest.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryDeploymentRequest): unknown { - const obj: any = {}; - message.id !== undefined && - (obj.id = message.id ? DeploymentID.toJSON(message.id) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): QueryDeploymentRequest { - const message = createBaseQueryDeploymentRequest(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryDeploymentRequest.$type, QueryDeploymentRequest); - -function createBaseQueryDeploymentResponse(): QueryDeploymentResponse { - return { - $type: "akash.deployment.v1beta1.QueryDeploymentResponse", - deployment: undefined, - groups: [], - escrowAccount: undefined, - }; -} - -export const QueryDeploymentResponse = { - $type: "akash.deployment.v1beta1.QueryDeploymentResponse" as const, - - encode( - message: QueryDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.deployment !== undefined) { - Deployment.encode(message.deployment, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.groups) { - Group.encode(v!, writer.uint32(18).fork()).ldelim(); - } - if (message.escrowAccount !== undefined) { - Account.encode(message.escrowAccount, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryDeploymentResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.deployment = Deployment.decode(reader, reader.uint32()); - break; - case 2: - message.groups.push(Group.decode(reader, reader.uint32())); - break; - case 3: - message.escrowAccount = Account.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): QueryDeploymentResponse { - return { - $type: QueryDeploymentResponse.$type, - deployment: isSet(object.deployment) - ? Deployment.fromJSON(object.deployment) - : undefined, - groups: Array.isArray(object?.groups) - ? object.groups.map((e: any) => Group.fromJSON(e)) - : [], - escrowAccount: isSet(object.escrowAccount) - ? Account.fromJSON(object.escrowAccount) - : undefined, - }; - }, - - toJSON(message: QueryDeploymentResponse): unknown { - const obj: any = {}; - message.deployment !== undefined && - (obj.deployment = message.deployment - ? Deployment.toJSON(message.deployment) - : undefined); - if (message.groups) { - obj.groups = message.groups.map((e) => (e ? Group.toJSON(e) : undefined)); - } else { - obj.groups = []; - } - message.escrowAccount !== undefined && - (obj.escrowAccount = message.escrowAccount - ? Account.toJSON(message.escrowAccount) - : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): QueryDeploymentResponse { - const message = createBaseQueryDeploymentResponse(); - message.deployment = - object.deployment !== undefined && object.deployment !== null - ? Deployment.fromPartial(object.deployment) - : undefined; - message.groups = object.groups?.map((e) => Group.fromPartial(e)) || []; - message.escrowAccount = - object.escrowAccount !== undefined && object.escrowAccount !== null - ? Account.fromPartial(object.escrowAccount) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryDeploymentResponse.$type, QueryDeploymentResponse); - -function createBaseQueryGroupRequest(): QueryGroupRequest { - return { $type: "akash.deployment.v1beta1.QueryGroupRequest", id: undefined }; -} - -export const QueryGroupRequest = { - $type: "akash.deployment.v1beta1.QueryGroupRequest" as const, - - encode( - message: QueryGroupRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryGroupRequest { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryGroupRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.id = GroupID.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): QueryGroupRequest { - return { - $type: QueryGroupRequest.$type, - id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryGroupRequest): unknown { - const obj: any = {}; - message.id !== undefined && - (obj.id = message.id ? 
GroupID.toJSON(message.id) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): QueryGroupRequest { - const message = createBaseQueryGroupRequest(); - message.id = - object.id !== undefined && object.id !== null - ? GroupID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryGroupRequest.$type, QueryGroupRequest); - -function createBaseQueryGroupResponse(): QueryGroupResponse { - return { - $type: "akash.deployment.v1beta1.QueryGroupResponse", - group: undefined, - }; -} - -export const QueryGroupResponse = { - $type: "akash.deployment.v1beta1.QueryGroupResponse" as const, - - encode( - message: QueryGroupResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.group !== undefined) { - Group.encode(message.group, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryGroupResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryGroupResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.group = Group.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): QueryGroupResponse { - return { - $type: QueryGroupResponse.$type, - group: isSet(object.group) ? Group.fromJSON(object.group) : undefined, - }; - }, - - toJSON(message: QueryGroupResponse): unknown { - const obj: any = {}; - message.group !== undefined && - (obj.group = message.group ? Group.toJSON(message.group) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): QueryGroupResponse { - const message = createBaseQueryGroupResponse(); - message.group = - object.group !== undefined && object.group !== null - ? 
Group.fromPartial(object.group)
-        : undefined;
-    return message;
-  },
-};
-
-messageTypeRegistry.set(QueryGroupResponse.$type, QueryGroupResponse);
-
-/** Query defines the gRPC querier service */
-export interface Query {
-  /** Deployments queries deployments */
-  Deployments(
-    request: QueryDeploymentsRequest,
-  ): Promise<QueryDeploymentsResponse>;
-  /** Deployment queries deployment details */
-  Deployment(request: QueryDeploymentRequest): Promise<QueryDeploymentResponse>;
-  /** Group queries group details */
-  Group(request: QueryGroupRequest): Promise<QueryGroupResponse>;
-}
-
-export class QueryClientImpl implements Query {
-  private readonly rpc: Rpc;
-  constructor(rpc: Rpc) {
-    this.rpc = rpc;
-    this.Deployments = this.Deployments.bind(this);
-    this.Deployment = this.Deployment.bind(this);
-    this.Group = this.Group.bind(this);
-  }
-  Deployments(
-    request: QueryDeploymentsRequest,
-  ): Promise<QueryDeploymentsResponse> {
-    const data = QueryDeploymentsRequest.encode(request).finish();
-    const promise = this.rpc.request(
-      "akash.deployment.v1beta1.Query",
-      "Deployments",
-      data,
-    );
-    return promise.then((data) =>
-      QueryDeploymentsResponse.decode(new _m0.Reader(data)),
-    );
-  }
-
-  Deployment(
-    request: QueryDeploymentRequest,
-  ): Promise<QueryDeploymentResponse> {
-    const data = QueryDeploymentRequest.encode(request).finish();
-    const promise = this.rpc.request(
-      "akash.deployment.v1beta1.Query",
-      "Deployment",
-      data,
-    );
-    return promise.then((data) =>
-      QueryDeploymentResponse.decode(new _m0.Reader(data)),
-    );
-  }
-
-  Group(request: QueryGroupRequest): Promise<QueryGroupResponse> {
-    const data = QueryGroupRequest.encode(request).finish();
-    const promise = this.rpc.request(
-      "akash.deployment.v1beta1.Query",
-      "Group",
-      data,
-    );
-    return promise.then((data) =>
-      QueryGroupResponse.decode(new _m0.Reader(data)),
-    );
-  }
-}
-
-interface Rpc {
-  request(
-    service: string,
-    method: string,
-    data: Uint8Array,
-  ): Promise<Uint8Array>;
-}
-
-type Builtin =
-  | Date
-  | Function
-  | Uint8Array
-  | string
-  | number
-  | boolean
-  | undefined;
-
-export type DeepPartial<T> = T extends Builtin
-  ? T
-  : T extends Long
-  ? string | number | Long
-  : T extends Array<infer U>
-  ? Array<DeepPartial<U>>
-  : T extends ReadonlyArray<infer U>
-  ? ReadonlyArray<DeepPartial<U>>
-  : T extends {}
-  ? { [K in Exclude<keyof T, "$type">]?: DeepPartial<T[K]> }
-  : Partial<T>;
-
-type KeysOfUnion<T> = T extends T ? keyof T : never;
-export type Exact<P, I extends P> = P extends Builtin
-  ? P
-  : P & { [K in keyof P]: Exact<P[K], I[K]> } & Record<
-      Exclude<keyof I, KeysOfUnion<P> | "$type">,
-      never
-    >;
-
-if (_m0.util.Long !== Long) {
-  _m0.util.Long = Long as any;
-  _m0.configure();
-}
-
-function isSet(value: any): boolean {
-  return value !== null && value !== undefined;
-}
diff --git a/ts/src/deprecated/akash/market/v1beta1/bid.ts b/ts/src/deprecated/akash/market/v1beta1/bid.ts
deleted file mode 100644
index d40429da..00000000
--- a/ts/src/deprecated/akash/market/v1beta1/bid.ts
+++ /dev/null
@@ -1,752 +0,0 @@
-/* eslint-disable */
-import { messageTypeRegistry } from "../../../typeRegistry";
-import Long from "long";
-import _m0 from "protobufjs/minimal";
-import { OrderID } from "../../../akash/market/v1beta1/order";
-import { Coin } from "../../../cosmos/base/v1beta1/coin";
-
-export const protobufPackage = "akash.market.v1beta1";
-
-/** MsgCreateBid defines an SDK message for creating Bid */
-export interface MsgCreateBid {
-  $type: "akash.market.v1beta1.MsgCreateBid";
-  order?: OrderID;
-  provider: string;
-  price?: Coin;
-  deposit?: Coin;
-}
-
-/** MsgCreateBidResponse defines the Msg/CreateBid response type.
*/ -export interface MsgCreateBidResponse { - $type: "akash.market.v1beta1.MsgCreateBidResponse"; -} - -/** MsgCloseBid defines an SDK message for closing bid */ -export interface MsgCloseBid { - $type: "akash.market.v1beta1.MsgCloseBid"; - bidId?: BidID; -} - -/** MsgCloseBidResponse defines the Msg/CloseBid response type. */ -export interface MsgCloseBidResponse { - $type: "akash.market.v1beta1.MsgCloseBidResponse"; -} - -/** - * BidID stores owner and all other seq numbers - * A successful bid becomes a Lease(ID). - */ -export interface BidID { - $type: "akash.market.v1beta1.BidID"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - provider: string; -} - -/** Bid stores BidID, state of bid and price */ -export interface Bid { - $type: "akash.market.v1beta1.Bid"; - bidId?: BidID; - state: Bid_State; - price?: Coin; - createdAt: Long; -} - -/** State is an enum which refers to state of bid */ -export enum Bid_State { - /** invalid - Prefix should start with 0 in enum. So declaring dummy state */ - invalid = 0, - /** open - BidOpen denotes state for bid open */ - open = 1, - /** active - BidMatched denotes state for bid open */ - active = 2, - /** lost - BidLost denotes state for bid lost */ - lost = 3, - /** closed - BidClosed denotes state for bid closed */ - closed = 4, - UNRECOGNIZED = -1, -} - -export function bid_StateFromJSON(object: any): Bid_State { - switch (object) { - case 0: - case "invalid": - return Bid_State.invalid; - case 1: - case "open": - return Bid_State.open; - case 2: - case "active": - return Bid_State.active; - case 3: - case "lost": - return Bid_State.lost; - case 4: - case "closed": - return Bid_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Bid_State.UNRECOGNIZED; - } -} - -export function bid_StateToJSON(object: Bid_State): string { - switch (object) { - case Bid_State.invalid: - return "invalid"; - case Bid_State.open: - return "open"; - case Bid_State.active: - return "active"; - case Bid_State.lost: - return "lost"; - case Bid_State.closed: - return "closed"; - default: - return "UNKNOWN"; - } -} - -/** BidFilters defines flags for bid list filter */ -export interface BidFilters { - $type: "akash.market.v1beta1.BidFilters"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - provider: string; - state: string; -} - -function createBaseMsgCreateBid(): MsgCreateBid { - return { - $type: "akash.market.v1beta1.MsgCreateBid", - order: undefined, - provider: "", - price: undefined, - deposit: undefined, - }; -} - -export const MsgCreateBid = { - $type: "akash.market.v1beta1.MsgCreateBid" as const, - - encode( - message: MsgCreateBid, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.order !== undefined) { - OrderID.encode(message.order, writer.uint32(10).fork()).ldelim(); - } - if (message.provider !== "") { - writer.uint32(18).string(message.provider); - } - if (message.price !== undefined) { - Coin.encode(message.price, writer.uint32(26).fork()).ldelim(); - } - if (message.deposit !== undefined) { - Coin.encode(message.deposit, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateBid { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCreateBid(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.order = OrderID.decode(reader, reader.uint32()); - break; - case 2: - message.provider = reader.string(); - break; - case 3: - message.price = Coin.decode(reader, reader.uint32()); - break; - case 4: - message.deposit = Coin.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): MsgCreateBid { - return { - $type: MsgCreateBid.$type, - order: isSet(object.order) ? OrderID.fromJSON(object.order) : undefined, - provider: isSet(object.provider) ? String(object.provider) : "", - price: isSet(object.price) ? Coin.fromJSON(object.price) : undefined, - deposit: isSet(object.deposit) - ? Coin.fromJSON(object.deposit) - : undefined, - }; - }, - - toJSON(message: MsgCreateBid): unknown { - const obj: any = {}; - message.order !== undefined && - (obj.order = message.order ? OrderID.toJSON(message.order) : undefined); - message.provider !== undefined && (obj.provider = message.provider); - message.price !== undefined && - (obj.price = message.price ? Coin.toJSON(message.price) : undefined); - message.deposit !== undefined && - (obj.deposit = message.deposit - ? Coin.toJSON(message.deposit) - : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): MsgCreateBid { - const message = createBaseMsgCreateBid(); - message.order = - object.order !== undefined && object.order !== null - ? OrderID.fromPartial(object.order) - : undefined; - message.provider = object.provider ?? ""; - message.price = - object.price !== undefined && object.price !== null - ? Coin.fromPartial(object.price) - : undefined; - message.deposit = - object.deposit !== undefined && object.deposit !== null - ? Coin.fromPartial(object.deposit) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateBid.$type, MsgCreateBid); - -function createBaseMsgCreateBidResponse(): MsgCreateBidResponse { - return { $type: "akash.market.v1beta1.MsgCreateBidResponse" }; -} - -export const MsgCreateBidResponse = { - $type: "akash.market.v1beta1.MsgCreateBidResponse" as const, - - encode( - _: MsgCreateBidResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateBidResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCreateBidResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(_: any): MsgCreateBidResponse { - return { - $type: MsgCreateBidResponse.$type, - }; - }, - - toJSON(_: MsgCreateBidResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): MsgCreateBidResponse { - const message = createBaseMsgCreateBidResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateBidResponse.$type, MsgCreateBidResponse); - -function createBaseMsgCloseBid(): MsgCloseBid { - return { $type: "akash.market.v1beta1.MsgCloseBid", bidId: undefined }; -} - -export const MsgCloseBid = { - $type: "akash.market.v1beta1.MsgCloseBid" as const, - - encode( - message: MsgCloseBid, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bidId !== undefined) { - BidID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseBid { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseBid(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.bidId = BidID.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): MsgCloseBid { - return { - $type: MsgCloseBid.$type, - bidId: isSet(object.bidId) ? BidID.fromJSON(object.bidId) : undefined, - }; - }, - - toJSON(message: MsgCloseBid): unknown { - const obj: any = {}; - message.bidId !== undefined && - (obj.bidId = message.bidId ? BidID.toJSON(message.bidId) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): MsgCloseBid { - const message = createBaseMsgCloseBid(); - message.bidId = - object.bidId !== undefined && object.bidId !== null - ? BidID.fromPartial(object.bidId) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseBid.$type, MsgCloseBid); - -function createBaseMsgCloseBidResponse(): MsgCloseBidResponse { - return { $type: "akash.market.v1beta1.MsgCloseBidResponse" }; -} - -export const MsgCloseBidResponse = { - $type: "akash.market.v1beta1.MsgCloseBidResponse" as const, - - encode( - _: MsgCloseBidResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseBidResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCloseBidResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(_: any): MsgCloseBidResponse { - return { - $type: MsgCloseBidResponse.$type, - }; - }, - - toJSON(_: MsgCloseBidResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): MsgCloseBidResponse { - const message = createBaseMsgCloseBidResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseBidResponse.$type, MsgCloseBidResponse); - -function createBaseBidID(): BidID { - return { - $type: "akash.market.v1beta1.BidID", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - provider: "", - }; -} - -export const BidID = { - $type: "akash.market.v1beta1.BidID" as const, - - encode(message: BidID, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.isZero()) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.provider !== "") { - writer.uint32(42).string(message.provider); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): BidID { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseBidID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.owner = reader.string(); - break; - case 2: - message.dseq = reader.uint64() as Long; - break; - case 3: - message.gseq = reader.uint32(); - break; - case 4: - message.oseq = reader.uint32(); - break; - case 5: - message.provider = reader.string(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): BidID { - return { - $type: BidID.$type, - owner: isSet(object.owner) ? String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromString(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? Number(object.oseq) : 0, - provider: isSet(object.provider) ? String(object.provider) : "", - }; - }, - - toJSON(message: BidID): unknown { - const obj: any = {}; - message.owner !== undefined && (obj.owner = message.owner); - message.dseq !== undefined && - (obj.dseq = (message.dseq || Long.UZERO).toString()); - message.gseq !== undefined && (obj.gseq = Math.round(message.gseq)); - message.oseq !== undefined && (obj.oseq = Math.round(message.oseq)); - message.provider !== undefined && (obj.provider = message.provider); - return obj; - }, - - fromPartial, I>>(object: I): BidID { - const message = createBaseBidID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.provider = object.provider ?? 
""; - return message; - }, -}; - -messageTypeRegistry.set(BidID.$type, BidID); - -function createBaseBid(): Bid { - return { - $type: "akash.market.v1beta1.Bid", - bidId: undefined, - state: 0, - price: undefined, - createdAt: Long.ZERO, - }; -} - -export const Bid = { - $type: "akash.market.v1beta1.Bid" as const, - - encode(message: Bid, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.bidId !== undefined) { - BidID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.price !== undefined) { - Coin.encode(message.price, writer.uint32(26).fork()).ldelim(); - } - if (!message.createdAt.isZero()) { - writer.uint32(32).int64(message.createdAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Bid { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseBid(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.bidId = BidID.decode(reader, reader.uint32()); - break; - case 2: - message.state = reader.int32() as any; - break; - case 3: - message.price = Coin.decode(reader, reader.uint32()); - break; - case 4: - message.createdAt = reader.int64() as Long; - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): Bid { - return { - $type: Bid.$type, - bidId: isSet(object.bidId) ? BidID.fromJSON(object.bidId) : undefined, - state: isSet(object.state) ? bid_StateFromJSON(object.state) : 0, - price: isSet(object.price) ? Coin.fromJSON(object.price) : undefined, - createdAt: isSet(object.createdAt) - ? Long.fromString(object.createdAt) - : Long.ZERO, - }; - }, - - toJSON(message: Bid): unknown { - const obj: any = {}; - message.bidId !== undefined && - (obj.bidId = message.bidId ? BidID.toJSON(message.bidId) : undefined); - message.state !== undefined && (obj.state = bid_StateToJSON(message.state)); - message.price !== undefined && - (obj.price = message.price ? Coin.toJSON(message.price) : undefined); - message.createdAt !== undefined && - (obj.createdAt = (message.createdAt || Long.ZERO).toString()); - return obj; - }, - - fromPartial, I>>(object: I): Bid { - const message = createBaseBid(); - message.bidId = - object.bidId !== undefined && object.bidId !== null - ? BidID.fromPartial(object.bidId) - : undefined; - message.state = object.state ?? 0; - message.price = - object.price !== undefined && object.price !== null - ? Coin.fromPartial(object.price) - : undefined; - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? 
Long.fromValue(object.createdAt) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Bid.$type, Bid); - -function createBaseBidFilters(): BidFilters { - return { - $type: "akash.market.v1beta1.BidFilters", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - provider: "", - state: "", - }; -} - -export const BidFilters = { - $type: "akash.market.v1beta1.BidFilters" as const, - - encode( - message: BidFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.isZero()) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.provider !== "") { - writer.uint32(42).string(message.provider); - } - if (message.state !== "") { - writer.uint32(50).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): BidFilters { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseBidFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.owner = reader.string(); - break; - case 2: - message.dseq = reader.uint64() as Long; - break; - case 3: - message.gseq = reader.uint32(); - break; - case 4: - message.oseq = reader.uint32(); - break; - case 5: - message.provider = reader.string(); - break; - case 6: - message.state = reader.string(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): BidFilters { - return { - $type: BidFilters.$type, - owner: isSet(object.owner) ? String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromString(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? Number(object.oseq) : 0, - provider: isSet(object.provider) ? String(object.provider) : "", - state: isSet(object.state) ? String(object.state) : "", - }; - }, - - toJSON(message: BidFilters): unknown { - const obj: any = {}; - message.owner !== undefined && (obj.owner = message.owner); - message.dseq !== undefined && - (obj.dseq = (message.dseq || Long.UZERO).toString()); - message.gseq !== undefined && (obj.gseq = Math.round(message.gseq)); - message.oseq !== undefined && (obj.oseq = Math.round(message.oseq)); - message.provider !== undefined && (obj.provider = message.provider); - message.state !== undefined && (obj.state = message.state); - return obj; - }, - - fromPartial, I>>( - object: I, - ): BidFilters { - const message = createBaseBidFilters(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.provider = object.provider ?? ""; - message.state = object.state ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(BidFilters.$type, BidFilters); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -type Exact = P extends Builtin - ? P - : P & { [K in keyof P]: Exact } & Record< - Exclude | "$type">, - never - >; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/deprecated/akash/market/v1beta1/lease.ts b/ts/src/deprecated/akash/market/v1beta1/lease.ts deleted file mode 100644 index b29f266a..00000000 --- a/ts/src/deprecated/akash/market/v1beta1/lease.ts +++ /dev/null @@ -1,837 +0,0 @@ -/* eslint-disable */ -import { messageTypeRegistry } from "../../../typeRegistry"; -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import { BidID } from "../../../akash/market/v1beta1/bid"; - -/** LeaseID stores bid details of lease */ -export interface LeaseID { - $type: "akash.market.v1beta1.LeaseID"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - provider: string; -} - -/** Lease stores LeaseID, state of lease and price */ -export interface Lease { - $type: "akash.market.v1beta1.Lease"; - leaseId?: LeaseID; - state: Lease_State; - price?: Coin; - createdAt: Long; -} - -/** State is an enum which refers to state of lease */ -export enum Lease_State { - /** invalid - Prefix should start with 0 in enum. So declaring dummy state */ - invalid = 0, - /** active - LeaseActive denotes state for lease active */ - active = 1, - /** insufficient_funds - LeaseInsufficientFunds denotes state for lease insufficient_funds */ - insufficient_funds = 2, - /** closed - LeaseClosed denotes state for lease closed */ - closed = 3, - UNRECOGNIZED = -1, -} - -export function lease_StateFromJSON(object: any): Lease_State { - switch (object) { - case 0: - case "invalid": - return Lease_State.invalid; - case 1: - case "active": - return Lease_State.active; - case 2: - case "insufficient_funds": - return Lease_State.insufficient_funds; - case 3: - case "closed": - return Lease_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Lease_State.UNRECOGNIZED; - } -} - -export function lease_StateToJSON(object: Lease_State): string { - switch (object) { - case Lease_State.invalid: - return "invalid"; - case Lease_State.active: - return "active"; - case Lease_State.insufficient_funds: - return "insufficient_funds"; - case Lease_State.closed: - return "closed"; - default: - return "UNKNOWN"; - } -} - -/** LeaseFilters defines flags for lease list filter */ -export interface LeaseFilters { - $type: "akash.market.v1beta1.LeaseFilters"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - provider: string; - state: string; -} - -/** MsgCreateLease is sent to create a lease */ -export interface MsgCreateLease { - $type: "akash.market.v1beta1.MsgCreateLease"; - bidId?: BidID; -} - -/** MsgCreateLeaseResponse is the response from creating a lease */ -export interface MsgCreateLeaseResponse { - $type: "akash.market.v1beta1.MsgCreateLeaseResponse"; -} - -/** MsgWithdrawLease defines an SDK message for closing bid */ -export interface MsgWithdrawLease { - $type: "akash.market.v1beta1.MsgWithdrawLease"; - bidId?: LeaseID; -} - -/** MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. 
*/ -export interface MsgWithdrawLeaseResponse { - $type: "akash.market.v1beta1.MsgWithdrawLeaseResponse"; -} - -/** MsgCloseLease defines an SDK message for closing order */ -export interface MsgCloseLease { - $type: "akash.market.v1beta1.MsgCloseLease"; - leaseId?: LeaseID; -} - -/** MsgCloseLeaseResponse defines the Msg/CloseLease response type. */ -export interface MsgCloseLeaseResponse { - $type: "akash.market.v1beta1.MsgCloseLeaseResponse"; -} - -function createBaseLeaseID(): LeaseID { - return { - $type: "akash.market.v1beta1.LeaseID", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - provider: "", - }; -} - -export const LeaseID = { - $type: "akash.market.v1beta1.LeaseID" as const, - - encode( - message: LeaseID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.isZero()) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.provider !== "") { - writer.uint32(42).string(message.provider); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): LeaseID { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseLeaseID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.owner = reader.string(); - break; - case 2: - message.dseq = reader.uint64() as Long; - break; - case 3: - message.gseq = reader.uint32(); - break; - case 4: - message.oseq = reader.uint32(); - break; - case 5: - message.provider = reader.string(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): LeaseID { - return { - $type: LeaseID.$type, - owner: isSet(object.owner) ? String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromString(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? Number(object.oseq) : 0, - provider: isSet(object.provider) ? String(object.provider) : "", - }; - }, - - toJSON(message: LeaseID): unknown { - const obj: any = {}; - message.owner !== undefined && (obj.owner = message.owner); - message.dseq !== undefined && - (obj.dseq = (message.dseq || Long.UZERO).toString()); - message.gseq !== undefined && (obj.gseq = Math.round(message.gseq)); - message.oseq !== undefined && (obj.oseq = Math.round(message.oseq)); - message.provider !== undefined && (obj.provider = message.provider); - return obj; - }, - - fromPartial, I>>(object: I): LeaseID { - const message = createBaseLeaseID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.provider = object.provider ?? 
""; - return message; - }, -}; - -messageTypeRegistry.set(LeaseID.$type, LeaseID); - -function createBaseLease(): Lease { - return { - $type: "akash.market.v1beta1.Lease", - leaseId: undefined, - state: 0, - price: undefined, - createdAt: Long.ZERO, - }; -} - -export const Lease = { - $type: "akash.market.v1beta1.Lease" as const, - - encode(message: Lease, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.leaseId !== undefined) { - LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.price !== undefined) { - Coin.encode(message.price, writer.uint32(26).fork()).ldelim(); - } - if (!message.createdAt.isZero()) { - writer.uint32(32).int64(message.createdAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Lease { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseLease(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.leaseId = LeaseID.decode(reader, reader.uint32()); - break; - case 2: - message.state = reader.int32() as any; - break; - case 3: - message.price = Coin.decode(reader, reader.uint32()); - break; - case 4: - message.createdAt = reader.int64() as Long; - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): Lease { - return { - $type: Lease.$type, - leaseId: isSet(object.leaseId) - ? LeaseID.fromJSON(object.leaseId) - : undefined, - state: isSet(object.state) ? lease_StateFromJSON(object.state) : 0, - price: isSet(object.price) ? Coin.fromJSON(object.price) : undefined, - createdAt: isSet(object.createdAt) - ? Long.fromString(object.createdAt) - : Long.ZERO, - }; - }, - - toJSON(message: Lease): unknown { - const obj: any = {}; - message.leaseId !== undefined && - (obj.leaseId = message.leaseId - ? LeaseID.toJSON(message.leaseId) - : undefined); - message.state !== undefined && - (obj.state = lease_StateToJSON(message.state)); - message.price !== undefined && - (obj.price = message.price ? Coin.toJSON(message.price) : undefined); - message.createdAt !== undefined && - (obj.createdAt = (message.createdAt || Long.ZERO).toString()); - return obj; - }, - - fromPartial, I>>(object: I): Lease { - const message = createBaseLease(); - message.leaseId = - object.leaseId !== undefined && object.leaseId !== null - ? LeaseID.fromPartial(object.leaseId) - : undefined; - message.state = object.state ?? 0; - message.price = - object.price !== undefined && object.price !== null - ? Coin.fromPartial(object.price) - : undefined; - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? 
Long.fromValue(object.createdAt) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Lease.$type, Lease); - -function createBaseLeaseFilters(): LeaseFilters { - return { - $type: "akash.market.v1beta1.LeaseFilters", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - provider: "", - state: "", - }; -} - -export const LeaseFilters = { - $type: "akash.market.v1beta1.LeaseFilters" as const, - - encode( - message: LeaseFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.isZero()) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.provider !== "") { - writer.uint32(42).string(message.provider); - } - if (message.state !== "") { - writer.uint32(50).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): LeaseFilters { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseLeaseFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.owner = reader.string(); - break; - case 2: - message.dseq = reader.uint64() as Long; - break; - case 3: - message.gseq = reader.uint32(); - break; - case 4: - message.oseq = reader.uint32(); - break; - case 5: - message.provider = reader.string(); - break; - case 6: - message.state = reader.string(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): LeaseFilters { - return { - $type: LeaseFilters.$type, - owner: isSet(object.owner) ? String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromString(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? Number(object.oseq) : 0, - provider: isSet(object.provider) ? String(object.provider) : "", - state: isSet(object.state) ? String(object.state) : "", - }; - }, - - toJSON(message: LeaseFilters): unknown { - const obj: any = {}; - message.owner !== undefined && (obj.owner = message.owner); - message.dseq !== undefined && - (obj.dseq = (message.dseq || Long.UZERO).toString()); - message.gseq !== undefined && (obj.gseq = Math.round(message.gseq)); - message.oseq !== undefined && (obj.oseq = Math.round(message.oseq)); - message.provider !== undefined && (obj.provider = message.provider); - message.state !== undefined && (obj.state = message.state); - return obj; - }, - - fromPartial, I>>( - object: I, - ): LeaseFilters { - const message = createBaseLeaseFilters(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.provider = object.provider ?? ""; - message.state = object.state ?? 
""; - return message; - }, -}; - -messageTypeRegistry.set(LeaseFilters.$type, LeaseFilters); - -function createBaseMsgCreateLease(): MsgCreateLease { - return { $type: "akash.market.v1beta1.MsgCreateLease", bidId: undefined }; -} - -export const MsgCreateLease = { - $type: "akash.market.v1beta1.MsgCreateLease" as const, - - encode( - message: MsgCreateLease, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bidId !== undefined) { - BidID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateLease { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateLease(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.bidId = BidID.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): MsgCreateLease { - return { - $type: MsgCreateLease.$type, - bidId: isSet(object.bidId) ? BidID.fromJSON(object.bidId) : undefined, - }; - }, - - toJSON(message: MsgCreateLease): unknown { - const obj: any = {}; - message.bidId !== undefined && - (obj.bidId = message.bidId ? BidID.toJSON(message.bidId) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): MsgCreateLease { - const message = createBaseMsgCreateLease(); - message.bidId = - object.bidId !== undefined && object.bidId !== null - ? BidID.fromPartial(object.bidId) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateLease.$type, MsgCreateLease); - -function createBaseMsgCreateLeaseResponse(): MsgCreateLeaseResponse { - return { $type: "akash.market.v1beta1.MsgCreateLeaseResponse" }; -} - -export const MsgCreateLeaseResponse = { - $type: "akash.market.v1beta1.MsgCreateLeaseResponse" as const, - - encode( - _: MsgCreateLeaseResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateLeaseResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCreateLeaseResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(_: any): MsgCreateLeaseResponse { - return { - $type: MsgCreateLeaseResponse.$type, - }; - }, - - toJSON(_: MsgCreateLeaseResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): MsgCreateLeaseResponse { - const message = createBaseMsgCreateLeaseResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateLeaseResponse.$type, MsgCreateLeaseResponse); - -function createBaseMsgWithdrawLease(): MsgWithdrawLease { - return { $type: "akash.market.v1beta1.MsgWithdrawLease", bidId: undefined }; -} - -export const MsgWithdrawLease = { - $type: "akash.market.v1beta1.MsgWithdrawLease" as const, - - encode( - message: MsgWithdrawLease, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bidId !== undefined) { - LeaseID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgWithdrawLease { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgWithdrawLease(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.bidId = LeaseID.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): MsgWithdrawLease { - return { - $type: MsgWithdrawLease.$type, - bidId: isSet(object.bidId) ? LeaseID.fromJSON(object.bidId) : undefined, - }; - }, - - toJSON(message: MsgWithdrawLease): unknown { - const obj: any = {}; - message.bidId !== undefined && - (obj.bidId = message.bidId ? LeaseID.toJSON(message.bidId) : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): MsgWithdrawLease { - const message = createBaseMsgWithdrawLease(); - message.bidId = - object.bidId !== undefined && object.bidId !== null - ? LeaseID.fromPartial(object.bidId) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgWithdrawLease.$type, MsgWithdrawLease); - -function createBaseMsgWithdrawLeaseResponse(): MsgWithdrawLeaseResponse { - return { $type: "akash.market.v1beta1.MsgWithdrawLeaseResponse" }; -} - -export const MsgWithdrawLeaseResponse = { - $type: "akash.market.v1beta1.MsgWithdrawLeaseResponse" as const, - - encode( - _: MsgWithdrawLeaseResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgWithdrawLeaseResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgWithdrawLeaseResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(_: any): MsgWithdrawLeaseResponse { - return { - $type: MsgWithdrawLeaseResponse.$type, - }; - }, - - toJSON(_: MsgWithdrawLeaseResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): MsgWithdrawLeaseResponse { - const message = createBaseMsgWithdrawLeaseResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgWithdrawLeaseResponse.$type, - MsgWithdrawLeaseResponse, -); - -function createBaseMsgCloseLease(): MsgCloseLease { - return { $type: "akash.market.v1beta1.MsgCloseLease", leaseId: undefined }; -} - -export const MsgCloseLease = { - $type: "akash.market.v1beta1.MsgCloseLease" as const, - - encode( - message: MsgCloseLease, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.leaseId !== undefined) { - LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseLease { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseLease(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.leaseId = LeaseID.decode(reader, reader.uint32()); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): MsgCloseLease { - return { - $type: MsgCloseLease.$type, - leaseId: isSet(object.leaseId) - ? LeaseID.fromJSON(object.leaseId) - : undefined, - }; - }, - - toJSON(message: MsgCloseLease): unknown { - const obj: any = {}; - message.leaseId !== undefined && - (obj.leaseId = message.leaseId - ? LeaseID.toJSON(message.leaseId) - : undefined); - return obj; - }, - - fromPartial, I>>( - object: I, - ): MsgCloseLease { - const message = createBaseMsgCloseLease(); - message.leaseId = - object.leaseId !== undefined && object.leaseId !== null - ? LeaseID.fromPartial(object.leaseId) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseLease.$type, MsgCloseLease); - -function createBaseMsgCloseLeaseResponse(): MsgCloseLeaseResponse { - return { $type: "akash.market.v1beta1.MsgCloseLeaseResponse" }; -} - -export const MsgCloseLeaseResponse = { - $type: "akash.market.v1beta1.MsgCloseLeaseResponse" as const, - - encode( - _: MsgCloseLeaseResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCloseLeaseResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCloseLeaseResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(_: any): MsgCloseLeaseResponse { - return { - $type: MsgCloseLeaseResponse.$type, - }; - }, - - toJSON(_: MsgCloseLeaseResponse): unknown { - const obj: any = {}; - return obj; - }, - - fromPartial, I>>( - _: I, - ): MsgCloseLeaseResponse { - const message = createBaseMsgCloseLeaseResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseLeaseResponse.$type, MsgCloseLeaseResponse); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -type Exact = P extends Builtin - ? P - : P & { [K in keyof P]: Exact } & Record< - Exclude | "$type">, - never - >; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/deprecated/akash/market/v1beta1/order.ts b/ts/src/deprecated/akash/market/v1beta1/order.ts deleted file mode 100644 index c987a405..00000000 --- a/ts/src/deprecated/akash/market/v1beta1/order.ts +++ /dev/null @@ -1,428 +0,0 @@ -/* eslint-disable */ -import { messageTypeRegistry } from "../../../typeRegistry"; -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { GroupSpec } from "../../../akash/deployment/v1beta1/group"; - -export const protobufPackage = "akash.market.v1beta1"; - -/** OrderID stores owner and all other seq numbers */ -export interface OrderID { - $type: "akash.market.v1beta1.OrderID"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; -} - -/** Order stores orderID, state of order and other details */ -export interface Order { - $type: "akash.market.v1beta1.Order"; - orderId?: OrderID; - state: Order_State; - spec?: GroupSpec; - createdAt: Long; -} - -/** State is an enum which refers to state of order */ -export enum Order_State { - /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ - invalid = 0, - /** open - OrderOpen denotes state for order open */ - open = 1, - /** active - OrderMatched denotes state for order matched */ - active = 2, - /** closed - OrderClosed denotes state for order lost */ - closed = 3, - UNRECOGNIZED = -1, -} - -export function order_StateFromJSON(object: any): Order_State { - switch (object) { - case 0: - case "invalid": - return Order_State.invalid; - case 1: - case "open": - return Order_State.open; - case 2: - case "active": - return Order_State.active; - case 3: - case "closed": - return Order_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Order_State.UNRECOGNIZED; - } -} - -export function order_StateToJSON(object: Order_State): string { - switch (object) { - case Order_State.invalid: - return "invalid"; - case Order_State.open: - return "open"; - case Order_State.active: - return "active"; - case Order_State.closed: - return "closed"; - default: - return "UNKNOWN"; - } -} - -/** OrderFilters defines flags for order list filter */ -export interface OrderFilters { - $type: "akash.market.v1beta1.OrderFilters"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - state: string; -} - -function createBaseOrderID(): OrderID { - return { - $type: "akash.market.v1beta1.OrderID", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - }; -} - -export const OrderID = { - $type: "akash.market.v1beta1.OrderID" as const, - - encode( - message: OrderID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.isZero()) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): OrderID { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseOrderID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.owner = reader.string(); - break; - case 2: - message.dseq = reader.uint64() as Long; - break; - case 3: - message.gseq = reader.uint32(); - break; - case 4: - message.oseq = reader.uint32(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): OrderID { - return { - $type: OrderID.$type, - owner: isSet(object.owner) ? String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromString(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? Number(object.oseq) : 0, - }; - }, - - toJSON(message: OrderID): unknown { - const obj: any = {}; - message.owner !== undefined && (obj.owner = message.owner); - message.dseq !== undefined && - (obj.dseq = (message.dseq || Long.UZERO).toString()); - message.gseq !== undefined && (obj.gseq = Math.round(message.gseq)); - message.oseq !== undefined && (obj.oseq = Math.round(message.oseq)); - return obj; - }, - - fromPartial, I>>(object: I): OrderID { - const message = createBaseOrderID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 
0; - return message; - }, -}; - -messageTypeRegistry.set(OrderID.$type, OrderID); - -function createBaseOrder(): Order { - return { - $type: "akash.market.v1beta1.Order", - orderId: undefined, - state: 0, - spec: undefined, - createdAt: Long.ZERO, - }; -} - -export const Order = { - $type: "akash.market.v1beta1.Order" as const, - - encode(message: Order, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.orderId !== undefined) { - OrderID.encode(message.orderId, writer.uint32(10).fork()).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.spec !== undefined) { - GroupSpec.encode(message.spec, writer.uint32(26).fork()).ldelim(); - } - if (!message.createdAt.isZero()) { - writer.uint32(32).int64(message.createdAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Order { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseOrder(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.orderId = OrderID.decode(reader, reader.uint32()); - break; - case 2: - message.state = reader.int32() as any; - break; - case 3: - message.spec = GroupSpec.decode(reader, reader.uint32()); - break; - case 4: - message.createdAt = reader.int64() as Long; - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): Order { - return { - $type: Order.$type, - orderId: isSet(object.orderId) - ? OrderID.fromJSON(object.orderId) - : undefined, - state: isSet(object.state) ? order_StateFromJSON(object.state) : 0, - spec: isSet(object.spec) ? GroupSpec.fromJSON(object.spec) : undefined, - createdAt: isSet(object.createdAt) - ? Long.fromString(object.createdAt) - : Long.ZERO, - }; - }, - - toJSON(message: Order): unknown { - const obj: any = {}; - message.orderId !== undefined && - (obj.orderId = message.orderId - ? OrderID.toJSON(message.orderId) - : undefined); - message.state !== undefined && - (obj.state = order_StateToJSON(message.state)); - message.spec !== undefined && - (obj.spec = message.spec ? GroupSpec.toJSON(message.spec) : undefined); - message.createdAt !== undefined && - (obj.createdAt = (message.createdAt || Long.ZERO).toString()); - return obj; - }, - - fromPartial, I>>(object: I): Order { - const message = createBaseOrder(); - message.orderId = - object.orderId !== undefined && object.orderId !== null - ? OrderID.fromPartial(object.orderId) - : undefined; - message.state = object.state ?? 0; - message.spec = - object.spec !== undefined && object.spec !== null - ? GroupSpec.fromPartial(object.spec) - : undefined; - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? 
Long.fromValue(object.createdAt) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Order.$type, Order); - -function createBaseOrderFilters(): OrderFilters { - return { - $type: "akash.market.v1beta1.OrderFilters", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - state: "", - }; -} - -export const OrderFilters = { - $type: "akash.market.v1beta1.OrderFilters" as const, - - encode( - message: OrderFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.isZero()) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.state !== "") { - writer.uint32(42).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): OrderFilters { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseOrderFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.owner = reader.string(); - break; - case 2: - message.dseq = reader.uint64() as Long; - break; - case 3: - message.gseq = reader.uint32(); - break; - case 4: - message.oseq = reader.uint32(); - break; - case 5: - message.state = reader.string(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): OrderFilters { - return { - $type: OrderFilters.$type, - owner: isSet(object.owner) ? String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromString(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? Number(object.oseq) : 0, - state: isSet(object.state) ? String(object.state) : "", - }; - }, - - toJSON(message: OrderFilters): unknown { - const obj: any = {}; - message.owner !== undefined && (obj.owner = message.owner); - message.dseq !== undefined && - (obj.dseq = (message.dseq || Long.UZERO).toString()); - message.gseq !== undefined && (obj.gseq = Math.round(message.gseq)); - message.oseq !== undefined && (obj.oseq = Math.round(message.oseq)); - message.state !== undefined && (obj.state = message.state); - return obj; - }, - - fromPartial, I>>( - object: I, - ): OrderFilters { - const message = createBaseOrderFilters(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.state = object.state ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(OrderFilters.$type, OrderFilters); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin - ? 
P - : P & { [K in keyof P]: Exact } & Record< - Exclude | "$type">, - never - >; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/deprecated/cosmos/base/query/v1beta1/pagination.ts b/ts/src/deprecated/cosmos/base/query/v1beta1/pagination.ts deleted file mode 100644 index b2d50180..00000000 --- a/ts/src/deprecated/cosmos/base/query/v1beta1/pagination.ts +++ /dev/null @@ -1,338 +0,0 @@ -/* eslint-disable */ -import { messageTypeRegistry } from "../../../../typeRegistry"; -import Long from "long"; -import * as _m0 from "protobufjs/minimal"; - -export const protobufPackage = "cosmos.base.query.v1beta1"; - -/** - * PageRequest is to be embedded in gRPC request messages for efficient - * pagination. Ex: - * - * message SomeRequest { - * Foo some_parameter = 1; - * PageRequest pagination = 2; - * } - */ -export interface PageRequest { - $type: "cosmos.base.query.v1beta1.PageRequest"; - /** - * key is a value returned in PageResponse.next_key to begin - * querying the next page most efficiently. Only one of offset or key - * should be set. - */ - key: Uint8Array; - /** - * offset is a numeric offset that can be used when key is unavailable. - * It is less efficient than using key. Only one of offset or key should - * be set. - */ - offset: Long; - /** - * limit is the total number of results to be returned in the result page. - * If left empty it will default to a value to be set by each app. - */ - limit: Long; - /** - * count_total is set to true to indicate that the result set should include - * a count of the total number of items available for pagination in UIs. - * count_total is only respected when offset is used. It is ignored when key - * is set. - */ - countTotal: boolean; - /** - * reverse is set to true if results are to be returned in the descending order. - * - * Since: cosmos-sdk 0.43 - */ - reverse: boolean; -} - -/** - * PageResponse is to be embedded in gRPC response messages where the - * corresponding request message has used PageRequest. 
- * - * message SomeResponse { - * repeated Bar results = 1; - * PageResponse page = 2; - * } - */ -export interface PageResponse { - $type: "cosmos.base.query.v1beta1.PageResponse"; - /** - * next_key is the key to be passed to PageRequest.key to - * query the next page most efficiently - */ - nextKey: Uint8Array; - /** - * total is total number of results available if PageRequest.count_total - * was set, its value is undefined otherwise - */ - total: Long; -} - -function createBasePageRequest(): PageRequest { - return { - $type: "cosmos.base.query.v1beta1.PageRequest", - key: new Uint8Array(), - offset: Long.UZERO, - limit: Long.UZERO, - countTotal: false, - reverse: false, - }; -} - -export const PageRequest = { - $type: "cosmos.base.query.v1beta1.PageRequest" as const, - - encode( - message: PageRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.key.length !== 0) { - writer.uint32(10).bytes(message.key); - } - if (!message.offset.isZero()) { - writer.uint32(16).uint64(message.offset); - } - if (!message.limit.isZero()) { - writer.uint32(24).uint64(message.limit); - } - if (message.countTotal === true) { - writer.uint32(32).bool(message.countTotal); - } - if (message.reverse === true) { - writer.uint32(40).bool(message.reverse); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): PageRequest { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBasePageRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.key = reader.bytes(); - break; - case 2: - message.offset = reader.uint64() as Long; - break; - case 3: - message.limit = reader.uint64() as Long; - break; - case 4: - message.countTotal = reader.bool(); - break; - case 5: - message.reverse = reader.bool(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): PageRequest { - return { - $type: PageRequest.$type, - key: isSet(object.key) ? bytesFromBase64(object.key) : new Uint8Array(), - offset: isSet(object.offset) ? Long.fromValue(object.offset) : Long.UZERO, - limit: isSet(object.limit) ? Long.fromValue(object.limit) : Long.UZERO, - countTotal: isSet(object.countTotal) ? Boolean(object.countTotal) : false, - reverse: isSet(object.reverse) ? Boolean(object.reverse) : false, - }; - }, - - toJSON(message: PageRequest): unknown { - const obj: any = {}; - message.key !== undefined && - (obj.key = base64FromBytes( - message.key !== undefined ? message.key : new Uint8Array(), - )); - message.offset !== undefined && - (obj.offset = (message.offset || Long.UZERO).toString()); - message.limit !== undefined && - (obj.limit = (message.limit || Long.UZERO).toString()); - message.countTotal !== undefined && (obj.countTotal = message.countTotal); - message.reverse !== undefined && (obj.reverse = message.reverse); - return obj; - }, - - fromPartial, I>>( - object: I, - ): PageRequest { - const message = createBasePageRequest(); - message.key = object.key ?? new Uint8Array(); - message.offset = - object.offset !== undefined && object.offset !== null - ? Long.fromValue(object.offset) - : Long.UZERO; - message.limit = - object.limit !== undefined && object.limit !== null - ? Long.fromValue(object.limit) - : Long.UZERO; - message.countTotal = object.countTotal ?? false; - message.reverse = object.reverse ?? 
false; - return message; - }, -}; - -messageTypeRegistry.set(PageRequest.$type, PageRequest); - -function createBasePageResponse(): PageResponse { - return { - $type: "cosmos.base.query.v1beta1.PageResponse", - nextKey: new Uint8Array(), - total: Long.UZERO, - }; -} - -export const PageResponse = { - $type: "cosmos.base.query.v1beta1.PageResponse" as const, - - encode( - message: PageResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.nextKey.length !== 0) { - writer.uint32(10).bytes(message.nextKey); - } - if (!message.total.isZero()) { - writer.uint32(16).uint64(message.total); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): PageResponse { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBasePageResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.nextKey = reader.bytes(); - break; - case 2: - message.total = reader.uint64() as Long; - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): PageResponse { - return { - $type: PageResponse.$type, - nextKey: isSet(object.nextKey) - ? bytesFromBase64(object.nextKey) - : new Uint8Array(), - total: isSet(object.total) ? Long.fromValue(object.total) : Long.UZERO, - }; - }, - - toJSON(message: PageResponse): unknown { - const obj: any = {}; - message.nextKey !== undefined && - (obj.nextKey = base64FromBytes( - message.nextKey !== undefined ? message.nextKey : new Uint8Array(), - )); - message.total !== undefined && - (obj.total = (message.total || Long.UZERO).toString()); - return obj; - }, - - fromPartial, I>>( - object: I, - ): PageResponse { - const message = createBasePageResponse(); - message.nextKey = object.nextKey ?? new Uint8Array(); - message.total = - object.total !== undefined && object.total !== null - ? Long.fromValue(object.total) - : Long.UZERO; - return message; - }, -}; - -messageTypeRegistry.set(PageResponse.$type, PageResponse); - -declare var self: any | undefined; -declare var window: any | undefined; -declare var global: any | undefined; -var globalThis: any = (() => { - if (typeof globalThis !== "undefined") return globalThis; - if (typeof self !== "undefined") return self; - if (typeof window !== "undefined") return window; - if (typeof global !== "undefined") return global; - throw "Unable to locate global object"; -})(); - -const atob: (b64: string) => string = - globalThis.atob || - ((b64) => globalThis.Buffer.from(b64, "base64").toString("binary")); -function bytesFromBase64(b64: string): Uint8Array { - const bin = atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; -} - -const btoa: (bin: string) => string = - globalThis.btoa || - ((bin) => globalThis.Buffer.from(bin, "binary").toString("base64")); -function base64FromBytes(arr: Uint8Array): string { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(String.fromCharCode(byte)); - }); - return btoa(bin.join("")); -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin - ? P - : P & { [K in keyof P]: Exact } & Record< - Exclude | "$type">, - never - >; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/deprecated/cosmos/base/v1beta1/coin.ts b/ts/src/deprecated/cosmos/base/v1beta1/coin.ts deleted file mode 100644 index 77139dfe..00000000 --- a/ts/src/deprecated/cosmos/base/v1beta1/coin.ts +++ /dev/null @@ -1,325 +0,0 @@ -/* eslint-disable */ -import { messageTypeRegistry } from "../../../typeRegistry"; -import Long from "long"; -import * as _m0 from "protobufjs/minimal"; - -export const protobufPackage = "cosmos.base.v1beta1"; - -/** - * Coin defines a token with a denomination and an amount. - * - * NOTE: The amount field is an Int which implements the custom method - * signatures required by gogoproto. - */ -export interface Coin { - $type: "cosmos.base.v1beta1.Coin"; - denom: string; - amount: string; -} - -/** - * DecCoin defines a token with a denomination and a decimal amount. - * - * NOTE: The amount field is an Dec which implements the custom method - * signatures required by gogoproto. - */ -export interface DecCoin { - $type: "cosmos.base.v1beta1.DecCoin"; - denom: string; - amount: string; -} - -/** IntProto defines a Protobuf wrapper around an Int object. */ -export interface IntProto { - $type: "cosmos.base.v1beta1.IntProto"; - int: string; -} - -/** DecProto defines a Protobuf wrapper around a Dec object. */ -export interface DecProto { - $type: "cosmos.base.v1beta1.DecProto"; - dec: string; -} - -function createBaseCoin(): Coin { - return { $type: "cosmos.base.v1beta1.Coin", denom: "", amount: "" }; -} - -export const Coin = { - $type: "cosmos.base.v1beta1.Coin" as const, - - encode(message: Coin, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.denom !== "") { - writer.uint32(10).string(message.denom); - } - if (message.amount !== "") { - writer.uint32(18).string(message.amount); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Coin { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCoin(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.denom = reader.string(); - break; - case 2: - message.amount = reader.string(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): Coin { - return { - $type: Coin.$type, - denom: isSet(object.denom) ? String(object.denom) : "", - amount: isSet(object.amount) ? String(object.amount) : "", - }; - }, - - toJSON(message: Coin): unknown { - const obj: any = {}; - message.denom !== undefined && (obj.denom = message.denom); - message.amount !== undefined && (obj.amount = message.amount); - return obj; - }, - - fromPartial, I>>(object: I): Coin { - const message = createBaseCoin(); - message.denom = object.denom ?? ""; - message.amount = object.amount ?? 
""; - return message; - }, -}; - -messageTypeRegistry.set(Coin.$type, Coin); - -function createBaseDecCoin(): DecCoin { - return { $type: "cosmos.base.v1beta1.DecCoin", denom: "", amount: "" }; -} - -export const DecCoin = { - $type: "cosmos.base.v1beta1.DecCoin" as const, - - encode( - message: DecCoin, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.denom !== "") { - writer.uint32(10).string(message.denom); - } - if (message.amount !== "") { - writer.uint32(18).string(message.amount.padEnd(23, "0")); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DecCoin { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDecCoin(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.denom = reader.string(); - break; - case 2: - message.amount = (parseInt(reader.string()) / 10 ** 18).toPrecision( - 18, - ); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): DecCoin { - return { - $type: DecCoin.$type, - denom: isSet(object.denom) ? String(object.denom) : "", - amount: isSet(object.amount) ? String(object.amount) : "", - }; - }, - - toJSON(message: DecCoin): unknown { - const obj: any = {}; - message.denom !== undefined && (obj.denom = message.denom); - message.amount !== undefined && (obj.amount = message.amount); - return obj; - }, - - fromPartial, I>>(object: I): DecCoin { - const message = createBaseDecCoin(); - message.denom = object.denom ?? ""; - message.amount = object.amount ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(DecCoin.$type, DecCoin); - -function createBaseIntProto(): IntProto { - return { $type: "cosmos.base.v1beta1.IntProto", int: "" }; -} - -export const IntProto = { - $type: "cosmos.base.v1beta1.IntProto" as const, - - encode( - message: IntProto, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.int !== "") { - writer.uint32(10).string(message.int); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): IntProto { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseIntProto(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.int = reader.string(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): IntProto { - return { - $type: IntProto.$type, - int: isSet(object.int) ? String(object.int) : "", - }; - }, - - toJSON(message: IntProto): unknown { - const obj: any = {}; - message.int !== undefined && (obj.int = message.int); - return obj; - }, - - fromPartial, I>>(object: I): IntProto { - const message = createBaseIntProto(); - message.int = object.int ?? 
""; - return message; - }, -}; - -messageTypeRegistry.set(IntProto.$type, IntProto); - -function createBaseDecProto(): DecProto { - return { $type: "cosmos.base.v1beta1.DecProto", dec: "" }; -} - -export const DecProto = { - $type: "cosmos.base.v1beta1.DecProto" as const, - - encode( - message: DecProto, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.dec !== "") { - writer.uint32(10).string(message.dec); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DecProto { - const reader = input instanceof _m0.Reader ? input : new _m0.Reader(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDecProto(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - message.dec = reader.string(); - break; - default: - reader.skipType(tag & 7); - break; - } - } - return message; - }, - - fromJSON(object: any): DecProto { - return { - $type: DecProto.$type, - dec: isSet(object.dec) ? String(object.dec) : "", - }; - }, - - toJSON(message: DecProto): unknown { - const obj: any = {}; - message.dec !== undefined && (obj.dec = message.dec); - return obj; - }, - - fromPartial, I>>(object: I): DecProto { - const message = createBaseDecProto(); - message.dec = object.dec ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(DecProto.$type, DecProto); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -type KeysOfUnion = T extends T ? keyof T : never; -export type Exact = P extends Builtin - ? 
P - : P & { [K in keyof P]: Exact } & Record< - Exclude | "$type">, - never - >; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/deprecated/index.akash.cert.v1beta1.ts b/ts/src/deprecated/index.akash.cert.v1beta1.ts deleted file mode 100644 index 2c571d3c..00000000 --- a/ts/src/deprecated/index.akash.cert.v1beta1.ts +++ /dev/null @@ -1,3 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/cert/v1beta1/cert"; diff --git a/ts/src/deprecated/index.akash.market.v1beta1.ts b/ts/src/deprecated/index.akash.market.v1beta1.ts deleted file mode 100644 index 2c6af733..00000000 --- a/ts/src/deprecated/index.akash.market.v1beta1.ts +++ /dev/null @@ -1,4 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/market/v1beta1/bid"; -export * from "./akash/market/v1beta1/lease"; diff --git a/ts/src/deprecated/typeRegistry.ts b/ts/src/deprecated/typeRegistry.ts deleted file mode 100644 index ec388569..00000000 --- a/ts/src/deprecated/typeRegistry.ts +++ /dev/null @@ -1,36 +0,0 @@ -/* eslint-disable */ -import * as _m0 from "protobufjs/minimal"; -import Long from "long"; - -export interface MessageType { - $type: Message["$type"]; - encode(message: Message, writer?: _m0.Writer): _m0.Writer; - decode(input: _m0.Reader | Uint8Array, length?: number): Message; - fromJSON(object: any): Message; - toJSON(message: Message): unknown; - fromPartial(object: DeepPartial): Message; -} - -export type UnknownMessage = { $type: string }; - -export const messageTypeRegistry = new Map(); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends Array - ? Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; diff --git a/ts/src/generated/akash/audit/v1/audit.ts b/ts/src/generated/akash/audit/v1/audit.ts new file mode 100644 index 00000000..7d299ad1 --- /dev/null +++ b/ts/src/generated/akash/audit/v1/audit.ts @@ -0,0 +1,349 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/audit/v1/audit.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Attribute } from "../../base/attributes/v1/attribute"; + +/** Provider stores owner auditor and attributes details */ +export interface AuditedProvider { + $type: "akash.audit.v1.AuditedProvider"; + owner: string; + auditor: string; + attributes: Attribute[]; +} + +/** Attributes */ +export interface AuditedAttributesStore { + $type: "akash.audit.v1.AuditedAttributesStore"; + attributes: Attribute[]; +} + +/** AttributesFilters defines filters used to filter deployments */ +export interface AttributesFilters { + $type: "akash.audit.v1.AttributesFilters"; + auditors: string[]; + owners: string[]; +} + +function createBaseAuditedProvider(): AuditedProvider { + return { + $type: "akash.audit.v1.AuditedProvider", + owner: "", + auditor: "", + attributes: [], + }; +} + +export const AuditedProvider: MessageFns< + AuditedProvider, + "akash.audit.v1.AuditedProvider" +> = { + $type: "akash.audit.v1.AuditedProvider" as const, + + encode( + message: AuditedProvider, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (message.auditor !== "") { + writer.uint32(18).string(message.auditor); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(34).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): AuditedProvider { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseAuditedProvider(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.auditor = reader.string(); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): AuditedProvider { + return { + $type: AuditedProvider.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + }; + }, + + toJSON(message: AuditedProvider): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.auditor !== "") { + obj.auditor = message.auditor; + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): AuditedProvider { + return AuditedProvider.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): AuditedProvider { + const message = createBaseAuditedProvider(); + message.owner = object.owner ?? ""; + message.auditor = object.auditor ?? 
""; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(AuditedProvider.$type, AuditedProvider); + +function createBaseAuditedAttributesStore(): AuditedAttributesStore { + return { $type: "akash.audit.v1.AuditedAttributesStore", attributes: [] }; +} + +export const AuditedAttributesStore: MessageFns< + AuditedAttributesStore, + "akash.audit.v1.AuditedAttributesStore" +> = { + $type: "akash.audit.v1.AuditedAttributesStore" as const, + + encode( + message: AuditedAttributesStore, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): AuditedAttributesStore { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseAuditedAttributesStore(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): AuditedAttributesStore { + return { + $type: AuditedAttributesStore.$type, + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + }; + }, + + toJSON(message: AuditedAttributesStore): unknown { + const obj: any = {}; + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): AuditedAttributesStore { + return AuditedAttributesStore.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): AuditedAttributesStore { + const message = createBaseAuditedAttributesStore(); + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(AuditedAttributesStore.$type, AuditedAttributesStore); + +function createBaseAttributesFilters(): AttributesFilters { + return { + $type: "akash.audit.v1.AttributesFilters", + auditors: [], + owners: [], + }; +} + +export const AttributesFilters: MessageFns< + AttributesFilters, + "akash.audit.v1.AttributesFilters" +> = { + $type: "akash.audit.v1.AttributesFilters" as const, + + encode( + message: AttributesFilters, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.auditors) { + writer.uint32(10).string(v!); + } + for (const v of message.owners) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): AttributesFilters { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseAttributesFilters(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.auditors.push(reader.string()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.owners.push(reader.string()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): AttributesFilters { + return { + $type: AttributesFilters.$type, + auditors: globalThis.Array.isArray(object?.auditors) + ? object.auditors.map((e: any) => globalThis.String(e)) + : [], + owners: globalThis.Array.isArray(object?.owners) + ? object.owners.map((e: any) => globalThis.String(e)) + : [], + }; + }, + + toJSON(message: AttributesFilters): unknown { + const obj: any = {}; + if (message.auditors?.length) { + obj.auditors = message.auditors; + } + if (message.owners?.length) { + obj.owners = message.owners; + } + return obj; + }, + + create(base?: DeepPartial): AttributesFilters { + return AttributesFilters.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): AttributesFilters { + const message = createBaseAttributesFilters(); + message.auditors = object.auditors?.map((e) => e) || []; + message.owners = object.owners?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(AttributesFilters.$type, AttributesFilters); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/audit/v1/event.ts b/ts/src/generated/akash/audit/v1/event.ts new file mode 100644 index 00000000..5ca12e3a --- /dev/null +++ b/ts/src/generated/akash/audit/v1/event.ts @@ -0,0 +1,259 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/audit/v1/event.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; + +/** EventTrustedAuditorCreated defines an SDK message for signing a provider attributes */ +export interface EventTrustedAuditorCreated { + $type: "akash.audit.v1.EventTrustedAuditorCreated"; + owner: string; + auditor: string; +} + +/** EventTrustedAuditorCreated defines an SDK message for signing a provider attributes */ +export interface EventTrustedAuditorDeleted { + $type: "akash.audit.v1.EventTrustedAuditorDeleted"; + owner: string; + auditor: string; +} + +function createBaseEventTrustedAuditorCreated(): EventTrustedAuditorCreated { + return { + $type: "akash.audit.v1.EventTrustedAuditorCreated", + owner: "", + auditor: "", + }; +} + +export const EventTrustedAuditorCreated: MessageFns< + EventTrustedAuditorCreated, + "akash.audit.v1.EventTrustedAuditorCreated" +> = { + $type: "akash.audit.v1.EventTrustedAuditorCreated" as const, + + encode( + message: EventTrustedAuditorCreated, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (message.auditor !== "") { + writer.uint32(18).string(message.auditor); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): EventTrustedAuditorCreated { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseEventTrustedAuditorCreated(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.auditor = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventTrustedAuditorCreated { + return { + $type: EventTrustedAuditorCreated.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", + }; + }, + + toJSON(message: EventTrustedAuditorCreated): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.auditor !== "") { + obj.auditor = message.auditor; + } + return obj; + }, + + create( + base?: DeepPartial, + ): EventTrustedAuditorCreated { + return EventTrustedAuditorCreated.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): EventTrustedAuditorCreated { + const message = createBaseEventTrustedAuditorCreated(); + message.owner = object.owner ?? ""; + message.auditor = object.auditor ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + EventTrustedAuditorCreated.$type, + EventTrustedAuditorCreated, +); + +function createBaseEventTrustedAuditorDeleted(): EventTrustedAuditorDeleted { + return { + $type: "akash.audit.v1.EventTrustedAuditorDeleted", + owner: "", + auditor: "", + }; +} + +export const EventTrustedAuditorDeleted: MessageFns< + EventTrustedAuditorDeleted, + "akash.audit.v1.EventTrustedAuditorDeleted" +> = { + $type: "akash.audit.v1.EventTrustedAuditorDeleted" as const, + + encode( + message: EventTrustedAuditorDeleted, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (message.auditor !== "") { + writer.uint32(18).string(message.auditor); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): EventTrustedAuditorDeleted { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseEventTrustedAuditorDeleted(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.auditor = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventTrustedAuditorDeleted { + return { + $type: EventTrustedAuditorDeleted.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", + }; + }, + + toJSON(message: EventTrustedAuditorDeleted): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.auditor !== "") { + obj.auditor = message.auditor; + } + return obj; + }, + + create( + base?: DeepPartial, + ): EventTrustedAuditorDeleted { + return EventTrustedAuditorDeleted.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): EventTrustedAuditorDeleted { + const message = createBaseEventTrustedAuditorDeleted(); + message.owner = object.owner ?? ""; + message.auditor = object.auditor ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + EventTrustedAuditorDeleted.$type, + EventTrustedAuditorDeleted, +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/audit/v1/genesis.ts b/ts/src/generated/akash/audit/v1/genesis.ts new file mode 100644 index 00000000..116b0780 --- /dev/null +++ b/ts/src/generated/akash/audit/v1/genesis.ts @@ -0,0 +1,124 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/audit/v1/genesis.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { AuditedProvider } from "./audit"; + +/** GenesisState defines the basic genesis state used by audit module */ +export interface GenesisState { + $type: "akash.audit.v1.GenesisState"; + providers: AuditedProvider[]; +} + +function createBaseGenesisState(): GenesisState { + return { $type: "akash.audit.v1.GenesisState", providers: [] }; +} + +export const GenesisState: MessageFns< + GenesisState, + "akash.audit.v1.GenesisState" +> = { + $type: "akash.audit.v1.GenesisState" as const, + + encode( + message: GenesisState, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.providers) { + AuditedProvider.encode(v!, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): GenesisState { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGenesisState(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.providers.push( + AuditedProvider.decode(reader, reader.uint32()), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): GenesisState { + return { + $type: GenesisState.$type, + providers: globalThis.Array.isArray(object?.providers) + ? object.providers.map((e: any) => AuditedProvider.fromJSON(e)) + : [], + }; + }, + + toJSON(message: GenesisState): unknown { + const obj: any = {}; + if (message.providers?.length) { + obj.providers = message.providers.map((e) => AuditedProvider.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): GenesisState { + return GenesisState.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GenesisState { + const message = createBaseGenesisState(); + message.providers = + object.providers?.map((e) => AuditedProvider.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(GenesisState.$type, GenesisState); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/audit/v1/msg.ts b/ts/src/generated/akash/audit/v1/msg.ts new file mode 100644 index 00000000..9d7d59ea --- /dev/null +++ b/ts/src/generated/akash/audit/v1/msg.ts @@ -0,0 +1,437 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/audit/v1/msg.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Attribute } from "../../base/attributes/v1/attribute"; + +/** MsgSignProviderAttributes defines an SDK message for signing a provider attributes */ +export interface MsgSignProviderAttributes { + $type: "akash.audit.v1.MsgSignProviderAttributes"; + owner: string; + auditor: string; + attributes: Attribute[]; +} + +/** MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. */ +export interface MsgSignProviderAttributesResponse { + $type: "akash.audit.v1.MsgSignProviderAttributesResponse"; +} + +/** MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes */ +export interface MsgDeleteProviderAttributes { + $type: "akash.audit.v1.MsgDeleteProviderAttributes"; + owner: string; + auditor: string; + keys: string[]; +} + +/** MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. */ +export interface MsgDeleteProviderAttributesResponse { + $type: "akash.audit.v1.MsgDeleteProviderAttributesResponse"; +} + +function createBaseMsgSignProviderAttributes(): MsgSignProviderAttributes { + return { + $type: "akash.audit.v1.MsgSignProviderAttributes", + owner: "", + auditor: "", + attributes: [], + }; +} + +export const MsgSignProviderAttributes: MessageFns< + MsgSignProviderAttributes, + "akash.audit.v1.MsgSignProviderAttributes" +> = { + $type: "akash.audit.v1.MsgSignProviderAttributes" as const, + + encode( + message: MsgSignProviderAttributes, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (message.auditor !== "") { + writer.uint32(18).string(message.auditor); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(26).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgSignProviderAttributes { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgSignProviderAttributes(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.auditor = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgSignProviderAttributes { + return { + $type: MsgSignProviderAttributes.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", + attributes: globalThis.Array.isArray(object?.attributes) + ? 
object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + }; + }, + + toJSON(message: MsgSignProviderAttributes): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.auditor !== "") { + obj.auditor = message.auditor; + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + return obj; + }, + + create( + base?: DeepPartial, + ): MsgSignProviderAttributes { + return MsgSignProviderAttributes.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): MsgSignProviderAttributes { + const message = createBaseMsgSignProviderAttributes(); + message.owner = object.owner ?? ""; + message.auditor = object.auditor ?? ""; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set( + MsgSignProviderAttributes.$type, + MsgSignProviderAttributes, +); + +function createBaseMsgSignProviderAttributesResponse(): MsgSignProviderAttributesResponse { + return { $type: "akash.audit.v1.MsgSignProviderAttributesResponse" }; +} + +export const MsgSignProviderAttributesResponse: MessageFns< + MsgSignProviderAttributesResponse, + "akash.audit.v1.MsgSignProviderAttributesResponse" +> = { + $type: "akash.audit.v1.MsgSignProviderAttributesResponse" as const, + + encode( + _: MsgSignProviderAttributesResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgSignProviderAttributesResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgSignProviderAttributesResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgSignProviderAttributesResponse { + return { $type: MsgSignProviderAttributesResponse.$type }; + }, + + toJSON(_: MsgSignProviderAttributesResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgSignProviderAttributesResponse { + return MsgSignProviderAttributesResponse.fromPartial(base ?? 
{}); + }, + fromPartial( + _: DeepPartial, + ): MsgSignProviderAttributesResponse { + const message = createBaseMsgSignProviderAttributesResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgSignProviderAttributesResponse.$type, + MsgSignProviderAttributesResponse, +); + +function createBaseMsgDeleteProviderAttributes(): MsgDeleteProviderAttributes { + return { + $type: "akash.audit.v1.MsgDeleteProviderAttributes", + owner: "", + auditor: "", + keys: [], + }; +} + +export const MsgDeleteProviderAttributes: MessageFns< + MsgDeleteProviderAttributes, + "akash.audit.v1.MsgDeleteProviderAttributes" +> = { + $type: "akash.audit.v1.MsgDeleteProviderAttributes" as const, + + encode( + message: MsgDeleteProviderAttributes, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (message.auditor !== "") { + writer.uint32(18).string(message.auditor); + } + for (const v of message.keys) { + writer.uint32(26).string(v!); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgDeleteProviderAttributes { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgDeleteProviderAttributes(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.auditor = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.keys.push(reader.string()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgDeleteProviderAttributes { + return { + $type: MsgDeleteProviderAttributes.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", + keys: globalThis.Array.isArray(object?.keys) + ? object.keys.map((e: any) => globalThis.String(e)) + : [], + }; + }, + + toJSON(message: MsgDeleteProviderAttributes): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.auditor !== "") { + obj.auditor = message.auditor; + } + if (message.keys?.length) { + obj.keys = message.keys; + } + return obj; + }, + + create( + base?: DeepPartial, + ): MsgDeleteProviderAttributes { + return MsgDeleteProviderAttributes.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): MsgDeleteProviderAttributes { + const message = createBaseMsgDeleteProviderAttributes(); + message.owner = object.owner ?? ""; + message.auditor = object.auditor ?? 
""; + message.keys = object.keys?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + MsgDeleteProviderAttributes.$type, + MsgDeleteProviderAttributes, +); + +function createBaseMsgDeleteProviderAttributesResponse(): MsgDeleteProviderAttributesResponse { + return { $type: "akash.audit.v1.MsgDeleteProviderAttributesResponse" }; +} + +export const MsgDeleteProviderAttributesResponse: MessageFns< + MsgDeleteProviderAttributesResponse, + "akash.audit.v1.MsgDeleteProviderAttributesResponse" +> = { + $type: "akash.audit.v1.MsgDeleteProviderAttributesResponse" as const, + + encode( + _: MsgDeleteProviderAttributesResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgDeleteProviderAttributesResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgDeleteProviderAttributesResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgDeleteProviderAttributesResponse { + return { $type: MsgDeleteProviderAttributesResponse.$type }; + }, + + toJSON(_: MsgDeleteProviderAttributesResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgDeleteProviderAttributesResponse { + return MsgDeleteProviderAttributesResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgDeleteProviderAttributesResponse { + const message = createBaseMsgDeleteProviderAttributesResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgDeleteProviderAttributesResponse.$type, + MsgDeleteProviderAttributesResponse, +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/audit/v1/query.ts b/ts/src/generated/akash/audit/v1/query.ts new file mode 100644 index 00000000..56fb28fd --- /dev/null +++ b/ts/src/generated/akash/audit/v1/query.ts @@ -0,0 +1,793 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/audit/v1/query.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { + PageRequest, + PageResponse, +} from "../../../cosmos/base/query/v1beta1/pagination"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { AuditedProvider } from "./audit"; + +/** QueryProvidersResponse is response type for the Query/Providers RPC method */ +export interface QueryProvidersResponse { + $type: "akash.audit.v1.QueryProvidersResponse"; + providers: AuditedProvider[]; + pagination: PageResponse | undefined; +} + +/** QueryProviderRequest is request type for the Query/Provider RPC method */ +export interface QueryProviderRequest { + $type: "akash.audit.v1.QueryProviderRequest"; + auditor: string; + owner: string; +} + +/** QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method */ +export interface QueryAllProvidersAttributesRequest { + $type: "akash.audit.v1.QueryAllProvidersAttributesRequest"; + pagination: PageRequest | undefined; +} + +/** QueryProviderAttributesRequest is request type for the Query/Provider RPC method */ +export interface QueryProviderAttributesRequest { + $type: "akash.audit.v1.QueryProviderAttributesRequest"; + owner: string; + pagination: PageRequest | undefined; +} + +/** QueryProviderAuditorRequest is request type for the Query/Providers RPC method */ +export interface QueryProviderAuditorRequest { + $type: "akash.audit.v1.QueryProviderAuditorRequest"; + auditor: string; + owner: string; +} + +/** QueryAuditorAttributesRequest is request type for the Query/Providers RPC method */ +export interface QueryAuditorAttributesRequest { + $type: "akash.audit.v1.QueryAuditorAttributesRequest"; + auditor: string; + pagination: PageRequest | undefined; +} + +function createBaseQueryProvidersResponse(): QueryProvidersResponse { + return { + $type: "akash.audit.v1.QueryProvidersResponse", + providers: [], + pagination: undefined, + }; +} + +export const QueryProvidersResponse: MessageFns< + QueryProvidersResponse, + "akash.audit.v1.QueryProvidersResponse" +> = { + $type: "akash.audit.v1.QueryProvidersResponse" as const, + + encode( + message: QueryProvidersResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.providers) { + AuditedProvider.encode(v!, writer.uint32(10).fork()).join(); + } + if (message.pagination !== undefined) { + PageResponse.encode(message.pagination, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryProvidersResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryProvidersResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.providers.push( + AuditedProvider.decode(reader, reader.uint32()), + ); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageResponse.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryProvidersResponse { + return { + $type: QueryProvidersResponse.$type, + providers: globalThis.Array.isArray(object?.providers) + ? 
object.providers.map((e: any) => AuditedProvider.fromJSON(e)) + : [], + pagination: isSet(object.pagination) + ? PageResponse.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryProvidersResponse): unknown { + const obj: any = {}; + if (message.providers?.length) { + obj.providers = message.providers.map((e) => AuditedProvider.toJSON(e)); + } + if (message.pagination !== undefined) { + obj.pagination = PageResponse.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryProvidersResponse { + return QueryProvidersResponse.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryProvidersResponse { + const message = createBaseQueryProvidersResponse(); + message.providers = + object.providers?.map((e) => AuditedProvider.fromPartial(e)) || []; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageResponse.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryProvidersResponse.$type, QueryProvidersResponse); + +function createBaseQueryProviderRequest(): QueryProviderRequest { + return { + $type: "akash.audit.v1.QueryProviderRequest", + auditor: "", + owner: "", + }; +} + +export const QueryProviderRequest: MessageFns< + QueryProviderRequest, + "akash.audit.v1.QueryProviderRequest" +> = { + $type: "akash.audit.v1.QueryProviderRequest" as const, + + encode( + message: QueryProviderRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.auditor !== "") { + writer.uint32(10).string(message.auditor); + } + if (message.owner !== "") { + writer.uint32(18).string(message.owner); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryProviderRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryProviderRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.auditor = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.owner = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryProviderRequest { + return { + $type: QueryProviderRequest.$type, + auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + }; + }, + + toJSON(message: QueryProviderRequest): unknown { + const obj: any = {}; + if (message.auditor !== "") { + obj.auditor = message.auditor; + } + if (message.owner !== "") { + obj.owner = message.owner; + } + return obj; + }, + + create(base?: DeepPartial): QueryProviderRequest { + return QueryProviderRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryProviderRequest { + const message = createBaseQueryProviderRequest(); + message.auditor = object.auditor ?? ""; + message.owner = object.owner ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(QueryProviderRequest.$type, QueryProviderRequest); + +function createBaseQueryAllProvidersAttributesRequest(): QueryAllProvidersAttributesRequest { + return { + $type: "akash.audit.v1.QueryAllProvidersAttributesRequest", + pagination: undefined, + }; +} + +export const QueryAllProvidersAttributesRequest: MessageFns< + QueryAllProvidersAttributesRequest, + "akash.audit.v1.QueryAllProvidersAttributesRequest" +> = { + $type: "akash.audit.v1.QueryAllProvidersAttributesRequest" as const, + + encode( + message: QueryAllProvidersAttributesRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryAllProvidersAttributesRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryAllProvidersAttributesRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryAllProvidersAttributesRequest { + return { + $type: QueryAllProvidersAttributesRequest.$type, + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryAllProvidersAttributesRequest): unknown { + const obj: any = {}; + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create( + base?: DeepPartial, + ): QueryAllProvidersAttributesRequest { + return QueryAllProvidersAttributesRequest.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryAllProvidersAttributesRequest { + const message = createBaseQueryAllProvidersAttributesRequest(); + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + QueryAllProvidersAttributesRequest.$type, + QueryAllProvidersAttributesRequest, +); + +function createBaseQueryProviderAttributesRequest(): QueryProviderAttributesRequest { + return { + $type: "akash.audit.v1.QueryProviderAttributesRequest", + owner: "", + pagination: undefined, + }; +} + +export const QueryProviderAttributesRequest: MessageFns< + QueryProviderAttributesRequest, + "akash.audit.v1.QueryProviderAttributesRequest" +> = { + $type: "akash.audit.v1.QueryProviderAttributesRequest" as const, + + encode( + message: QueryProviderAttributesRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryProviderAttributesRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryProviderAttributesRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryProviderAttributesRequest { + return { + $type: QueryProviderAttributesRequest.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryProviderAttributesRequest): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create( + base?: DeepPartial, + ): QueryProviderAttributesRequest { + return QueryProviderAttributesRequest.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryProviderAttributesRequest { + const message = createBaseQueryProviderAttributesRequest(); + message.owner = object.owner ?? ""; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + QueryProviderAttributesRequest.$type, + QueryProviderAttributesRequest, +); + +function createBaseQueryProviderAuditorRequest(): QueryProviderAuditorRequest { + return { + $type: "akash.audit.v1.QueryProviderAuditorRequest", + auditor: "", + owner: "", + }; +} + +export const QueryProviderAuditorRequest: MessageFns< + QueryProviderAuditorRequest, + "akash.audit.v1.QueryProviderAuditorRequest" +> = { + $type: "akash.audit.v1.QueryProviderAuditorRequest" as const, + + encode( + message: QueryProviderAuditorRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.auditor !== "") { + writer.uint32(10).string(message.auditor); + } + if (message.owner !== "") { + writer.uint32(18).string(message.owner); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryProviderAuditorRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryProviderAuditorRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.auditor = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.owner = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryProviderAuditorRequest { + return { + $type: QueryProviderAuditorRequest.$type, + auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", + owner: isSet(object.owner) ? 
globalThis.String(object.owner) : "", + }; + }, + + toJSON(message: QueryProviderAuditorRequest): unknown { + const obj: any = {}; + if (message.auditor !== "") { + obj.auditor = message.auditor; + } + if (message.owner !== "") { + obj.owner = message.owner; + } + return obj; + }, + + create( + base?: DeepPartial, + ): QueryProviderAuditorRequest { + return QueryProviderAuditorRequest.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryProviderAuditorRequest { + const message = createBaseQueryProviderAuditorRequest(); + message.auditor = object.auditor ?? ""; + message.owner = object.owner ?? ""; + return message; + }, +}; + +messageTypeRegistry.set( + QueryProviderAuditorRequest.$type, + QueryProviderAuditorRequest, +); + +function createBaseQueryAuditorAttributesRequest(): QueryAuditorAttributesRequest { + return { + $type: "akash.audit.v1.QueryAuditorAttributesRequest", + auditor: "", + pagination: undefined, + }; +} + +export const QueryAuditorAttributesRequest: MessageFns< + QueryAuditorAttributesRequest, + "akash.audit.v1.QueryAuditorAttributesRequest" +> = { + $type: "akash.audit.v1.QueryAuditorAttributesRequest" as const, + + encode( + message: QueryAuditorAttributesRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.auditor !== "") { + writer.uint32(10).string(message.auditor); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryAuditorAttributesRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryAuditorAttributesRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.auditor = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryAuditorAttributesRequest { + return { + $type: QueryAuditorAttributesRequest.$type, + auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryAuditorAttributesRequest): unknown { + const obj: any = {}; + if (message.auditor !== "") { + obj.auditor = message.auditor; + } + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create( + base?: DeepPartial, + ): QueryAuditorAttributesRequest { + return QueryAuditorAttributesRequest.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryAuditorAttributesRequest { + const message = createBaseQueryAuditorAttributesRequest(); + message.auditor = object.auditor ?? ""; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? 
PageRequest.fromPartial(object.pagination)
+        : undefined;
+    return message;
+  },
+};
+
+messageTypeRegistry.set(
+  QueryAuditorAttributesRequest.$type,
+  QueryAuditorAttributesRequest,
+);
+
+/** Query defines the gRPC querier service */
+export interface Query {
+  /**
+   * AllProvidersAttributes queries all providers
+   * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE
+   * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME
+   */
+  AllProvidersAttributes(
+    request: QueryAllProvidersAttributesRequest,
+  ): Promise<QueryProvidersResponse>;
+  /**
+   * ProviderAttributes queries all provider signed attributes
+   * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE
+   * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME
+   */
+  ProviderAttributes(
+    request: QueryProviderAttributesRequest,
+  ): Promise<QueryProvidersResponse>;
+  /**
+   * ProviderAuditorAttributes queries provider signed attributes by specific auditor
+   * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE
+   * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME
+   */
+  ProviderAuditorAttributes(
+    request: QueryProviderAuditorRequest,
+  ): Promise<QueryProvidersResponse>;
+  /**
+   * AuditorAttributes queries all providers signed by this auditor
+   * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE
+   * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME
+   */
+  AuditorAttributes(
+    request: QueryAuditorAttributesRequest,
+  ): Promise<QueryProvidersResponse>;
+}
+
+export const QueryServiceName = "akash.audit.v1.Query";
+export class QueryClientImpl implements Query {
+  private readonly rpc: Rpc;
+  private readonly service: string;
+  constructor(rpc: Rpc, opts?: { service?: string }) {
+    this.service = opts?.service || QueryServiceName;
+    this.rpc = rpc;
+    this.AllProvidersAttributes = this.AllProvidersAttributes.bind(this);
+    this.ProviderAttributes = this.ProviderAttributes.bind(this);
+    this.ProviderAuditorAttributes = this.ProviderAuditorAttributes.bind(this);
+    this.AuditorAttributes = this.AuditorAttributes.bind(this);
+  }
+  AllProvidersAttributes(
+    request: QueryAllProvidersAttributesRequest,
+  ): Promise<QueryProvidersResponse> {
+    const data = QueryAllProvidersAttributesRequest.encode(request).finish();
+    const promise = this.rpc.request(
+      this.service,
+      "AllProvidersAttributes",
+      data,
+    );
+    return promise.then((data) =>
+      QueryProvidersResponse.decode(new BinaryReader(data)),
+    );
+  }
+
+  ProviderAttributes(
+    request: QueryProviderAttributesRequest,
+  ): Promise<QueryProvidersResponse> {
+    const data = QueryProviderAttributesRequest.encode(request).finish();
+    const promise = this.rpc.request(this.service, "ProviderAttributes", data);
+    return promise.then((data) =>
+      QueryProvidersResponse.decode(new BinaryReader(data)),
+    );
+  }
+
+  ProviderAuditorAttributes(
+    request: QueryProviderAuditorRequest,
+  ): Promise<QueryProvidersResponse> {
+    const data = QueryProviderAuditorRequest.encode(request).finish();
+    const promise = this.rpc.request(
+      this.service,
+      "ProviderAuditorAttributes",
+      data,
+    );
+    return promise.then((data) =>
+      QueryProvidersResponse.decode(new BinaryReader(data)),
+    );
+  }
+
+  AuditorAttributes(
+    request: QueryAuditorAttributesRequest,
+  ): Promise<QueryProvidersResponse> {
+    const data = QueryAuditorAttributesRequest.encode(request).finish();
+    const promise = this.rpc.request(this.service, "AuditorAttributes", data);
+    return promise.then((data) =>
+      QueryProvidersResponse.decode(new BinaryReader(data)),
+    );
+  }
+}
+
+interface Rpc {
+  request(
+    service: string,
+    method: string,
+    data: Uint8Array,
+  ): Promise<Uint8Array>;
+}
+
+type Builtin =
+  | Date
+  | Function
+  | Uint8Array
+  | string
+  | number
+  | boolean
+  | undefined;
+
+type DeepPartial<T> = T extends Builtin
+  ? T
+  : T extends Long
+    ? string | number | Long
+    : T extends globalThis.Array<infer U>
+      ? globalThis.Array<DeepPartial<U>>
+      : T extends ReadonlyArray<infer U>
+        ? ReadonlyArray<DeepPartial<U>>
+        : T extends {}
+          ? { [K in Exclude<keyof T, "$type">]?: DeepPartial<T[K]> }
+          : Partial<T>;
+
+function isSet(value: any): boolean {
+  return value !== null && value !== undefined;
+}
+
+interface MessageFns<T, V extends string> {
+  readonly $type: V;
+  encode(message: T, writer?: BinaryWriter): BinaryWriter;
+  decode(input: BinaryReader | Uint8Array, length?: number): T;
+  fromJSON(object: any): T;
+  toJSON(message: T): unknown;
+  create(base?: DeepPartial<T>): T;
+  fromPartial(object: DeepPartial<T>): T;
+}
diff --git a/ts/src/generated/akash/audit/v1/service.grpc-js.ts b/ts/src/generated/akash/audit/v1/service.grpc-js.ts
new file mode 100644
index 00000000..87294ba4
--- /dev/null
+++ b/ts/src/generated/akash/audit/v1/service.grpc-js.ts
@@ -0,0 +1,141 @@
+// Code generated by protoc-gen-ts_proto. DO NOT EDIT.
+// versions:
+// protoc-gen-ts_proto v2.2.0
+// protoc unknown
+// source: akash/audit/v1/service.proto
+
+/* eslint-disable */
+import {
+  ChannelCredentials,
+  Client,
+  makeGenericClientConstructor,
+  Metadata,
+} from "@grpc/grpc-js";
+import type {
+  CallOptions,
+  ClientOptions,
+  ClientUnaryCall,
+  handleUnaryCall,
+  ServiceError,
+  UntypedServiceImplementation,
+} from "@grpc/grpc-js";
+import {
+  MsgDeleteProviderAttributes,
+  MsgDeleteProviderAttributesResponse,
+  MsgSignProviderAttributes,
+  MsgSignProviderAttributesResponse,
+} from "./msg";
+
+export const protobufPackage = "akash.audit.v1";
+
+/** Msg defines the provider Msg service */
+export type MsgService = typeof MsgService;
+export const MsgService = {
+  /** SignProviderAttributes defines a method that signs provider attributes */
+  signProviderAttributes: {
+    path: "/akash.audit.v1.Msg/SignProviderAttributes",
+    requestStream: false,
+    responseStream: false,
+    requestSerialize: (value: MsgSignProviderAttributes) =>
+      Buffer.from(MsgSignProviderAttributes.encode(value).finish()),
+    requestDeserialize: (value: Buffer) =>
+      MsgSignProviderAttributes.decode(value),
+    responseSerialize: (value: MsgSignProviderAttributesResponse) =>
+      Buffer.from(MsgSignProviderAttributesResponse.encode(value).finish()),
+    responseDeserialize: (value: Buffer) =>
+      MsgSignProviderAttributesResponse.decode(value),
+  },
+  /** DeleteProviderAttributes defines a method that deletes provider attributes */
+  deleteProviderAttributes: {
+    path: "/akash.audit.v1.Msg/DeleteProviderAttributes",
+    requestStream: false,
+    responseStream: false,
+    requestSerialize: (value: MsgDeleteProviderAttributes) =>
+      Buffer.from(MsgDeleteProviderAttributes.encode(value).finish()),
+    requestDeserialize: (value: Buffer) =>
+      MsgDeleteProviderAttributes.decode(value),
+    responseSerialize: (value: MsgDeleteProviderAttributesResponse) =>
+      Buffer.from(MsgDeleteProviderAttributesResponse.encode(value).finish()),
+    responseDeserialize: (value: Buffer) =>
+      MsgDeleteProviderAttributesResponse.decode(value),
+  },
+} as const;
+
+export interface MsgServer extends UntypedServiceImplementation {
+  /** SignProviderAttributes defines a method that signs provider attributes */
+  signProviderAttributes: handleUnaryCall<
+    MsgSignProviderAttributes,
+    MsgSignProviderAttributesResponse
+  >;
+  /** DeleteProviderAttributes defines a method that deletes provider attributes */
+  deleteProviderAttributes: handleUnaryCall<
+    MsgDeleteProviderAttributes,
+    MsgDeleteProviderAttributesResponse
+  >;
+}
+
+export interface MsgClient extends Client {
+  /** SignProviderAttributes defines a method that signs provider 
attributes */ + signProviderAttributes( + request: MsgSignProviderAttributes, + callback: ( + error: ServiceError | null, + response: MsgSignProviderAttributesResponse, + ) => void, + ): ClientUnaryCall; + signProviderAttributes( + request: MsgSignProviderAttributes, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgSignProviderAttributesResponse, + ) => void, + ): ClientUnaryCall; + signProviderAttributes( + request: MsgSignProviderAttributes, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgSignProviderAttributesResponse, + ) => void, + ): ClientUnaryCall; + /** DeleteProviderAttributes defines a method that deletes provider attributes */ + deleteProviderAttributes( + request: MsgDeleteProviderAttributes, + callback: ( + error: ServiceError | null, + response: MsgDeleteProviderAttributesResponse, + ) => void, + ): ClientUnaryCall; + deleteProviderAttributes( + request: MsgDeleteProviderAttributes, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgDeleteProviderAttributesResponse, + ) => void, + ): ClientUnaryCall; + deleteProviderAttributes( + request: MsgDeleteProviderAttributes, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgDeleteProviderAttributesResponse, + ) => void, + ): ClientUnaryCall; +} + +export const MsgClient = makeGenericClientConstructor( + MsgService, + "akash.audit.v1.Msg", +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial, + ): MsgClient; + service: typeof MsgService; + serviceName: string; +}; diff --git a/ts/src/generated/akash/audit/v1/service.ts b/ts/src/generated/akash/audit/v1/service.ts new file mode 100644 index 00000000..7110a123 --- /dev/null +++ b/ts/src/generated/akash/audit/v1/service.ts @@ -0,0 +1,73 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions:
+// protoc-gen-ts_proto v2.2.0
+// protoc unknown
+// source: akash/audit/v1/service.proto
+
+/* eslint-disable */
+import { BinaryReader } from "@bufbuild/protobuf/wire";
+import {
+  MsgDeleteProviderAttributes,
+  MsgDeleteProviderAttributesResponse,
+  MsgSignProviderAttributes,
+  MsgSignProviderAttributesResponse,
+} from "./msg";
+
+/** Msg defines the provider Msg service */
+export interface Msg {
+  /** SignProviderAttributes defines a method that signs provider attributes */
+  SignProviderAttributes(
+    request: MsgSignProviderAttributes,
+  ): Promise<MsgSignProviderAttributesResponse>;
+  /** DeleteProviderAttributes defines a method that deletes provider attributes */
+  DeleteProviderAttributes(
+    request: MsgDeleteProviderAttributes,
+  ): Promise<MsgDeleteProviderAttributesResponse>;
+}
+
+export const MsgServiceName = "akash.audit.v1.Msg";
+export class MsgClientImpl implements Msg {
+  private readonly rpc: Rpc;
+  private readonly service: string;
+  constructor(rpc: Rpc, opts?: { service?: string }) {
+    this.service = opts?.service || MsgServiceName;
+    this.rpc = rpc;
+    this.SignProviderAttributes = this.SignProviderAttributes.bind(this);
+    this.DeleteProviderAttributes = this.DeleteProviderAttributes.bind(this);
+  }
+  SignProviderAttributes(
+    request: MsgSignProviderAttributes,
+  ): Promise<MsgSignProviderAttributesResponse> {
+    const data = MsgSignProviderAttributes.encode(request).finish();
+    const promise = this.rpc.request(
+      this.service,
+      "SignProviderAttributes",
+      data,
+    );
+    return promise.then((data) =>
+      MsgSignProviderAttributesResponse.decode(new BinaryReader(data)),
+    );
+  }
+
+  DeleteProviderAttributes(
+    request: MsgDeleteProviderAttributes,
+  ): Promise<MsgDeleteProviderAttributesResponse> {
+    const data = MsgDeleteProviderAttributes.encode(request).finish();
+    const promise = this.rpc.request(
+      this.service,
+      "DeleteProviderAttributes",
+      data,
+    );
+    return promise.then((data) =>
+      MsgDeleteProviderAttributesResponse.decode(new BinaryReader(data)),
+    );
+  }
+}
+
+interface Rpc {
+  request(
+    service: string,
+    method: string,
+    data: Uint8Array,
+  ): Promise<Uint8Array>;
+}
diff --git a/ts/src/generated/akash/audit/v1beta1/audit.ts b/ts/src/generated/akash/audit/v1beta1/audit.ts
deleted file mode 100644
index 218bc82b..00000000
--- a/ts/src/generated/akash/audit/v1beta1/audit.ts
+++ /dev/null
@@ -1,880 +0,0 @@
-/* eslint-disable */
-import Long from "long";
-import _m0 from "protobufjs/minimal";
-import { messageTypeRegistry } from "../../../typeRegistry";
-import { Attribute } from "../../base/v1beta1/attribute";
-
-/** Provider stores owner auditor and attributes details */
-export interface Provider {
-  $type: "akash.audit.v1beta1.Provider";
-  owner: string;
-  auditor: string;
-  attributes: Attribute[];
-}
-
-/** Attributes */
-export interface AuditedAttributes {
-  $type: "akash.audit.v1beta1.AuditedAttributes";
-  owner: string;
-  auditor: string;
-  attributes: Attribute[];
-}
-
-/** AttributesResponse represents details of deployment along with group details */
-export interface AttributesResponse {
-  $type: "akash.audit.v1beta1.AttributesResponse";
-  attributes: AuditedAttributes[];
-}
-
-/** AttributesFilters defines filters used to filter deployments */
-export interface AttributesFilters {
-  $type: "akash.audit.v1beta1.AttributesFilters";
-  auditors: string[];
-  owners: string[];
-}
-
-/** MsgSignProviderAttributes defines an SDK message for signing a provider attributes */
-export interface MsgSignProviderAttributes {
-  $type: "akash.audit.v1beta1.MsgSignProviderAttributes";
-  owner: string;
-  auditor: string;
-  attributes: Attribute[];
-}
-
-/** MsgSignProviderAttributesResponse 
defines the Msg/CreateProvider response type. */ -export interface MsgSignProviderAttributesResponse { - $type: "akash.audit.v1beta1.MsgSignProviderAttributesResponse"; -} - -/** MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes */ -export interface MsgDeleteProviderAttributes { - $type: "akash.audit.v1beta1.MsgDeleteProviderAttributes"; - owner: string; - auditor: string; - keys: string[]; -} - -/** MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. */ -export interface MsgDeleteProviderAttributesResponse { - $type: "akash.audit.v1beta1.MsgDeleteProviderAttributesResponse"; -} - -function createBaseProvider(): Provider { - return { - $type: "akash.audit.v1beta1.Provider", - owner: "", - auditor: "", - attributes: [], - }; -} - -export const Provider = { - $type: "akash.audit.v1beta1.Provider" as const, - - encode( - message: Provider, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.auditor !== "") { - writer.uint32(18).string(message.auditor); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Provider { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseProvider(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.auditor = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Provider { - return { - $type: Provider.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: Provider): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): Provider { - return Provider.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Provider { - const message = createBaseProvider(); - message.owner = object.owner ?? ""; - message.auditor = object.auditor ?? 
""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(Provider.$type, Provider); - -function createBaseAuditedAttributes(): AuditedAttributes { - return { - $type: "akash.audit.v1beta1.AuditedAttributes", - owner: "", - auditor: "", - attributes: [], - }; -} - -export const AuditedAttributes = { - $type: "akash.audit.v1beta1.AuditedAttributes" as const, - - encode( - message: AuditedAttributes, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.auditor !== "") { - writer.uint32(18).string(message.auditor); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): AuditedAttributes { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseAuditedAttributes(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.auditor = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): AuditedAttributes { - return { - $type: AuditedAttributes.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: AuditedAttributes): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): AuditedAttributes { - return AuditedAttributes.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): AuditedAttributes { - const message = createBaseAuditedAttributes(); - message.owner = object.owner ?? ""; - message.auditor = object.auditor ?? ""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(AuditedAttributes.$type, AuditedAttributes); - -function createBaseAttributesResponse(): AttributesResponse { - return { $type: "akash.audit.v1beta1.AttributesResponse", attributes: [] }; -} - -export const AttributesResponse = { - $type: "akash.audit.v1beta1.AttributesResponse" as const, - - encode( - message: AttributesResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.attributes) { - AuditedAttributes.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): AttributesResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseAttributesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.attributes.push( - AuditedAttributes.decode(reader, reader.uint32()), - ); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): AttributesResponse { - return { - $type: AttributesResponse.$type, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => AuditedAttributes.fromJSON(e)) - : [], - }; - }, - - toJSON(message: AttributesResponse): unknown { - const obj: any = {}; - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => - AuditedAttributes.toJSON(e), - ); - } - return obj; - }, - - create(base?: DeepPartial): AttributesResponse { - return AttributesResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): AttributesResponse { - const message = createBaseAttributesResponse(); - message.attributes = - object.attributes?.map((e) => AuditedAttributes.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(AttributesResponse.$type, AttributesResponse); - -function createBaseAttributesFilters(): AttributesFilters { - return { - $type: "akash.audit.v1beta1.AttributesFilters", - auditors: [], - owners: [], - }; -} - -export const AttributesFilters = { - $type: "akash.audit.v1beta1.AttributesFilters" as const, - - encode( - message: AttributesFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.auditors) { - writer.uint32(10).string(v!); - } - for (const v of message.owners) { - writer.uint32(18).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): AttributesFilters { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseAttributesFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.auditors.push(reader.string()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.owners.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): AttributesFilters { - return { - $type: AttributesFilters.$type, - auditors: globalThis.Array.isArray(object?.auditors) - ? object.auditors.map((e: any) => globalThis.String(e)) - : [], - owners: globalThis.Array.isArray(object?.owners) - ? object.owners.map((e: any) => globalThis.String(e)) - : [], - }; - }, - - toJSON(message: AttributesFilters): unknown { - const obj: any = {}; - if (message.auditors?.length) { - obj.auditors = message.auditors; - } - if (message.owners?.length) { - obj.owners = message.owners; - } - return obj; - }, - - create(base?: DeepPartial): AttributesFilters { - return AttributesFilters.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): AttributesFilters { - const message = createBaseAttributesFilters(); - message.auditors = object.auditors?.map((e) => e) || []; - message.owners = object.owners?.map((e) => e) || []; - return message; - }, -}; - -messageTypeRegistry.set(AttributesFilters.$type, AttributesFilters); - -function createBaseMsgSignProviderAttributes(): MsgSignProviderAttributes { - return { - $type: "akash.audit.v1beta1.MsgSignProviderAttributes", - owner: "", - auditor: "", - attributes: [], - }; -} - -export const MsgSignProviderAttributes = { - $type: "akash.audit.v1beta1.MsgSignProviderAttributes" as const, - - encode( - message: MsgSignProviderAttributes, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.auditor !== "") { - writer.uint32(18).string(message.auditor); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgSignProviderAttributes { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgSignProviderAttributes(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.auditor = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgSignProviderAttributes { - return { - $type: MsgSignProviderAttributes.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: MsgSignProviderAttributes): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create( - base?: DeepPartial, - ): MsgSignProviderAttributes { - return MsgSignProviderAttributes.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): MsgSignProviderAttributes { - const message = createBaseMsgSignProviderAttributes(); - message.owner = object.owner ?? ""; - message.auditor = object.auditor ?? 
""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set( - MsgSignProviderAttributes.$type, - MsgSignProviderAttributes, -); - -function createBaseMsgSignProviderAttributesResponse(): MsgSignProviderAttributesResponse { - return { $type: "akash.audit.v1beta1.MsgSignProviderAttributesResponse" }; -} - -export const MsgSignProviderAttributesResponse = { - $type: "akash.audit.v1beta1.MsgSignProviderAttributesResponse" as const, - - encode( - _: MsgSignProviderAttributesResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgSignProviderAttributesResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgSignProviderAttributesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgSignProviderAttributesResponse { - return { $type: MsgSignProviderAttributesResponse.$type }; - }, - - toJSON(_: MsgSignProviderAttributesResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgSignProviderAttributesResponse { - return MsgSignProviderAttributesResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgSignProviderAttributesResponse { - const message = createBaseMsgSignProviderAttributesResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgSignProviderAttributesResponse.$type, - MsgSignProviderAttributesResponse, -); - -function createBaseMsgDeleteProviderAttributes(): MsgDeleteProviderAttributes { - return { - $type: "akash.audit.v1beta1.MsgDeleteProviderAttributes", - owner: "", - auditor: "", - keys: [], - }; -} - -export const MsgDeleteProviderAttributes = { - $type: "akash.audit.v1beta1.MsgDeleteProviderAttributes" as const, - - encode( - message: MsgDeleteProviderAttributes, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.auditor !== "") { - writer.uint32(18).string(message.auditor); - } - for (const v of message.keys) { - writer.uint32(26).string(v!); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDeleteProviderAttributes { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDeleteProviderAttributes(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.auditor = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.keys.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgDeleteProviderAttributes { - return { - $type: MsgDeleteProviderAttributes.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - auditor: isSet(object.auditor) ? 
globalThis.String(object.auditor) : "", - keys: globalThis.Array.isArray(object?.keys) - ? object.keys.map((e: any) => globalThis.String(e)) - : [], - }; - }, - - toJSON(message: MsgDeleteProviderAttributes): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.keys?.length) { - obj.keys = message.keys; - } - return obj; - }, - - create( - base?: DeepPartial, - ): MsgDeleteProviderAttributes { - return MsgDeleteProviderAttributes.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): MsgDeleteProviderAttributes { - const message = createBaseMsgDeleteProviderAttributes(); - message.owner = object.owner ?? ""; - message.auditor = object.auditor ?? ""; - message.keys = object.keys?.map((e) => e) || []; - return message; - }, -}; - -messageTypeRegistry.set( - MsgDeleteProviderAttributes.$type, - MsgDeleteProviderAttributes, -); - -function createBaseMsgDeleteProviderAttributesResponse(): MsgDeleteProviderAttributesResponse { - return { $type: "akash.audit.v1beta1.MsgDeleteProviderAttributesResponse" }; -} - -export const MsgDeleteProviderAttributesResponse = { - $type: "akash.audit.v1beta1.MsgDeleteProviderAttributesResponse" as const, - - encode( - _: MsgDeleteProviderAttributesResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDeleteProviderAttributesResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDeleteProviderAttributesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgDeleteProviderAttributesResponse { - return { $type: MsgDeleteProviderAttributesResponse.$type }; - }, - - toJSON(_: MsgDeleteProviderAttributesResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgDeleteProviderAttributesResponse { - return MsgDeleteProviderAttributesResponse.fromPartial(base ?? 
{}); - }, - fromPartial( - _: DeepPartial, - ): MsgDeleteProviderAttributesResponse { - const message = createBaseMsgDeleteProviderAttributesResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgDeleteProviderAttributesResponse.$type, - MsgDeleteProviderAttributesResponse, -); - -/** Msg defines the provider Msg service */ -export interface Msg { - /** SignProviderAttributes defines a method that signs provider attributes */ - SignProviderAttributes( - request: MsgSignProviderAttributes, - ): Promise; - /** DeleteProviderAttributes defines a method that deletes provider attributes */ - DeleteProviderAttributes( - request: MsgDeleteProviderAttributes, - ): Promise; -} - -export const MsgServiceName = "akash.audit.v1beta1.Msg"; -export class MsgClientImpl implements Msg { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || MsgServiceName; - this.rpc = rpc; - this.SignProviderAttributes = this.SignProviderAttributes.bind(this); - this.DeleteProviderAttributes = this.DeleteProviderAttributes.bind(this); - } - SignProviderAttributes( - request: MsgSignProviderAttributes, - ): Promise { - const data = MsgSignProviderAttributes.encode(request).finish(); - const promise = this.rpc.request( - this.service, - "SignProviderAttributes", - data, - ); - return promise.then((data) => - MsgSignProviderAttributesResponse.decode(_m0.Reader.create(data)), - ); - } - - DeleteProviderAttributes( - request: MsgDeleteProviderAttributes, - ): Promise { - const data = MsgDeleteProviderAttributes.encode(request).finish(); - const promise = this.rpc.request( - this.service, - "DeleteProviderAttributes", - data, - ); - return promise.then((data) => - MsgDeleteProviderAttributesResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/audit/v1beta2/audit.ts b/ts/src/generated/akash/audit/v1beta2/audit.ts deleted file mode 100644 index 3b291990..00000000 --- a/ts/src/generated/akash/audit/v1beta2/audit.ts +++ /dev/null @@ -1,880 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Attribute } from "../../base/v1beta2/attribute"; - -/** Provider stores owner auditor and attributes details */ -export interface Provider { - $type: "akash.audit.v1beta2.Provider"; - owner: string; - auditor: string; - attributes: Attribute[]; -} - -/** Attributes */ -export interface AuditedAttributes { - $type: "akash.audit.v1beta2.AuditedAttributes"; - owner: string; - auditor: string; - attributes: Attribute[]; -} - -/** AttributesResponse represents details of deployment along with group details */ -export interface AttributesResponse { - $type: "akash.audit.v1beta2.AttributesResponse"; - attributes: AuditedAttributes[]; -} - -/** AttributesFilters defines filters used to filter deployments */ -export interface AttributesFilters { - $type: "akash.audit.v1beta2.AttributesFilters"; - auditors: string[]; - owners: string[]; -} - -/** MsgSignProviderAttributes defines an SDK message for signing a provider attributes */ -export interface MsgSignProviderAttributes { - $type: "akash.audit.v1beta2.MsgSignProviderAttributes"; - owner: string; - auditor: string; - attributes: Attribute[]; -} - -/** MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. */ -export interface MsgSignProviderAttributesResponse { - $type: "akash.audit.v1beta2.MsgSignProviderAttributesResponse"; -} - -/** MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes */ -export interface MsgDeleteProviderAttributes { - $type: "akash.audit.v1beta2.MsgDeleteProviderAttributes"; - owner: string; - auditor: string; - keys: string[]; -} - -/** MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. */ -export interface MsgDeleteProviderAttributesResponse { - $type: "akash.audit.v1beta2.MsgDeleteProviderAttributesResponse"; -} - -function createBaseProvider(): Provider { - return { - $type: "akash.audit.v1beta2.Provider", - owner: "", - auditor: "", - attributes: [], - }; -} - -export const Provider = { - $type: "akash.audit.v1beta2.Provider" as const, - - encode( - message: Provider, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.auditor !== "") { - writer.uint32(18).string(message.auditor); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Provider { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseProvider(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.auditor = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Provider { - return { - $type: Provider.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: Provider): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): Provider { - return Provider.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Provider { - const message = createBaseProvider(); - message.owner = object.owner ?? ""; - message.auditor = object.auditor ?? ""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(Provider.$type, Provider); - -function createBaseAuditedAttributes(): AuditedAttributes { - return { - $type: "akash.audit.v1beta2.AuditedAttributes", - owner: "", - auditor: "", - attributes: [], - }; -} - -export const AuditedAttributes = { - $type: "akash.audit.v1beta2.AuditedAttributes" as const, - - encode( - message: AuditedAttributes, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.auditor !== "") { - writer.uint32(18).string(message.auditor); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): AuditedAttributes { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseAuditedAttributes(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.auditor = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): AuditedAttributes { - return { - $type: AuditedAttributes.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? 
object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: AuditedAttributes): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): AuditedAttributes { - return AuditedAttributes.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): AuditedAttributes { - const message = createBaseAuditedAttributes(); - message.owner = object.owner ?? ""; - message.auditor = object.auditor ?? ""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(AuditedAttributes.$type, AuditedAttributes); - -function createBaseAttributesResponse(): AttributesResponse { - return { $type: "akash.audit.v1beta2.AttributesResponse", attributes: [] }; -} - -export const AttributesResponse = { - $type: "akash.audit.v1beta2.AttributesResponse" as const, - - encode( - message: AttributesResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.attributes) { - AuditedAttributes.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): AttributesResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseAttributesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.attributes.push( - AuditedAttributes.decode(reader, reader.uint32()), - ); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): AttributesResponse { - return { - $type: AttributesResponse.$type, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => AuditedAttributes.fromJSON(e)) - : [], - }; - }, - - toJSON(message: AttributesResponse): unknown { - const obj: any = {}; - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => - AuditedAttributes.toJSON(e), - ); - } - return obj; - }, - - create(base?: DeepPartial): AttributesResponse { - return AttributesResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): AttributesResponse { - const message = createBaseAttributesResponse(); - message.attributes = - object.attributes?.map((e) => AuditedAttributes.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(AttributesResponse.$type, AttributesResponse); - -function createBaseAttributesFilters(): AttributesFilters { - return { - $type: "akash.audit.v1beta2.AttributesFilters", - auditors: [], - owners: [], - }; -} - -export const AttributesFilters = { - $type: "akash.audit.v1beta2.AttributesFilters" as const, - - encode( - message: AttributesFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.auditors) { - writer.uint32(10).string(v!); - } - for (const v of message.owners) { - writer.uint32(18).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): AttributesFilters { - const reader = - input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseAttributesFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.auditors.push(reader.string()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.owners.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): AttributesFilters { - return { - $type: AttributesFilters.$type, - auditors: globalThis.Array.isArray(object?.auditors) - ? object.auditors.map((e: any) => globalThis.String(e)) - : [], - owners: globalThis.Array.isArray(object?.owners) - ? object.owners.map((e: any) => globalThis.String(e)) - : [], - }; - }, - - toJSON(message: AttributesFilters): unknown { - const obj: any = {}; - if (message.auditors?.length) { - obj.auditors = message.auditors; - } - if (message.owners?.length) { - obj.owners = message.owners; - } - return obj; - }, - - create(base?: DeepPartial): AttributesFilters { - return AttributesFilters.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): AttributesFilters { - const message = createBaseAttributesFilters(); - message.auditors = object.auditors?.map((e) => e) || []; - message.owners = object.owners?.map((e) => e) || []; - return message; - }, -}; - -messageTypeRegistry.set(AttributesFilters.$type, AttributesFilters); - -function createBaseMsgSignProviderAttributes(): MsgSignProviderAttributes { - return { - $type: "akash.audit.v1beta2.MsgSignProviderAttributes", - owner: "", - auditor: "", - attributes: [], - }; -} - -export const MsgSignProviderAttributes = { - $type: "akash.audit.v1beta2.MsgSignProviderAttributes" as const, - - encode( - message: MsgSignProviderAttributes, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.auditor !== "") { - writer.uint32(18).string(message.auditor); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgSignProviderAttributes { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgSignProviderAttributes(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.auditor = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgSignProviderAttributes { - return { - $type: MsgSignProviderAttributes.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? 
object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: MsgSignProviderAttributes): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create( - base?: DeepPartial, - ): MsgSignProviderAttributes { - return MsgSignProviderAttributes.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): MsgSignProviderAttributes { - const message = createBaseMsgSignProviderAttributes(); - message.owner = object.owner ?? ""; - message.auditor = object.auditor ?? ""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set( - MsgSignProviderAttributes.$type, - MsgSignProviderAttributes, -); - -function createBaseMsgSignProviderAttributesResponse(): MsgSignProviderAttributesResponse { - return { $type: "akash.audit.v1beta2.MsgSignProviderAttributesResponse" }; -} - -export const MsgSignProviderAttributesResponse = { - $type: "akash.audit.v1beta2.MsgSignProviderAttributesResponse" as const, - - encode( - _: MsgSignProviderAttributesResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgSignProviderAttributesResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgSignProviderAttributesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgSignProviderAttributesResponse { - return { $type: MsgSignProviderAttributesResponse.$type }; - }, - - toJSON(_: MsgSignProviderAttributesResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgSignProviderAttributesResponse { - return MsgSignProviderAttributesResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgSignProviderAttributesResponse { - const message = createBaseMsgSignProviderAttributesResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgSignProviderAttributesResponse.$type, - MsgSignProviderAttributesResponse, -); - -function createBaseMsgDeleteProviderAttributes(): MsgDeleteProviderAttributes { - return { - $type: "akash.audit.v1beta2.MsgDeleteProviderAttributes", - owner: "", - auditor: "", - keys: [], - }; -} - -export const MsgDeleteProviderAttributes = { - $type: "akash.audit.v1beta2.MsgDeleteProviderAttributes" as const, - - encode( - message: MsgDeleteProviderAttributes, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.auditor !== "") { - writer.uint32(18).string(message.auditor); - } - for (const v of message.keys) { - writer.uint32(26).string(v!); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDeleteProviderAttributes { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgDeleteProviderAttributes(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.auditor = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.keys.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgDeleteProviderAttributes { - return { - $type: MsgDeleteProviderAttributes.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - keys: globalThis.Array.isArray(object?.keys) - ? object.keys.map((e: any) => globalThis.String(e)) - : [], - }; - }, - - toJSON(message: MsgDeleteProviderAttributes): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.keys?.length) { - obj.keys = message.keys; - } - return obj; - }, - - create( - base?: DeepPartial, - ): MsgDeleteProviderAttributes { - return MsgDeleteProviderAttributes.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): MsgDeleteProviderAttributes { - const message = createBaseMsgDeleteProviderAttributes(); - message.owner = object.owner ?? ""; - message.auditor = object.auditor ?? ""; - message.keys = object.keys?.map((e) => e) || []; - return message; - }, -}; - -messageTypeRegistry.set( - MsgDeleteProviderAttributes.$type, - MsgDeleteProviderAttributes, -); - -function createBaseMsgDeleteProviderAttributesResponse(): MsgDeleteProviderAttributesResponse { - return { $type: "akash.audit.v1beta2.MsgDeleteProviderAttributesResponse" }; -} - -export const MsgDeleteProviderAttributesResponse = { - $type: "akash.audit.v1beta2.MsgDeleteProviderAttributesResponse" as const, - - encode( - _: MsgDeleteProviderAttributesResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDeleteProviderAttributesResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDeleteProviderAttributesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgDeleteProviderAttributesResponse { - return { $type: MsgDeleteProviderAttributesResponse.$type }; - }, - - toJSON(_: MsgDeleteProviderAttributesResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgDeleteProviderAttributesResponse { - return MsgDeleteProviderAttributesResponse.fromPartial(base ?? 
{}); - }, - fromPartial( - _: DeepPartial, - ): MsgDeleteProviderAttributesResponse { - const message = createBaseMsgDeleteProviderAttributesResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgDeleteProviderAttributesResponse.$type, - MsgDeleteProviderAttributesResponse, -); - -/** Msg defines the provider Msg service */ -export interface Msg { - /** SignProviderAttributes defines a method that signs provider attributes */ - SignProviderAttributes( - request: MsgSignProviderAttributes, - ): Promise; - /** DeleteProviderAttributes defines a method that deletes provider attributes */ - DeleteProviderAttributes( - request: MsgDeleteProviderAttributes, - ): Promise; -} - -export const MsgServiceName = "akash.audit.v1beta2.Msg"; -export class MsgClientImpl implements Msg { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || MsgServiceName; - this.rpc = rpc; - this.SignProviderAttributes = this.SignProviderAttributes.bind(this); - this.DeleteProviderAttributes = this.DeleteProviderAttributes.bind(this); - } - SignProviderAttributes( - request: MsgSignProviderAttributes, - ): Promise { - const data = MsgSignProviderAttributes.encode(request).finish(); - const promise = this.rpc.request( - this.service, - "SignProviderAttributes", - data, - ); - return promise.then((data) => - MsgSignProviderAttributesResponse.decode(_m0.Reader.create(data)), - ); - } - - DeleteProviderAttributes( - request: MsgDeleteProviderAttributes, - ): Promise { - const data = MsgDeleteProviderAttributes.encode(request).finish(); - const promise = this.rpc.request( - this.service, - "DeleteProviderAttributes", - data, - ); - return promise.then((data) => - MsgDeleteProviderAttributesResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/audit/v1beta2/genesis.ts b/ts/src/generated/akash/audit/v1beta2/genesis.ts deleted file mode 100644 index 85849af6..00000000 --- a/ts/src/generated/akash/audit/v1beta2/genesis.ts +++ /dev/null @@ -1,112 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { AuditedAttributes } from "./audit"; - -/** GenesisState defines the basic genesis state used by audit module */ -export interface GenesisState { - $type: "akash.audit.v1beta2.GenesisState"; - attributes: AuditedAttributes[]; -} - -function createBaseGenesisState(): GenesisState { - return { $type: "akash.audit.v1beta2.GenesisState", attributes: [] }; -} - -export const GenesisState = { - $type: "akash.audit.v1beta2.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.attributes) { - AuditedAttributes.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.attributes.push( - AuditedAttributes.decode(reader, reader.uint32()), - ); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => AuditedAttributes.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => - AuditedAttributes.toJSON(e), - ); - } - return obj; - }, - - create(base?: DeepPartial): GenesisState { - return GenesisState.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisState { - const message = createBaseGenesisState(); - message.attributes = - object.attributes?.map((e) => AuditedAttributes.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} diff --git a/ts/src/generated/akash/audit/v1beta2/query.ts b/ts/src/generated/akash/audit/v1beta2/query.ts deleted file mode 100644 index cd0e4ab3..00000000 --- a/ts/src/generated/akash/audit/v1beta2/query.ts +++ /dev/null @@ -1,765 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Provider } from "./audit"; - -/** QueryProvidersResponse is response type for the Query/Providers RPC method */ -export interface QueryProvidersResponse { - $type: "akash.audit.v1beta2.QueryProvidersResponse"; - providers: Provider[]; - pagination: PageResponse | undefined; -} - -/** QueryProviderRequest is request type for the Query/Provider RPC method */ -export interface QueryProviderRequest { - $type: "akash.audit.v1beta2.QueryProviderRequest"; - auditor: string; - owner: string; -} - -/** QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method */ -export interface QueryAllProvidersAttributesRequest { - $type: "akash.audit.v1beta2.QueryAllProvidersAttributesRequest"; - pagination: PageRequest | undefined; -} - -/** QueryProviderAttributesRequest is request type for the Query/Provider RPC method */ -export interface QueryProviderAttributesRequest { - $type: "akash.audit.v1beta2.QueryProviderAttributesRequest"; - owner: string; - pagination: PageRequest | undefined; -} - -/** QueryProviderAuditorRequest is request type for the Query/Providers RPC method */ -export interface QueryProviderAuditorRequest { - $type: "akash.audit.v1beta2.QueryProviderAuditorRequest"; - auditor: string; - owner: string; -} - -/** QueryAuditorAttributesRequest is request type for the Query/Providers RPC method */ -export interface QueryAuditorAttributesRequest { - $type: "akash.audit.v1beta2.QueryAuditorAttributesRequest"; - auditor: string; - pagination: PageRequest | undefined; -} - -function createBaseQueryProvidersResponse(): QueryProvidersResponse { - return { - $type: "akash.audit.v1beta2.QueryProvidersResponse", - providers: [], - pagination: undefined, - }; -} - -export const QueryProvidersResponse = { - $type: "akash.audit.v1beta2.QueryProvidersResponse" as const, - - encode( - message: QueryProvidersResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.providers) { - Provider.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryProvidersResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryProvidersResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.providers.push(Provider.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryProvidersResponse { - return { - $type: QueryProvidersResponse.$type, - providers: globalThis.Array.isArray(object?.providers) - ? object.providers.map((e: any) => Provider.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryProvidersResponse): unknown { - const obj: any = {}; - if (message.providers?.length) { - obj.providers = message.providers.map((e) => Provider.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryProvidersResponse { - return QueryProvidersResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryProvidersResponse { - const message = createBaseQueryProvidersResponse(); - message.providers = - object.providers?.map((e) => Provider.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryProvidersResponse.$type, QueryProvidersResponse); - -function createBaseQueryProviderRequest(): QueryProviderRequest { - return { - $type: "akash.audit.v1beta2.QueryProviderRequest", - auditor: "", - owner: "", - }; -} - -export const QueryProviderRequest = { - $type: "akash.audit.v1beta2.QueryProviderRequest" as const, - - encode( - message: QueryProviderRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.auditor !== "") { - writer.uint32(10).string(message.auditor); - } - if (message.owner !== "") { - writer.uint32(18).string(message.owner); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryProviderRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryProviderRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.auditor = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.owner = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryProviderRequest { - return { - $type: QueryProviderRequest.$type, - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - owner: isSet(object.owner) ? 
globalThis.String(object.owner) : "", - }; - }, - - toJSON(message: QueryProviderRequest): unknown { - const obj: any = {}; - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.owner !== "") { - obj.owner = message.owner; - } - return obj; - }, - - create(base?: DeepPartial): QueryProviderRequest { - return QueryProviderRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryProviderRequest { - const message = createBaseQueryProviderRequest(); - message.auditor = object.auditor ?? ""; - message.owner = object.owner ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(QueryProviderRequest.$type, QueryProviderRequest); - -function createBaseQueryAllProvidersAttributesRequest(): QueryAllProvidersAttributesRequest { - return { - $type: "akash.audit.v1beta2.QueryAllProvidersAttributesRequest", - pagination: undefined, - }; -} - -export const QueryAllProvidersAttributesRequest = { - $type: "akash.audit.v1beta2.QueryAllProvidersAttributesRequest" as const, - - encode( - message: QueryAllProvidersAttributesRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryAllProvidersAttributesRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryAllProvidersAttributesRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryAllProvidersAttributesRequest { - return { - $type: QueryAllProvidersAttributesRequest.$type, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryAllProvidersAttributesRequest): unknown { - const obj: any = {}; - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create( - base?: DeepPartial, - ): QueryAllProvidersAttributesRequest { - return QueryAllProvidersAttributesRequest.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryAllProvidersAttributesRequest { - const message = createBaseQueryAllProvidersAttributesRequest(); - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? 
PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - QueryAllProvidersAttributesRequest.$type, - QueryAllProvidersAttributesRequest, -); - -function createBaseQueryProviderAttributesRequest(): QueryProviderAttributesRequest { - return { - $type: "akash.audit.v1beta2.QueryProviderAttributesRequest", - owner: "", - pagination: undefined, - }; -} - -export const QueryProviderAttributesRequest = { - $type: "akash.audit.v1beta2.QueryProviderAttributesRequest" as const, - - encode( - message: QueryProviderAttributesRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryProviderAttributesRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryProviderAttributesRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryProviderAttributesRequest { - return { - $type: QueryProviderAttributesRequest.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryProviderAttributesRequest): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create( - base?: DeepPartial, - ): QueryProviderAttributesRequest { - return QueryProviderAttributesRequest.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryProviderAttributesRequest { - const message = createBaseQueryProviderAttributesRequest(); - message.owner = object.owner ?? ""; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - QueryProviderAttributesRequest.$type, - QueryProviderAttributesRequest, -); - -function createBaseQueryProviderAuditorRequest(): QueryProviderAuditorRequest { - return { - $type: "akash.audit.v1beta2.QueryProviderAuditorRequest", - auditor: "", - owner: "", - }; -} - -export const QueryProviderAuditorRequest = { - $type: "akash.audit.v1beta2.QueryProviderAuditorRequest" as const, - - encode( - message: QueryProviderAuditorRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.auditor !== "") { - writer.uint32(10).string(message.auditor); - } - if (message.owner !== "") { - writer.uint32(18).string(message.owner); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryProviderAuditorRequest { - const reader = - input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryProviderAuditorRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.auditor = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.owner = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryProviderAuditorRequest { - return { - $type: QueryProviderAuditorRequest.$type, - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - }; - }, - - toJSON(message: QueryProviderAuditorRequest): unknown { - const obj: any = {}; - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.owner !== "") { - obj.owner = message.owner; - } - return obj; - }, - - create( - base?: DeepPartial, - ): QueryProviderAuditorRequest { - return QueryProviderAuditorRequest.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryProviderAuditorRequest { - const message = createBaseQueryProviderAuditorRequest(); - message.auditor = object.auditor ?? ""; - message.owner = object.owner ?? ""; - return message; - }, -}; - -messageTypeRegistry.set( - QueryProviderAuditorRequest.$type, - QueryProviderAuditorRequest, -); - -function createBaseQueryAuditorAttributesRequest(): QueryAuditorAttributesRequest { - return { - $type: "akash.audit.v1beta2.QueryAuditorAttributesRequest", - auditor: "", - pagination: undefined, - }; -} - -export const QueryAuditorAttributesRequest = { - $type: "akash.audit.v1beta2.QueryAuditorAttributesRequest" as const, - - encode( - message: QueryAuditorAttributesRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.auditor !== "") { - writer.uint32(10).string(message.auditor); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryAuditorAttributesRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryAuditorAttributesRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.auditor = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryAuditorAttributesRequest { - return { - $type: QueryAuditorAttributesRequest.$type, - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - pagination: isSet(object.pagination) - ? 
PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryAuditorAttributesRequest): unknown { - const obj: any = {}; - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create( - base?: DeepPartial, - ): QueryAuditorAttributesRequest { - return QueryAuditorAttributesRequest.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryAuditorAttributesRequest { - const message = createBaseQueryAuditorAttributesRequest(); - message.auditor = object.auditor ?? ""; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - QueryAuditorAttributesRequest.$type, - QueryAuditorAttributesRequest, -); - -/** Query defines the gRPC querier service */ -export interface Query { - /** - * AllProvidersAttributes queries all providers - * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - */ - AllProvidersAttributes( - request: QueryAllProvidersAttributesRequest, - ): Promise; - /** - * ProviderAttributes queries all provider signed attributes - * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - */ - ProviderAttributes( - request: QueryProviderAttributesRequest, - ): Promise; - /** - * ProviderAuditorAttributes queries provider signed attributes by specific auditor - * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - */ - ProviderAuditorAttributes( - request: QueryProviderAuditorRequest, - ): Promise; - /** - * AuditorAttributes queries all providers signed by this auditor - * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - */ - AuditorAttributes( - request: QueryAuditorAttributesRequest, - ): Promise; -} - -export const QueryServiceName = "akash.audit.v1beta2.Query"; -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || QueryServiceName; - this.rpc = rpc; - this.AllProvidersAttributes = this.AllProvidersAttributes.bind(this); - this.ProviderAttributes = this.ProviderAttributes.bind(this); - this.ProviderAuditorAttributes = this.ProviderAuditorAttributes.bind(this); - this.AuditorAttributes = this.AuditorAttributes.bind(this); - } - AllProvidersAttributes( - request: QueryAllProvidersAttributesRequest, - ): Promise { - const data = QueryAllProvidersAttributesRequest.encode(request).finish(); - const promise = this.rpc.request( - this.service, - "AllProvidersAttributes", - data, - ); - return promise.then((data) => - QueryProvidersResponse.decode(_m0.Reader.create(data)), - ); - } - - ProviderAttributes( - request: QueryProviderAttributesRequest, - ): Promise { - const data = QueryProviderAttributesRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "ProviderAttributes", data); - return promise.then((data) => - QueryProvidersResponse.decode(_m0.Reader.create(data)), - ); - } - - ProviderAuditorAttributes( - request: QueryProviderAuditorRequest, - ): Promise { - const data = QueryProviderAuditorRequest.encode(request).finish(); - const promise = this.rpc.request( - this.service, - "ProviderAuditorAttributes", - data, - ); - 
return promise.then((data) => - QueryProvidersResponse.decode(_m0.Reader.create(data)), - ); - } - - AuditorAttributes( - request: QueryAuditorAttributesRequest, - ): Promise { - const data = QueryAuditorAttributesRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "AuditorAttributes", data); - return promise.then((data) => - QueryProvidersResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/audit/v1beta3/audit.ts b/ts/src/generated/akash/audit/v1beta3/audit.ts deleted file mode 100644 index 94be5e63..00000000 --- a/ts/src/generated/akash/audit/v1beta3/audit.ts +++ /dev/null @@ -1,880 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Attribute } from "../../base/v1beta3/attribute"; - -/** Provider stores owner auditor and attributes details */ -export interface Provider { - $type: "akash.audit.v1beta3.Provider"; - owner: string; - auditor: string; - attributes: Attribute[]; -} - -/** Attributes */ -export interface AuditedAttributes { - $type: "akash.audit.v1beta3.AuditedAttributes"; - owner: string; - auditor: string; - attributes: Attribute[]; -} - -/** AttributesResponse represents details of deployment along with group details */ -export interface AttributesResponse { - $type: "akash.audit.v1beta3.AttributesResponse"; - attributes: AuditedAttributes[]; -} - -/** AttributesFilters defines filters used to filter deployments */ -export interface AttributesFilters { - $type: "akash.audit.v1beta3.AttributesFilters"; - auditors: string[]; - owners: string[]; -} - -/** MsgSignProviderAttributes defines an SDK message for signing a provider attributes */ -export interface MsgSignProviderAttributes { - $type: "akash.audit.v1beta3.MsgSignProviderAttributes"; - owner: string; - auditor: string; - attributes: Attribute[]; -} - -/** MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. */ -export interface MsgSignProviderAttributesResponse { - $type: "akash.audit.v1beta3.MsgSignProviderAttributesResponse"; -} - -/** MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes */ -export interface MsgDeleteProviderAttributes { - $type: "akash.audit.v1beta3.MsgDeleteProviderAttributes"; - owner: string; - auditor: string; - keys: string[]; -} - -/** MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. 
*/ -export interface MsgDeleteProviderAttributesResponse { - $type: "akash.audit.v1beta3.MsgDeleteProviderAttributesResponse"; -} - -function createBaseProvider(): Provider { - return { - $type: "akash.audit.v1beta3.Provider", - owner: "", - auditor: "", - attributes: [], - }; -} - -export const Provider = { - $type: "akash.audit.v1beta3.Provider" as const, - - encode( - message: Provider, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.auditor !== "") { - writer.uint32(18).string(message.auditor); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Provider { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseProvider(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.auditor = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Provider { - return { - $type: Provider.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: Provider): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): Provider { - return Provider.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Provider { - const message = createBaseProvider(); - message.owner = object.owner ?? ""; - message.auditor = object.auditor ?? ""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(Provider.$type, Provider); - -function createBaseAuditedAttributes(): AuditedAttributes { - return { - $type: "akash.audit.v1beta3.AuditedAttributes", - owner: "", - auditor: "", - attributes: [], - }; -} - -export const AuditedAttributes = { - $type: "akash.audit.v1beta3.AuditedAttributes" as const, - - encode( - message: AuditedAttributes, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.auditor !== "") { - writer.uint32(18).string(message.auditor); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): AuditedAttributes { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseAuditedAttributes(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.auditor = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): AuditedAttributes { - return { - $type: AuditedAttributes.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: AuditedAttributes): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): AuditedAttributes { - return AuditedAttributes.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): AuditedAttributes { - const message = createBaseAuditedAttributes(); - message.owner = object.owner ?? ""; - message.auditor = object.auditor ?? ""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(AuditedAttributes.$type, AuditedAttributes); - -function createBaseAttributesResponse(): AttributesResponse { - return { $type: "akash.audit.v1beta3.AttributesResponse", attributes: [] }; -} - -export const AttributesResponse = { - $type: "akash.audit.v1beta3.AttributesResponse" as const, - - encode( - message: AttributesResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.attributes) { - AuditedAttributes.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): AttributesResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseAttributesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.attributes.push( - AuditedAttributes.decode(reader, reader.uint32()), - ); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): AttributesResponse { - return { - $type: AttributesResponse.$type, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => AuditedAttributes.fromJSON(e)) - : [], - }; - }, - - toJSON(message: AttributesResponse): unknown { - const obj: any = {}; - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => - AuditedAttributes.toJSON(e), - ); - } - return obj; - }, - - create(base?: DeepPartial): AttributesResponse { - return AttributesResponse.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): AttributesResponse { - const message = createBaseAttributesResponse(); - message.attributes = - object.attributes?.map((e) => AuditedAttributes.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(AttributesResponse.$type, AttributesResponse); - -function createBaseAttributesFilters(): AttributesFilters { - return { - $type: "akash.audit.v1beta3.AttributesFilters", - auditors: [], - owners: [], - }; -} - -export const AttributesFilters = { - $type: "akash.audit.v1beta3.AttributesFilters" as const, - - encode( - message: AttributesFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.auditors) { - writer.uint32(10).string(v!); - } - for (const v of message.owners) { - writer.uint32(18).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): AttributesFilters { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseAttributesFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.auditors.push(reader.string()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.owners.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): AttributesFilters { - return { - $type: AttributesFilters.$type, - auditors: globalThis.Array.isArray(object?.auditors) - ? object.auditors.map((e: any) => globalThis.String(e)) - : [], - owners: globalThis.Array.isArray(object?.owners) - ? object.owners.map((e: any) => globalThis.String(e)) - : [], - }; - }, - - toJSON(message: AttributesFilters): unknown { - const obj: any = {}; - if (message.auditors?.length) { - obj.auditors = message.auditors; - } - if (message.owners?.length) { - obj.owners = message.owners; - } - return obj; - }, - - create(base?: DeepPartial): AttributesFilters { - return AttributesFilters.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): AttributesFilters { - const message = createBaseAttributesFilters(); - message.auditors = object.auditors?.map((e) => e) || []; - message.owners = object.owners?.map((e) => e) || []; - return message; - }, -}; - -messageTypeRegistry.set(AttributesFilters.$type, AttributesFilters); - -function createBaseMsgSignProviderAttributes(): MsgSignProviderAttributes { - return { - $type: "akash.audit.v1beta3.MsgSignProviderAttributes", - owner: "", - auditor: "", - attributes: [], - }; -} - -export const MsgSignProviderAttributes = { - $type: "akash.audit.v1beta3.MsgSignProviderAttributes" as const, - - encode( - message: MsgSignProviderAttributes, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.auditor !== "") { - writer.uint32(18).string(message.auditor); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgSignProviderAttributes { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgSignProviderAttributes(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.auditor = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgSignProviderAttributes { - return { - $type: MsgSignProviderAttributes.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: MsgSignProviderAttributes): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create( - base?: DeepPartial, - ): MsgSignProviderAttributes { - return MsgSignProviderAttributes.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): MsgSignProviderAttributes { - const message = createBaseMsgSignProviderAttributes(); - message.owner = object.owner ?? ""; - message.auditor = object.auditor ?? ""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set( - MsgSignProviderAttributes.$type, - MsgSignProviderAttributes, -); - -function createBaseMsgSignProviderAttributesResponse(): MsgSignProviderAttributesResponse { - return { $type: "akash.audit.v1beta3.MsgSignProviderAttributesResponse" }; -} - -export const MsgSignProviderAttributesResponse = { - $type: "akash.audit.v1beta3.MsgSignProviderAttributesResponse" as const, - - encode( - _: MsgSignProviderAttributesResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgSignProviderAttributesResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgSignProviderAttributesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgSignProviderAttributesResponse { - return { $type: MsgSignProviderAttributesResponse.$type }; - }, - - toJSON(_: MsgSignProviderAttributesResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgSignProviderAttributesResponse { - return MsgSignProviderAttributesResponse.fromPartial(base ?? 
{}); - }, - fromPartial( - _: DeepPartial, - ): MsgSignProviderAttributesResponse { - const message = createBaseMsgSignProviderAttributesResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgSignProviderAttributesResponse.$type, - MsgSignProviderAttributesResponse, -); - -function createBaseMsgDeleteProviderAttributes(): MsgDeleteProviderAttributes { - return { - $type: "akash.audit.v1beta3.MsgDeleteProviderAttributes", - owner: "", - auditor: "", - keys: [], - }; -} - -export const MsgDeleteProviderAttributes = { - $type: "akash.audit.v1beta3.MsgDeleteProviderAttributes" as const, - - encode( - message: MsgDeleteProviderAttributes, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.auditor !== "") { - writer.uint32(18).string(message.auditor); - } - for (const v of message.keys) { - writer.uint32(26).string(v!); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDeleteProviderAttributes { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDeleteProviderAttributes(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.auditor = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.keys.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgDeleteProviderAttributes { - return { - $type: MsgDeleteProviderAttributes.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - keys: globalThis.Array.isArray(object?.keys) - ? object.keys.map((e: any) => globalThis.String(e)) - : [], - }; - }, - - toJSON(message: MsgDeleteProviderAttributes): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.keys?.length) { - obj.keys = message.keys; - } - return obj; - }, - - create( - base?: DeepPartial, - ): MsgDeleteProviderAttributes { - return MsgDeleteProviderAttributes.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): MsgDeleteProviderAttributes { - const message = createBaseMsgDeleteProviderAttributes(); - message.owner = object.owner ?? ""; - message.auditor = object.auditor ?? 
""; - message.keys = object.keys?.map((e) => e) || []; - return message; - }, -}; - -messageTypeRegistry.set( - MsgDeleteProviderAttributes.$type, - MsgDeleteProviderAttributes, -); - -function createBaseMsgDeleteProviderAttributesResponse(): MsgDeleteProviderAttributesResponse { - return { $type: "akash.audit.v1beta3.MsgDeleteProviderAttributesResponse" }; -} - -export const MsgDeleteProviderAttributesResponse = { - $type: "akash.audit.v1beta3.MsgDeleteProviderAttributesResponse" as const, - - encode( - _: MsgDeleteProviderAttributesResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDeleteProviderAttributesResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDeleteProviderAttributesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgDeleteProviderAttributesResponse { - return { $type: MsgDeleteProviderAttributesResponse.$type }; - }, - - toJSON(_: MsgDeleteProviderAttributesResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgDeleteProviderAttributesResponse { - return MsgDeleteProviderAttributesResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgDeleteProviderAttributesResponse { - const message = createBaseMsgDeleteProviderAttributesResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgDeleteProviderAttributesResponse.$type, - MsgDeleteProviderAttributesResponse, -); - -/** Msg defines the provider Msg service */ -export interface Msg { - /** SignProviderAttributes defines a method that signs provider attributes */ - SignProviderAttributes( - request: MsgSignProviderAttributes, - ): Promise; - /** DeleteProviderAttributes defines a method that deletes provider attributes */ - DeleteProviderAttributes( - request: MsgDeleteProviderAttributes, - ): Promise; -} - -export const MsgServiceName = "akash.audit.v1beta3.Msg"; -export class MsgClientImpl implements Msg { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || MsgServiceName; - this.rpc = rpc; - this.SignProviderAttributes = this.SignProviderAttributes.bind(this); - this.DeleteProviderAttributes = this.DeleteProviderAttributes.bind(this); - } - SignProviderAttributes( - request: MsgSignProviderAttributes, - ): Promise { - const data = MsgSignProviderAttributes.encode(request).finish(); - const promise = this.rpc.request( - this.service, - "SignProviderAttributes", - data, - ); - return promise.then((data) => - MsgSignProviderAttributesResponse.decode(_m0.Reader.create(data)), - ); - } - - DeleteProviderAttributes( - request: MsgDeleteProviderAttributes, - ): Promise { - const data = MsgDeleteProviderAttributes.encode(request).finish(); - const promise = this.rpc.request( - this.service, - "DeleteProviderAttributes", - data, - ); - return promise.then((data) => - MsgDeleteProviderAttributesResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | 
number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/audit/v1beta3/genesis.ts b/ts/src/generated/akash/audit/v1beta3/genesis.ts deleted file mode 100644 index cb032fcd..00000000 --- a/ts/src/generated/akash/audit/v1beta3/genesis.ts +++ /dev/null @@ -1,112 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { AuditedAttributes } from "./audit"; - -/** GenesisState defines the basic genesis state used by audit module */ -export interface GenesisState { - $type: "akash.audit.v1beta3.GenesisState"; - attributes: AuditedAttributes[]; -} - -function createBaseGenesisState(): GenesisState { - return { $type: "akash.audit.v1beta3.GenesisState", attributes: [] }; -} - -export const GenesisState = { - $type: "akash.audit.v1beta3.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.attributes) { - AuditedAttributes.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.attributes.push( - AuditedAttributes.decode(reader, reader.uint32()), - ); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => AuditedAttributes.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => - AuditedAttributes.toJSON(e), - ); - } - return obj; - }, - - create(base?: DeepPartial): GenesisState { - return GenesisState.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisState { - const message = createBaseGenesisState(); - message.attributes = - object.attributes?.map((e) => AuditedAttributes.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} diff --git a/ts/src/generated/akash/audit/v1beta3/query.ts b/ts/src/generated/akash/audit/v1beta3/query.ts deleted file mode 100644 index 9bbe8774..00000000 --- a/ts/src/generated/akash/audit/v1beta3/query.ts +++ /dev/null @@ -1,765 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Provider } from "./audit"; - -/** QueryProvidersResponse is response type for the Query/Providers RPC method */ -export interface QueryProvidersResponse { - $type: "akash.audit.v1beta3.QueryProvidersResponse"; - providers: Provider[]; - pagination: PageResponse | undefined; -} - -/** QueryProviderRequest is request type for the Query/Provider RPC method */ -export interface QueryProviderRequest { - $type: "akash.audit.v1beta3.QueryProviderRequest"; - auditor: string; - owner: string; -} - -/** QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method */ -export interface QueryAllProvidersAttributesRequest { - $type: "akash.audit.v1beta3.QueryAllProvidersAttributesRequest"; - pagination: PageRequest | undefined; -} - -/** QueryProviderAttributesRequest is request type for the Query/Provider RPC method */ -export interface QueryProviderAttributesRequest { - $type: "akash.audit.v1beta3.QueryProviderAttributesRequest"; - owner: string; - pagination: PageRequest | undefined; -} - -/** QueryProviderAuditorRequest is request type for the Query/Providers RPC method */ -export interface QueryProviderAuditorRequest { - $type: "akash.audit.v1beta3.QueryProviderAuditorRequest"; - auditor: string; - owner: string; -} - -/** QueryAuditorAttributesRequest is request type for the Query/Providers RPC method */ -export interface QueryAuditorAttributesRequest { - $type: "akash.audit.v1beta3.QueryAuditorAttributesRequest"; - auditor: string; - pagination: PageRequest | undefined; -} - -function createBaseQueryProvidersResponse(): QueryProvidersResponse { - return { - $type: "akash.audit.v1beta3.QueryProvidersResponse", - providers: [], - pagination: undefined, - }; -} - -export const QueryProvidersResponse = { - $type: "akash.audit.v1beta3.QueryProvidersResponse" as const, - - encode( - message: QueryProvidersResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.providers) { - Provider.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryProvidersResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryProvidersResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.providers.push(Provider.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryProvidersResponse { - return { - $type: QueryProvidersResponse.$type, - providers: globalThis.Array.isArray(object?.providers) - ? object.providers.map((e: any) => Provider.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryProvidersResponse): unknown { - const obj: any = {}; - if (message.providers?.length) { - obj.providers = message.providers.map((e) => Provider.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryProvidersResponse { - return QueryProvidersResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryProvidersResponse { - const message = createBaseQueryProvidersResponse(); - message.providers = - object.providers?.map((e) => Provider.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryProvidersResponse.$type, QueryProvidersResponse); - -function createBaseQueryProviderRequest(): QueryProviderRequest { - return { - $type: "akash.audit.v1beta3.QueryProviderRequest", - auditor: "", - owner: "", - }; -} - -export const QueryProviderRequest = { - $type: "akash.audit.v1beta3.QueryProviderRequest" as const, - - encode( - message: QueryProviderRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.auditor !== "") { - writer.uint32(10).string(message.auditor); - } - if (message.owner !== "") { - writer.uint32(18).string(message.owner); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryProviderRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryProviderRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.auditor = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.owner = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryProviderRequest { - return { - $type: QueryProviderRequest.$type, - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - owner: isSet(object.owner) ? 
globalThis.String(object.owner) : "", - }; - }, - - toJSON(message: QueryProviderRequest): unknown { - const obj: any = {}; - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.owner !== "") { - obj.owner = message.owner; - } - return obj; - }, - - create(base?: DeepPartial): QueryProviderRequest { - return QueryProviderRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryProviderRequest { - const message = createBaseQueryProviderRequest(); - message.auditor = object.auditor ?? ""; - message.owner = object.owner ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(QueryProviderRequest.$type, QueryProviderRequest); - -function createBaseQueryAllProvidersAttributesRequest(): QueryAllProvidersAttributesRequest { - return { - $type: "akash.audit.v1beta3.QueryAllProvidersAttributesRequest", - pagination: undefined, - }; -} - -export const QueryAllProvidersAttributesRequest = { - $type: "akash.audit.v1beta3.QueryAllProvidersAttributesRequest" as const, - - encode( - message: QueryAllProvidersAttributesRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryAllProvidersAttributesRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryAllProvidersAttributesRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryAllProvidersAttributesRequest { - return { - $type: QueryAllProvidersAttributesRequest.$type, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryAllProvidersAttributesRequest): unknown { - const obj: any = {}; - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create( - base?: DeepPartial, - ): QueryAllProvidersAttributesRequest { - return QueryAllProvidersAttributesRequest.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryAllProvidersAttributesRequest { - const message = createBaseQueryAllProvidersAttributesRequest(); - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? 
PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - QueryAllProvidersAttributesRequest.$type, - QueryAllProvidersAttributesRequest, -); - -function createBaseQueryProviderAttributesRequest(): QueryProviderAttributesRequest { - return { - $type: "akash.audit.v1beta3.QueryProviderAttributesRequest", - owner: "", - pagination: undefined, - }; -} - -export const QueryProviderAttributesRequest = { - $type: "akash.audit.v1beta3.QueryProviderAttributesRequest" as const, - - encode( - message: QueryProviderAttributesRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryProviderAttributesRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryProviderAttributesRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryProviderAttributesRequest { - return { - $type: QueryProviderAttributesRequest.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryProviderAttributesRequest): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create( - base?: DeepPartial, - ): QueryProviderAttributesRequest { - return QueryProviderAttributesRequest.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryProviderAttributesRequest { - const message = createBaseQueryProviderAttributesRequest(); - message.owner = object.owner ?? ""; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - QueryProviderAttributesRequest.$type, - QueryProviderAttributesRequest, -); - -function createBaseQueryProviderAuditorRequest(): QueryProviderAuditorRequest { - return { - $type: "akash.audit.v1beta3.QueryProviderAuditorRequest", - auditor: "", - owner: "", - }; -} - -export const QueryProviderAuditorRequest = { - $type: "akash.audit.v1beta3.QueryProviderAuditorRequest" as const, - - encode( - message: QueryProviderAuditorRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.auditor !== "") { - writer.uint32(10).string(message.auditor); - } - if (message.owner !== "") { - writer.uint32(18).string(message.owner); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryProviderAuditorRequest { - const reader = - input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryProviderAuditorRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.auditor = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.owner = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryProviderAuditorRequest { - return { - $type: QueryProviderAuditorRequest.$type, - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - }; - }, - - toJSON(message: QueryProviderAuditorRequest): unknown { - const obj: any = {}; - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.owner !== "") { - obj.owner = message.owner; - } - return obj; - }, - - create( - base?: DeepPartial, - ): QueryProviderAuditorRequest { - return QueryProviderAuditorRequest.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryProviderAuditorRequest { - const message = createBaseQueryProviderAuditorRequest(); - message.auditor = object.auditor ?? ""; - message.owner = object.owner ?? ""; - return message; - }, -}; - -messageTypeRegistry.set( - QueryProviderAuditorRequest.$type, - QueryProviderAuditorRequest, -); - -function createBaseQueryAuditorAttributesRequest(): QueryAuditorAttributesRequest { - return { - $type: "akash.audit.v1beta3.QueryAuditorAttributesRequest", - auditor: "", - pagination: undefined, - }; -} - -export const QueryAuditorAttributesRequest = { - $type: "akash.audit.v1beta3.QueryAuditorAttributesRequest" as const, - - encode( - message: QueryAuditorAttributesRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.auditor !== "") { - writer.uint32(10).string(message.auditor); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryAuditorAttributesRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryAuditorAttributesRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.auditor = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryAuditorAttributesRequest { - return { - $type: QueryAuditorAttributesRequest.$type, - auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : "", - pagination: isSet(object.pagination) - ? 
PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryAuditorAttributesRequest): unknown { - const obj: any = {}; - if (message.auditor !== "") { - obj.auditor = message.auditor; - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create( - base?: DeepPartial, - ): QueryAuditorAttributesRequest { - return QueryAuditorAttributesRequest.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryAuditorAttributesRequest { - const message = createBaseQueryAuditorAttributesRequest(); - message.auditor = object.auditor ?? ""; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - QueryAuditorAttributesRequest.$type, - QueryAuditorAttributesRequest, -); - -/** Query defines the gRPC querier service */ -export interface Query { - /** - * AllProvidersAttributes queries all providers - * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - */ - AllProvidersAttributes( - request: QueryAllProvidersAttributesRequest, - ): Promise; - /** - * ProviderAttributes queries all provider signed attributes - * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - */ - ProviderAttributes( - request: QueryProviderAttributesRequest, - ): Promise; - /** - * ProviderAuditorAttributes queries provider signed attributes by specific auditor - * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - */ - ProviderAuditorAttributes( - request: QueryProviderAuditorRequest, - ): Promise; - /** - * AuditorAttributes queries all providers signed by this auditor - * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - */ - AuditorAttributes( - request: QueryAuditorAttributesRequest, - ): Promise; -} - -export const QueryServiceName = "akash.audit.v1beta3.Query"; -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || QueryServiceName; - this.rpc = rpc; - this.AllProvidersAttributes = this.AllProvidersAttributes.bind(this); - this.ProviderAttributes = this.ProviderAttributes.bind(this); - this.ProviderAuditorAttributes = this.ProviderAuditorAttributes.bind(this); - this.AuditorAttributes = this.AuditorAttributes.bind(this); - } - AllProvidersAttributes( - request: QueryAllProvidersAttributesRequest, - ): Promise { - const data = QueryAllProvidersAttributesRequest.encode(request).finish(); - const promise = this.rpc.request( - this.service, - "AllProvidersAttributes", - data, - ); - return promise.then((data) => - QueryProvidersResponse.decode(_m0.Reader.create(data)), - ); - } - - ProviderAttributes( - request: QueryProviderAttributesRequest, - ): Promise { - const data = QueryProviderAttributesRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "ProviderAttributes", data); - return promise.then((data) => - QueryProvidersResponse.decode(_m0.Reader.create(data)), - ); - } - - ProviderAuditorAttributes( - request: QueryProviderAuditorRequest, - ): Promise { - const data = QueryProviderAuditorRequest.encode(request).finish(); - const promise = this.rpc.request( - this.service, - "ProviderAuditorAttributes", - data, - ); - 
return promise.then((data) => - QueryProvidersResponse.decode(_m0.Reader.create(data)), - ); - } - - AuditorAttributes( - request: QueryAuditorAttributesRequest, - ): Promise { - const data = QueryAuditorAttributesRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "AuditorAttributes", data); - return promise.then((data) => - QueryProvidersResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/base/attributes/v1/attribute.ts b/ts/src/generated/akash/base/attributes/v1/attribute.ts new file mode 100644 index 00000000..5955dd87 --- /dev/null +++ b/ts/src/generated/akash/base/attributes/v1/attribute.ts @@ -0,0 +1,354 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/base/attributes/v1/attribute.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../../typeRegistry"; + +/** Attribute represents key value pair */ +export interface Attribute { + $type: "akash.base.attributes.v1.Attribute"; + key: string; + value: string; +} + +/** + * SignedBy represents validation accounts that tenant expects signatures for provider attributes + * AllOf has precedence i.e. if there is at least one entry AnyOf is ignored regardless to how many + * entries there + * this behaviour to be discussed + */ +export interface SignedBy { + $type: "akash.base.attributes.v1.SignedBy"; + /** all_of all keys in this list must have signed attributes */ + allOf: string[]; + /** any_of at least of of the keys from the list must have signed attributes */ + anyOf: string[]; +} + +/** PlacementRequirements */ +export interface PlacementRequirements { + $type: "akash.base.attributes.v1.PlacementRequirements"; + /** SignedBy list of keys that tenants expect to have signatures from */ + signedBy: SignedBy | undefined; + /** Attribute list of attributes tenant expects from the provider */ + attributes: Attribute[]; +} + +function createBaseAttribute(): Attribute { + return { $type: "akash.base.attributes.v1.Attribute", key: "", value: "" }; +} + +export const Attribute: MessageFns< + Attribute, + "akash.base.attributes.v1.Attribute" +> = { + $type: "akash.base.attributes.v1.Attribute" as const, + + encode( + message: Attribute, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.key !== "") { + writer.uint32(10).string(message.key); + } + if (message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Attribute { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseAttribute(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.key = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.value = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Attribute { + return { + $type: Attribute.$type, + key: isSet(object.key) ? globalThis.String(object.key) : "", + value: isSet(object.value) ? globalThis.String(object.value) : "", + }; + }, + + toJSON(message: Attribute): unknown { + const obj: any = {}; + if (message.key !== "") { + obj.key = message.key; + } + if (message.value !== "") { + obj.value = message.value; + } + return obj; + }, + + create(base?: DeepPartial): Attribute { + return Attribute.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Attribute { + const message = createBaseAttribute(); + message.key = object.key ?? ""; + message.value = object.value ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Attribute.$type, Attribute); + +function createBaseSignedBy(): SignedBy { + return { $type: "akash.base.attributes.v1.SignedBy", allOf: [], anyOf: [] }; +} + +export const SignedBy: MessageFns< + SignedBy, + "akash.base.attributes.v1.SignedBy" +> = { + $type: "akash.base.attributes.v1.SignedBy" as const, + + encode( + message: SignedBy, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.allOf) { + writer.uint32(10).string(v!); + } + for (const v of message.anyOf) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): SignedBy { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseSignedBy(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.allOf.push(reader.string()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.anyOf.push(reader.string()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): SignedBy { + return { + $type: SignedBy.$type, + allOf: globalThis.Array.isArray(object?.allOf) + ? object.allOf.map((e: any) => globalThis.String(e)) + : [], + anyOf: globalThis.Array.isArray(object?.anyOf) + ? object.anyOf.map((e: any) => globalThis.String(e)) + : [], + }; + }, + + toJSON(message: SignedBy): unknown { + const obj: any = {}; + if (message.allOf?.length) { + obj.allOf = message.allOf; + } + if (message.anyOf?.length) { + obj.anyOf = message.anyOf; + } + return obj; + }, + + create(base?: DeepPartial): SignedBy { + return SignedBy.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): SignedBy { + const message = createBaseSignedBy(); + message.allOf = object.allOf?.map((e) => e) || []; + message.anyOf = object.anyOf?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(SignedBy.$type, SignedBy); + +function createBasePlacementRequirements(): PlacementRequirements { + return { + $type: "akash.base.attributes.v1.PlacementRequirements", + signedBy: undefined, + attributes: [], + }; +} + +export const PlacementRequirements: MessageFns< + PlacementRequirements, + "akash.base.attributes.v1.PlacementRequirements" +> = { + $type: "akash.base.attributes.v1.PlacementRequirements" as const, + + encode( + message: PlacementRequirements, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.signedBy !== undefined) { + SignedBy.encode(message.signedBy, writer.uint32(10).fork()).join(); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): PlacementRequirements { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBasePlacementRequirements(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.signedBy = SignedBy.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): PlacementRequirements { + return { + $type: PlacementRequirements.$type, + signedBy: isSet(object.signedBy) + ? SignedBy.fromJSON(object.signedBy) + : undefined, + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + }; + }, + + toJSON(message: PlacementRequirements): unknown { + const obj: any = {}; + if (message.signedBy !== undefined) { + obj.signedBy = SignedBy.toJSON(message.signedBy); + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): PlacementRequirements { + return PlacementRequirements.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): PlacementRequirements { + const message = createBasePlacementRequirements(); + message.signedBy = + object.signedBy !== undefined && object.signedBy !== null + ? SignedBy.fromPartial(object.signedBy) + : undefined; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(PlacementRequirements.$type, PlacementRequirements); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude<keyof T, "$type">]?: DeepPartial<T[K]> }
+          : Partial<T>;
+
+function isSet(value: any): boolean {
+  return value !== null && value !== undefined;
+}
+
+interface MessageFns<T, V extends string> {
+  readonly $type: V;
+  encode(message: T, writer?: BinaryWriter): BinaryWriter;
+  decode(input: BinaryReader | Uint8Array, length?: number): T;
+  fromJSON(object: any): T;
+  toJSON(message: T): unknown;
+  create(base?: DeepPartial<T>): T;
+  fromPartial(object: DeepPartial<T>): T;
+}
diff --git a/ts/src/generated/akash/base/resources/v1beta4/cpu.ts b/ts/src/generated/akash/base/resources/v1beta4/cpu.ts
new file mode 100644
index 00000000..08203d9b
--- /dev/null
+++ b/ts/src/generated/akash/base/resources/v1beta4/cpu.ts
@@ -0,0 +1,149 @@
+// Code generated by protoc-gen-ts_proto. DO NOT EDIT.
+// versions:
+// protoc-gen-ts_proto v2.2.0
+// protoc unknown
+// source: akash/base/resources/v1beta4/cpu.proto
+
+/* eslint-disable */
+import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire";
+import Long from "long";
+import { messageTypeRegistry } from "../../../../typeRegistry";
+import { Attribute } from "../../attributes/v1/attribute";
+import { ResourceValue } from "./resourcevalue";
+
+/** CPU stores resource units and cpu config attributes */
+export interface CPU {
+  $type: "akash.base.resources.v1beta4.CPU";
+  units: ResourceValue | undefined;
+  attributes: Attribute[];
+}
+
+function createBaseCPU(): CPU {
+  return {
+    $type: "akash.base.resources.v1beta4.CPU",
+    units: undefined,
+    attributes: [],
+  };
+}
+
+export const CPU: MessageFns<CPU, "akash.base.resources.v1beta4.CPU"> = {
+  $type: "akash.base.resources.v1beta4.CPU" as const,
+
+  encode(
+    message: CPU,
+    writer: BinaryWriter = new BinaryWriter(),
+  ): BinaryWriter {
+    if (message.units !== undefined) {
+      ResourceValue.encode(message.units, writer.uint32(10).fork()).join();
+    }
+    for (const v of message.attributes) {
+      Attribute.encode(v!, writer.uint32(18).fork()).join();
+    }
+    return writer;
+  },
+
+  decode(input: BinaryReader | Uint8Array, length?: number): CPU {
+    const reader =
+      input instanceof BinaryReader ? input : new BinaryReader(input);
+    let end = length === undefined ? reader.len : reader.pos + length;
+    const message = createBaseCPU();
+    while (reader.pos < end) {
+      const tag = reader.uint32();
+      switch (tag >>> 3) {
+        case 1:
+          if (tag !== 10) {
+            break;
+          }
+
+          message.units = ResourceValue.decode(reader, reader.uint32());
+          continue;
+        case 2:
+          if (tag !== 18) {
+            break;
+          }
+
+          message.attributes.push(Attribute.decode(reader, reader.uint32()));
+          continue;
+      }
+      if ((tag & 7) === 4 || tag === 0) {
+        break;
+      }
+      reader.skip(tag & 7);
+    }
+    return message;
+  },
+
+  fromJSON(object: any): CPU {
+    return {
+      $type: CPU.$type,
+      units: isSet(object.units)
+        ? ResourceValue.fromJSON(object.units)
+        : undefined,
+      attributes: globalThis.Array.isArray(object?.attributes)
+        ? object.attributes.map((e: any) => Attribute.fromJSON(e))
+        : [],
+    };
+  },
+
+  toJSON(message: CPU): unknown {
+    const obj: any = {};
+    if (message.units !== undefined) {
+      obj.units = ResourceValue.toJSON(message.units);
+    }
+    if (message.attributes?.length) {
+      obj.attributes = message.attributes.map((e) => Attribute.toJSON(e));
+    }
+    return obj;
+  },
+
+  create(base?: DeepPartial<CPU>): CPU {
+    return CPU.fromPartial(base ?? {});
+  },
+  fromPartial(object: DeepPartial<CPU>): CPU {
+    const message = createBaseCPU();
+    message.units =
+      object.units !== undefined && object.units !== null
+        ? ResourceValue.fromPartial(object.units)
+        : undefined;
+    message.attributes =
+      object.attributes?.map((e) => Attribute.fromPartial(e)) || [];
+    return message;
+  },
+};
+
+messageTypeRegistry.set(CPU.$type, CPU);
+
+type Builtin =
+  | Date
+  | Function
+  | Uint8Array
+  | string
+  | number
+  | boolean
+  | undefined;
+
+type DeepPartial<T> = T extends Builtin
+  ? T
+  : T extends Long
+    ? string | number | Long
+    : T extends globalThis.Array<infer U>
+      ? globalThis.Array<DeepPartial<U>>
+      : T extends ReadonlyArray<infer U>
+        ? ReadonlyArray<DeepPartial<U>>
+        : T extends {}
+          ? { [K in Exclude<keyof T, "$type">]?: DeepPartial<T[K]> }
+          : Partial<T>;
+
+function isSet(value: any): boolean {
+  return value !== null && value !== undefined;
+}
+
+interface MessageFns<T, V extends string> {
+  readonly $type: V;
+  encode(message: T, writer?: BinaryWriter): BinaryWriter;
+  decode(input: BinaryReader | Uint8Array, length?: number): T;
+  fromJSON(object: any): T;
+  toJSON(message: T): unknown;
+  create(base?: DeepPartial<T>): T;
+  fromPartial(object: DeepPartial<T>): T;
+}
diff --git a/ts/src/generated/akash/base/resources/v1beta4/endpoint.ts b/ts/src/generated/akash/base/resources/v1beta4/endpoint.ts
new file mode 100644
index 00000000..c809ae22
--- /dev/null
+++ b/ts/src/generated/akash/base/resources/v1beta4/endpoint.ts
@@ -0,0 +1,187 @@
+// Code generated by protoc-gen-ts_proto. DO NOT EDIT.
+// versions:
+// protoc-gen-ts_proto v2.2.0
+// protoc unknown
+// source: akash/base/resources/v1beta4/endpoint.proto
+
+/* eslint-disable */
+import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire";
+import Long from "long";
+import { messageTypeRegistry } from "../../../../typeRegistry";
+
+/** Endpoint describes a publicly accessible IP service */
+export interface Endpoint {
+  $type: "akash.base.resources.v1beta4.Endpoint";
+  kind: Endpoint_Kind;
+  sequenceNumber: number;
+}
+
+/** This describes how the endpoint is implemented when the lease is deployed */
+export enum Endpoint_Kind {
+  /** SHARED_HTTP - Describes an endpoint that becomes a Kubernetes Ingress */
+  SHARED_HTTP = 0,
+  /** RANDOM_PORT - Describes an endpoint that becomes a Kubernetes NodePort */
+  RANDOM_PORT = 1,
+  /** LEASED_IP - Describes an endpoint that becomes a leased IP */
+  LEASED_IP = 2,
+  UNRECOGNIZED = -1,
+}
+
+export function endpoint_KindFromJSON(object: any): Endpoint_Kind {
+  switch (object) {
+    case 0:
+    case "SHARED_HTTP":
+      return Endpoint_Kind.SHARED_HTTP;
+    case 1:
+    case "RANDOM_PORT":
+      return Endpoint_Kind.RANDOM_PORT;
+    case 2:
+    case "LEASED_IP":
+      return Endpoint_Kind.LEASED_IP;
+    case -1:
+    case "UNRECOGNIZED":
+    default:
+      return Endpoint_Kind.UNRECOGNIZED;
+  }
+}
+
+export function endpoint_KindToJSON(object: Endpoint_Kind): string {
+  switch (object) {
+    case Endpoint_Kind.SHARED_HTTP:
+      return "SHARED_HTTP";
+    case Endpoint_Kind.RANDOM_PORT:
+      return "RANDOM_PORT";
+    case Endpoint_Kind.LEASED_IP:
+      return "LEASED_IP";
+    case Endpoint_Kind.UNRECOGNIZED:
+    default:
+      return "UNRECOGNIZED";
+  }
+}
+
+function createBaseEndpoint(): Endpoint {
+  return {
+    $type: "akash.base.resources.v1beta4.Endpoint",
+    kind: 0,
+    sequenceNumber: 0,
+  };
+}
+
+export const Endpoint: MessageFns<
+  Endpoint,
+  "akash.base.resources.v1beta4.Endpoint"
+> = {
+  $type: "akash.base.resources.v1beta4.Endpoint" as const,
+
+  encode(
+    message: Endpoint,
+    writer: BinaryWriter = new BinaryWriter(),
+  ): BinaryWriter {
+    if (message.kind !== 0) {
+      writer.uint32(8).int32(message.kind);
+    }
+    if (message.sequenceNumber !== 0) {
+      writer.uint32(16).uint32(message.sequenceNumber);
+    }
+    return writer;
+  },
+
+  decode(input:
BinaryReader | Uint8Array, length?: number): Endpoint { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseEndpoint(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 8) { + break; + } + + message.kind = reader.int32() as any; + continue; + case 2: + if (tag !== 16) { + break; + } + + message.sequenceNumber = reader.uint32(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Endpoint { + return { + $type: Endpoint.$type, + kind: isSet(object.kind) ? endpoint_KindFromJSON(object.kind) : 0, + sequenceNumber: isSet(object.sequenceNumber) + ? globalThis.Number(object.sequenceNumber) + : 0, + }; + }, + + toJSON(message: Endpoint): unknown { + const obj: any = {}; + if (message.kind !== 0) { + obj.kind = endpoint_KindToJSON(message.kind); + } + if (message.sequenceNumber !== 0) { + obj.sequenceNumber = Math.round(message.sequenceNumber); + } + return obj; + }, + + create(base?: DeepPartial): Endpoint { + return Endpoint.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Endpoint { + const message = createBaseEndpoint(); + message.kind = object.kind ?? 0; + message.sequenceNumber = object.sequenceNumber ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Endpoint.$type, Endpoint); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/base/resources/v1beta4/gpu.ts b/ts/src/generated/akash/base/resources/v1beta4/gpu.ts new file mode 100644 index 00000000..6caa4b05 --- /dev/null +++ b/ts/src/generated/akash/base/resources/v1beta4/gpu.ts @@ -0,0 +1,149 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/base/resources/v1beta4/gpu.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../../typeRegistry"; +import { Attribute } from "../../attributes/v1/attribute"; +import { ResourceValue } from "./resourcevalue"; + +/** GPU stores resource units and cpu config attributes */ +export interface GPU { + $type: "akash.base.resources.v1beta4.GPU"; + units: ResourceValue | undefined; + attributes: Attribute[]; +} + +function createBaseGPU(): GPU { + return { + $type: "akash.base.resources.v1beta4.GPU", + units: undefined, + attributes: [], + }; +} + +export const GPU: MessageFns = { + $type: "akash.base.resources.v1beta4.GPU" as const, + + encode( + message: GPU, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.units !== undefined) { + ResourceValue.encode(message.units, writer.uint32(10).fork()).join(); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): GPU { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGPU(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.units = ResourceValue.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): GPU { + return { + $type: GPU.$type, + units: isSet(object.units) + ? ResourceValue.fromJSON(object.units) + : undefined, + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + }; + }, + + toJSON(message: GPU): unknown { + const obj: any = {}; + if (message.units !== undefined) { + obj.units = ResourceValue.toJSON(message.units); + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): GPU { + return GPU.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GPU { + const message = createBaseGPU(); + message.units = + object.units !== undefined && object.units !== null + ? ResourceValue.fromPartial(object.units) + : undefined; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(GPU.$type, GPU); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/base/resources/v1beta4/memory.ts b/ts/src/generated/akash/base/resources/v1beta4/memory.ts new file mode 100644 index 00000000..457b9ae4 --- /dev/null +++ b/ts/src/generated/akash/base/resources/v1beta4/memory.ts @@ -0,0 +1,150 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/base/resources/v1beta4/memory.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../../typeRegistry"; +import { Attribute } from "../../attributes/v1/attribute"; +import { ResourceValue } from "./resourcevalue"; + +/** Memory stores resource quantity and memory attributes */ +export interface Memory { + $type: "akash.base.resources.v1beta4.Memory"; + quantity: ResourceValue | undefined; + attributes: Attribute[]; +} + +function createBaseMemory(): Memory { + return { + $type: "akash.base.resources.v1beta4.Memory", + quantity: undefined, + attributes: [], + }; +} + +export const Memory: MessageFns = + { + $type: "akash.base.resources.v1beta4.Memory" as const, + + encode( + message: Memory, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.quantity !== undefined) { + ResourceValue.encode(message.quantity, writer.uint32(10).fork()).join(); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Memory { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMemory(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.quantity = ResourceValue.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Memory { + return { + $type: Memory.$type, + quantity: isSet(object.quantity) + ? ResourceValue.fromJSON(object.quantity) + : undefined, + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + }; + }, + + toJSON(message: Memory): unknown { + const obj: any = {}; + if (message.quantity !== undefined) { + obj.quantity = ResourceValue.toJSON(message.quantity); + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): Memory { + return Memory.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Memory { + const message = createBaseMemory(); + message.quantity = + object.quantity !== undefined && object.quantity !== null + ? 
ResourceValue.fromPartial(object.quantity) + : undefined; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + return message; + }, + }; + +messageTypeRegistry.set(Memory.$type, Memory); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/base/v1beta3/resources.ts b/ts/src/generated/akash/base/resources/v1beta4/resources.ts similarity index 77% rename from ts/src/generated/akash/base/v1beta3/resources.ts rename to ts/src/generated/akash/base/resources/v1beta4/resources.ts index 33402bb6..b7bb9e20 100644 --- a/ts/src/generated/akash/base/v1beta3/resources.ts +++ b/ts/src/generated/akash/base/resources/v1beta4/resources.ts @@ -1,7 +1,13 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/base/resources/v1beta4/resources.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; +import { messageTypeRegistry } from "../../../../typeRegistry"; import { CPU } from "./cpu"; import { Endpoint } from "./endpoint"; import { GPU } from "./gpu"; @@ -13,7 +19,7 @@ import { Storage } from "./storage"; * if field is nil resource is not present in the given data-structure */ export interface Resources { - $type: "akash.base.v1beta3.Resources"; + $type: "akash.base.resources.v1beta4.Resources"; id: number; cpu: CPU | undefined; memory: Memory | undefined; @@ -24,7 +30,7 @@ export interface Resources { function createBaseResources(): Resources { return { - $type: "akash.base.v1beta3.Resources", + $type: "akash.base.resources.v1beta4.Resources", id: 0, cpu: undefined, memory: undefined, @@ -34,37 +40,40 @@ function createBaseResources(): Resources { }; } -export const Resources = { - $type: "akash.base.v1beta3.Resources" as const, +export const Resources: MessageFns< + Resources, + "akash.base.resources.v1beta4.Resources" +> = { + $type: "akash.base.resources.v1beta4.Resources" as const, encode( message: Resources, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.id !== 0) { writer.uint32(8).uint32(message.id); } if (message.cpu !== undefined) { - CPU.encode(message.cpu, writer.uint32(18).fork()).ldelim(); + CPU.encode(message.cpu, writer.uint32(18).fork()).join(); } if (message.memory !== undefined) { - Memory.encode(message.memory, writer.uint32(26).fork()).ldelim(); + Memory.encode(message.memory, writer.uint32(26).fork()).join(); } for (const v of message.storage) { - Storage.encode(v!, writer.uint32(34).fork()).ldelim(); + Storage.encode(v!, writer.uint32(34).fork()).join(); } if (message.gpu !== 
undefined) { - GPU.encode(message.gpu, writer.uint32(42).fork()).ldelim(); + GPU.encode(message.gpu, writer.uint32(42).fork()).join(); } for (const v of message.endpoints) { - Endpoint.encode(v!, writer.uint32(50).fork()).ldelim(); + Endpoint.encode(v!, writer.uint32(50).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Resources { + decode(input: BinaryReader | Uint8Array, length?: number): Resources { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseResources(); while (reader.pos < end) { @@ -116,7 +125,7 @@ export const Resources = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -208,11 +217,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/base/resources/v1beta4/resourcevalue.ts b/ts/src/generated/akash/base/resources/v1beta4/resourcevalue.ts new file mode 100644 index 00000000..94d31e57 --- /dev/null +++ b/ts/src/generated/akash/base/resources/v1beta4/resourcevalue.ts @@ -0,0 +1,150 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/base/resources/v1beta4/resourcevalue.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../../typeRegistry"; + +/** Unit stores cpu, memory and storage metrics */ +export interface ResourceValue { + $type: "akash.base.resources.v1beta4.ResourceValue"; + val: Uint8Array; +} + +function createBaseResourceValue(): ResourceValue { + return { + $type: "akash.base.resources.v1beta4.ResourceValue", + val: new Uint8Array(0), + }; +} + +export const ResourceValue: MessageFns< + ResourceValue, + "akash.base.resources.v1beta4.ResourceValue" +> = { + $type: "akash.base.resources.v1beta4.ResourceValue" as const, + + encode( + message: ResourceValue, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.val.length !== 0) { + writer.uint32(10).bytes(message.val); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): ResourceValue { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseResourceValue(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.val = reader.bytes(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): ResourceValue { + return { + $type: ResourceValue.$type, + val: isSet(object.val) ? 
bytesFromBase64(object.val) : new Uint8Array(0), + }; + }, + + toJSON(message: ResourceValue): unknown { + const obj: any = {}; + if (message.val.length !== 0) { + obj.val = base64FromBytes(message.val); + } + return obj; + }, + + create(base?: DeepPartial): ResourceValue { + return ResourceValue.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): ResourceValue { + const message = createBaseResourceValue(); + message.val = object.val ?? new Uint8Array(0); + return message; + }, +}; + +messageTypeRegistry.set(ResourceValue.$type, ResourceValue); + +function bytesFromBase64(b64: string): Uint8Array { + if ((globalThis as any).Buffer) { + return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); + } else { + const bin = globalThis.atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; + } +} + +function base64FromBytes(arr: Uint8Array): string { + if ((globalThis as any).Buffer) { + return globalThis.Buffer.from(arr).toString("base64"); + } else { + const bin: string[] = []; + arr.forEach((byte) => { + bin.push(globalThis.String.fromCharCode(byte)); + }); + return globalThis.btoa(bin.join("")); + } +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/base/resources/v1beta4/storage.ts b/ts/src/generated/akash/base/resources/v1beta4/storage.ts new file mode 100644 index 00000000..942c2dfd --- /dev/null +++ b/ts/src/generated/akash/base/resources/v1beta4/storage.ts @@ -0,0 +1,169 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/base/resources/v1beta4/storage.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../../typeRegistry"; +import { Attribute } from "../../attributes/v1/attribute"; +import { ResourceValue } from "./resourcevalue"; + +/** Storage stores resource quantity and storage attributes */ +export interface Storage { + $type: "akash.base.resources.v1beta4.Storage"; + name: string; + quantity: ResourceValue | undefined; + attributes: Attribute[]; +} + +function createBaseStorage(): Storage { + return { + $type: "akash.base.resources.v1beta4.Storage", + name: "", + quantity: undefined, + attributes: [], + }; +} + +export const Storage: MessageFns< + Storage, + "akash.base.resources.v1beta4.Storage" +> = { + $type: "akash.base.resources.v1beta4.Storage" as const, + + encode( + message: Storage, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.quantity !== undefined) { + ResourceValue.encode(message.quantity, writer.uint32(18).fork()).join(); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(26).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Storage { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseStorage(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.quantity = ResourceValue.decode(reader, reader.uint32()); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Storage { + return { + $type: Storage.$type, + name: isSet(object.name) ? globalThis.String(object.name) : "", + quantity: isSet(object.quantity) + ? ResourceValue.fromJSON(object.quantity) + : undefined, + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + }; + }, + + toJSON(message: Storage): unknown { + const obj: any = {}; + if (message.name !== "") { + obj.name = message.name; + } + if (message.quantity !== undefined) { + obj.quantity = ResourceValue.toJSON(message.quantity); + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): Storage { + return Storage.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Storage { + const message = createBaseStorage(); + message.name = object.name ?? ""; + message.quantity = + object.quantity !== undefined && object.quantity !== null + ? 
ResourceValue.fromPartial(object.quantity) + : undefined; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Storage.$type, Storage); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/base/v1beta1/attribute.ts b/ts/src/generated/akash/base/v1beta1/attribute.ts deleted file mode 100644 index 51509443..00000000 --- a/ts/src/generated/akash/base/v1beta1/attribute.ts +++ /dev/null @@ -1,334 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** Attribute represents key value pair */ -export interface Attribute { - $type: "akash.base.v1beta1.Attribute"; - key: string; - value: string; -} - -/** - * SignedBy represents validation accounts that tenant expects signatures for provider attributes - * AllOf has precedence i.e. if there is at least one entry AnyOf is ignored regardless to how many - * entries there - * this behaviour to be discussed - */ -export interface SignedBy { - $type: "akash.base.v1beta1.SignedBy"; - /** all_of all keys in this list must have signed attributes */ - allOf: string[]; - /** any_of at least of of the keys from the list must have signed attributes */ - anyOf: string[]; -} - -/** PlacementRequirements */ -export interface PlacementRequirements { - $type: "akash.base.v1beta1.PlacementRequirements"; - /** SignedBy list of keys that tenants expect to have signatures from */ - signedBy: SignedBy | undefined; - /** Attribute list of attributes tenant expects from the provider */ - attributes: Attribute[]; -} - -function createBaseAttribute(): Attribute { - return { $type: "akash.base.v1beta1.Attribute", key: "", value: "" }; -} - -export const Attribute = { - $type: "akash.base.v1beta1.Attribute" as const, - - encode( - message: Attribute, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.key !== "") { - writer.uint32(10).string(message.key); - } - if (message.value !== "") { - writer.uint32(18).string(message.value); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Attribute { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseAttribute(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.key = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.value = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Attribute { - return { - $type: Attribute.$type, - key: isSet(object.key) ? globalThis.String(object.key) : "", - value: isSet(object.value) ? globalThis.String(object.value) : "", - }; - }, - - toJSON(message: Attribute): unknown { - const obj: any = {}; - if (message.key !== "") { - obj.key = message.key; - } - if (message.value !== "") { - obj.value = message.value; - } - return obj; - }, - - create(base?: DeepPartial): Attribute { - return Attribute.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Attribute { - const message = createBaseAttribute(); - message.key = object.key ?? ""; - message.value = object.value ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(Attribute.$type, Attribute); - -function createBaseSignedBy(): SignedBy { - return { $type: "akash.base.v1beta1.SignedBy", allOf: [], anyOf: [] }; -} - -export const SignedBy = { - $type: "akash.base.v1beta1.SignedBy" as const, - - encode( - message: SignedBy, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.allOf) { - writer.uint32(10).string(v!); - } - for (const v of message.anyOf) { - writer.uint32(18).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SignedBy { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseSignedBy(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.allOf.push(reader.string()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.anyOf.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SignedBy { - return { - $type: SignedBy.$type, - allOf: globalThis.Array.isArray(object?.allOf) - ? object.allOf.map((e: any) => globalThis.String(e)) - : [], - anyOf: globalThis.Array.isArray(object?.anyOf) - ? object.anyOf.map((e: any) => globalThis.String(e)) - : [], - }; - }, - - toJSON(message: SignedBy): unknown { - const obj: any = {}; - if (message.allOf?.length) { - obj.allOf = message.allOf; - } - if (message.anyOf?.length) { - obj.anyOf = message.anyOf; - } - return obj; - }, - - create(base?: DeepPartial): SignedBy { - return SignedBy.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): SignedBy { - const message = createBaseSignedBy(); - message.allOf = object.allOf?.map((e) => e) || []; - message.anyOf = object.anyOf?.map((e) => e) || []; - return message; - }, -}; - -messageTypeRegistry.set(SignedBy.$type, SignedBy); - -function createBasePlacementRequirements(): PlacementRequirements { - return { - $type: "akash.base.v1beta1.PlacementRequirements", - signedBy: undefined, - attributes: [], - }; -} - -export const PlacementRequirements = { - $type: "akash.base.v1beta1.PlacementRequirements" as const, - - encode( - message: PlacementRequirements, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.signedBy !== undefined) { - SignedBy.encode(message.signedBy, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): PlacementRequirements { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBasePlacementRequirements(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.signedBy = SignedBy.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): PlacementRequirements { - return { - $type: PlacementRequirements.$type, - signedBy: isSet(object.signedBy) - ? SignedBy.fromJSON(object.signedBy) - : undefined, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: PlacementRequirements): unknown { - const obj: any = {}; - if (message.signedBy !== undefined) { - obj.signedBy = SignedBy.toJSON(message.signedBy); - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): PlacementRequirements { - return PlacementRequirements.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): PlacementRequirements { - const message = createBasePlacementRequirements(); - message.signedBy = - object.signedBy !== undefined && object.signedBy !== null - ? SignedBy.fromPartial(object.signedBy) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(PlacementRequirements.$type, PlacementRequirements); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/base/v1beta1/endpoint.ts b/ts/src/generated/akash/base/v1beta1/endpoint.ts deleted file mode 100644 index b84ed036..00000000 --- a/ts/src/generated/akash/base/v1beta1/endpoint.ts +++ /dev/null @@ -1,144 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** Endpoint describes a publicly accessible IP service */ -export interface Endpoint { - $type: "akash.base.v1beta1.Endpoint"; - kind: Endpoint_Kind; -} - -/** This describes how the endpoint is implemented when the lease is deployed */ -export enum Endpoint_Kind { - /** SHARED_HTTP - Describes an endpoint that becomes a Kubernetes Ingress */ - SHARED_HTTP = 0, - /** RANDOM_PORT - Describes an endpoint that becomes a Kubernetes NodePort */ - RANDOM_PORT = 1, - UNRECOGNIZED = -1, -} - -export function endpoint_KindFromJSON(object: any): Endpoint_Kind { - switch (object) { - case 0: - case "SHARED_HTTP": - return Endpoint_Kind.SHARED_HTTP; - case 1: - case "RANDOM_PORT": - return Endpoint_Kind.RANDOM_PORT; - case -1: - case "UNRECOGNIZED": - default: - return Endpoint_Kind.UNRECOGNIZED; - } -} - -export function endpoint_KindToJSON(object: Endpoint_Kind): string { - switch (object) { - case Endpoint_Kind.SHARED_HTTP: - return "SHARED_HTTP"; - case Endpoint_Kind.RANDOM_PORT: - return "RANDOM_PORT"; - case Endpoint_Kind.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -function createBaseEndpoint(): Endpoint { - return { $type: "akash.base.v1beta1.Endpoint", kind: 0 }; -} - -export const Endpoint = { - $type: "akash.base.v1beta1.Endpoint" as const, - - encode( - message: Endpoint, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.kind !== 0) { - writer.uint32(8).int32(message.kind); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Endpoint { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseEndpoint(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.kind = reader.int32() as any; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Endpoint { - return { - $type: Endpoint.$type, - kind: isSet(object.kind) ? endpoint_KindFromJSON(object.kind) : 0, - }; - }, - - toJSON(message: Endpoint): unknown { - const obj: any = {}; - if (message.kind !== 0) { - obj.kind = endpoint_KindToJSON(message.kind); - } - return obj; - }, - - create(base?: DeepPartial): Endpoint { - return Endpoint.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Endpoint { - const message = createBaseEndpoint(); - message.kind = object.kind ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(Endpoint.$type, Endpoint); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? 
ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/base/v1beta1/resource.ts b/ts/src/generated/akash/base/v1beta1/resource.ts deleted file mode 100644 index 6866fb15..00000000 --- a/ts/src/generated/akash/base/v1beta1/resource.ts +++ /dev/null @@ -1,481 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Attribute } from "./attribute"; -import { Endpoint } from "./endpoint"; -import { ResourceValue } from "./resourcevalue"; - -/** CPU stores resource units and cpu config attributes */ -export interface CPU { - $type: "akash.base.v1beta1.CPU"; - units: ResourceValue | undefined; - attributes: Attribute[]; -} - -/** Memory stores resource quantity and memory attributes */ -export interface Memory { - $type: "akash.base.v1beta1.Memory"; - quantity: ResourceValue | undefined; - attributes: Attribute[]; -} - -/** Storage stores resource quantity and storage attributes */ -export interface Storage { - $type: "akash.base.v1beta1.Storage"; - quantity: ResourceValue | undefined; - attributes: Attribute[]; -} - -/** - * ResourceUnits describes all available resources types for deployment/node etc - * if field is nil resource is not present in the given data-structure - */ -export interface ResourceUnits { - $type: "akash.base.v1beta1.ResourceUnits"; - cpu: CPU | undefined; - memory: Memory | undefined; - storage: Storage | undefined; - endpoints: Endpoint[]; -} - -function createBaseCPU(): CPU { - return { $type: "akash.base.v1beta1.CPU", units: undefined, attributes: [] }; -} - -export const CPU = { - $type: "akash.base.v1beta1.CPU" as const, - - encode(message: CPU, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.units !== undefined) { - ResourceValue.encode(message.units, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CPU { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCPU(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.units = ResourceValue.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CPU { - return { - $type: CPU.$type, - units: isSet(object.units) - ? ResourceValue.fromJSON(object.units) - : undefined, - attributes: globalThis.Array.isArray(object?.attributes) - ? 
object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: CPU): unknown { - const obj: any = {}; - if (message.units !== undefined) { - obj.units = ResourceValue.toJSON(message.units); - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): CPU { - return CPU.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): CPU { - const message = createBaseCPU(); - message.units = - object.units !== undefined && object.units !== null - ? ResourceValue.fromPartial(object.units) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(CPU.$type, CPU); - -function createBaseMemory(): Memory { - return { - $type: "akash.base.v1beta1.Memory", - quantity: undefined, - attributes: [], - }; -} - -export const Memory = { - $type: "akash.base.v1beta1.Memory" as const, - - encode( - message: Memory, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.quantity !== undefined) { - ResourceValue.encode(message.quantity, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Memory { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMemory(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.quantity = ResourceValue.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Memory { - return { - $type: Memory.$type, - quantity: isSet(object.quantity) - ? ResourceValue.fromJSON(object.quantity) - : undefined, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: Memory): unknown { - const obj: any = {}; - if (message.quantity !== undefined) { - obj.quantity = ResourceValue.toJSON(message.quantity); - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): Memory { - return Memory.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Memory { - const message = createBaseMemory(); - message.quantity = - object.quantity !== undefined && object.quantity !== null - ? 
ResourceValue.fromPartial(object.quantity) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(Memory.$type, Memory); - -function createBaseStorage(): Storage { - return { - $type: "akash.base.v1beta1.Storage", - quantity: undefined, - attributes: [], - }; -} - -export const Storage = { - $type: "akash.base.v1beta1.Storage" as const, - - encode( - message: Storage, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.quantity !== undefined) { - ResourceValue.encode(message.quantity, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Storage { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseStorage(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.quantity = ResourceValue.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Storage { - return { - $type: Storage.$type, - quantity: isSet(object.quantity) - ? ResourceValue.fromJSON(object.quantity) - : undefined, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: Storage): unknown { - const obj: any = {}; - if (message.quantity !== undefined) { - obj.quantity = ResourceValue.toJSON(message.quantity); - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): Storage { - return Storage.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Storage { - const message = createBaseStorage(); - message.quantity = - object.quantity !== undefined && object.quantity !== null - ? ResourceValue.fromPartial(object.quantity) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(Storage.$type, Storage); - -function createBaseResourceUnits(): ResourceUnits { - return { - $type: "akash.base.v1beta1.ResourceUnits", - cpu: undefined, - memory: undefined, - storage: undefined, - endpoints: [], - }; -} - -export const ResourceUnits = { - $type: "akash.base.v1beta1.ResourceUnits" as const, - - encode( - message: ResourceUnits, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.cpu !== undefined) { - CPU.encode(message.cpu, writer.uint32(10).fork()).ldelim(); - } - if (message.memory !== undefined) { - Memory.encode(message.memory, writer.uint32(18).fork()).ldelim(); - } - if (message.storage !== undefined) { - Storage.encode(message.storage, writer.uint32(26).fork()).ldelim(); - } - for (const v of message.endpoints) { - Endpoint.encode(v!, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ResourceUnits { - const reader = - input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseResourceUnits(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.cpu = CPU.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.memory = Memory.decode(reader, reader.uint32()); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.storage = Storage.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.endpoints.push(Endpoint.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ResourceUnits { - return { - $type: ResourceUnits.$type, - cpu: isSet(object.cpu) ? CPU.fromJSON(object.cpu) : undefined, - memory: isSet(object.memory) ? Memory.fromJSON(object.memory) : undefined, - storage: isSet(object.storage) - ? Storage.fromJSON(object.storage) - : undefined, - endpoints: globalThis.Array.isArray(object?.endpoints) - ? object.endpoints.map((e: any) => Endpoint.fromJSON(e)) - : [], - }; - }, - - toJSON(message: ResourceUnits): unknown { - const obj: any = {}; - if (message.cpu !== undefined) { - obj.cpu = CPU.toJSON(message.cpu); - } - if (message.memory !== undefined) { - obj.memory = Memory.toJSON(message.memory); - } - if (message.storage !== undefined) { - obj.storage = Storage.toJSON(message.storage); - } - if (message.endpoints?.length) { - obj.endpoints = message.endpoints.map((e) => Endpoint.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): ResourceUnits { - return ResourceUnits.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): ResourceUnits { - const message = createBaseResourceUnits(); - message.cpu = - object.cpu !== undefined && object.cpu !== null - ? CPU.fromPartial(object.cpu) - : undefined; - message.memory = - object.memory !== undefined && object.memory !== null - ? Memory.fromPartial(object.memory) - : undefined; - message.storage = - object.storage !== undefined && object.storage !== null - ? Storage.fromPartial(object.storage) - : undefined; - message.endpoints = - object.endpoints?.map((e) => Endpoint.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(ResourceUnits.$type, ResourceUnits); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/base/v1beta1/resourcevalue.ts b/ts/src/generated/akash/base/v1beta1/resourcevalue.ts deleted file mode 100644 index 5578c929..00000000 --- a/ts/src/generated/akash/base/v1beta1/resourcevalue.ts +++ /dev/null @@ -1,133 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** Unit stores cpu, memory and storage metrics */ -export interface ResourceValue { - $type: "akash.base.v1beta1.ResourceValue"; - val: Uint8Array; -} - -function createBaseResourceValue(): ResourceValue { - return { $type: "akash.base.v1beta1.ResourceValue", val: new Uint8Array(0) }; -} - -export const ResourceValue = { - $type: "akash.base.v1beta1.ResourceValue" as const, - - encode( - message: ResourceValue, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.val.length !== 0) { - writer.uint32(10).bytes(message.val); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ResourceValue { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseResourceValue(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.val = reader.bytes(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ResourceValue { - return { - $type: ResourceValue.$type, - val: isSet(object.val) ? bytesFromBase64(object.val) : new Uint8Array(0), - }; - }, - - toJSON(message: ResourceValue): unknown { - const obj: any = {}; - if (message.val.length !== 0) { - obj.val = base64FromBytes(message.val); - } - return obj; - }, - - create(base?: DeepPartial): ResourceValue { - return ResourceValue.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): ResourceValue { - const message = createBaseResourceValue(); - message.val = object.val ?? new Uint8Array(0); - return message; - }, -}; - -messageTypeRegistry.set(ResourceValue.$type, ResourceValue); - -function bytesFromBase64(b64: string): Uint8Array { - if ((globalThis as any).Buffer) { - return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); - } else { - const bin = globalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if ((globalThis as any).Buffer) { - return globalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(globalThis.String.fromCharCode(byte)); - }); - return globalThis.btoa(bin.join("")); - } -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/base/v1beta2/attribute.ts b/ts/src/generated/akash/base/v1beta2/attribute.ts deleted file mode 100644 index c05e5a02..00000000 --- a/ts/src/generated/akash/base/v1beta2/attribute.ts +++ /dev/null @@ -1,334 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** Attribute represents key value pair */ -export interface Attribute { - $type: "akash.base.v1beta2.Attribute"; - key: string; - value: string; -} - -/** - * SignedBy represents validation accounts that tenant expects signatures for provider attributes - * AllOf has precedence i.e. if there is at least one entry AnyOf is ignored regardless to how many - * entries there - * this behaviour to be discussed - */ -export interface SignedBy { - $type: "akash.base.v1beta2.SignedBy"; - /** all_of all keys in this list must have signed attributes */ - allOf: string[]; - /** any_of at least of of the keys from the list must have signed attributes */ - anyOf: string[]; -} - -/** PlacementRequirements */ -export interface PlacementRequirements { - $type: "akash.base.v1beta2.PlacementRequirements"; - /** SignedBy list of keys that tenants expect to have signatures from */ - signedBy: SignedBy | undefined; - /** Attribute list of attributes tenant expects from the provider */ - attributes: Attribute[]; -} - -function createBaseAttribute(): Attribute { - return { $type: "akash.base.v1beta2.Attribute", key: "", value: "" }; -} - -export const Attribute = { - $type: "akash.base.v1beta2.Attribute" as const, - - encode( - message: Attribute, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.key !== "") { - writer.uint32(10).string(message.key); - } - if (message.value !== "") { - writer.uint32(18).string(message.value); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Attribute { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseAttribute(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.key = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.value = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Attribute { - return { - $type: Attribute.$type, - key: isSet(object.key) ? globalThis.String(object.key) : "", - value: isSet(object.value) ? globalThis.String(object.value) : "", - }; - }, - - toJSON(message: Attribute): unknown { - const obj: any = {}; - if (message.key !== "") { - obj.key = message.key; - } - if (message.value !== "") { - obj.value = message.value; - } - return obj; - }, - - create(base?: DeepPartial): Attribute { - return Attribute.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Attribute { - const message = createBaseAttribute(); - message.key = object.key ?? ""; - message.value = object.value ?? 
""; - return message; - }, -}; - -messageTypeRegistry.set(Attribute.$type, Attribute); - -function createBaseSignedBy(): SignedBy { - return { $type: "akash.base.v1beta2.SignedBy", allOf: [], anyOf: [] }; -} - -export const SignedBy = { - $type: "akash.base.v1beta2.SignedBy" as const, - - encode( - message: SignedBy, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.allOf) { - writer.uint32(10).string(v!); - } - for (const v of message.anyOf) { - writer.uint32(18).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SignedBy { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseSignedBy(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.allOf.push(reader.string()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.anyOf.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SignedBy { - return { - $type: SignedBy.$type, - allOf: globalThis.Array.isArray(object?.allOf) - ? object.allOf.map((e: any) => globalThis.String(e)) - : [], - anyOf: globalThis.Array.isArray(object?.anyOf) - ? object.anyOf.map((e: any) => globalThis.String(e)) - : [], - }; - }, - - toJSON(message: SignedBy): unknown { - const obj: any = {}; - if (message.allOf?.length) { - obj.allOf = message.allOf; - } - if (message.anyOf?.length) { - obj.anyOf = message.anyOf; - } - return obj; - }, - - create(base?: DeepPartial): SignedBy { - return SignedBy.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): SignedBy { - const message = createBaseSignedBy(); - message.allOf = object.allOf?.map((e) => e) || []; - message.anyOf = object.anyOf?.map((e) => e) || []; - return message; - }, -}; - -messageTypeRegistry.set(SignedBy.$type, SignedBy); - -function createBasePlacementRequirements(): PlacementRequirements { - return { - $type: "akash.base.v1beta2.PlacementRequirements", - signedBy: undefined, - attributes: [], - }; -} - -export const PlacementRequirements = { - $type: "akash.base.v1beta2.PlacementRequirements" as const, - - encode( - message: PlacementRequirements, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.signedBy !== undefined) { - SignedBy.encode(message.signedBy, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): PlacementRequirements { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBasePlacementRequirements(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.signedBy = SignedBy.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): PlacementRequirements { - return { - $type: PlacementRequirements.$type, - signedBy: isSet(object.signedBy) - ? SignedBy.fromJSON(object.signedBy) - : undefined, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: PlacementRequirements): unknown { - const obj: any = {}; - if (message.signedBy !== undefined) { - obj.signedBy = SignedBy.toJSON(message.signedBy); - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): PlacementRequirements { - return PlacementRequirements.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): PlacementRequirements { - const message = createBasePlacementRequirements(); - message.signedBy = - object.signedBy !== undefined && object.signedBy !== null - ? SignedBy.fromPartial(object.signedBy) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(PlacementRequirements.$type, PlacementRequirements); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/base/v1beta2/endpoint.ts b/ts/src/generated/akash/base/v1beta2/endpoint.ts deleted file mode 100644 index bd7d7e40..00000000 --- a/ts/src/generated/akash/base/v1beta2/endpoint.ts +++ /dev/null @@ -1,169 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** Endpoint describes a publicly accessible IP service */ -export interface Endpoint { - $type: "akash.base.v1beta2.Endpoint"; - kind: Endpoint_Kind; - sequenceNumber: number; -} - -/** This describes how the endpoint is implemented when the lease is deployed */ -export enum Endpoint_Kind { - /** SHARED_HTTP - Describes an endpoint that becomes a Kubernetes Ingress */ - SHARED_HTTP = 0, - /** RANDOM_PORT - Describes an endpoint that becomes a Kubernetes NodePort */ - RANDOM_PORT = 1, - /** LEASED_IP - Describes an endpoint that becomes a leased IP */ - LEASED_IP = 2, - UNRECOGNIZED = -1, -} - -export function endpoint_KindFromJSON(object: any): Endpoint_Kind { - switch (object) { - case 0: - case "SHARED_HTTP": - return Endpoint_Kind.SHARED_HTTP; - case 1: - case "RANDOM_PORT": - return Endpoint_Kind.RANDOM_PORT; - case 2: - case "LEASED_IP": - return Endpoint_Kind.LEASED_IP; - case -1: - case "UNRECOGNIZED": - default: - return Endpoint_Kind.UNRECOGNIZED; - } -} - -export function endpoint_KindToJSON(object: Endpoint_Kind): string { - switch (object) { - case Endpoint_Kind.SHARED_HTTP: - return "SHARED_HTTP"; - case Endpoint_Kind.RANDOM_PORT: - return "RANDOM_PORT"; - case Endpoint_Kind.LEASED_IP: - return "LEASED_IP"; - case Endpoint_Kind.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -function createBaseEndpoint(): Endpoint { - return { $type: "akash.base.v1beta2.Endpoint", kind: 0, sequenceNumber: 0 }; -} - -export const Endpoint = { - $type: "akash.base.v1beta2.Endpoint" as const, - - encode( - message: Endpoint, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.kind !== 0) { - writer.uint32(8).int32(message.kind); - } - if (message.sequenceNumber !== 0) { - writer.uint32(16).uint32(message.sequenceNumber); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Endpoint { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseEndpoint(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.kind = reader.int32() as any; - continue; - case 2: - if (tag !== 16) { - break; - } - - message.sequenceNumber = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Endpoint { - return { - $type: Endpoint.$type, - kind: isSet(object.kind) ? endpoint_KindFromJSON(object.kind) : 0, - sequenceNumber: isSet(object.sequenceNumber) - ? 
globalThis.Number(object.sequenceNumber) - : 0, - }; - }, - - toJSON(message: Endpoint): unknown { - const obj: any = {}; - if (message.kind !== 0) { - obj.kind = endpoint_KindToJSON(message.kind); - } - if (message.sequenceNumber !== 0) { - obj.sequenceNumber = Math.round(message.sequenceNumber); - } - return obj; - }, - - create(base?: DeepPartial): Endpoint { - return Endpoint.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Endpoint { - const message = createBaseEndpoint(); - message.kind = object.kind ?? 0; - message.sequenceNumber = object.sequenceNumber ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(Endpoint.$type, Endpoint); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/base/v1beta2/resource.ts b/ts/src/generated/akash/base/v1beta2/resource.ts deleted file mode 100644 index 00311c40..00000000 --- a/ts/src/generated/akash/base/v1beta2/resource.ts +++ /dev/null @@ -1,352 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Attribute } from "./attribute"; -import { ResourceValue } from "./resourcevalue"; - -/** CPU stores resource units and cpu config attributes */ -export interface CPU { - $type: "akash.base.v1beta2.CPU"; - units: ResourceValue | undefined; - attributes: Attribute[]; -} - -/** Memory stores resource quantity and memory attributes */ -export interface Memory { - $type: "akash.base.v1beta2.Memory"; - quantity: ResourceValue | undefined; - attributes: Attribute[]; -} - -/** Storage stores resource quantity and storage attributes */ -export interface Storage { - $type: "akash.base.v1beta2.Storage"; - name: string; - quantity: ResourceValue | undefined; - attributes: Attribute[]; -} - -function createBaseCPU(): CPU { - return { $type: "akash.base.v1beta2.CPU", units: undefined, attributes: [] }; -} - -export const CPU = { - $type: "akash.base.v1beta2.CPU" as const, - - encode(message: CPU, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.units !== undefined) { - ResourceValue.encode(message.units, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CPU { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseCPU(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.units = ResourceValue.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CPU { - return { - $type: CPU.$type, - units: isSet(object.units) - ? ResourceValue.fromJSON(object.units) - : undefined, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: CPU): unknown { - const obj: any = {}; - if (message.units !== undefined) { - obj.units = ResourceValue.toJSON(message.units); - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): CPU { - return CPU.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): CPU { - const message = createBaseCPU(); - message.units = - object.units !== undefined && object.units !== null - ? ResourceValue.fromPartial(object.units) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(CPU.$type, CPU); - -function createBaseMemory(): Memory { - return { - $type: "akash.base.v1beta2.Memory", - quantity: undefined, - attributes: [], - }; -} - -export const Memory = { - $type: "akash.base.v1beta2.Memory" as const, - - encode( - message: Memory, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.quantity !== undefined) { - ResourceValue.encode(message.quantity, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Memory { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMemory(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.quantity = ResourceValue.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Memory { - return { - $type: Memory.$type, - quantity: isSet(object.quantity) - ? ResourceValue.fromJSON(object.quantity) - : undefined, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: Memory): unknown { - const obj: any = {}; - if (message.quantity !== undefined) { - obj.quantity = ResourceValue.toJSON(message.quantity); - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): Memory { - return Memory.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): Memory { - const message = createBaseMemory(); - message.quantity = - object.quantity !== undefined && object.quantity !== null - ? ResourceValue.fromPartial(object.quantity) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(Memory.$type, Memory); - -function createBaseStorage(): Storage { - return { - $type: "akash.base.v1beta2.Storage", - name: "", - quantity: undefined, - attributes: [], - }; -} - -export const Storage = { - $type: "akash.base.v1beta2.Storage" as const, - - encode( - message: Storage, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.quantity !== undefined) { - ResourceValue.encode(message.quantity, writer.uint32(18).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Storage { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseStorage(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.quantity = ResourceValue.decode(reader, reader.uint32()); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Storage { - return { - $type: Storage.$type, - name: isSet(object.name) ? globalThis.String(object.name) : "", - quantity: isSet(object.quantity) - ? ResourceValue.fromJSON(object.quantity) - : undefined, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: Storage): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.quantity !== undefined) { - obj.quantity = ResourceValue.toJSON(message.quantity); - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): Storage { - return Storage.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Storage { - const message = createBaseStorage(); - message.name = object.name ?? ""; - message.quantity = - object.quantity !== undefined && object.quantity !== null - ? ResourceValue.fromPartial(object.quantity) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(Storage.$type, Storage); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/base/v1beta2/resourceunits.ts b/ts/src/generated/akash/base/v1beta2/resourceunits.ts deleted file mode 100644 index f77bd5dd..00000000 --- a/ts/src/generated/akash/base/v1beta2/resourceunits.ts +++ /dev/null @@ -1,178 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Endpoint } from "./endpoint"; -import { CPU, Memory, Storage } from "./resource"; - -/** - * ResourceUnits describes all available resources types for deployment/node etc - * if field is nil resource is not present in the given data-structure - */ -export interface ResourceUnits { - $type: "akash.base.v1beta2.ResourceUnits"; - cpu: CPU | undefined; - memory: Memory | undefined; - storage: Storage[]; - endpoints: Endpoint[]; -} - -function createBaseResourceUnits(): ResourceUnits { - return { - $type: "akash.base.v1beta2.ResourceUnits", - cpu: undefined, - memory: undefined, - storage: [], - endpoints: [], - }; -} - -export const ResourceUnits = { - $type: "akash.base.v1beta2.ResourceUnits" as const, - - encode( - message: ResourceUnits, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.cpu !== undefined) { - CPU.encode(message.cpu, writer.uint32(10).fork()).ldelim(); - } - if (message.memory !== undefined) { - Memory.encode(message.memory, writer.uint32(18).fork()).ldelim(); - } - for (const v of message.storage) { - Storage.encode(v!, writer.uint32(26).fork()).ldelim(); - } - for (const v of message.endpoints) { - Endpoint.encode(v!, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ResourceUnits { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseResourceUnits(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.cpu = CPU.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.memory = Memory.decode(reader, reader.uint32()); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.storage.push(Storage.decode(reader, reader.uint32())); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.endpoints.push(Endpoint.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ResourceUnits { - return { - $type: ResourceUnits.$type, - cpu: isSet(object.cpu) ? CPU.fromJSON(object.cpu) : undefined, - memory: isSet(object.memory) ? Memory.fromJSON(object.memory) : undefined, - storage: globalThis.Array.isArray(object?.storage) - ? object.storage.map((e: any) => Storage.fromJSON(e)) - : [], - endpoints: globalThis.Array.isArray(object?.endpoints) - ? 
object.endpoints.map((e: any) => Endpoint.fromJSON(e)) - : [], - }; - }, - - toJSON(message: ResourceUnits): unknown { - const obj: any = {}; - if (message.cpu !== undefined) { - obj.cpu = CPU.toJSON(message.cpu); - } - if (message.memory !== undefined) { - obj.memory = Memory.toJSON(message.memory); - } - if (message.storage?.length) { - obj.storage = message.storage.map((e) => Storage.toJSON(e)); - } - if (message.endpoints?.length) { - obj.endpoints = message.endpoints.map((e) => Endpoint.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): ResourceUnits { - return ResourceUnits.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): ResourceUnits { - const message = createBaseResourceUnits(); - message.cpu = - object.cpu !== undefined && object.cpu !== null - ? CPU.fromPartial(object.cpu) - : undefined; - message.memory = - object.memory !== undefined && object.memory !== null - ? Memory.fromPartial(object.memory) - : undefined; - message.storage = object.storage?.map((e) => Storage.fromPartial(e)) || []; - message.endpoints = - object.endpoints?.map((e) => Endpoint.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(ResourceUnits.$type, ResourceUnits); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/base/v1beta2/resourcevalue.ts b/ts/src/generated/akash/base/v1beta2/resourcevalue.ts deleted file mode 100644 index 7b2ffd95..00000000 --- a/ts/src/generated/akash/base/v1beta2/resourcevalue.ts +++ /dev/null @@ -1,133 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** Unit stores cpu, memory and storage metrics */ -export interface ResourceValue { - $type: "akash.base.v1beta2.ResourceValue"; - val: Uint8Array; -} - -function createBaseResourceValue(): ResourceValue { - return { $type: "akash.base.v1beta2.ResourceValue", val: new Uint8Array(0) }; -} - -export const ResourceValue = { - $type: "akash.base.v1beta2.ResourceValue" as const, - - encode( - message: ResourceValue, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.val.length !== 0) { - writer.uint32(10).bytes(message.val); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ResourceValue { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseResourceValue(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.val = reader.bytes(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ResourceValue { - return { - $type: ResourceValue.$type, - val: isSet(object.val) ? 
bytesFromBase64(object.val) : new Uint8Array(0), - }; - }, - - toJSON(message: ResourceValue): unknown { - const obj: any = {}; - if (message.val.length !== 0) { - obj.val = base64FromBytes(message.val); - } - return obj; - }, - - create(base?: DeepPartial): ResourceValue { - return ResourceValue.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): ResourceValue { - const message = createBaseResourceValue(); - message.val = object.val ?? new Uint8Array(0); - return message; - }, -}; - -messageTypeRegistry.set(ResourceValue.$type, ResourceValue); - -function bytesFromBase64(b64: string): Uint8Array { - if ((globalThis as any).Buffer) { - return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); - } else { - const bin = globalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if ((globalThis as any).Buffer) { - return globalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(globalThis.String.fromCharCode(byte)); - }); - return globalThis.btoa(bin.join("")); - } -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/base/v1beta3/attribute.ts b/ts/src/generated/akash/base/v1beta3/attribute.ts deleted file mode 100644 index 93a91c72..00000000 --- a/ts/src/generated/akash/base/v1beta3/attribute.ts +++ /dev/null @@ -1,334 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** Attribute represents key value pair */ -export interface Attribute { - $type: "akash.base.v1beta3.Attribute"; - key: string; - value: string; -} - -/** - * SignedBy represents validation accounts that tenant expects signatures for provider attributes - * AllOf has precedence i.e. 
if there is at least one entry AnyOf is ignored regardless to how many - * entries there - * this behaviour to be discussed - */ -export interface SignedBy { - $type: "akash.base.v1beta3.SignedBy"; - /** all_of all keys in this list must have signed attributes */ - allOf: string[]; - /** any_of at least of of the keys from the list must have signed attributes */ - anyOf: string[]; -} - -/** PlacementRequirements */ -export interface PlacementRequirements { - $type: "akash.base.v1beta3.PlacementRequirements"; - /** SignedBy list of keys that tenants expect to have signatures from */ - signedBy: SignedBy | undefined; - /** Attribute list of attributes tenant expects from the provider */ - attributes: Attribute[]; -} - -function createBaseAttribute(): Attribute { - return { $type: "akash.base.v1beta3.Attribute", key: "", value: "" }; -} - -export const Attribute = { - $type: "akash.base.v1beta3.Attribute" as const, - - encode( - message: Attribute, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.key !== "") { - writer.uint32(10).string(message.key); - } - if (message.value !== "") { - writer.uint32(18).string(message.value); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Attribute { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseAttribute(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.key = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.value = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Attribute { - return { - $type: Attribute.$type, - key: isSet(object.key) ? globalThis.String(object.key) : "", - value: isSet(object.value) ? globalThis.String(object.value) : "", - }; - }, - - toJSON(message: Attribute): unknown { - const obj: any = {}; - if (message.key !== "") { - obj.key = message.key; - } - if (message.value !== "") { - obj.value = message.value; - } - return obj; - }, - - create(base?: DeepPartial): Attribute { - return Attribute.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Attribute { - const message = createBaseAttribute(); - message.key = object.key ?? ""; - message.value = object.value ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(Attribute.$type, Attribute); - -function createBaseSignedBy(): SignedBy { - return { $type: "akash.base.v1beta3.SignedBy", allOf: [], anyOf: [] }; -} - -export const SignedBy = { - $type: "akash.base.v1beta3.SignedBy" as const, - - encode( - message: SignedBy, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.allOf) { - writer.uint32(10).string(v!); - } - for (const v of message.anyOf) { - writer.uint32(18).string(v!); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): SignedBy { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseSignedBy(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.allOf.push(reader.string()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.anyOf.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): SignedBy { - return { - $type: SignedBy.$type, - allOf: globalThis.Array.isArray(object?.allOf) - ? object.allOf.map((e: any) => globalThis.String(e)) - : [], - anyOf: globalThis.Array.isArray(object?.anyOf) - ? object.anyOf.map((e: any) => globalThis.String(e)) - : [], - }; - }, - - toJSON(message: SignedBy): unknown { - const obj: any = {}; - if (message.allOf?.length) { - obj.allOf = message.allOf; - } - if (message.anyOf?.length) { - obj.anyOf = message.anyOf; - } - return obj; - }, - - create(base?: DeepPartial): SignedBy { - return SignedBy.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): SignedBy { - const message = createBaseSignedBy(); - message.allOf = object.allOf?.map((e) => e) || []; - message.anyOf = object.anyOf?.map((e) => e) || []; - return message; - }, -}; - -messageTypeRegistry.set(SignedBy.$type, SignedBy); - -function createBasePlacementRequirements(): PlacementRequirements { - return { - $type: "akash.base.v1beta3.PlacementRequirements", - signedBy: undefined, - attributes: [], - }; -} - -export const PlacementRequirements = { - $type: "akash.base.v1beta3.PlacementRequirements" as const, - - encode( - message: PlacementRequirements, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.signedBy !== undefined) { - SignedBy.encode(message.signedBy, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): PlacementRequirements { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBasePlacementRequirements(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.signedBy = SignedBy.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): PlacementRequirements { - return { - $type: PlacementRequirements.$type, - signedBy: isSet(object.signedBy) - ? SignedBy.fromJSON(object.signedBy) - : undefined, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: PlacementRequirements): unknown { - const obj: any = {}; - if (message.signedBy !== undefined) { - obj.signedBy = SignedBy.toJSON(message.signedBy); - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): PlacementRequirements { - return PlacementRequirements.fromPartial(base ?? 
{}); - }, - fromPartial( - object: DeepPartial, - ): PlacementRequirements { - const message = createBasePlacementRequirements(); - message.signedBy = - object.signedBy !== undefined && object.signedBy !== null - ? SignedBy.fromPartial(object.signedBy) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(PlacementRequirements.$type, PlacementRequirements); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/base/v1beta3/cpu.ts b/ts/src/generated/akash/base/v1beta3/cpu.ts deleted file mode 100644 index 6b35457e..00000000 --- a/ts/src/generated/akash/base/v1beta3/cpu.ts +++ /dev/null @@ -1,131 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Attribute } from "./attribute"; -import { ResourceValue } from "./resourcevalue"; - -/** CPU stores resource units and cpu config attributes */ -export interface CPU { - $type: "akash.base.v1beta3.CPU"; - units: ResourceValue | undefined; - attributes: Attribute[]; -} - -function createBaseCPU(): CPU { - return { $type: "akash.base.v1beta3.CPU", units: undefined, attributes: [] }; -} - -export const CPU = { - $type: "akash.base.v1beta3.CPU" as const, - - encode(message: CPU, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.units !== undefined) { - ResourceValue.encode(message.units, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CPU { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCPU(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.units = ResourceValue.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CPU { - return { - $type: CPU.$type, - units: isSet(object.units) - ? ResourceValue.fromJSON(object.units) - : undefined, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: CPU): unknown { - const obj: any = {}; - if (message.units !== undefined) { - obj.units = ResourceValue.toJSON(message.units); - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): CPU { - return CPU.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): CPU { - const message = createBaseCPU(); - message.units = - object.units !== undefined && object.units !== null - ? ResourceValue.fromPartial(object.units) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(CPU.$type, CPU); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/base/v1beta3/endpoint.ts b/ts/src/generated/akash/base/v1beta3/endpoint.ts deleted file mode 100644 index 49679d9c..00000000 --- a/ts/src/generated/akash/base/v1beta3/endpoint.ts +++ /dev/null @@ -1,169 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** Endpoint describes a publicly accessible IP service */ -export interface Endpoint { - $type: "akash.base.v1beta3.Endpoint"; - kind: Endpoint_Kind; - sequenceNumber: number; -} - -/** This describes how the endpoint is implemented when the lease is deployed */ -export enum Endpoint_Kind { - /** SHARED_HTTP - Describes an endpoint that becomes a Kubernetes Ingress */ - SHARED_HTTP = 0, - /** RANDOM_PORT - Describes an endpoint that becomes a Kubernetes NodePort */ - RANDOM_PORT = 1, - /** LEASED_IP - Describes an endpoint that becomes a leased IP */ - LEASED_IP = 2, - UNRECOGNIZED = -1, -} - -export function endpoint_KindFromJSON(object: any): Endpoint_Kind { - switch (object) { - case 0: - case "SHARED_HTTP": - return Endpoint_Kind.SHARED_HTTP; - case 1: - case "RANDOM_PORT": - return Endpoint_Kind.RANDOM_PORT; - case 2: - case "LEASED_IP": - return Endpoint_Kind.LEASED_IP; - case -1: - case "UNRECOGNIZED": - default: - return Endpoint_Kind.UNRECOGNIZED; - } -} - -export function endpoint_KindToJSON(object: Endpoint_Kind): string { - switch (object) { - case Endpoint_Kind.SHARED_HTTP: - return "SHARED_HTTP"; - case Endpoint_Kind.RANDOM_PORT: - return "RANDOM_PORT"; - case Endpoint_Kind.LEASED_IP: - return "LEASED_IP"; - case Endpoint_Kind.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -function createBaseEndpoint(): Endpoint { - return { $type: "akash.base.v1beta3.Endpoint", kind: 0, sequenceNumber: 0 }; -} - -export const Endpoint = { - $type: "akash.base.v1beta3.Endpoint" as const, - - encode( - message: Endpoint, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.kind !== 0) { - writer.uint32(8).int32(message.kind); - } - if (message.sequenceNumber !== 0) { - writer.uint32(16).uint32(message.sequenceNumber); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Endpoint { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseEndpoint(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.kind = reader.int32() as any; - continue; - case 2: - if (tag !== 16) { - break; - } - - message.sequenceNumber = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Endpoint { - return { - $type: Endpoint.$type, - kind: isSet(object.kind) ? endpoint_KindFromJSON(object.kind) : 0, - sequenceNumber: isSet(object.sequenceNumber) - ? globalThis.Number(object.sequenceNumber) - : 0, - }; - }, - - toJSON(message: Endpoint): unknown { - const obj: any = {}; - if (message.kind !== 0) { - obj.kind = endpoint_KindToJSON(message.kind); - } - if (message.sequenceNumber !== 0) { - obj.sequenceNumber = Math.round(message.sequenceNumber); - } - return obj; - }, - - create(base?: DeepPartial): Endpoint { - return Endpoint.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Endpoint { - const message = createBaseEndpoint(); - message.kind = object.kind ?? 0; - message.sequenceNumber = object.sequenceNumber ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(Endpoint.$type, Endpoint); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/base/v1beta3/gpu.ts b/ts/src/generated/akash/base/v1beta3/gpu.ts deleted file mode 100644 index bf04e7f7..00000000 --- a/ts/src/generated/akash/base/v1beta3/gpu.ts +++ /dev/null @@ -1,131 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Attribute } from "./attribute"; -import { ResourceValue } from "./resourcevalue"; - -/** GPU stores resource units and cpu config attributes */ -export interface GPU { - $type: "akash.base.v1beta3.GPU"; - units: ResourceValue | undefined; - attributes: Attribute[]; -} - -function createBaseGPU(): GPU { - return { $type: "akash.base.v1beta3.GPU", units: undefined, attributes: [] }; -} - -export const GPU = { - $type: "akash.base.v1beta3.GPU" as const, - - encode(message: GPU, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.units !== undefined) { - ResourceValue.encode(message.units, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GPU { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseGPU(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.units = ResourceValue.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GPU { - return { - $type: GPU.$type, - units: isSet(object.units) - ? ResourceValue.fromJSON(object.units) - : undefined, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GPU): unknown { - const obj: any = {}; - if (message.units !== undefined) { - obj.units = ResourceValue.toJSON(message.units); - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): GPU { - return GPU.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GPU { - const message = createBaseGPU(); - message.units = - object.units !== undefined && object.units !== null - ? ResourceValue.fromPartial(object.units) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GPU.$type, GPU); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/base/v1beta3/memory.ts b/ts/src/generated/akash/base/v1beta3/memory.ts deleted file mode 100644 index 31885184..00000000 --- a/ts/src/generated/akash/base/v1beta3/memory.ts +++ /dev/null @@ -1,138 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Attribute } from "./attribute"; -import { ResourceValue } from "./resourcevalue"; - -/** Memory stores resource quantity and memory attributes */ -export interface Memory { - $type: "akash.base.v1beta3.Memory"; - quantity: ResourceValue | undefined; - attributes: Attribute[]; -} - -function createBaseMemory(): Memory { - return { - $type: "akash.base.v1beta3.Memory", - quantity: undefined, - attributes: [], - }; -} - -export const Memory = { - $type: "akash.base.v1beta3.Memory" as const, - - encode( - message: Memory, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.quantity !== undefined) { - ResourceValue.encode(message.quantity, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Memory { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMemory(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.quantity = ResourceValue.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Memory { - return { - $type: Memory.$type, - quantity: isSet(object.quantity) - ? ResourceValue.fromJSON(object.quantity) - : undefined, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: Memory): unknown { - const obj: any = {}; - if (message.quantity !== undefined) { - obj.quantity = ResourceValue.toJSON(message.quantity); - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): Memory { - return Memory.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Memory { - const message = createBaseMemory(); - message.quantity = - object.quantity !== undefined && object.quantity !== null - ? ResourceValue.fromPartial(object.quantity) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(Memory.$type, Memory); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/base/v1beta3/resourcevalue.ts b/ts/src/generated/akash/base/v1beta3/resourcevalue.ts deleted file mode 100644 index 58a92096..00000000 --- a/ts/src/generated/akash/base/v1beta3/resourcevalue.ts +++ /dev/null @@ -1,133 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** Unit stores cpu, memory and storage metrics */ -export interface ResourceValue { - $type: "akash.base.v1beta3.ResourceValue"; - val: Uint8Array; -} - -function createBaseResourceValue(): ResourceValue { - return { $type: "akash.base.v1beta3.ResourceValue", val: new Uint8Array(0) }; -} - -export const ResourceValue = { - $type: "akash.base.v1beta3.ResourceValue" as const, - - encode( - message: ResourceValue, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.val.length !== 0) { - writer.uint32(10).bytes(message.val); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ResourceValue { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseResourceValue(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.val = reader.bytes(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ResourceValue { - return { - $type: ResourceValue.$type, - val: isSet(object.val) ? bytesFromBase64(object.val) : new Uint8Array(0), - }; - }, - - toJSON(message: ResourceValue): unknown { - const obj: any = {}; - if (message.val.length !== 0) { - obj.val = base64FromBytes(message.val); - } - return obj; - }, - - create(base?: DeepPartial): ResourceValue { - return ResourceValue.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): ResourceValue { - const message = createBaseResourceValue(); - message.val = object.val ?? new Uint8Array(0); - return message; - }, -}; - -messageTypeRegistry.set(ResourceValue.$type, ResourceValue); - -function bytesFromBase64(b64: string): Uint8Array { - if ((globalThis as any).Buffer) { - return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); - } else { - const bin = globalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if ((globalThis as any).Buffer) { - return globalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(globalThis.String.fromCharCode(byte)); - }); - return globalThis.btoa(bin.join("")); - } -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/base/v1beta3/storage.ts b/ts/src/generated/akash/base/v1beta3/storage.ts deleted file mode 100644 index c9dda78b..00000000 --- a/ts/src/generated/akash/base/v1beta3/storage.ts +++ /dev/null @@ -1,155 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Attribute } from "./attribute"; -import { ResourceValue } from "./resourcevalue"; - -/** Storage stores resource quantity and storage attributes */ -export interface Storage { - $type: "akash.base.v1beta3.Storage"; - name: string; - quantity: ResourceValue | undefined; - attributes: Attribute[]; -} - -function createBaseStorage(): Storage { - return { - $type: "akash.base.v1beta3.Storage", - name: "", - quantity: undefined, - attributes: [], - }; -} - -export const Storage = { - $type: "akash.base.v1beta3.Storage" as const, - - encode( - message: Storage, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.quantity !== undefined) { - ResourceValue.encode(message.quantity, writer.uint32(18).fork()).ldelim(); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Storage { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseStorage(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.quantity = ResourceValue.decode(reader, reader.uint32()); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Storage { - return { - $type: Storage.$type, - name: isSet(object.name) ? globalThis.String(object.name) : "", - quantity: isSet(object.quantity) - ? ResourceValue.fromJSON(object.quantity) - : undefined, - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - }; - }, - - toJSON(message: Storage): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.quantity !== undefined) { - obj.quantity = ResourceValue.toJSON(message.quantity); - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): Storage { - return Storage.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Storage { - const message = createBaseStorage(); - message.name = object.name ?? ""; - message.quantity = - object.quantity !== undefined && object.quantity !== null - ? 
ResourceValue.fromPartial(object.quantity) - : undefined; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(Storage.$type, Storage); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/cert/v1/cert.ts b/ts/src/generated/akash/cert/v1/cert.ts new file mode 100644 index 00000000..24982e34 --- /dev/null +++ b/ts/src/generated/akash/cert/v1/cert.ts @@ -0,0 +1,316 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/cert/v1/cert.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; + +/** State is an enum which refers to state of deployment */ +export enum State { + /** invalid - Prefix should start with 0 in enum. So declaring dummy state */ + invalid = 0, + /** valid - CertificateValid denotes state for deployment active */ + valid = 1, + /** revoked - CertificateRevoked denotes state for deployment closed */ + revoked = 2, + UNRECOGNIZED = -1, +} + +export function stateFromJSON(object: any): State { + switch (object) { + case 0: + case "invalid": + return State.invalid; + case 1: + case "valid": + return State.valid; + case 2: + case "revoked": + return State.revoked; + case -1: + case "UNRECOGNIZED": + default: + return State.UNRECOGNIZED; + } +} + +export function stateToJSON(object: State): string { + switch (object) { + case State.invalid: + return "invalid"; + case State.valid: + return "valid"; + case State.revoked: + return "revoked"; + case State.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +/** ID stores owner and sequence number */ +export interface ID { + $type: "akash.cert.v1.ID"; + owner: string; + serial: string; +} + +/** Certificate stores state, certificate and it's public key */ +export interface Certificate { + $type: "akash.cert.v1.Certificate"; + state: State; + cert: Uint8Array; + pubkey: Uint8Array; +} + +function createBaseID(): ID { + return { $type: "akash.cert.v1.ID", owner: "", serial: "" }; +} + +export const ID: MessageFns = { + $type: "akash.cert.v1.ID" as const, + + encode(message: ID, writer: BinaryWriter = new BinaryWriter()): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (message.serial !== "") { + writer.uint32(18).string(message.serial); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): ID { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseID(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.serial = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): ID { + return { + $type: ID.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + serial: isSet(object.serial) ? globalThis.String(object.serial) : "", + }; + }, + + toJSON(message: ID): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.serial !== "") { + obj.serial = message.serial; + } + return obj; + }, + + create(base?: DeepPartial): ID { + return ID.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): ID { + const message = createBaseID(); + message.owner = object.owner ?? ""; + message.serial = object.serial ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ID.$type, ID); + +function createBaseCertificate(): Certificate { + return { + $type: "akash.cert.v1.Certificate", + state: 0, + cert: new Uint8Array(0), + pubkey: new Uint8Array(0), + }; +} + +export const Certificate: MessageFns = + { + $type: "akash.cert.v1.Certificate" as const, + + encode( + message: Certificate, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.state !== 0) { + writer.uint32(16).int32(message.state); + } + if (message.cert.length !== 0) { + writer.uint32(26).bytes(message.cert); + } + if (message.pubkey.length !== 0) { + writer.uint32(34).bytes(message.pubkey); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Certificate { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseCertificate(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 2: + if (tag !== 16) { + break; + } + + message.state = reader.int32() as any; + continue; + case 3: + if (tag !== 26) { + break; + } + + message.cert = reader.bytes(); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.pubkey = reader.bytes(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Certificate { + return { + $type: Certificate.$type, + state: isSet(object.state) ? stateFromJSON(object.state) : 0, + cert: isSet(object.cert) + ? bytesFromBase64(object.cert) + : new Uint8Array(0), + pubkey: isSet(object.pubkey) + ? bytesFromBase64(object.pubkey) + : new Uint8Array(0), + }; + }, + + toJSON(message: Certificate): unknown { + const obj: any = {}; + if (message.state !== 0) { + obj.state = stateToJSON(message.state); + } + if (message.cert.length !== 0) { + obj.cert = base64FromBytes(message.cert); + } + if (message.pubkey.length !== 0) { + obj.pubkey = base64FromBytes(message.pubkey); + } + return obj; + }, + + create(base?: DeepPartial): Certificate { + return Certificate.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Certificate { + const message = createBaseCertificate(); + message.state = object.state ?? 0; + message.cert = object.cert ?? new Uint8Array(0); + message.pubkey = object.pubkey ?? 
new Uint8Array(0); + return message; + }, + }; + +messageTypeRegistry.set(Certificate.$type, Certificate); + +function bytesFromBase64(b64: string): Uint8Array { + if ((globalThis as any).Buffer) { + return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); + } else { + const bin = globalThis.atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; + } +} + +function base64FromBytes(arr: Uint8Array): string { + if ((globalThis as any).Buffer) { + return globalThis.Buffer.from(arr).toString("base64"); + } else { + const bin: string[] = []; + arr.forEach((byte) => { + bin.push(globalThis.String.fromCharCode(byte)); + }); + return globalThis.btoa(bin.join("")); + } +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/cert/v1/filters.ts b/ts/src/generated/akash/cert/v1/filters.ts new file mode 100644 index 00000000..586b42ef --- /dev/null +++ b/ts/src/generated/akash/cert/v1/filters.ts @@ -0,0 +1,159 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/cert/v1/filters.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; + +/** CertificateFilter defines filters used to filter certificates */ +export interface CertificateFilter { + $type: "akash.cert.v1.CertificateFilter"; + owner: string; + serial: string; + state: string; +} + +function createBaseCertificateFilter(): CertificateFilter { + return { + $type: "akash.cert.v1.CertificateFilter", + owner: "", + serial: "", + state: "", + }; +} + +export const CertificateFilter: MessageFns< + CertificateFilter, + "akash.cert.v1.CertificateFilter" +> = { + $type: "akash.cert.v1.CertificateFilter" as const, + + encode( + message: CertificateFilter, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (message.serial !== "") { + writer.uint32(18).string(message.serial); + } + if (message.state !== "") { + writer.uint32(26).string(message.state); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): CertificateFilter { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseCertificateFilter(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.serial = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.state = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): CertificateFilter { + return { + $type: CertificateFilter.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + serial: isSet(object.serial) ? globalThis.String(object.serial) : "", + state: isSet(object.state) ? globalThis.String(object.state) : "", + }; + }, + + toJSON(message: CertificateFilter): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.serial !== "") { + obj.serial = message.serial; + } + if (message.state !== "") { + obj.state = message.state; + } + return obj; + }, + + create(base?: DeepPartial): CertificateFilter { + return CertificateFilter.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): CertificateFilter { + const message = createBaseCertificateFilter(); + message.owner = object.owner ?? ""; + message.serial = object.serial ?? ""; + message.state = object.state ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CertificateFilter.$type, CertificateFilter); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/cert/v1/genesis.ts b/ts/src/generated/akash/cert/v1/genesis.ts new file mode 100644 index 00000000..87c91557 --- /dev/null +++ b/ts/src/generated/akash/cert/v1/genesis.ts @@ -0,0 +1,235 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/cert/v1/genesis.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Certificate } from "./cert"; + +/** GenesisCertificate defines certificate entry at genesis */ +export interface GenesisCertificate { + $type: "akash.cert.v1.GenesisCertificate"; + owner: string; + certificate: Certificate | undefined; +} + +/** GenesisState defines the basic genesis state used by cert module */ +export interface GenesisState { + $type: "akash.cert.v1.GenesisState"; + certificates: GenesisCertificate[]; +} + +function createBaseGenesisCertificate(): GenesisCertificate { + return { + $type: "akash.cert.v1.GenesisCertificate", + owner: "", + certificate: undefined, + }; +} + +export const GenesisCertificate: MessageFns< + GenesisCertificate, + "akash.cert.v1.GenesisCertificate" +> = { + $type: "akash.cert.v1.GenesisCertificate" as const, + + encode( + message: GenesisCertificate, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (message.certificate !== undefined) { + Certificate.encode(message.certificate, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): GenesisCertificate { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGenesisCertificate(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.certificate = Certificate.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): GenesisCertificate { + return { + $type: GenesisCertificate.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + certificate: isSet(object.certificate) + ? Certificate.fromJSON(object.certificate) + : undefined, + }; + }, + + toJSON(message: GenesisCertificate): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.certificate !== undefined) { + obj.certificate = Certificate.toJSON(message.certificate); + } + return obj; + }, + + create(base?: DeepPartial): GenesisCertificate { + return GenesisCertificate.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GenesisCertificate { + const message = createBaseGenesisCertificate(); + message.owner = object.owner ?? ""; + message.certificate = + object.certificate !== undefined && object.certificate !== null + ? 
Certificate.fromPartial(object.certificate) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(GenesisCertificate.$type, GenesisCertificate); + +function createBaseGenesisState(): GenesisState { + return { $type: "akash.cert.v1.GenesisState", certificates: [] }; +} + +export const GenesisState: MessageFns< + GenesisState, + "akash.cert.v1.GenesisState" +> = { + $type: "akash.cert.v1.GenesisState" as const, + + encode( + message: GenesisState, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.certificates) { + GenesisCertificate.encode(v!, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): GenesisState { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGenesisState(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.certificates.push( + GenesisCertificate.decode(reader, reader.uint32()), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): GenesisState { + return { + $type: GenesisState.$type, + certificates: globalThis.Array.isArray(object?.certificates) + ? object.certificates.map((e: any) => GenesisCertificate.fromJSON(e)) + : [], + }; + }, + + toJSON(message: GenesisState): unknown { + const obj: any = {}; + if (message.certificates?.length) { + obj.certificates = message.certificates.map((e) => + GenesisCertificate.toJSON(e), + ); + } + return obj; + }, + + create(base?: DeepPartial): GenesisState { + return GenesisState.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GenesisState { + const message = createBaseGenesisState(); + message.certificates = + object.certificates?.map((e) => GenesisCertificate.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(GenesisState.$type, GenesisState); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/cert/v1/msg.ts b/ts/src/generated/akash/cert/v1/msg.ts new file mode 100644 index 00000000..957dd0f8 --- /dev/null +++ b/ts/src/generated/akash/cert/v1/msg.ts @@ -0,0 +1,413 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/cert/v1/msg.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { ID } from "./cert"; + +/** MsgCreateCertificate defines an SDK message for creating certificate */ +export interface MsgCreateCertificate { + $type: "akash.cert.v1.MsgCreateCertificate"; + owner: string; + cert: Uint8Array; + pubkey: Uint8Array; +} + +/** MsgCreateCertificateResponse defines the Msg/CreateCertificate response type. */ +export interface MsgCreateCertificateResponse { + $type: "akash.cert.v1.MsgCreateCertificateResponse"; +} + +/** MsgRevokeCertificate defines an SDK message for revoking certificate */ +export interface MsgRevokeCertificate { + $type: "akash.cert.v1.MsgRevokeCertificate"; + id: ID | undefined; +} + +/** MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type. */ +export interface MsgRevokeCertificateResponse { + $type: "akash.cert.v1.MsgRevokeCertificateResponse"; +} + +function createBaseMsgCreateCertificate(): MsgCreateCertificate { + return { + $type: "akash.cert.v1.MsgCreateCertificate", + owner: "", + cert: new Uint8Array(0), + pubkey: new Uint8Array(0), + }; +} + +export const MsgCreateCertificate: MessageFns< + MsgCreateCertificate, + "akash.cert.v1.MsgCreateCertificate" +> = { + $type: "akash.cert.v1.MsgCreateCertificate" as const, + + encode( + message: MsgCreateCertificate, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (message.cert.length !== 0) { + writer.uint32(18).bytes(message.cert); + } + if (message.pubkey.length !== 0) { + writer.uint32(26).bytes(message.pubkey); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgCreateCertificate { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCreateCertificate(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.cert = reader.bytes(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.pubkey = reader.bytes(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCreateCertificate { + return { + $type: MsgCreateCertificate.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + cert: isSet(object.cert) + ? bytesFromBase64(object.cert) + : new Uint8Array(0), + pubkey: isSet(object.pubkey) + ? bytesFromBase64(object.pubkey) + : new Uint8Array(0), + }; + }, + + toJSON(message: MsgCreateCertificate): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.cert.length !== 0) { + obj.cert = base64FromBytes(message.cert); + } + if (message.pubkey.length !== 0) { + obj.pubkey = base64FromBytes(message.pubkey); + } + return obj; + }, + + create(base?: DeepPartial): MsgCreateCertificate { + return MsgCreateCertificate.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): MsgCreateCertificate { + const message = createBaseMsgCreateCertificate(); + message.owner = object.owner ?? ""; + message.cert = object.cert ?? new Uint8Array(0); + message.pubkey = object.pubkey ?? new Uint8Array(0); + return message; + }, +}; + +messageTypeRegistry.set(MsgCreateCertificate.$type, MsgCreateCertificate); + +function createBaseMsgCreateCertificateResponse(): MsgCreateCertificateResponse { + return { $type: "akash.cert.v1.MsgCreateCertificateResponse" }; +} + +export const MsgCreateCertificateResponse: MessageFns< + MsgCreateCertificateResponse, + "akash.cert.v1.MsgCreateCertificateResponse" +> = { + $type: "akash.cert.v1.MsgCreateCertificateResponse" as const, + + encode( + _: MsgCreateCertificateResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgCreateCertificateResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCreateCertificateResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCreateCertificateResponse { + return { $type: MsgCreateCertificateResponse.$type }; + }, + + toJSON(_: MsgCreateCertificateResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgCreateCertificateResponse { + return MsgCreateCertificateResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgCreateCertificateResponse { + const message = createBaseMsgCreateCertificateResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgCreateCertificateResponse.$type, + MsgCreateCertificateResponse, +); + +function createBaseMsgRevokeCertificate(): MsgRevokeCertificate { + return { $type: "akash.cert.v1.MsgRevokeCertificate", id: undefined }; +} + +export const MsgRevokeCertificate: MessageFns< + MsgRevokeCertificate, + "akash.cert.v1.MsgRevokeCertificate" +> = { + $type: "akash.cert.v1.MsgRevokeCertificate" as const, + + encode( + message: MsgRevokeCertificate, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + ID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgRevokeCertificate { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgRevokeCertificate(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = ID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgRevokeCertificate { + return { + $type: MsgRevokeCertificate.$type, + id: isSet(object.id) ? 
ID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: MsgRevokeCertificate): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = ID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): MsgRevokeCertificate { + return MsgRevokeCertificate.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgRevokeCertificate { + const message = createBaseMsgRevokeCertificate(); + message.id = + object.id !== undefined && object.id !== null + ? ID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgRevokeCertificate.$type, MsgRevokeCertificate); + +function createBaseMsgRevokeCertificateResponse(): MsgRevokeCertificateResponse { + return { $type: "akash.cert.v1.MsgRevokeCertificateResponse" }; +} + +export const MsgRevokeCertificateResponse: MessageFns< + MsgRevokeCertificateResponse, + "akash.cert.v1.MsgRevokeCertificateResponse" +> = { + $type: "akash.cert.v1.MsgRevokeCertificateResponse" as const, + + encode( + _: MsgRevokeCertificateResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgRevokeCertificateResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgRevokeCertificateResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgRevokeCertificateResponse { + return { $type: MsgRevokeCertificateResponse.$type }; + }, + + toJSON(_: MsgRevokeCertificateResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgRevokeCertificateResponse { + return MsgRevokeCertificateResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgRevokeCertificateResponse { + const message = createBaseMsgRevokeCertificateResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgRevokeCertificateResponse.$type, + MsgRevokeCertificateResponse, +); + +function bytesFromBase64(b64: string): Uint8Array { + if ((globalThis as any).Buffer) { + return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); + } else { + const bin = globalThis.atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; + } +} + +function base64FromBytes(arr: Uint8Array): string { + if ((globalThis as any).Buffer) { + return globalThis.Buffer.from(arr).toString("base64"); + } else { + const bin: string[] = []; + arr.forEach((byte) => { + bin.push(globalThis.String.fromCharCode(byte)); + }); + return globalThis.btoa(bin.join("")); + } +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/cert/v1/query.ts b/ts/src/generated/akash/cert/v1/query.ts new file mode 100644 index 00000000..be2289ca --- /dev/null +++ b/ts/src/generated/akash/cert/v1/query.ts @@ -0,0 +1,428 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/cert/v1/query.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { + PageRequest, + PageResponse, +} from "../../../cosmos/base/query/v1beta1/pagination"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Certificate } from "./cert"; +import { CertificateFilter } from "./filters"; + +/** CertificateResponse contains a single X509 certificate and its serial number */ +export interface CertificateResponse { + $type: "akash.cert.v1.CertificateResponse"; + certificate: Certificate | undefined; + serial: string; +} + +/** QueryDeploymentsRequest is request type for the Query/Deployments RPC method */ +export interface QueryCertificatesRequest { + $type: "akash.cert.v1.QueryCertificatesRequest"; + filter: CertificateFilter | undefined; + pagination: PageRequest | undefined; +} + +/** QueryCertificatesResponse is response type for the Query/Certificates RPC method */ +export interface QueryCertificatesResponse { + $type: "akash.cert.v1.QueryCertificatesResponse"; + certificates: CertificateResponse[]; + pagination: PageResponse | undefined; +} + +function createBaseCertificateResponse(): CertificateResponse { + return { + $type: "akash.cert.v1.CertificateResponse", + certificate: undefined, + serial: "", + }; +} + +export const CertificateResponse: MessageFns< + CertificateResponse, + "akash.cert.v1.CertificateResponse" +> = { + $type: "akash.cert.v1.CertificateResponse" as const, + + encode( + message: CertificateResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.certificate !== undefined) { + Certificate.encode(message.certificate, writer.uint32(10).fork()).join(); + } + if (message.serial !== "") { + writer.uint32(18).string(message.serial); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): CertificateResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseCertificateResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.certificate = Certificate.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.serial = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): CertificateResponse { + return { + $type: CertificateResponse.$type, + certificate: isSet(object.certificate) + ? 
Certificate.fromJSON(object.certificate) + : undefined, + serial: isSet(object.serial) ? globalThis.String(object.serial) : "", + }; + }, + + toJSON(message: CertificateResponse): unknown { + const obj: any = {}; + if (message.certificate !== undefined) { + obj.certificate = Certificate.toJSON(message.certificate); + } + if (message.serial !== "") { + obj.serial = message.serial; + } + return obj; + }, + + create(base?: DeepPartial): CertificateResponse { + return CertificateResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): CertificateResponse { + const message = createBaseCertificateResponse(); + message.certificate = + object.certificate !== undefined && object.certificate !== null + ? Certificate.fromPartial(object.certificate) + : undefined; + message.serial = object.serial ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(CertificateResponse.$type, CertificateResponse); + +function createBaseQueryCertificatesRequest(): QueryCertificatesRequest { + return { + $type: "akash.cert.v1.QueryCertificatesRequest", + filter: undefined, + pagination: undefined, + }; +} + +export const QueryCertificatesRequest: MessageFns< + QueryCertificatesRequest, + "akash.cert.v1.QueryCertificatesRequest" +> = { + $type: "akash.cert.v1.QueryCertificatesRequest" as const, + + encode( + message: QueryCertificatesRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.filter !== undefined) { + CertificateFilter.encode(message.filter, writer.uint32(10).fork()).join(); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryCertificatesRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryCertificatesRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.filter = CertificateFilter.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryCertificatesRequest { + return { + $type: QueryCertificatesRequest.$type, + filter: isSet(object.filter) + ? CertificateFilter.fromJSON(object.filter) + : undefined, + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryCertificatesRequest): unknown { + const obj: any = {}; + if (message.filter !== undefined) { + obj.filter = CertificateFilter.toJSON(message.filter); + } + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create( + base?: DeepPartial, + ): QueryCertificatesRequest { + return QueryCertificatesRequest.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryCertificatesRequest { + const message = createBaseQueryCertificatesRequest(); + message.filter = + object.filter !== undefined && object.filter !== null + ? CertificateFilter.fromPartial(object.filter) + : undefined; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? 
PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + QueryCertificatesRequest.$type, + QueryCertificatesRequest, +); + +function createBaseQueryCertificatesResponse(): QueryCertificatesResponse { + return { + $type: "akash.cert.v1.QueryCertificatesResponse", + certificates: [], + pagination: undefined, + }; +} + +export const QueryCertificatesResponse: MessageFns< + QueryCertificatesResponse, + "akash.cert.v1.QueryCertificatesResponse" +> = { + $type: "akash.cert.v1.QueryCertificatesResponse" as const, + + encode( + message: QueryCertificatesResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.certificates) { + CertificateResponse.encode(v!, writer.uint32(10).fork()).join(); + } + if (message.pagination !== undefined) { + PageResponse.encode(message.pagination, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryCertificatesResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryCertificatesResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.certificates.push( + CertificateResponse.decode(reader, reader.uint32()), + ); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageResponse.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryCertificatesResponse { + return { + $type: QueryCertificatesResponse.$type, + certificates: globalThis.Array.isArray(object?.certificates) + ? object.certificates.map((e: any) => CertificateResponse.fromJSON(e)) + : [], + pagination: isSet(object.pagination) + ? PageResponse.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryCertificatesResponse): unknown { + const obj: any = {}; + if (message.certificates?.length) { + obj.certificates = message.certificates.map((e) => + CertificateResponse.toJSON(e), + ); + } + if (message.pagination !== undefined) { + obj.pagination = PageResponse.toJSON(message.pagination); + } + return obj; + }, + + create( + base?: DeepPartial, + ): QueryCertificatesResponse { + return QueryCertificatesResponse.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryCertificatesResponse { + const message = createBaseQueryCertificatesResponse(); + message.certificates = + object.certificates?.map((e) => CertificateResponse.fromPartial(e)) || []; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? 
PageResponse.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + QueryCertificatesResponse.$type, + QueryCertificatesResponse, +); + +/** Query defines the gRPC querier service */ +export interface Query { + /** Certificates queries certificates */ + Certificates( + request: QueryCertificatesRequest, + ): Promise; +} + +export const QueryServiceName = "akash.cert.v1.Query"; +export class QueryClientImpl implements Query { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || QueryServiceName; + this.rpc = rpc; + this.Certificates = this.Certificates.bind(this); + } + Certificates( + request: QueryCertificatesRequest, + ): Promise { + const data = QueryCertificatesRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Certificates", data); + return promise.then((data) => + QueryCertificatesResponse.decode(new BinaryReader(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/cert/v1/service.grpc-js.ts b/ts/src/generated/akash/cert/v1/service.grpc-js.ts new file mode 100644 index 00000000..7bc62e03 --- /dev/null +++ b/ts/src/generated/akash/cert/v1/service.grpc-js.ts @@ -0,0 +1,139 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/cert/v1/service.proto + +/* eslint-disable */ +import { + ChannelCredentials, + Client, + makeGenericClientConstructor, + Metadata, +} from "@grpc/grpc-js"; +import type { + CallOptions, + ClientOptions, + ClientUnaryCall, + handleUnaryCall, + ServiceError, + UntypedServiceImplementation, +} from "@grpc/grpc-js"; +import { + MsgCreateCertificate, + MsgCreateCertificateResponse, + MsgRevokeCertificate, + MsgRevokeCertificateResponse, +} from "./msg"; + +export const protobufPackage = "akash.cert.v1"; + +/** Msg defines the provider Msg service */ +export type MsgService = typeof MsgService; +export const MsgService = { + /** CreateCertificate defines a method to create new certificate given proper inputs. 
*/ + createCertificate: { + path: "/akash.cert.v1.Msg/CreateCertificate", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgCreateCertificate) => + Buffer.from(MsgCreateCertificate.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgCreateCertificate.decode(value), + responseSerialize: (value: MsgCreateCertificateResponse) => + Buffer.from(MsgCreateCertificateResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgCreateCertificateResponse.decode(value), + }, + /** RevokeCertificate defines a method to revoke the certificate */ + revokeCertificate: { + path: "/akash.cert.v1.Msg/RevokeCertificate", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgRevokeCertificate) => + Buffer.from(MsgRevokeCertificate.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgRevokeCertificate.decode(value), + responseSerialize: (value: MsgRevokeCertificateResponse) => + Buffer.from(MsgRevokeCertificateResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgRevokeCertificateResponse.decode(value), + }, +} as const; + +export interface MsgServer extends UntypedServiceImplementation { + /** CreateCertificate defines a method to create new certificate given proper inputs. */ + createCertificate: handleUnaryCall< + MsgCreateCertificate, + MsgCreateCertificateResponse + >; + /** RevokeCertificate defines a method to revoke the certificate */ + revokeCertificate: handleUnaryCall< + MsgRevokeCertificate, + MsgRevokeCertificateResponse + >; +} + +export interface MsgClient extends Client { + /** CreateCertificate defines a method to create new certificate given proper inputs. */ + createCertificate( + request: MsgCreateCertificate, + callback: ( + error: ServiceError | null, + response: MsgCreateCertificateResponse, + ) => void, + ): ClientUnaryCall; + createCertificate( + request: MsgCreateCertificate, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgCreateCertificateResponse, + ) => void, + ): ClientUnaryCall; + createCertificate( + request: MsgCreateCertificate, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgCreateCertificateResponse, + ) => void, + ): ClientUnaryCall; + /** RevokeCertificate defines a method to revoke the certificate */ + revokeCertificate( + request: MsgRevokeCertificate, + callback: ( + error: ServiceError | null, + response: MsgRevokeCertificateResponse, + ) => void, + ): ClientUnaryCall; + revokeCertificate( + request: MsgRevokeCertificate, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgRevokeCertificateResponse, + ) => void, + ): ClientUnaryCall; + revokeCertificate( + request: MsgRevokeCertificate, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgRevokeCertificateResponse, + ) => void, + ): ClientUnaryCall; +} + +export const MsgClient = makeGenericClientConstructor( + MsgService, + "akash.cert.v1.Msg", +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial, + ): MsgClient; + service: typeof MsgService; + serviceName: string; +}; diff --git a/ts/src/generated/akash/cert/v1/service.ts b/ts/src/generated/akash/cert/v1/service.ts new file mode 100644 index 00000000..5ac3865d --- /dev/null +++ b/ts/src/generated/akash/cert/v1/service.ts @@ -0,0 +1,65 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/cert/v1/service.proto + +/* eslint-disable */ +import { BinaryReader } from "@bufbuild/protobuf/wire"; +import { + MsgCreateCertificate, + MsgCreateCertificateResponse, + MsgRevokeCertificate, + MsgRevokeCertificateResponse, +} from "./msg"; + +/** Msg defines the provider Msg service */ +export interface Msg { + /** CreateCertificate defines a method to create new certificate given proper inputs. */ + CreateCertificate( + request: MsgCreateCertificate, + ): Promise; + /** RevokeCertificate defines a method to revoke the certificate */ + RevokeCertificate( + request: MsgRevokeCertificate, + ): Promise; +} + +export const MsgServiceName = "akash.cert.v1.Msg"; +export class MsgClientImpl implements Msg { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || MsgServiceName; + this.rpc = rpc; + this.CreateCertificate = this.CreateCertificate.bind(this); + this.RevokeCertificate = this.RevokeCertificate.bind(this); + } + CreateCertificate( + request: MsgCreateCertificate, + ): Promise { + const data = MsgCreateCertificate.encode(request).finish(); + const promise = this.rpc.request(this.service, "CreateCertificate", data); + return promise.then((data) => + MsgCreateCertificateResponse.decode(new BinaryReader(data)), + ); + } + + RevokeCertificate( + request: MsgRevokeCertificate, + ): Promise { + const data = MsgRevokeCertificate.encode(request).finish(); + const promise = this.rpc.request(this.service, "RevokeCertificate", data); + return promise.then((data) => + MsgRevokeCertificateResponse.decode(new BinaryReader(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} diff --git a/ts/src/generated/akash/cert/v1beta2/cert.ts b/ts/src/generated/akash/cert/v1beta2/cert.ts deleted file mode 100644 index f278489a..00000000 --- a/ts/src/generated/akash/cert/v1beta2/cert.ts +++ /dev/null @@ -1,799 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** CertificateID stores owner and sequence number */ -export interface CertificateID { - $type: "akash.cert.v1beta2.CertificateID"; - owner: string; - serial: string; -} - -/** Certificate stores state, certificate and it's public key */ -export interface Certificate { - $type: "akash.cert.v1beta2.Certificate"; - state: Certificate_State; - cert: Uint8Array; - pubkey: Uint8Array; -} - -/** State is an enum which refers to state of deployment */ -export enum Certificate_State { - /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ - invalid = 0, - /** valid - CertificateValid denotes state for deployment active */ - valid = 1, - /** revoked - CertificateRevoked denotes state for deployment closed */ - revoked = 2, - UNRECOGNIZED = -1, -} - -export function certificate_StateFromJSON(object: any): Certificate_State { - switch (object) { - case 0: - case "invalid": - return Certificate_State.invalid; - case 1: - case "valid": - return Certificate_State.valid; - case 2: - case "revoked": - return Certificate_State.revoked; - case -1: - case "UNRECOGNIZED": - default: - return Certificate_State.UNRECOGNIZED; - } -} - -export function certificate_StateToJSON(object: Certificate_State): string { - switch (object) { - case Certificate_State.invalid: - return "invalid"; - case Certificate_State.valid: - return "valid"; - case Certificate_State.revoked: - return "revoked"; - case Certificate_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** CertificateFilter defines filters used to filter certificates */ -export interface CertificateFilter { - $type: "akash.cert.v1beta2.CertificateFilter"; - owner: string; - serial: string; - state: string; -} - -/** MsgCreateCertificate defines an SDK message for creating certificate */ -export interface MsgCreateCertificate { - $type: "akash.cert.v1beta2.MsgCreateCertificate"; - owner: string; - cert: Uint8Array; - pubkey: Uint8Array; -} - -/** MsgCreateCertificateResponse defines the Msg/CreateCertificate response type. */ -export interface MsgCreateCertificateResponse { - $type: "akash.cert.v1beta2.MsgCreateCertificateResponse"; -} - -/** MsgRevokeCertificate defines an SDK message for revoking certificate */ -export interface MsgRevokeCertificate { - $type: "akash.cert.v1beta2.MsgRevokeCertificate"; - id: CertificateID | undefined; -} - -/** MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type. */ -export interface MsgRevokeCertificateResponse { - $type: "akash.cert.v1beta2.MsgRevokeCertificateResponse"; -} - -function createBaseCertificateID(): CertificateID { - return { $type: "akash.cert.v1beta2.CertificateID", owner: "", serial: "" }; -} - -export const CertificateID = { - $type: "akash.cert.v1beta2.CertificateID" as const, - - encode( - message: CertificateID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.serial !== "") { - writer.uint32(18).string(message.serial); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CertificateID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCertificateID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.serial = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CertificateID { - return { - $type: CertificateID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - serial: isSet(object.serial) ? 
globalThis.String(object.serial) : "", - }; - }, - - toJSON(message: CertificateID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.serial !== "") { - obj.serial = message.serial; - } - return obj; - }, - - create(base?: DeepPartial): CertificateID { - return CertificateID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): CertificateID { - const message = createBaseCertificateID(); - message.owner = object.owner ?? ""; - message.serial = object.serial ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(CertificateID.$type, CertificateID); - -function createBaseCertificate(): Certificate { - return { - $type: "akash.cert.v1beta2.Certificate", - state: 0, - cert: new Uint8Array(0), - pubkey: new Uint8Array(0), - }; -} - -export const Certificate = { - $type: "akash.cert.v1beta2.Certificate" as const, - - encode( - message: Certificate, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.cert.length !== 0) { - writer.uint32(26).bytes(message.cert); - } - if (message.pubkey.length !== 0) { - writer.uint32(34).bytes(message.pubkey); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Certificate { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCertificate(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.cert = reader.bytes(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.pubkey = reader.bytes(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Certificate { - return { - $type: Certificate.$type, - state: isSet(object.state) ? certificate_StateFromJSON(object.state) : 0, - cert: isSet(object.cert) - ? bytesFromBase64(object.cert) - : new Uint8Array(0), - pubkey: isSet(object.pubkey) - ? bytesFromBase64(object.pubkey) - : new Uint8Array(0), - }; - }, - - toJSON(message: Certificate): unknown { - const obj: any = {}; - if (message.state !== 0) { - obj.state = certificate_StateToJSON(message.state); - } - if (message.cert.length !== 0) { - obj.cert = base64FromBytes(message.cert); - } - if (message.pubkey.length !== 0) { - obj.pubkey = base64FromBytes(message.pubkey); - } - return obj; - }, - - create(base?: DeepPartial): Certificate { - return Certificate.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Certificate { - const message = createBaseCertificate(); - message.state = object.state ?? 0; - message.cert = object.cert ?? new Uint8Array(0); - message.pubkey = object.pubkey ?? 
new Uint8Array(0); - return message; - }, -}; - -messageTypeRegistry.set(Certificate.$type, Certificate); - -function createBaseCertificateFilter(): CertificateFilter { - return { - $type: "akash.cert.v1beta2.CertificateFilter", - owner: "", - serial: "", - state: "", - }; -} - -export const CertificateFilter = { - $type: "akash.cert.v1beta2.CertificateFilter" as const, - - encode( - message: CertificateFilter, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.serial !== "") { - writer.uint32(18).string(message.serial); - } - if (message.state !== "") { - writer.uint32(26).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CertificateFilter { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCertificateFilter(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.serial = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.state = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CertificateFilter { - return { - $type: CertificateFilter.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - serial: isSet(object.serial) ? globalThis.String(object.serial) : "", - state: isSet(object.state) ? globalThis.String(object.state) : "", - }; - }, - - toJSON(message: CertificateFilter): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.serial !== "") { - obj.serial = message.serial; - } - if (message.state !== "") { - obj.state = message.state; - } - return obj; - }, - - create(base?: DeepPartial): CertificateFilter { - return CertificateFilter.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): CertificateFilter { - const message = createBaseCertificateFilter(); - message.owner = object.owner ?? ""; - message.serial = object.serial ?? ""; - message.state = object.state ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(CertificateFilter.$type, CertificateFilter); - -function createBaseMsgCreateCertificate(): MsgCreateCertificate { - return { - $type: "akash.cert.v1beta2.MsgCreateCertificate", - owner: "", - cert: new Uint8Array(0), - pubkey: new Uint8Array(0), - }; -} - -export const MsgCreateCertificate = { - $type: "akash.cert.v1beta2.MsgCreateCertificate" as const, - - encode( - message: MsgCreateCertificate, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.cert.length !== 0) { - writer.uint32(18).bytes(message.cert); - } - if (message.pubkey.length !== 0) { - writer.uint32(26).bytes(message.pubkey); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateCertificate { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCreateCertificate(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.cert = reader.bytes(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.pubkey = reader.bytes(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCreateCertificate { - return { - $type: MsgCreateCertificate.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - cert: isSet(object.cert) - ? bytesFromBase64(object.cert) - : new Uint8Array(0), - pubkey: isSet(object.pubkey) - ? bytesFromBase64(object.pubkey) - : new Uint8Array(0), - }; - }, - - toJSON(message: MsgCreateCertificate): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.cert.length !== 0) { - obj.cert = base64FromBytes(message.cert); - } - if (message.pubkey.length !== 0) { - obj.pubkey = base64FromBytes(message.pubkey); - } - return obj; - }, - - create(base?: DeepPartial): MsgCreateCertificate { - return MsgCreateCertificate.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCreateCertificate { - const message = createBaseMsgCreateCertificate(); - message.owner = object.owner ?? ""; - message.cert = object.cert ?? new Uint8Array(0); - message.pubkey = object.pubkey ?? new Uint8Array(0); - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateCertificate.$type, MsgCreateCertificate); - -function createBaseMsgCreateCertificateResponse(): MsgCreateCertificateResponse { - return { $type: "akash.cert.v1beta2.MsgCreateCertificateResponse" }; -} - -export const MsgCreateCertificateResponse = { - $type: "akash.cert.v1beta2.MsgCreateCertificateResponse" as const, - - encode( - _: MsgCreateCertificateResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateCertificateResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateCertificateResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCreateCertificateResponse { - return { $type: MsgCreateCertificateResponse.$type }; - }, - - toJSON(_: MsgCreateCertificateResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgCreateCertificateResponse { - return MsgCreateCertificateResponse.fromPartial(base ?? 
{}); - }, - fromPartial( - _: DeepPartial, - ): MsgCreateCertificateResponse { - const message = createBaseMsgCreateCertificateResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgCreateCertificateResponse.$type, - MsgCreateCertificateResponse, -); - -function createBaseMsgRevokeCertificate(): MsgRevokeCertificate { - return { $type: "akash.cert.v1beta2.MsgRevokeCertificate", id: undefined }; -} - -export const MsgRevokeCertificate = { - $type: "akash.cert.v1beta2.MsgRevokeCertificate" as const, - - encode( - message: MsgRevokeCertificate, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - CertificateID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgRevokeCertificate { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgRevokeCertificate(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = CertificateID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgRevokeCertificate { - return { - $type: MsgRevokeCertificate.$type, - id: isSet(object.id) ? CertificateID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgRevokeCertificate): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = CertificateID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): MsgRevokeCertificate { - return MsgRevokeCertificate.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgRevokeCertificate { - const message = createBaseMsgRevokeCertificate(); - message.id = - object.id !== undefined && object.id !== null - ? CertificateID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgRevokeCertificate.$type, MsgRevokeCertificate); - -function createBaseMsgRevokeCertificateResponse(): MsgRevokeCertificateResponse { - return { $type: "akash.cert.v1beta2.MsgRevokeCertificateResponse" }; -} - -export const MsgRevokeCertificateResponse = { - $type: "akash.cert.v1beta2.MsgRevokeCertificateResponse" as const, - - encode( - _: MsgRevokeCertificateResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgRevokeCertificateResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgRevokeCertificateResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgRevokeCertificateResponse { - return { $type: MsgRevokeCertificateResponse.$type }; - }, - - toJSON(_: MsgRevokeCertificateResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgRevokeCertificateResponse { - return MsgRevokeCertificateResponse.fromPartial(base ?? 
{}); - }, - fromPartial( - _: DeepPartial, - ): MsgRevokeCertificateResponse { - const message = createBaseMsgRevokeCertificateResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgRevokeCertificateResponse.$type, - MsgRevokeCertificateResponse, -); - -/** Msg defines the provider Msg service */ -export interface Msg { - /** CreateCertificate defines a method to create new certificate given proper inputs. */ - CreateCertificate( - request: MsgCreateCertificate, - ): Promise; - /** RevokeCertificate defines a method to revoke the certificate */ - RevokeCertificate( - request: MsgRevokeCertificate, - ): Promise; -} - -export const MsgServiceName = "akash.cert.v1beta2.Msg"; -export class MsgClientImpl implements Msg { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || MsgServiceName; - this.rpc = rpc; - this.CreateCertificate = this.CreateCertificate.bind(this); - this.RevokeCertificate = this.RevokeCertificate.bind(this); - } - CreateCertificate( - request: MsgCreateCertificate, - ): Promise { - const data = MsgCreateCertificate.encode(request).finish(); - const promise = this.rpc.request(this.service, "CreateCertificate", data); - return promise.then((data) => - MsgCreateCertificateResponse.decode(_m0.Reader.create(data)), - ); - } - - RevokeCertificate( - request: MsgRevokeCertificate, - ): Promise { - const data = MsgRevokeCertificate.encode(request).finish(); - const promise = this.rpc.request(this.service, "RevokeCertificate", data); - return promise.then((data) => - MsgRevokeCertificateResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -function bytesFromBase64(b64: string): Uint8Array { - if ((globalThis as any).Buffer) { - return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); - } else { - const bin = globalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if ((globalThis as any).Buffer) { - return globalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(globalThis.String.fromCharCode(byte)); - }); - return globalThis.btoa(bin.join("")); - } -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/cert/v1beta2/genesis.ts b/ts/src/generated/akash/cert/v1beta2/genesis.ts deleted file mode 100644 index f2a900a0..00000000 --- a/ts/src/generated/akash/cert/v1beta2/genesis.ts +++ /dev/null @@ -1,218 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Certificate } from "./cert"; - -/** GenesisCertificate defines certificate entry at genesis */ -export interface GenesisCertificate { - $type: "akash.cert.v1beta2.GenesisCertificate"; - owner: string; - certificate: Certificate | undefined; -} - -/** GenesisState defines the basic genesis state used by cert module */ -export interface GenesisState { - $type: "akash.cert.v1beta2.GenesisState"; - certificates: GenesisCertificate[]; -} - -function createBaseGenesisCertificate(): GenesisCertificate { - return { - $type: "akash.cert.v1beta2.GenesisCertificate", - owner: "", - certificate: undefined, - }; -} - -export const GenesisCertificate = { - $type: "akash.cert.v1beta2.GenesisCertificate" as const, - - encode( - message: GenesisCertificate, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.certificate !== undefined) { - Certificate.encode( - message.certificate, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisCertificate { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisCertificate(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.certificate = Certificate.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisCertificate { - return { - $type: GenesisCertificate.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - certificate: isSet(object.certificate) - ? Certificate.fromJSON(object.certificate) - : undefined, - }; - }, - - toJSON(message: GenesisCertificate): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.certificate !== undefined) { - obj.certificate = Certificate.toJSON(message.certificate); - } - return obj; - }, - - create(base?: DeepPartial): GenesisCertificate { - return GenesisCertificate.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisCertificate { - const message = createBaseGenesisCertificate(); - message.owner = object.owner ?? ""; - message.certificate = - object.certificate !== undefined && object.certificate !== null - ? 
Certificate.fromPartial(object.certificate) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(GenesisCertificate.$type, GenesisCertificate); - -function createBaseGenesisState(): GenesisState { - return { $type: "akash.cert.v1beta2.GenesisState", certificates: [] }; -} - -export const GenesisState = { - $type: "akash.cert.v1beta2.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.certificates) { - GenesisCertificate.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.certificates.push( - GenesisCertificate.decode(reader, reader.uint32()), - ); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - certificates: globalThis.Array.isArray(object?.certificates) - ? object.certificates.map((e: any) => GenesisCertificate.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.certificates?.length) { - obj.certificates = message.certificates.map((e) => - GenesisCertificate.toJSON(e), - ); - } - return obj; - }, - - create(base?: DeepPartial): GenesisState { - return GenesisState.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisState { - const message = createBaseGenesisState(); - message.certificates = - object.certificates?.map((e) => GenesisCertificate.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/cert/v1beta2/query.ts b/ts/src/generated/akash/cert/v1beta2/query.ts deleted file mode 100644 index 7dab2069..00000000 --- a/ts/src/generated/akash/cert/v1beta2/query.ts +++ /dev/null @@ -1,413 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Certificate, CertificateFilter } from "./cert"; - -/** CertificateResponse contains a single X509 certificate and its serial number */ -export interface CertificateResponse { - $type: "akash.cert.v1beta2.CertificateResponse"; - certificate: Certificate | undefined; - serial: string; -} - -/** QueryDeploymentsRequest is request type for the Query/Deployments RPC method */ -export interface QueryCertificatesRequest { - $type: "akash.cert.v1beta2.QueryCertificatesRequest"; - filter: CertificateFilter | undefined; - pagination: PageRequest | undefined; -} - -/** QueryCertificatesResponse is response type for the Query/Certificates RPC method */ -export interface QueryCertificatesResponse { - $type: "akash.cert.v1beta2.QueryCertificatesResponse"; - certificates: CertificateResponse[]; - pagination: PageResponse | undefined; -} - -function createBaseCertificateResponse(): CertificateResponse { - return { - $type: "akash.cert.v1beta2.CertificateResponse", - certificate: undefined, - serial: "", - }; -} - -export const CertificateResponse = { - $type: "akash.cert.v1beta2.CertificateResponse" as const, - - encode( - message: CertificateResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.certificate !== undefined) { - Certificate.encode( - message.certificate, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.serial !== "") { - writer.uint32(18).string(message.serial); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CertificateResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCertificateResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.certificate = Certificate.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.serial = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CertificateResponse { - return { - $type: CertificateResponse.$type, - certificate: isSet(object.certificate) - ? Certificate.fromJSON(object.certificate) - : undefined, - serial: isSet(object.serial) ? globalThis.String(object.serial) : "", - }; - }, - - toJSON(message: CertificateResponse): unknown { - const obj: any = {}; - if (message.certificate !== undefined) { - obj.certificate = Certificate.toJSON(message.certificate); - } - if (message.serial !== "") { - obj.serial = message.serial; - } - return obj; - }, - - create(base?: DeepPartial): CertificateResponse { - return CertificateResponse.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): CertificateResponse { - const message = createBaseCertificateResponse(); - message.certificate = - object.certificate !== undefined && object.certificate !== null - ? Certificate.fromPartial(object.certificate) - : undefined; - message.serial = object.serial ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(CertificateResponse.$type, CertificateResponse); - -function createBaseQueryCertificatesRequest(): QueryCertificatesRequest { - return { - $type: "akash.cert.v1beta2.QueryCertificatesRequest", - filter: undefined, - pagination: undefined, - }; -} - -export const QueryCertificatesRequest = { - $type: "akash.cert.v1beta2.QueryCertificatesRequest" as const, - - encode( - message: QueryCertificatesRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.filter !== undefined) { - CertificateFilter.encode( - message.filter, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryCertificatesRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryCertificatesRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.filter = CertificateFilter.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryCertificatesRequest { - return { - $type: QueryCertificatesRequest.$type, - filter: isSet(object.filter) - ? CertificateFilter.fromJSON(object.filter) - : undefined, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryCertificatesRequest): unknown { - const obj: any = {}; - if (message.filter !== undefined) { - obj.filter = CertificateFilter.toJSON(message.filter); - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create( - base?: DeepPartial, - ): QueryCertificatesRequest { - return QueryCertificatesRequest.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryCertificatesRequest { - const message = createBaseQueryCertificatesRequest(); - message.filter = - object.filter !== undefined && object.filter !== null - ? CertificateFilter.fromPartial(object.filter) - : undefined; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? 
PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - QueryCertificatesRequest.$type, - QueryCertificatesRequest, -); - -function createBaseQueryCertificatesResponse(): QueryCertificatesResponse { - return { - $type: "akash.cert.v1beta2.QueryCertificatesResponse", - certificates: [], - pagination: undefined, - }; -} - -export const QueryCertificatesResponse = { - $type: "akash.cert.v1beta2.QueryCertificatesResponse" as const, - - encode( - message: QueryCertificatesResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.certificates) { - CertificateResponse.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryCertificatesResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryCertificatesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.certificates.push( - CertificateResponse.decode(reader, reader.uint32()), - ); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryCertificatesResponse { - return { - $type: QueryCertificatesResponse.$type, - certificates: globalThis.Array.isArray(object?.certificates) - ? object.certificates.map((e: any) => CertificateResponse.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryCertificatesResponse): unknown { - const obj: any = {}; - if (message.certificates?.length) { - obj.certificates = message.certificates.map((e) => - CertificateResponse.toJSON(e), - ); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create( - base?: DeepPartial, - ): QueryCertificatesResponse { - return QueryCertificatesResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryCertificatesResponse { - const message = createBaseQueryCertificatesResponse(); - message.certificates = - object.certificates?.map((e) => CertificateResponse.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? 
PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - QueryCertificatesResponse.$type, - QueryCertificatesResponse, -); - -/** Query defines the gRPC querier service */ -export interface Query { - /** Certificates queries certificates */ - Certificates( - request: QueryCertificatesRequest, - ): Promise; -} - -export const QueryServiceName = "akash.cert.v1beta2.Query"; -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || QueryServiceName; - this.rpc = rpc; - this.Certificates = this.Certificates.bind(this); - } - Certificates( - request: QueryCertificatesRequest, - ): Promise { - const data = QueryCertificatesRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Certificates", data); - return promise.then((data) => - QueryCertificatesResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/cert/v1beta3/cert.ts b/ts/src/generated/akash/cert/v1beta3/cert.ts deleted file mode 100644 index 8dbbae57..00000000 --- a/ts/src/generated/akash/cert/v1beta3/cert.ts +++ /dev/null @@ -1,799 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** CertificateID stores owner and sequence number */ -export interface CertificateID { - $type: "akash.cert.v1beta3.CertificateID"; - owner: string; - serial: string; -} - -/** Certificate stores state, certificate and it's public key */ -export interface Certificate { - $type: "akash.cert.v1beta3.Certificate"; - state: Certificate_State; - cert: Uint8Array; - pubkey: Uint8Array; -} - -/** State is an enum which refers to state of deployment */ -export enum Certificate_State { - /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ - invalid = 0, - /** valid - CertificateValid denotes state for deployment active */ - valid = 1, - /** revoked - CertificateRevoked denotes state for deployment closed */ - revoked = 2, - UNRECOGNIZED = -1, -} - -export function certificate_StateFromJSON(object: any): Certificate_State { - switch (object) { - case 0: - case "invalid": - return Certificate_State.invalid; - case 1: - case "valid": - return Certificate_State.valid; - case 2: - case "revoked": - return Certificate_State.revoked; - case -1: - case "UNRECOGNIZED": - default: - return Certificate_State.UNRECOGNIZED; - } -} - -export function certificate_StateToJSON(object: Certificate_State): string { - switch (object) { - case Certificate_State.invalid: - return "invalid"; - case Certificate_State.valid: - return "valid"; - case Certificate_State.revoked: - return "revoked"; - case Certificate_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** CertificateFilter defines filters used to filter certificates */ -export interface CertificateFilter { - $type: "akash.cert.v1beta3.CertificateFilter"; - owner: string; - serial: string; - state: string; -} - -/** MsgCreateCertificate defines an SDK message for creating certificate */ -export interface MsgCreateCertificate { - $type: "akash.cert.v1beta3.MsgCreateCertificate"; - owner: string; - cert: Uint8Array; - pubkey: Uint8Array; -} - -/** MsgCreateCertificateResponse defines the Msg/CreateCertificate response type. */ -export interface MsgCreateCertificateResponse { - $type: "akash.cert.v1beta3.MsgCreateCertificateResponse"; -} - -/** MsgRevokeCertificate defines an SDK message for revoking certificate */ -export interface MsgRevokeCertificate { - $type: "akash.cert.v1beta3.MsgRevokeCertificate"; - id: CertificateID | undefined; -} - -/** MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type. */ -export interface MsgRevokeCertificateResponse { - $type: "akash.cert.v1beta3.MsgRevokeCertificateResponse"; -} - -function createBaseCertificateID(): CertificateID { - return { $type: "akash.cert.v1beta3.CertificateID", owner: "", serial: "" }; -} - -export const CertificateID = { - $type: "akash.cert.v1beta3.CertificateID" as const, - - encode( - message: CertificateID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.serial !== "") { - writer.uint32(18).string(message.serial); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CertificateID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCertificateID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.serial = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CertificateID { - return { - $type: CertificateID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - serial: isSet(object.serial) ? 
globalThis.String(object.serial) : "", - }; - }, - - toJSON(message: CertificateID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.serial !== "") { - obj.serial = message.serial; - } - return obj; - }, - - create(base?: DeepPartial): CertificateID { - return CertificateID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): CertificateID { - const message = createBaseCertificateID(); - message.owner = object.owner ?? ""; - message.serial = object.serial ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(CertificateID.$type, CertificateID); - -function createBaseCertificate(): Certificate { - return { - $type: "akash.cert.v1beta3.Certificate", - state: 0, - cert: new Uint8Array(0), - pubkey: new Uint8Array(0), - }; -} - -export const Certificate = { - $type: "akash.cert.v1beta3.Certificate" as const, - - encode( - message: Certificate, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.cert.length !== 0) { - writer.uint32(26).bytes(message.cert); - } - if (message.pubkey.length !== 0) { - writer.uint32(34).bytes(message.pubkey); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Certificate { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCertificate(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.cert = reader.bytes(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.pubkey = reader.bytes(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Certificate { - return { - $type: Certificate.$type, - state: isSet(object.state) ? certificate_StateFromJSON(object.state) : 0, - cert: isSet(object.cert) - ? bytesFromBase64(object.cert) - : new Uint8Array(0), - pubkey: isSet(object.pubkey) - ? bytesFromBase64(object.pubkey) - : new Uint8Array(0), - }; - }, - - toJSON(message: Certificate): unknown { - const obj: any = {}; - if (message.state !== 0) { - obj.state = certificate_StateToJSON(message.state); - } - if (message.cert.length !== 0) { - obj.cert = base64FromBytes(message.cert); - } - if (message.pubkey.length !== 0) { - obj.pubkey = base64FromBytes(message.pubkey); - } - return obj; - }, - - create(base?: DeepPartial): Certificate { - return Certificate.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Certificate { - const message = createBaseCertificate(); - message.state = object.state ?? 0; - message.cert = object.cert ?? new Uint8Array(0); - message.pubkey = object.pubkey ?? 
new Uint8Array(0); - return message; - }, -}; - -messageTypeRegistry.set(Certificate.$type, Certificate); - -function createBaseCertificateFilter(): CertificateFilter { - return { - $type: "akash.cert.v1beta3.CertificateFilter", - owner: "", - serial: "", - state: "", - }; -} - -export const CertificateFilter = { - $type: "akash.cert.v1beta3.CertificateFilter" as const, - - encode( - message: CertificateFilter, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.serial !== "") { - writer.uint32(18).string(message.serial); - } - if (message.state !== "") { - writer.uint32(26).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CertificateFilter { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCertificateFilter(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.serial = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.state = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CertificateFilter { - return { - $type: CertificateFilter.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - serial: isSet(object.serial) ? globalThis.String(object.serial) : "", - state: isSet(object.state) ? globalThis.String(object.state) : "", - }; - }, - - toJSON(message: CertificateFilter): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.serial !== "") { - obj.serial = message.serial; - } - if (message.state !== "") { - obj.state = message.state; - } - return obj; - }, - - create(base?: DeepPartial): CertificateFilter { - return CertificateFilter.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): CertificateFilter { - const message = createBaseCertificateFilter(); - message.owner = object.owner ?? ""; - message.serial = object.serial ?? ""; - message.state = object.state ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(CertificateFilter.$type, CertificateFilter); - -function createBaseMsgCreateCertificate(): MsgCreateCertificate { - return { - $type: "akash.cert.v1beta3.MsgCreateCertificate", - owner: "", - cert: new Uint8Array(0), - pubkey: new Uint8Array(0), - }; -} - -export const MsgCreateCertificate = { - $type: "akash.cert.v1beta3.MsgCreateCertificate" as const, - - encode( - message: MsgCreateCertificate, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.cert.length !== 0) { - writer.uint32(18).bytes(message.cert); - } - if (message.pubkey.length !== 0) { - writer.uint32(26).bytes(message.pubkey); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateCertificate { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCreateCertificate(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.cert = reader.bytes(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.pubkey = reader.bytes(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCreateCertificate { - return { - $type: MsgCreateCertificate.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - cert: isSet(object.cert) - ? bytesFromBase64(object.cert) - : new Uint8Array(0), - pubkey: isSet(object.pubkey) - ? bytesFromBase64(object.pubkey) - : new Uint8Array(0), - }; - }, - - toJSON(message: MsgCreateCertificate): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.cert.length !== 0) { - obj.cert = base64FromBytes(message.cert); - } - if (message.pubkey.length !== 0) { - obj.pubkey = base64FromBytes(message.pubkey); - } - return obj; - }, - - create(base?: DeepPartial): MsgCreateCertificate { - return MsgCreateCertificate.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCreateCertificate { - const message = createBaseMsgCreateCertificate(); - message.owner = object.owner ?? ""; - message.cert = object.cert ?? new Uint8Array(0); - message.pubkey = object.pubkey ?? new Uint8Array(0); - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateCertificate.$type, MsgCreateCertificate); - -function createBaseMsgCreateCertificateResponse(): MsgCreateCertificateResponse { - return { $type: "akash.cert.v1beta3.MsgCreateCertificateResponse" }; -} - -export const MsgCreateCertificateResponse = { - $type: "akash.cert.v1beta3.MsgCreateCertificateResponse" as const, - - encode( - _: MsgCreateCertificateResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateCertificateResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateCertificateResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCreateCertificateResponse { - return { $type: MsgCreateCertificateResponse.$type }; - }, - - toJSON(_: MsgCreateCertificateResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgCreateCertificateResponse { - return MsgCreateCertificateResponse.fromPartial(base ?? 
{}); - }, - fromPartial( - _: DeepPartial, - ): MsgCreateCertificateResponse { - const message = createBaseMsgCreateCertificateResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgCreateCertificateResponse.$type, - MsgCreateCertificateResponse, -); - -function createBaseMsgRevokeCertificate(): MsgRevokeCertificate { - return { $type: "akash.cert.v1beta3.MsgRevokeCertificate", id: undefined }; -} - -export const MsgRevokeCertificate = { - $type: "akash.cert.v1beta3.MsgRevokeCertificate" as const, - - encode( - message: MsgRevokeCertificate, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - CertificateID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgRevokeCertificate { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgRevokeCertificate(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = CertificateID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgRevokeCertificate { - return { - $type: MsgRevokeCertificate.$type, - id: isSet(object.id) ? CertificateID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgRevokeCertificate): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = CertificateID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): MsgRevokeCertificate { - return MsgRevokeCertificate.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgRevokeCertificate { - const message = createBaseMsgRevokeCertificate(); - message.id = - object.id !== undefined && object.id !== null - ? CertificateID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgRevokeCertificate.$type, MsgRevokeCertificate); - -function createBaseMsgRevokeCertificateResponse(): MsgRevokeCertificateResponse { - return { $type: "akash.cert.v1beta3.MsgRevokeCertificateResponse" }; -} - -export const MsgRevokeCertificateResponse = { - $type: "akash.cert.v1beta3.MsgRevokeCertificateResponse" as const, - - encode( - _: MsgRevokeCertificateResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgRevokeCertificateResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgRevokeCertificateResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgRevokeCertificateResponse { - return { $type: MsgRevokeCertificateResponse.$type }; - }, - - toJSON(_: MsgRevokeCertificateResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgRevokeCertificateResponse { - return MsgRevokeCertificateResponse.fromPartial(base ?? 
{}); - }, - fromPartial( - _: DeepPartial, - ): MsgRevokeCertificateResponse { - const message = createBaseMsgRevokeCertificateResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgRevokeCertificateResponse.$type, - MsgRevokeCertificateResponse, -); - -/** Msg defines the provider Msg service */ -export interface Msg { - /** CreateCertificate defines a method to create new certificate given proper inputs. */ - CreateCertificate( - request: MsgCreateCertificate, - ): Promise; - /** RevokeCertificate defines a method to revoke the certificate */ - RevokeCertificate( - request: MsgRevokeCertificate, - ): Promise; -} - -export const MsgServiceName = "akash.cert.v1beta3.Msg"; -export class MsgClientImpl implements Msg { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || MsgServiceName; - this.rpc = rpc; - this.CreateCertificate = this.CreateCertificate.bind(this); - this.RevokeCertificate = this.RevokeCertificate.bind(this); - } - CreateCertificate( - request: MsgCreateCertificate, - ): Promise { - const data = MsgCreateCertificate.encode(request).finish(); - const promise = this.rpc.request(this.service, "CreateCertificate", data); - return promise.then((data) => - MsgCreateCertificateResponse.decode(_m0.Reader.create(data)), - ); - } - - RevokeCertificate( - request: MsgRevokeCertificate, - ): Promise { - const data = MsgRevokeCertificate.encode(request).finish(); - const promise = this.rpc.request(this.service, "RevokeCertificate", data); - return promise.then((data) => - MsgRevokeCertificateResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -function bytesFromBase64(b64: string): Uint8Array { - if ((globalThis as any).Buffer) { - return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); - } else { - const bin = globalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if ((globalThis as any).Buffer) { - return globalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(globalThis.String.fromCharCode(byte)); - }); - return globalThis.btoa(bin.join("")); - } -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/cert/v1beta3/genesis.ts b/ts/src/generated/akash/cert/v1beta3/genesis.ts deleted file mode 100644 index 0688d436..00000000 --- a/ts/src/generated/akash/cert/v1beta3/genesis.ts +++ /dev/null @@ -1,218 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Certificate } from "./cert"; - -/** GenesisCertificate defines certificate entry at genesis */ -export interface GenesisCertificate { - $type: "akash.cert.v1beta3.GenesisCertificate"; - owner: string; - certificate: Certificate | undefined; -} - -/** GenesisState defines the basic genesis state used by cert module */ -export interface GenesisState { - $type: "akash.cert.v1beta3.GenesisState"; - certificates: GenesisCertificate[]; -} - -function createBaseGenesisCertificate(): GenesisCertificate { - return { - $type: "akash.cert.v1beta3.GenesisCertificate", - owner: "", - certificate: undefined, - }; -} - -export const GenesisCertificate = { - $type: "akash.cert.v1beta3.GenesisCertificate" as const, - - encode( - message: GenesisCertificate, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.certificate !== undefined) { - Certificate.encode( - message.certificate, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisCertificate { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisCertificate(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.certificate = Certificate.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisCertificate { - return { - $type: GenesisCertificate.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - certificate: isSet(object.certificate) - ? Certificate.fromJSON(object.certificate) - : undefined, - }; - }, - - toJSON(message: GenesisCertificate): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.certificate !== undefined) { - obj.certificate = Certificate.toJSON(message.certificate); - } - return obj; - }, - - create(base?: DeepPartial): GenesisCertificate { - return GenesisCertificate.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisCertificate { - const message = createBaseGenesisCertificate(); - message.owner = object.owner ?? ""; - message.certificate = - object.certificate !== undefined && object.certificate !== null - ? 
Certificate.fromPartial(object.certificate) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(GenesisCertificate.$type, GenesisCertificate); - -function createBaseGenesisState(): GenesisState { - return { $type: "akash.cert.v1beta3.GenesisState", certificates: [] }; -} - -export const GenesisState = { - $type: "akash.cert.v1beta3.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.certificates) { - GenesisCertificate.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.certificates.push( - GenesisCertificate.decode(reader, reader.uint32()), - ); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - certificates: globalThis.Array.isArray(object?.certificates) - ? object.certificates.map((e: any) => GenesisCertificate.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.certificates?.length) { - obj.certificates = message.certificates.map((e) => - GenesisCertificate.toJSON(e), - ); - } - return obj; - }, - - create(base?: DeepPartial): GenesisState { - return GenesisState.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisState { - const message = createBaseGenesisState(); - message.certificates = - object.certificates?.map((e) => GenesisCertificate.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/cert/v1beta3/query.ts b/ts/src/generated/akash/cert/v1beta3/query.ts deleted file mode 100644 index ecb8eb13..00000000 --- a/ts/src/generated/akash/cert/v1beta3/query.ts +++ /dev/null @@ -1,413 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Certificate, CertificateFilter } from "./cert"; - -/** CertificateResponse contains a single X509 certificate and its serial number */ -export interface CertificateResponse { - $type: "akash.cert.v1beta3.CertificateResponse"; - certificate: Certificate | undefined; - serial: string; -} - -/** QueryDeploymentsRequest is request type for the Query/Deployments RPC method */ -export interface QueryCertificatesRequest { - $type: "akash.cert.v1beta3.QueryCertificatesRequest"; - filter: CertificateFilter | undefined; - pagination: PageRequest | undefined; -} - -/** QueryCertificatesResponse is response type for the Query/Certificates RPC method */ -export interface QueryCertificatesResponse { - $type: "akash.cert.v1beta3.QueryCertificatesResponse"; - certificates: CertificateResponse[]; - pagination: PageResponse | undefined; -} - -function createBaseCertificateResponse(): CertificateResponse { - return { - $type: "akash.cert.v1beta3.CertificateResponse", - certificate: undefined, - serial: "", - }; -} - -export const CertificateResponse = { - $type: "akash.cert.v1beta3.CertificateResponse" as const, - - encode( - message: CertificateResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.certificate !== undefined) { - Certificate.encode( - message.certificate, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.serial !== "") { - writer.uint32(18).string(message.serial); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): CertificateResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseCertificateResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.certificate = Certificate.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.serial = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): CertificateResponse { - return { - $type: CertificateResponse.$type, - certificate: isSet(object.certificate) - ? Certificate.fromJSON(object.certificate) - : undefined, - serial: isSet(object.serial) ? globalThis.String(object.serial) : "", - }; - }, - - toJSON(message: CertificateResponse): unknown { - const obj: any = {}; - if (message.certificate !== undefined) { - obj.certificate = Certificate.toJSON(message.certificate); - } - if (message.serial !== "") { - obj.serial = message.serial; - } - return obj; - }, - - create(base?: DeepPartial): CertificateResponse { - return CertificateResponse.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): CertificateResponse { - const message = createBaseCertificateResponse(); - message.certificate = - object.certificate !== undefined && object.certificate !== null - ? Certificate.fromPartial(object.certificate) - : undefined; - message.serial = object.serial ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(CertificateResponse.$type, CertificateResponse); - -function createBaseQueryCertificatesRequest(): QueryCertificatesRequest { - return { - $type: "akash.cert.v1beta3.QueryCertificatesRequest", - filter: undefined, - pagination: undefined, - }; -} - -export const QueryCertificatesRequest = { - $type: "akash.cert.v1beta3.QueryCertificatesRequest" as const, - - encode( - message: QueryCertificatesRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.filter !== undefined) { - CertificateFilter.encode( - message.filter, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryCertificatesRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryCertificatesRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.filter = CertificateFilter.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryCertificatesRequest { - return { - $type: QueryCertificatesRequest.$type, - filter: isSet(object.filter) - ? CertificateFilter.fromJSON(object.filter) - : undefined, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryCertificatesRequest): unknown { - const obj: any = {}; - if (message.filter !== undefined) { - obj.filter = CertificateFilter.toJSON(message.filter); - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create( - base?: DeepPartial, - ): QueryCertificatesRequest { - return QueryCertificatesRequest.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryCertificatesRequest { - const message = createBaseQueryCertificatesRequest(); - message.filter = - object.filter !== undefined && object.filter !== null - ? CertificateFilter.fromPartial(object.filter) - : undefined; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? 
PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - QueryCertificatesRequest.$type, - QueryCertificatesRequest, -); - -function createBaseQueryCertificatesResponse(): QueryCertificatesResponse { - return { - $type: "akash.cert.v1beta3.QueryCertificatesResponse", - certificates: [], - pagination: undefined, - }; -} - -export const QueryCertificatesResponse = { - $type: "akash.cert.v1beta3.QueryCertificatesResponse" as const, - - encode( - message: QueryCertificatesResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.certificates) { - CertificateResponse.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryCertificatesResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryCertificatesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.certificates.push( - CertificateResponse.decode(reader, reader.uint32()), - ); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryCertificatesResponse { - return { - $type: QueryCertificatesResponse.$type, - certificates: globalThis.Array.isArray(object?.certificates) - ? object.certificates.map((e: any) => CertificateResponse.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryCertificatesResponse): unknown { - const obj: any = {}; - if (message.certificates?.length) { - obj.certificates = message.certificates.map((e) => - CertificateResponse.toJSON(e), - ); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create( - base?: DeepPartial, - ): QueryCertificatesResponse { - return QueryCertificatesResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryCertificatesResponse { - const message = createBaseQueryCertificatesResponse(); - message.certificates = - object.certificates?.map((e) => CertificateResponse.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? 
PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - QueryCertificatesResponse.$type, - QueryCertificatesResponse, -); - -/** Query defines the gRPC querier service */ -export interface Query { - /** Certificates queries certificates */ - Certificates( - request: QueryCertificatesRequest, - ): Promise; -} - -export const QueryServiceName = "akash.cert.v1beta3.Query"; -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || QueryServiceName; - this.rpc = rpc; - this.Certificates = this.Certificates.bind(this); - } - Certificates( - request: QueryCertificatesRequest, - ): Promise { - const data = QueryCertificatesRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Certificates", data); - return promise.then((data) => - QueryCertificatesResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1/authz.ts b/ts/src/generated/akash/deployment/v1/authz.ts new file mode 100644 index 00000000..77474876 --- /dev/null +++ b/ts/src/generated/akash/deployment/v1/authz.ts @@ -0,0 +1,141 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/deployment/v1/authz.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { Coin } from "../../../cosmos/base/v1beta1/coin"; +import { messageTypeRegistry } from "../../../typeRegistry"; + +/** + * DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from + * the granter's account for a deployment. + */ +export interface DepositAuthorization { + $type: "akash.deployment.v1.DepositAuthorization"; + /** + * SpendLimit is the amount the grantee is authorized to spend from the granter's account for + * the purpose of deployment. + */ + spendLimit: Coin | undefined; +} + +function createBaseDepositAuthorization(): DepositAuthorization { + return { + $type: "akash.deployment.v1.DepositAuthorization", + spendLimit: undefined, + }; +} + +export const DepositAuthorization: MessageFns< + DepositAuthorization, + "akash.deployment.v1.DepositAuthorization" +> = { + $type: "akash.deployment.v1.DepositAuthorization" as const, + + encode( + message: DepositAuthorization, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.spendLimit !== undefined) { + Coin.encode(message.spendLimit, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): DepositAuthorization { + const reader = + input instanceof BinaryReader ? 
input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDepositAuthorization(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.spendLimit = Coin.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DepositAuthorization { + return { + $type: DepositAuthorization.$type, + spendLimit: isSet(object.spendLimit) + ? Coin.fromJSON(object.spendLimit) + : undefined, + }; + }, + + toJSON(message: DepositAuthorization): unknown { + const obj: any = {}; + if (message.spendLimit !== undefined) { + obj.spendLimit = Coin.toJSON(message.spendLimit); + } + return obj; + }, + + create(base?: DeepPartial): DepositAuthorization { + return DepositAuthorization.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DepositAuthorization { + const message = createBaseDepositAuthorization(); + message.spendLimit = + object.spendLimit !== undefined && object.spendLimit !== null + ? Coin.fromPartial(object.spendLimit) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(DepositAuthorization.$type, DepositAuthorization); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/deployment/v1/deployment.ts b/ts/src/generated/akash/deployment/v1/deployment.ts new file mode 100644 index 00000000..d9cf5027 --- /dev/null +++ b/ts/src/generated/akash/deployment/v1/deployment.ts @@ -0,0 +1,354 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/deployment/v1/deployment.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; + +/** DeploymentID stores owner and sequence number */ +export interface DeploymentID { + $type: "akash.deployment.v1.DeploymentID"; + owner: string; + dseq: Long; +} + +/** Deployment stores deploymentID, state and checksum details */ +export interface Deployment { + $type: "akash.deployment.v1.Deployment"; + id: DeploymentID | undefined; + state: Deployment_State; + hash: Uint8Array; + createdAt: Long; +} + +/** State is an enum which refers to state of deployment */ +export enum Deployment_State { + /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ + invalid = 0, + /** active - DeploymentActive denotes state for deployment active */ + active = 1, + /** closed - DeploymentClosed denotes state for deployment closed */ + closed = 2, + UNRECOGNIZED = -1, +} + +export function deployment_StateFromJSON(object: any): Deployment_State { + switch (object) { + case 0: + case "invalid": + return Deployment_State.invalid; + case 1: + case "active": + return Deployment_State.active; + case 2: + case "closed": + return Deployment_State.closed; + case -1: + case "UNRECOGNIZED": + default: + return Deployment_State.UNRECOGNIZED; + } +} + +export function deployment_StateToJSON(object: Deployment_State): string { + switch (object) { + case Deployment_State.invalid: + return "invalid"; + case Deployment_State.active: + return "active"; + case Deployment_State.closed: + return "closed"; + case Deployment_State.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +function createBaseDeploymentID(): DeploymentID { + return { + $type: "akash.deployment.v1.DeploymentID", + owner: "", + dseq: Long.UZERO, + }; +} + +export const DeploymentID: MessageFns< + DeploymentID, + "akash.deployment.v1.DeploymentID" +> = { + $type: "akash.deployment.v1.DeploymentID" as const, + + encode( + message: DeploymentID, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq.toString()); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DeploymentID { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDeploymentID(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = Long.fromString(reader.uint64().toString(), true); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DeploymentID { + return { + $type: DeploymentID.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, + }; + }, + + toJSON(message: DeploymentID): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (!message.dseq.equals(Long.UZERO)) { + obj.dseq = (message.dseq || Long.UZERO).toString(); + } + return obj; + }, + + create(base?: DeepPartial): DeploymentID { + return DeploymentID.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DeploymentID { + const message = createBaseDeploymentID(); + message.owner = object.owner ?? ""; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? 
Long.fromValue(object.dseq) + : Long.UZERO; + return message; + }, +}; + +messageTypeRegistry.set(DeploymentID.$type, DeploymentID); + +function createBaseDeployment(): Deployment { + return { + $type: "akash.deployment.v1.Deployment", + id: undefined, + state: 0, + hash: new Uint8Array(0), + createdAt: Long.ZERO, + }; +} + +export const Deployment: MessageFns< + Deployment, + "akash.deployment.v1.Deployment" +> = { + $type: "akash.deployment.v1.Deployment" as const, + + encode( + message: Deployment, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + DeploymentID.encode(message.id, writer.uint32(10).fork()).join(); + } + if (message.state !== 0) { + writer.uint32(16).int32(message.state); + } + if (message.hash.length !== 0) { + writer.uint32(26).bytes(message.hash); + } + if (!message.createdAt.equals(Long.ZERO)) { + writer.uint32(32).int64(message.createdAt.toString()); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Deployment { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDeployment(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = DeploymentID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.state = reader.int32() as any; + continue; + case 3: + if (tag !== 26) { + break; + } + + message.hash = reader.bytes(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.createdAt = Long.fromString(reader.int64().toString()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Deployment { + return { + $type: Deployment.$type, + id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, + state: isSet(object.state) ? deployment_StateFromJSON(object.state) : 0, + hash: isSet(object.hash) + ? bytesFromBase64(object.hash) + : new Uint8Array(0), + createdAt: isSet(object.createdAt) + ? Long.fromValue(object.createdAt) + : Long.ZERO, + }; + }, + + toJSON(message: Deployment): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = DeploymentID.toJSON(message.id); + } + if (message.state !== 0) { + obj.state = deployment_StateToJSON(message.state); + } + if (message.hash.length !== 0) { + obj.hash = base64FromBytes(message.hash); + } + if (!message.createdAt.equals(Long.ZERO)) { + obj.createdAt = (message.createdAt || Long.ZERO).toString(); + } + return obj; + }, + + create(base?: DeepPartial): Deployment { + return Deployment.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Deployment { + const message = createBaseDeployment(); + message.id = + object.id !== undefined && object.id !== null + ? DeploymentID.fromPartial(object.id) + : undefined; + message.state = object.state ?? 0; + message.hash = object.hash ?? new Uint8Array(0); + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? 
Long.fromValue(object.createdAt) + : Long.ZERO; + return message; + }, +}; + +messageTypeRegistry.set(Deployment.$type, Deployment); + +function bytesFromBase64(b64: string): Uint8Array { + if ((globalThis as any).Buffer) { + return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); + } else { + const bin = globalThis.atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; + } +} + +function base64FromBytes(arr: Uint8Array): string { + if ((globalThis as any).Buffer) { + return globalThis.Buffer.from(arr).toString("base64"); + } else { + const bin: string[] = []; + arr.forEach((byte) => { + bin.push(globalThis.String.fromCharCode(byte)); + }); + return globalThis.btoa(bin.join("")); + } +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/deployment/v1/event.ts b/ts/src/generated/akash/deployment/v1/event.ts new file mode 100644 index 00000000..d6acc5cf --- /dev/null +++ b/ts/src/generated/akash/deployment/v1/event.ts @@ -0,0 +1,611 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/deployment/v1/event.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { DeploymentID } from "./deployment"; +import { GroupID } from "./group"; + +/** EventDeploymentCreated event is triggered when deployment is created on chain */ +export interface EventDeploymentCreated { + $type: "akash.deployment.v1.EventDeploymentCreated"; + id: DeploymentID | undefined; + hash: Uint8Array; +} + +/** EventDeploymentUpdated is triggered when deployment is updated on chain */ +export interface EventDeploymentUpdated { + $type: "akash.deployment.v1.EventDeploymentUpdated"; + id: DeploymentID | undefined; + hash: Uint8Array; +} + +/** EventDeploymentClosed is triggered when deployment is closed on chain */ +export interface EventDeploymentClosed { + $type: "akash.deployment.v1.EventDeploymentClosed"; + id: DeploymentID | undefined; +} + +/** EventGroupStarted is triggered when deployment group is started */ +export interface EventGroupStarted { + $type: "akash.deployment.v1.EventGroupStarted"; + id: GroupID | undefined; +} + +/** EventGroupPaused is triggered when deployment group is paused */ +export interface EventGroupPaused { + $type: "akash.deployment.v1.EventGroupPaused"; + id: GroupID | undefined; +} + +/** EventGroupClosed is triggered when deployment group is closed */ +export interface EventGroupClosed { + $type: "akash.deployment.v1.EventGroupClosed"; + id: GroupID | undefined; +} + +function createBaseEventDeploymentCreated(): EventDeploymentCreated { + return { + $type: "akash.deployment.v1.EventDeploymentCreated", + id: undefined, + hash: new Uint8Array(0), + }; +} + +export const EventDeploymentCreated: MessageFns< + EventDeploymentCreated, + "akash.deployment.v1.EventDeploymentCreated" +> = { + $type: "akash.deployment.v1.EventDeploymentCreated" as const, + + encode( + message: EventDeploymentCreated, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + DeploymentID.encode(message.id, writer.uint32(10).fork()).join(); + } + if (message.hash.length !== 0) { + writer.uint32(18).bytes(message.hash); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): EventDeploymentCreated { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseEventDeploymentCreated(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = DeploymentID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.hash = reader.bytes(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventDeploymentCreated { + return { + $type: EventDeploymentCreated.$type, + id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, + hash: isSet(object.hash) + ? 
bytesFromBase64(object.hash) + : new Uint8Array(0), + }; + }, + + toJSON(message: EventDeploymentCreated): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = DeploymentID.toJSON(message.id); + } + if (message.hash.length !== 0) { + obj.hash = base64FromBytes(message.hash); + } + return obj; + }, + + create(base?: DeepPartial): EventDeploymentCreated { + return EventDeploymentCreated.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): EventDeploymentCreated { + const message = createBaseEventDeploymentCreated(); + message.id = + object.id !== undefined && object.id !== null + ? DeploymentID.fromPartial(object.id) + : undefined; + message.hash = object.hash ?? new Uint8Array(0); + return message; + }, +}; + +messageTypeRegistry.set(EventDeploymentCreated.$type, EventDeploymentCreated); + +function createBaseEventDeploymentUpdated(): EventDeploymentUpdated { + return { + $type: "akash.deployment.v1.EventDeploymentUpdated", + id: undefined, + hash: new Uint8Array(0), + }; +} + +export const EventDeploymentUpdated: MessageFns< + EventDeploymentUpdated, + "akash.deployment.v1.EventDeploymentUpdated" +> = { + $type: "akash.deployment.v1.EventDeploymentUpdated" as const, + + encode( + message: EventDeploymentUpdated, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + DeploymentID.encode(message.id, writer.uint32(10).fork()).join(); + } + if (message.hash.length !== 0) { + writer.uint32(18).bytes(message.hash); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): EventDeploymentUpdated { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseEventDeploymentUpdated(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = DeploymentID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.hash = reader.bytes(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventDeploymentUpdated { + return { + $type: EventDeploymentUpdated.$type, + id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, + hash: isSet(object.hash) + ? bytesFromBase64(object.hash) + : new Uint8Array(0), + }; + }, + + toJSON(message: EventDeploymentUpdated): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = DeploymentID.toJSON(message.id); + } + if (message.hash.length !== 0) { + obj.hash = base64FromBytes(message.hash); + } + return obj; + }, + + create(base?: DeepPartial): EventDeploymentUpdated { + return EventDeploymentUpdated.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): EventDeploymentUpdated { + const message = createBaseEventDeploymentUpdated(); + message.id = + object.id !== undefined && object.id !== null + ? DeploymentID.fromPartial(object.id) + : undefined; + message.hash = object.hash ?? 
new Uint8Array(0); + return message; + }, +}; + +messageTypeRegistry.set(EventDeploymentUpdated.$type, EventDeploymentUpdated); + +function createBaseEventDeploymentClosed(): EventDeploymentClosed { + return { $type: "akash.deployment.v1.EventDeploymentClosed", id: undefined }; +} + +export const EventDeploymentClosed: MessageFns< + EventDeploymentClosed, + "akash.deployment.v1.EventDeploymentClosed" +> = { + $type: "akash.deployment.v1.EventDeploymentClosed" as const, + + encode( + message: EventDeploymentClosed, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + DeploymentID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): EventDeploymentClosed { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseEventDeploymentClosed(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = DeploymentID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventDeploymentClosed { + return { + $type: EventDeploymentClosed.$type, + id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: EventDeploymentClosed): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = DeploymentID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): EventDeploymentClosed { + return EventDeploymentClosed.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): EventDeploymentClosed { + const message = createBaseEventDeploymentClosed(); + message.id = + object.id !== undefined && object.id !== null + ? DeploymentID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(EventDeploymentClosed.$type, EventDeploymentClosed); + +function createBaseEventGroupStarted(): EventGroupStarted { + return { $type: "akash.deployment.v1.EventGroupStarted", id: undefined }; +} + +export const EventGroupStarted: MessageFns< + EventGroupStarted, + "akash.deployment.v1.EventGroupStarted" +> = { + $type: "akash.deployment.v1.EventGroupStarted" as const, + + encode( + message: EventGroupStarted, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + GroupID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): EventGroupStarted { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseEventGroupStarted(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = GroupID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventGroupStarted { + return { + $type: EventGroupStarted.$type, + id: isSet(object.id) ? 
GroupID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: EventGroupStarted): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = GroupID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): EventGroupStarted { + return EventGroupStarted.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): EventGroupStarted { + const message = createBaseEventGroupStarted(); + message.id = + object.id !== undefined && object.id !== null + ? GroupID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(EventGroupStarted.$type, EventGroupStarted); + +function createBaseEventGroupPaused(): EventGroupPaused { + return { $type: "akash.deployment.v1.EventGroupPaused", id: undefined }; +} + +export const EventGroupPaused: MessageFns< + EventGroupPaused, + "akash.deployment.v1.EventGroupPaused" +> = { + $type: "akash.deployment.v1.EventGroupPaused" as const, + + encode( + message: EventGroupPaused, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + GroupID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): EventGroupPaused { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseEventGroupPaused(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = GroupID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventGroupPaused { + return { + $type: EventGroupPaused.$type, + id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: EventGroupPaused): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = GroupID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): EventGroupPaused { + return EventGroupPaused.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): EventGroupPaused { + const message = createBaseEventGroupPaused(); + message.id = + object.id !== undefined && object.id !== null + ? GroupID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(EventGroupPaused.$type, EventGroupPaused); + +function createBaseEventGroupClosed(): EventGroupClosed { + return { $type: "akash.deployment.v1.EventGroupClosed", id: undefined }; +} + +export const EventGroupClosed: MessageFns< + EventGroupClosed, + "akash.deployment.v1.EventGroupClosed" +> = { + $type: "akash.deployment.v1.EventGroupClosed" as const, + + encode( + message: EventGroupClosed, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + GroupID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): EventGroupClosed { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseEventGroupClosed(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = GroupID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventGroupClosed { + return { + $type: EventGroupClosed.$type, + id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: EventGroupClosed): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = GroupID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): EventGroupClosed { + return EventGroupClosed.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): EventGroupClosed { + const message = createBaseEventGroupClosed(); + message.id = + object.id !== undefined && object.id !== null + ? GroupID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(EventGroupClosed.$type, EventGroupClosed); + +function bytesFromBase64(b64: string): Uint8Array { + if ((globalThis as any).Buffer) { + return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); + } else { + const bin = globalThis.atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; + } +} + +function base64FromBytes(arr: Uint8Array): string { + if ((globalThis as any).Buffer) { + return globalThis.Buffer.from(arr).toString("base64"); + } else { + const bin: string[] = []; + arr.forEach((byte) => { + bin.push(globalThis.String.fromCharCode(byte)); + }); + return globalThis.btoa(bin.join("")); + } +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/deployment/v1/group.ts b/ts/src/generated/akash/deployment/v1/group.ts new file mode 100644 index 00000000..b1248d63 --- /dev/null +++ b/ts/src/generated/akash/deployment/v1/group.ts @@ -0,0 +1,159 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions:
+//   protoc-gen-ts_proto  v2.2.0
+//   protoc               unknown
+// source: akash/deployment/v1/group.proto
+
+/* eslint-disable */
+import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire";
+import Long from "long";
+import { messageTypeRegistry } from "../../../typeRegistry";
+
+/** GroupID stores owner, deployment sequence number and group sequence number */
+export interface GroupID {
+  $type: "akash.deployment.v1.GroupID";
+  owner: string;
+  dseq: Long;
+  gseq: number;
+}
+
+function createBaseGroupID(): GroupID {
+  return {
+    $type: "akash.deployment.v1.GroupID",
+    owner: "",
+    dseq: Long.UZERO,
+    gseq: 0,
+  };
+}
+
+export const GroupID: MessageFns<GroupID, "akash.deployment.v1.GroupID"> = {
+  $type: "akash.deployment.v1.GroupID" as const,
+
+  encode(
+    message: GroupID,
+    writer: BinaryWriter = new BinaryWriter(),
+  ): BinaryWriter {
+    if (message.owner !== "") {
+      writer.uint32(10).string(message.owner);
+    }
+    if (!message.dseq.equals(Long.UZERO)) {
+      writer.uint32(16).uint64(message.dseq.toString());
+    }
+    if (message.gseq !== 0) {
+      writer.uint32(24).uint32(message.gseq);
+    }
+    return writer;
+  },
+
+  decode(input: BinaryReader | Uint8Array, length?: number): GroupID {
+    const reader =
+      input instanceof BinaryReader ? input : new BinaryReader(input);
+    let end = length === undefined ? reader.len : reader.pos + length;
+    const message = createBaseGroupID();
+    while (reader.pos < end) {
+      const tag = reader.uint32();
+      switch (tag >>> 3) {
+        case 1:
+          if (tag !== 10) {
+            break;
+          }
+
+          message.owner = reader.string();
+          continue;
+        case 2:
+          if (tag !== 16) {
+            break;
+          }
+
+          message.dseq = Long.fromString(reader.uint64().toString(), true);
+          continue;
+        case 3:
+          if (tag !== 24) {
+            break;
+          }
+
+          message.gseq = reader.uint32();
+          continue;
+      }
+      if ((tag & 7) === 4 || tag === 0) {
+        break;
+      }
+      reader.skip(tag & 7);
+    }
+    return message;
+  },
+
+  fromJSON(object: any): GroupID {
+    return {
+      $type: GroupID.$type,
+      owner: isSet(object.owner) ? globalThis.String(object.owner) : "",
+      dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO,
+      gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0,
+    };
+  },
+
+  toJSON(message: GroupID): unknown {
+    const obj: any = {};
+    if (message.owner !== "") {
+      obj.owner = message.owner;
+    }
+    if (!message.dseq.equals(Long.UZERO)) {
+      obj.dseq = (message.dseq || Long.UZERO).toString();
+    }
+    if (message.gseq !== 0) {
+      obj.gseq = Math.round(message.gseq);
+    }
+    return obj;
+  },
+
+  create(base?: DeepPartial<GroupID>): GroupID {
+    return GroupID.fromPartial(base ?? {});
+  },
+  fromPartial(object: DeepPartial<GroupID>): GroupID {
+    const message = createBaseGroupID();
+    message.owner = object.owner ?? "";
+    message.dseq =
+      object.dseq !== undefined && object.dseq !== null
+        ? Long.fromValue(object.dseq)
+        : Long.UZERO;
+    message.gseq = object.gseq ?? 0;
+    return message;
+  },
+};
+
+messageTypeRegistry.set(GroupID.$type, GroupID);
+
+type Builtin =
+  | Date
+  | Function
+  | Uint8Array
+  | string
+  | number
+  | boolean
+  | undefined;
+
+type DeepPartial<T> = T extends Builtin
+  ? T
+  : T extends Long
+    ? string | number | Long
+    : T extends globalThis.Array<infer U>
+      ? globalThis.Array<DeepPartial<U>>
+      : T extends ReadonlyArray<infer U>
+        ? ReadonlyArray<DeepPartial<U>>
+        : T extends {}
+          ? { [K in Exclude<keyof T, "$type">]?: DeepPartial<T[K]> }
+          : Partial<T>;
+
+function isSet(value: any): boolean {
+  return value !== null && value !== undefined;
+}
+
+interface MessageFns<T, V extends string> {
+  readonly $type: V;
+  encode(message: T, writer?: BinaryWriter): BinaryWriter;
+  decode(input: BinaryReader | Uint8Array, length?: number): T;
+  fromJSON(object: any): T;
+  toJSON(message: T): unknown;
+  create(base?: DeepPartial<T>): T;
+  fromPartial(object: DeepPartial<T>): T;
+}
diff --git a/ts/src/generated/akash/deployment/v1/msg.ts b/ts/src/generated/akash/deployment/v1/msg.ts
new file mode 100644
index 00000000..1fd619db
--- /dev/null
+++ b/ts/src/generated/akash/deployment/v1/msg.ts
@@ -0,0 +1,242 @@
+// Code generated by protoc-gen-ts_proto. DO NOT EDIT.
+// versions:
+//   protoc-gen-ts_proto  v2.2.0
+//   protoc               unknown
+// source: akash/deployment/v1/msg.proto
+
+/* eslint-disable */
+import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire";
+import Long from "long";
+import { Coin } from "../../../cosmos/base/v1beta1/coin";
+import { messageTypeRegistry } from "../../../typeRegistry";
+import { DeploymentID } from "./deployment";
+
+/** MsgDepositDeployment deposits more funds into the deposit account */
+export interface MsgDepositDeployment {
+  $type: "akash.deployment.v1.MsgDepositDeployment";
+  id: DeploymentID | undefined;
+  amount: Coin | undefined;
+  /** Depositor pays for the deposit */
+  depositor: string;
+}
+
+/** MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. */
+export interface MsgDepositDeploymentResponse {
+  $type: "akash.deployment.v1.MsgDepositDeploymentResponse";
+}
+
+function createBaseMsgDepositDeployment(): MsgDepositDeployment {
+  return {
+    $type: "akash.deployment.v1.MsgDepositDeployment",
+    id: undefined,
+    amount: undefined,
+    depositor: "",
+  };
+}
+
+export const MsgDepositDeployment: MessageFns<
+  MsgDepositDeployment,
+  "akash.deployment.v1.MsgDepositDeployment"
+> = {
+  $type: "akash.deployment.v1.MsgDepositDeployment" as const,
+
+  encode(
+    message: MsgDepositDeployment,
+    writer: BinaryWriter = new BinaryWriter(),
+  ): BinaryWriter {
+    if (message.id !== undefined) {
+      DeploymentID.encode(message.id, writer.uint32(10).fork()).join();
+    }
+    if (message.amount !== undefined) {
+      Coin.encode(message.amount, writer.uint32(18).fork()).join();
+    }
+    if (message.depositor !== "") {
+      writer.uint32(26).string(message.depositor);
+    }
+    return writer;
+  },
+
+  decode(
+    input: BinaryReader | Uint8Array,
+    length?: number,
+  ): MsgDepositDeployment {
+    const reader =
+      input instanceof BinaryReader ? input : new BinaryReader(input);
+    let end = length === undefined ? reader.len : reader.pos + length;
+    const message = createBaseMsgDepositDeployment();
+    while (reader.pos < end) {
+      const tag = reader.uint32();
+      switch (tag >>> 3) {
+        case 1:
+          if (tag !== 10) {
+            break;
+          }
+
+          message.id = DeploymentID.decode(reader, reader.uint32());
+          continue;
+        case 2:
+          if (tag !== 18) {
+            break;
+          }
+
+          message.amount = Coin.decode(reader, reader.uint32());
+          continue;
+        case 3:
+          if (tag !== 26) {
+            break;
+          }
+
+          message.depositor = reader.string();
+          continue;
+      }
+      if ((tag & 7) === 4 || tag === 0) {
+        break;
+      }
+      reader.skip(tag & 7);
+    }
+    return message;
+  },
+
+  fromJSON(object: any): MsgDepositDeployment {
+    return {
+      $type: MsgDepositDeployment.$type,
+      id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined,
+      amount: isSet(object.amount) ? Coin.fromJSON(object.amount) : undefined,
+      depositor: isSet(object.depositor)
+        ?
globalThis.String(object.depositor) + : "", + }; + }, + + toJSON(message: MsgDepositDeployment): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = DeploymentID.toJSON(message.id); + } + if (message.amount !== undefined) { + obj.amount = Coin.toJSON(message.amount); + } + if (message.depositor !== "") { + obj.depositor = message.depositor; + } + return obj; + }, + + create(base?: DeepPartial): MsgDepositDeployment { + return MsgDepositDeployment.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgDepositDeployment { + const message = createBaseMsgDepositDeployment(); + message.id = + object.id !== undefined && object.id !== null + ? DeploymentID.fromPartial(object.id) + : undefined; + message.amount = + object.amount !== undefined && object.amount !== null + ? Coin.fromPartial(object.amount) + : undefined; + message.depositor = object.depositor ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MsgDepositDeployment.$type, MsgDepositDeployment); + +function createBaseMsgDepositDeploymentResponse(): MsgDepositDeploymentResponse { + return { $type: "akash.deployment.v1.MsgDepositDeploymentResponse" }; +} + +export const MsgDepositDeploymentResponse: MessageFns< + MsgDepositDeploymentResponse, + "akash.deployment.v1.MsgDepositDeploymentResponse" +> = { + $type: "akash.deployment.v1.MsgDepositDeploymentResponse" as const, + + encode( + _: MsgDepositDeploymentResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgDepositDeploymentResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgDepositDeploymentResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgDepositDeploymentResponse { + return { $type: MsgDepositDeploymentResponse.$type }; + }, + + toJSON(_: MsgDepositDeploymentResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgDepositDeploymentResponse { + return MsgDepositDeploymentResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgDepositDeploymentResponse { + const message = createBaseMsgDepositDeploymentResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgDepositDeploymentResponse.$type, + MsgDepositDeploymentResponse, +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/deployment/v1beta1/authz.ts b/ts/src/generated/akash/deployment/v1beta1/authz.ts deleted file mode 100644 index 14ed88e4..00000000 --- a/ts/src/generated/akash/deployment/v1beta1/authz.ts +++ /dev/null @@ -1,134 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** - * DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from - * the granter's account for a deployment. - */ -export interface DepositDeploymentAuthorization { - $type: "akash.deployment.v1beta1.DepositDeploymentAuthorization"; - /** - * SpendLimit is the amount the grantee is authorized to spend from the granter's account for - * the purpose of deployment. - */ - spendLimit: Coin | undefined; -} - -function createBaseDepositDeploymentAuthorization(): DepositDeploymentAuthorization { - return { - $type: "akash.deployment.v1beta1.DepositDeploymentAuthorization", - spendLimit: undefined, - }; -} - -export const DepositDeploymentAuthorization = { - $type: "akash.deployment.v1beta1.DepositDeploymentAuthorization" as const, - - encode( - message: DepositDeploymentAuthorization, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.spendLimit !== undefined) { - Coin.encode(message.spendLimit, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): DepositDeploymentAuthorization { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDepositDeploymentAuthorization(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.spendLimit = Coin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DepositDeploymentAuthorization { - return { - $type: DepositDeploymentAuthorization.$type, - spendLimit: isSet(object.spendLimit) - ? Coin.fromJSON(object.spendLimit) - : undefined, - }; - }, - - toJSON(message: DepositDeploymentAuthorization): unknown { - const obj: any = {}; - if (message.spendLimit !== undefined) { - obj.spendLimit = Coin.toJSON(message.spendLimit); - } - return obj; - }, - - create( - base?: DeepPartial, - ): DepositDeploymentAuthorization { - return DepositDeploymentAuthorization.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): DepositDeploymentAuthorization { - const message = createBaseDepositDeploymentAuthorization(); - message.spendLimit = - object.spendLimit !== undefined && object.spendLimit !== null - ? 
Coin.fromPartial(object.spendLimit) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - DepositDeploymentAuthorization.$type, - DepositDeploymentAuthorization, -); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta1/deployment.ts b/ts/src/generated/akash/deployment/v1beta1/deployment.ts deleted file mode 100644 index b04902c4..00000000 --- a/ts/src/generated/akash/deployment/v1beta1/deployment.ts +++ /dev/null @@ -1,1285 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { - GroupSpec, - MsgCloseGroup, - MsgCloseGroupResponse, - MsgPauseGroup, - MsgPauseGroupResponse, - MsgStartGroup, - MsgStartGroupResponse, -} from "./group"; - -/** MsgCreateDeployment defines an SDK message for creating deployment */ -export interface MsgCreateDeployment { - $type: "akash.deployment.v1beta1.MsgCreateDeployment"; - id: DeploymentID | undefined; - groups: GroupSpec[]; - version: Uint8Array; - deposit: Coin | undefined; -} - -/** MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. */ -export interface MsgCreateDeploymentResponse { - $type: "akash.deployment.v1beta1.MsgCreateDeploymentResponse"; -} - -/** MsgDepositDeployment deposits more funds into the deposit account */ -export interface MsgDepositDeployment { - $type: "akash.deployment.v1beta1.MsgDepositDeployment"; - id: DeploymentID | undefined; - amount: Coin | undefined; -} - -/** MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. */ -export interface MsgDepositDeploymentResponse { - $type: "akash.deployment.v1beta1.MsgDepositDeploymentResponse"; -} - -/** MsgUpdateDeployment defines an SDK message for updating deployment */ -export interface MsgUpdateDeployment { - $type: "akash.deployment.v1beta1.MsgUpdateDeployment"; - id: DeploymentID | undefined; - groups: GroupSpec[]; - version: Uint8Array; -} - -/** MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. */ -export interface MsgUpdateDeploymentResponse { - $type: "akash.deployment.v1beta1.MsgUpdateDeploymentResponse"; -} - -/** MsgCloseDeployment defines an SDK message for closing deployment */ -export interface MsgCloseDeployment { - $type: "akash.deployment.v1beta1.MsgCloseDeployment"; - id: DeploymentID | undefined; -} - -/** MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. 
*/ -export interface MsgCloseDeploymentResponse { - $type: "akash.deployment.v1beta1.MsgCloseDeploymentResponse"; -} - -/** DeploymentID stores owner and sequence number */ -export interface DeploymentID { - $type: "akash.deployment.v1beta1.DeploymentID"; - owner: string; - dseq: Long; -} - -/** Deployment stores deploymentID, state and version details */ -export interface Deployment { - $type: "akash.deployment.v1beta1.Deployment"; - deploymentId: DeploymentID | undefined; - state: Deployment_State; - version: Uint8Array; - createdAt: Long; -} - -/** State is an enum which refers to state of deployment */ -export enum Deployment_State { - /** invalid - Prefix should start with 0 in enum. So declaring dummy state */ - invalid = 0, - /** active - DeploymentActive denotes state for deployment active */ - active = 1, - /** closed - DeploymentClosed denotes state for deployment closed */ - closed = 2, - UNRECOGNIZED = -1, -} - -export function deployment_StateFromJSON(object: any): Deployment_State { - switch (object) { - case 0: - case "invalid": - return Deployment_State.invalid; - case 1: - case "active": - return Deployment_State.active; - case 2: - case "closed": - return Deployment_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Deployment_State.UNRECOGNIZED; - } -} - -export function deployment_StateToJSON(object: Deployment_State): string { - switch (object) { - case Deployment_State.invalid: - return "invalid"; - case Deployment_State.active: - return "active"; - case Deployment_State.closed: - return "closed"; - case Deployment_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** DeploymentFilters defines filters used to filter deployments */ -export interface DeploymentFilters { - $type: "akash.deployment.v1beta1.DeploymentFilters"; - owner: string; - dseq: Long; - state: string; -} - -function createBaseMsgCreateDeployment(): MsgCreateDeployment { - return { - $type: "akash.deployment.v1beta1.MsgCreateDeployment", - id: undefined, - groups: [], - version: new Uint8Array(0), - deposit: undefined, - }; -} - -export const MsgCreateDeployment = { - $type: "akash.deployment.v1beta1.MsgCreateDeployment" as const, - - encode( - message: MsgCreateDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.groups) { - GroupSpec.encode(v!, writer.uint32(18).fork()).ldelim(); - } - if (message.version.length !== 0) { - writer.uint32(26).bytes(message.version); - } - if (message.deposit !== undefined) { - Coin.encode(message.deposit, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateDeployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCreateDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = DeploymentID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.groups.push(GroupSpec.decode(reader, reader.uint32())); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.version = reader.bytes(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.deposit = Coin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCreateDeployment { - return { - $type: MsgCreateDeployment.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - groups: globalThis.Array.isArray(object?.groups) - ? object.groups.map((e: any) => GroupSpec.fromJSON(e)) - : [], - version: isSet(object.version) - ? bytesFromBase64(object.version) - : new Uint8Array(0), - deposit: isSet(object.deposit) - ? Coin.fromJSON(object.deposit) - : undefined, - }; - }, - - toJSON(message: MsgCreateDeployment): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = DeploymentID.toJSON(message.id); - } - if (message.groups?.length) { - obj.groups = message.groups.map((e) => GroupSpec.toJSON(e)); - } - if (message.version.length !== 0) { - obj.version = base64FromBytes(message.version); - } - if (message.deposit !== undefined) { - obj.deposit = Coin.toJSON(message.deposit); - } - return obj; - }, - - create(base?: DeepPartial): MsgCreateDeployment { - return MsgCreateDeployment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCreateDeployment { - const message = createBaseMsgCreateDeployment(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - message.groups = object.groups?.map((e) => GroupSpec.fromPartial(e)) || []; - message.version = object.version ?? new Uint8Array(0); - message.deposit = - object.deposit !== undefined && object.deposit !== null - ? Coin.fromPartial(object.deposit) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateDeployment.$type, MsgCreateDeployment); - -function createBaseMsgCreateDeploymentResponse(): MsgCreateDeploymentResponse { - return { $type: "akash.deployment.v1beta1.MsgCreateDeploymentResponse" }; -} - -export const MsgCreateDeploymentResponse = { - $type: "akash.deployment.v1beta1.MsgCreateDeploymentResponse" as const, - - encode( - _: MsgCreateDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateDeploymentResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCreateDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCreateDeploymentResponse { - return { $type: MsgCreateDeploymentResponse.$type }; - }, - - toJSON(_: MsgCreateDeploymentResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgCreateDeploymentResponse { - return MsgCreateDeploymentResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgCreateDeploymentResponse { - const message = createBaseMsgCreateDeploymentResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgCreateDeploymentResponse.$type, - MsgCreateDeploymentResponse, -); - -function createBaseMsgDepositDeployment(): MsgDepositDeployment { - return { - $type: "akash.deployment.v1beta1.MsgDepositDeployment", - id: undefined, - amount: undefined, - }; -} - -export const MsgDepositDeployment = { - $type: "akash.deployment.v1beta1.MsgDepositDeployment" as const, - - encode( - message: MsgDepositDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - if (message.amount !== undefined) { - Coin.encode(message.amount, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDepositDeployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDepositDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = DeploymentID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.amount = Coin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgDepositDeployment { - return { - $type: MsgDepositDeployment.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - amount: isSet(object.amount) ? Coin.fromJSON(object.amount) : undefined, - }; - }, - - toJSON(message: MsgDepositDeployment): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = DeploymentID.toJSON(message.id); - } - if (message.amount !== undefined) { - obj.amount = Coin.toJSON(message.amount); - } - return obj; - }, - - create(base?: DeepPartial): MsgDepositDeployment { - return MsgDepositDeployment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgDepositDeployment { - const message = createBaseMsgDepositDeployment(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - message.amount = - object.amount !== undefined && object.amount !== null - ? 
Coin.fromPartial(object.amount) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgDepositDeployment.$type, MsgDepositDeployment); - -function createBaseMsgDepositDeploymentResponse(): MsgDepositDeploymentResponse { - return { $type: "akash.deployment.v1beta1.MsgDepositDeploymentResponse" }; -} - -export const MsgDepositDeploymentResponse = { - $type: "akash.deployment.v1beta1.MsgDepositDeploymentResponse" as const, - - encode( - _: MsgDepositDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDepositDeploymentResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDepositDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgDepositDeploymentResponse { - return { $type: MsgDepositDeploymentResponse.$type }; - }, - - toJSON(_: MsgDepositDeploymentResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgDepositDeploymentResponse { - return MsgDepositDeploymentResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgDepositDeploymentResponse { - const message = createBaseMsgDepositDeploymentResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgDepositDeploymentResponse.$type, - MsgDepositDeploymentResponse, -); - -function createBaseMsgUpdateDeployment(): MsgUpdateDeployment { - return { - $type: "akash.deployment.v1beta1.MsgUpdateDeployment", - id: undefined, - groups: [], - version: new Uint8Array(0), - }; -} - -export const MsgUpdateDeployment = { - $type: "akash.deployment.v1beta1.MsgUpdateDeployment" as const, - - encode( - message: MsgUpdateDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.groups) { - GroupSpec.encode(v!, writer.uint32(18).fork()).ldelim(); - } - if (message.version.length !== 0) { - writer.uint32(26).bytes(message.version); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgUpdateDeployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgUpdateDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = DeploymentID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.groups.push(GroupSpec.decode(reader, reader.uint32())); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.version = reader.bytes(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgUpdateDeployment { - return { - $type: MsgUpdateDeployment.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - groups: globalThis.Array.isArray(object?.groups) - ? object.groups.map((e: any) => GroupSpec.fromJSON(e)) - : [], - version: isSet(object.version) - ? 
bytesFromBase64(object.version) - : new Uint8Array(0), - }; - }, - - toJSON(message: MsgUpdateDeployment): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = DeploymentID.toJSON(message.id); - } - if (message.groups?.length) { - obj.groups = message.groups.map((e) => GroupSpec.toJSON(e)); - } - if (message.version.length !== 0) { - obj.version = base64FromBytes(message.version); - } - return obj; - }, - - create(base?: DeepPartial): MsgUpdateDeployment { - return MsgUpdateDeployment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgUpdateDeployment { - const message = createBaseMsgUpdateDeployment(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - message.groups = object.groups?.map((e) => GroupSpec.fromPartial(e)) || []; - message.version = object.version ?? new Uint8Array(0); - return message; - }, -}; - -messageTypeRegistry.set(MsgUpdateDeployment.$type, MsgUpdateDeployment); - -function createBaseMsgUpdateDeploymentResponse(): MsgUpdateDeploymentResponse { - return { $type: "akash.deployment.v1beta1.MsgUpdateDeploymentResponse" }; -} - -export const MsgUpdateDeploymentResponse = { - $type: "akash.deployment.v1beta1.MsgUpdateDeploymentResponse" as const, - - encode( - _: MsgUpdateDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgUpdateDeploymentResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgUpdateDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgUpdateDeploymentResponse { - return { $type: MsgUpdateDeploymentResponse.$type }; - }, - - toJSON(_: MsgUpdateDeploymentResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgUpdateDeploymentResponse { - return MsgUpdateDeploymentResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgUpdateDeploymentResponse { - const message = createBaseMsgUpdateDeploymentResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgUpdateDeploymentResponse.$type, - MsgUpdateDeploymentResponse, -); - -function createBaseMsgCloseDeployment(): MsgCloseDeployment { - return { - $type: "akash.deployment.v1beta1.MsgCloseDeployment", - id: undefined, - }; -} - -export const MsgCloseDeployment = { - $type: "akash.deployment.v1beta1.MsgCloseDeployment" as const, - - encode( - message: MsgCloseDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseDeployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCloseDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = DeploymentID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCloseDeployment { - return { - $type: MsgCloseDeployment.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgCloseDeployment): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = DeploymentID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): MsgCloseDeployment { - return MsgCloseDeployment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCloseDeployment { - const message = createBaseMsgCloseDeployment(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseDeployment.$type, MsgCloseDeployment); - -function createBaseMsgCloseDeploymentResponse(): MsgCloseDeploymentResponse { - return { $type: "akash.deployment.v1beta1.MsgCloseDeploymentResponse" }; -} - -export const MsgCloseDeploymentResponse = { - $type: "akash.deployment.v1beta1.MsgCloseDeploymentResponse" as const, - - encode( - _: MsgCloseDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCloseDeploymentResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCloseDeploymentResponse { - return { $type: MsgCloseDeploymentResponse.$type }; - }, - - toJSON(_: MsgCloseDeploymentResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgCloseDeploymentResponse { - return MsgCloseDeploymentResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgCloseDeploymentResponse { - const message = createBaseMsgCloseDeploymentResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgCloseDeploymentResponse.$type, - MsgCloseDeploymentResponse, -); - -function createBaseDeploymentID(): DeploymentID { - return { - $type: "akash.deployment.v1beta1.DeploymentID", - owner: "", - dseq: Long.UZERO, - }; -} - -export const DeploymentID = { - $type: "akash.deployment.v1beta1.DeploymentID" as const, - - encode( - message: DeploymentID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DeploymentID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseDeploymentID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DeploymentID { - return { - $type: DeploymentID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - }; - }, - - toJSON(message: DeploymentID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): DeploymentID { - return DeploymentID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): DeploymentID { - const message = createBaseDeploymentID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - return message; - }, -}; - -messageTypeRegistry.set(DeploymentID.$type, DeploymentID); - -function createBaseDeployment(): Deployment { - return { - $type: "akash.deployment.v1beta1.Deployment", - deploymentId: undefined, - state: 0, - version: new Uint8Array(0), - createdAt: Long.ZERO, - }; -} - -export const Deployment = { - $type: "akash.deployment.v1beta1.Deployment" as const, - - encode( - message: Deployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.deploymentId !== undefined) { - DeploymentID.encode( - message.deploymentId, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.version.length !== 0) { - writer.uint32(26).bytes(message.version); - } - if (!message.createdAt.equals(Long.ZERO)) { - writer.uint32(32).int64(message.createdAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Deployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deploymentId = DeploymentID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.version = reader.bytes(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.createdAt = reader.int64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Deployment { - return { - $type: Deployment.$type, - deploymentId: isSet(object.deploymentId) - ? DeploymentID.fromJSON(object.deploymentId) - : undefined, - state: isSet(object.state) ? deployment_StateFromJSON(object.state) : 0, - version: isSet(object.version) - ? bytesFromBase64(object.version) - : new Uint8Array(0), - createdAt: isSet(object.createdAt) - ? 
Long.fromValue(object.createdAt) - : Long.ZERO, - }; - }, - - toJSON(message: Deployment): unknown { - const obj: any = {}; - if (message.deploymentId !== undefined) { - obj.deploymentId = DeploymentID.toJSON(message.deploymentId); - } - if (message.state !== 0) { - obj.state = deployment_StateToJSON(message.state); - } - if (message.version.length !== 0) { - obj.version = base64FromBytes(message.version); - } - if (!message.createdAt.equals(Long.ZERO)) { - obj.createdAt = (message.createdAt || Long.ZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): Deployment { - return Deployment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Deployment { - const message = createBaseDeployment(); - message.deploymentId = - object.deploymentId !== undefined && object.deploymentId !== null - ? DeploymentID.fromPartial(object.deploymentId) - : undefined; - message.state = object.state ?? 0; - message.version = object.version ?? new Uint8Array(0); - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? Long.fromValue(object.createdAt) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Deployment.$type, Deployment); - -function createBaseDeploymentFilters(): DeploymentFilters { - return { - $type: "akash.deployment.v1beta1.DeploymentFilters", - owner: "", - dseq: Long.UZERO, - state: "", - }; -} - -export const DeploymentFilters = { - $type: "akash.deployment.v1beta1.DeploymentFilters" as const, - - encode( - message: DeploymentFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.state !== "") { - writer.uint32(26).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DeploymentFilters { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDeploymentFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.state = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DeploymentFilters { - return { - $type: DeploymentFilters.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - state: isSet(object.state) ? globalThis.String(object.state) : "", - }; - }, - - toJSON(message: DeploymentFilters): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.state !== "") { - obj.state = message.state; - } - return obj; - }, - - create(base?: DeepPartial): DeploymentFilters { - return DeploymentFilters.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): DeploymentFilters { - const message = createBaseDeploymentFilters(); - message.owner = object.owner ?? 
""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.state = object.state ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(DeploymentFilters.$type, DeploymentFilters); - -/** Msg defines the deployment Msg service. */ -export interface Msg { - /** CreateDeployment defines a method to create new deployment given proper inputs. */ - CreateDeployment( - request: MsgCreateDeployment, - ): Promise; - /** DepositDeployment deposits more funds into the deployment account */ - DepositDeployment( - request: MsgDepositDeployment, - ): Promise; - /** UpdateDeployment defines a method to update a deployment given proper inputs. */ - UpdateDeployment( - request: MsgUpdateDeployment, - ): Promise; - /** CloseDeployment defines a method to close a deployment given proper inputs. */ - CloseDeployment( - request: MsgCloseDeployment, - ): Promise; - /** CloseGroup defines a method to close a group of a deployment given proper inputs. */ - CloseGroup(request: MsgCloseGroup): Promise; - /** PauseGroup defines a method to close a group of a deployment given proper inputs. */ - PauseGroup(request: MsgPauseGroup): Promise; - /** StartGroup defines a method to close a group of a deployment given proper inputs. */ - StartGroup(request: MsgStartGroup): Promise; -} - -export const MsgServiceName = "akash.deployment.v1beta1.Msg"; -export class MsgClientImpl implements Msg { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || MsgServiceName; - this.rpc = rpc; - this.CreateDeployment = this.CreateDeployment.bind(this); - this.DepositDeployment = this.DepositDeployment.bind(this); - this.UpdateDeployment = this.UpdateDeployment.bind(this); - this.CloseDeployment = this.CloseDeployment.bind(this); - this.CloseGroup = this.CloseGroup.bind(this); - this.PauseGroup = this.PauseGroup.bind(this); - this.StartGroup = this.StartGroup.bind(this); - } - CreateDeployment( - request: MsgCreateDeployment, - ): Promise { - const data = MsgCreateDeployment.encode(request).finish(); - const promise = this.rpc.request(this.service, "CreateDeployment", data); - return promise.then((data) => - MsgCreateDeploymentResponse.decode(_m0.Reader.create(data)), - ); - } - - DepositDeployment( - request: MsgDepositDeployment, - ): Promise { - const data = MsgDepositDeployment.encode(request).finish(); - const promise = this.rpc.request(this.service, "DepositDeployment", data); - return promise.then((data) => - MsgDepositDeploymentResponse.decode(_m0.Reader.create(data)), - ); - } - - UpdateDeployment( - request: MsgUpdateDeployment, - ): Promise { - const data = MsgUpdateDeployment.encode(request).finish(); - const promise = this.rpc.request(this.service, "UpdateDeployment", data); - return promise.then((data) => - MsgUpdateDeploymentResponse.decode(_m0.Reader.create(data)), - ); - } - - CloseDeployment( - request: MsgCloseDeployment, - ): Promise { - const data = MsgCloseDeployment.encode(request).finish(); - const promise = this.rpc.request(this.service, "CloseDeployment", data); - return promise.then((data) => - MsgCloseDeploymentResponse.decode(_m0.Reader.create(data)), - ); - } - - CloseGroup(request: MsgCloseGroup): Promise { - const data = MsgCloseGroup.encode(request).finish(); - const promise = this.rpc.request(this.service, "CloseGroup", data); - return promise.then((data) => - MsgCloseGroupResponse.decode(_m0.Reader.create(data)), - ); - } - - 
PauseGroup(request: MsgPauseGroup): Promise { - const data = MsgPauseGroup.encode(request).finish(); - const promise = this.rpc.request(this.service, "PauseGroup", data); - return promise.then((data) => - MsgPauseGroupResponse.decode(_m0.Reader.create(data)), - ); - } - - StartGroup(request: MsgStartGroup): Promise { - const data = MsgStartGroup.encode(request).finish(); - const promise = this.rpc.request(this.service, "StartGroup", data); - return promise.then((data) => - MsgStartGroupResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -function bytesFromBase64(b64: string): Uint8Array { - if ((globalThis as any).Buffer) { - return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); - } else { - const bin = globalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if ((globalThis as any).Buffer) { - return globalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(globalThis.String.fromCharCode(byte)); - }); - return globalThis.btoa(bin.join("")); - } -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta1/genesis.ts b/ts/src/generated/akash/deployment/v1beta1/genesis.ts deleted file mode 100644 index c908cd60..00000000 --- a/ts/src/generated/akash/deployment/v1beta1/genesis.ts +++ /dev/null @@ -1,242 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Deployment } from "./deployment"; -import { Group } from "./group"; -import { Params } from "./params"; - -/** GenesisDeployment defines the basic genesis state used by deployment module */ -export interface GenesisDeployment { - $type: "akash.deployment.v1beta1.GenesisDeployment"; - deployment: Deployment | undefined; - groups: Group[]; -} - -/** GenesisState stores slice of genesis deployment instance */ -export interface GenesisState { - $type: "akash.deployment.v1beta1.GenesisState"; - deployments: GenesisDeployment[]; - params: Params | undefined; -} - -function createBaseGenesisDeployment(): GenesisDeployment { - return { - $type: "akash.deployment.v1beta1.GenesisDeployment", - deployment: undefined, - groups: [], - }; -} - -export const GenesisDeployment = { - $type: "akash.deployment.v1beta1.GenesisDeployment" as const, - - encode( - message: GenesisDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.deployment !== undefined) { - Deployment.encode(message.deployment, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.groups) { - Group.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisDeployment { - const reader = - 
input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deployment = Deployment.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.groups.push(Group.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisDeployment { - return { - $type: GenesisDeployment.$type, - deployment: isSet(object.deployment) - ? Deployment.fromJSON(object.deployment) - : undefined, - groups: globalThis.Array.isArray(object?.groups) - ? object.groups.map((e: any) => Group.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GenesisDeployment): unknown { - const obj: any = {}; - if (message.deployment !== undefined) { - obj.deployment = Deployment.toJSON(message.deployment); - } - if (message.groups?.length) { - obj.groups = message.groups.map((e) => Group.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): GenesisDeployment { - return GenesisDeployment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisDeployment { - const message = createBaseGenesisDeployment(); - message.deployment = - object.deployment !== undefined && object.deployment !== null - ? Deployment.fromPartial(object.deployment) - : undefined; - message.groups = object.groups?.map((e) => Group.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GenesisDeployment.$type, GenesisDeployment); - -function createBaseGenesisState(): GenesisState { - return { - $type: "akash.deployment.v1beta1.GenesisState", - deployments: [], - params: undefined, - }; -} - -export const GenesisState = { - $type: "akash.deployment.v1beta1.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.deployments) { - GenesisDeployment.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.params !== undefined) { - Params.encode(message.params, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deployments.push( - GenesisDeployment.decode(reader, reader.uint32()), - ); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.params = Params.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - deployments: globalThis.Array.isArray(object?.deployments) - ? object.deployments.map((e: any) => GenesisDeployment.fromJSON(e)) - : [], - params: isSet(object.params) ? 
Params.fromJSON(object.params) : undefined, - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.deployments?.length) { - obj.deployments = message.deployments.map((e) => - GenesisDeployment.toJSON(e), - ); - } - if (message.params !== undefined) { - obj.params = Params.toJSON(message.params); - } - return obj; - }, - - create(base?: DeepPartial): GenesisState { - return GenesisState.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisState { - const message = createBaseGenesisState(); - message.deployments = - object.deployments?.map((e) => GenesisDeployment.fromPartial(e)) || []; - message.params = - object.params !== undefined && object.params !== null - ? Params.fromPartial(object.params) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta1/group.ts b/ts/src/generated/akash/deployment/v1beta1/group.ts deleted file mode 100644 index 7a94d1ce..00000000 --- a/ts/src/generated/akash/deployment/v1beta1/group.ts +++ /dev/null @@ -1,1003 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { PlacementRequirements } from "../../base/v1beta1/attribute"; -import { ResourceUnits } from "../../base/v1beta1/resource"; - -/** MsgCloseGroup defines SDK message to close a single Group within a Deployment. */ -export interface MsgCloseGroup { - $type: "akash.deployment.v1beta1.MsgCloseGroup"; - id: GroupID | undefined; -} - -/** MsgCloseGroupResponse defines the Msg/CloseGroup response type. */ -export interface MsgCloseGroupResponse { - $type: "akash.deployment.v1beta1.MsgCloseGroupResponse"; -} - -/** MsgPauseGroup defines SDK message to close a single Group within a Deployment. */ -export interface MsgPauseGroup { - $type: "akash.deployment.v1beta1.MsgPauseGroup"; - id: GroupID | undefined; -} - -/** MsgPauseGroupResponse defines the Msg/PauseGroup response type. */ -export interface MsgPauseGroupResponse { - $type: "akash.deployment.v1beta1.MsgPauseGroupResponse"; -} - -/** MsgStartGroup defines SDK message to close a single Group within a Deployment. */ -export interface MsgStartGroup { - $type: "akash.deployment.v1beta1.MsgStartGroup"; - id: GroupID | undefined; -} - -/** MsgStartGroupResponse defines the Msg/StartGroup response type. 
*/ -export interface MsgStartGroupResponse { - $type: "akash.deployment.v1beta1.MsgStartGroupResponse"; -} - -/** GroupID stores owner, deployment sequence number and group sequence number */ -export interface GroupID { - $type: "akash.deployment.v1beta1.GroupID"; - owner: string; - dseq: Long; - gseq: number; -} - -/** GroupSpec stores group specifications */ -export interface GroupSpec { - $type: "akash.deployment.v1beta1.GroupSpec"; - name: string; - requirements: PlacementRequirements | undefined; - resources: Resource[]; -} - -/** Group stores group id, state and specifications of group */ -export interface Group { - $type: "akash.deployment.v1beta1.Group"; - groupId: GroupID | undefined; - state: Group_State; - groupSpec: GroupSpec | undefined; - createdAt: Long; -} - -/** State is an enum which refers to state of group */ -export enum Group_State { - /** invalid - Prefix should start with 0 in enum. So declaring dummy state */ - invalid = 0, - /** open - GroupOpen denotes state for group open */ - open = 1, - /** paused - GroupOrdered denotes state for group ordered */ - paused = 2, - /** insufficient_funds - GroupInsufficientFunds denotes state for group insufficient_funds */ - insufficient_funds = 3, - /** closed - GroupClosed denotes state for group closed */ - closed = 4, - UNRECOGNIZED = -1, -} - -export function group_StateFromJSON(object: any): Group_State { - switch (object) { - case 0: - case "invalid": - return Group_State.invalid; - case 1: - case "open": - return Group_State.open; - case 2: - case "paused": - return Group_State.paused; - case 3: - case "insufficient_funds": - return Group_State.insufficient_funds; - case 4: - case "closed": - return Group_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Group_State.UNRECOGNIZED; - } -} - -export function group_StateToJSON(object: Group_State): string { - switch (object) { - case Group_State.invalid: - return "invalid"; - case Group_State.open: - return "open"; - case Group_State.paused: - return "paused"; - case Group_State.insufficient_funds: - return "insufficient_funds"; - case Group_State.closed: - return "closed"; - case Group_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** Resource stores unit, total count and price of resource */ -export interface Resource { - $type: "akash.deployment.v1beta1.Resource"; - resources: ResourceUnits | undefined; - count: number; - price: Coin | undefined; -} - -function createBaseMsgCloseGroup(): MsgCloseGroup { - return { $type: "akash.deployment.v1beta1.MsgCloseGroup", id: undefined }; -} - -export const MsgCloseGroup = { - $type: "akash.deployment.v1beta1.MsgCloseGroup" as const, - - encode( - message: MsgCloseGroup, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseGroup { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCloseGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = GroupID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCloseGroup { - return { - $type: MsgCloseGroup.$type, - id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgCloseGroup): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = GroupID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): MsgCloseGroup { - return MsgCloseGroup.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCloseGroup { - const message = createBaseMsgCloseGroup(); - message.id = - object.id !== undefined && object.id !== null - ? GroupID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseGroup.$type, MsgCloseGroup); - -function createBaseMsgCloseGroupResponse(): MsgCloseGroupResponse { - return { $type: "akash.deployment.v1beta1.MsgCloseGroupResponse" }; -} - -export const MsgCloseGroupResponse = { - $type: "akash.deployment.v1beta1.MsgCloseGroupResponse" as const, - - encode( - _: MsgCloseGroupResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCloseGroupResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseGroupResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCloseGroupResponse { - return { $type: MsgCloseGroupResponse.$type }; - }, - - toJSON(_: MsgCloseGroupResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgCloseGroupResponse { - return MsgCloseGroupResponse.fromPartial(base ?? {}); - }, - fromPartial(_: DeepPartial): MsgCloseGroupResponse { - const message = createBaseMsgCloseGroupResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseGroupResponse.$type, MsgCloseGroupResponse); - -function createBaseMsgPauseGroup(): MsgPauseGroup { - return { $type: "akash.deployment.v1beta1.MsgPauseGroup", id: undefined }; -} - -export const MsgPauseGroup = { - $type: "akash.deployment.v1beta1.MsgPauseGroup" as const, - - encode( - message: MsgPauseGroup, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgPauseGroup { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgPauseGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = GroupID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgPauseGroup { - return { - $type: MsgPauseGroup.$type, - id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgPauseGroup): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = GroupID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): MsgPauseGroup { - return MsgPauseGroup.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgPauseGroup { - const message = createBaseMsgPauseGroup(); - message.id = - object.id !== undefined && object.id !== null - ? GroupID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgPauseGroup.$type, MsgPauseGroup); - -function createBaseMsgPauseGroupResponse(): MsgPauseGroupResponse { - return { $type: "akash.deployment.v1beta1.MsgPauseGroupResponse" }; -} - -export const MsgPauseGroupResponse = { - $type: "akash.deployment.v1beta1.MsgPauseGroupResponse" as const, - - encode( - _: MsgPauseGroupResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgPauseGroupResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgPauseGroupResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgPauseGroupResponse { - return { $type: MsgPauseGroupResponse.$type }; - }, - - toJSON(_: MsgPauseGroupResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgPauseGroupResponse { - return MsgPauseGroupResponse.fromPartial(base ?? {}); - }, - fromPartial(_: DeepPartial): MsgPauseGroupResponse { - const message = createBaseMsgPauseGroupResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgPauseGroupResponse.$type, MsgPauseGroupResponse); - -function createBaseMsgStartGroup(): MsgStartGroup { - return { $type: "akash.deployment.v1beta1.MsgStartGroup", id: undefined }; -} - -export const MsgStartGroup = { - $type: "akash.deployment.v1beta1.MsgStartGroup" as const, - - encode( - message: MsgStartGroup, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgStartGroup { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgStartGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = GroupID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgStartGroup { - return { - $type: MsgStartGroup.$type, - id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgStartGroup): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = GroupID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): MsgStartGroup { - return MsgStartGroup.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgStartGroup { - const message = createBaseMsgStartGroup(); - message.id = - object.id !== undefined && object.id !== null - ? GroupID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgStartGroup.$type, MsgStartGroup); - -function createBaseMsgStartGroupResponse(): MsgStartGroupResponse { - return { $type: "akash.deployment.v1beta1.MsgStartGroupResponse" }; -} - -export const MsgStartGroupResponse = { - $type: "akash.deployment.v1beta1.MsgStartGroupResponse" as const, - - encode( - _: MsgStartGroupResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgStartGroupResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgStartGroupResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgStartGroupResponse { - return { $type: MsgStartGroupResponse.$type }; - }, - - toJSON(_: MsgStartGroupResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgStartGroupResponse { - return MsgStartGroupResponse.fromPartial(base ?? {}); - }, - fromPartial(_: DeepPartial): MsgStartGroupResponse { - const message = createBaseMsgStartGroupResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgStartGroupResponse.$type, MsgStartGroupResponse); - -function createBaseGroupID(): GroupID { - return { - $type: "akash.deployment.v1beta1.GroupID", - owner: "", - dseq: Long.UZERO, - gseq: 0, - }; -} - -export const GroupID = { - $type: "akash.deployment.v1beta1.GroupID" as const, - - encode( - message: GroupID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GroupID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseGroupID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GroupID { - return { - $type: GroupID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - }; - }, - - toJSON(message: GroupID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - return obj; - }, - - create(base?: DeepPartial): GroupID { - return GroupID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GroupID { - const message = createBaseGroupID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(GroupID.$type, GroupID); - -function createBaseGroupSpec(): GroupSpec { - return { - $type: "akash.deployment.v1beta1.GroupSpec", - name: "", - requirements: undefined, - resources: [], - }; -} - -export const GroupSpec = { - $type: "akash.deployment.v1beta1.GroupSpec" as const, - - encode( - message: GroupSpec, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.requirements !== undefined) { - PlacementRequirements.encode( - message.requirements, - writer.uint32(18).fork(), - ).ldelim(); - } - for (const v of message.resources) { - Resource.encode(v!, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GroupSpec { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGroupSpec(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.requirements = PlacementRequirements.decode( - reader, - reader.uint32(), - ); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.resources.push(Resource.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GroupSpec { - return { - $type: GroupSpec.$type, - name: isSet(object.name) ? globalThis.String(object.name) : "", - requirements: isSet(object.requirements) - ? PlacementRequirements.fromJSON(object.requirements) - : undefined, - resources: globalThis.Array.isArray(object?.resources) - ? 
object.resources.map((e: any) => Resource.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GroupSpec): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.requirements !== undefined) { - obj.requirements = PlacementRequirements.toJSON(message.requirements); - } - if (message.resources?.length) { - obj.resources = message.resources.map((e) => Resource.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): GroupSpec { - return GroupSpec.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GroupSpec { - const message = createBaseGroupSpec(); - message.name = object.name ?? ""; - message.requirements = - object.requirements !== undefined && object.requirements !== null - ? PlacementRequirements.fromPartial(object.requirements) - : undefined; - message.resources = - object.resources?.map((e) => Resource.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GroupSpec.$type, GroupSpec); - -function createBaseGroup(): Group { - return { - $type: "akash.deployment.v1beta1.Group", - groupId: undefined, - state: 0, - groupSpec: undefined, - createdAt: Long.ZERO, - }; -} - -export const Group = { - $type: "akash.deployment.v1beta1.Group" as const, - - encode(message: Group, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.groupId !== undefined) { - GroupID.encode(message.groupId, writer.uint32(10).fork()).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.groupSpec !== undefined) { - GroupSpec.encode(message.groupSpec, writer.uint32(26).fork()).ldelim(); - } - if (!message.createdAt.equals(Long.ZERO)) { - writer.uint32(32).int64(message.createdAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Group { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.groupId = GroupID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.groupSpec = GroupSpec.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.createdAt = reader.int64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Group { - return { - $type: Group.$type, - groupId: isSet(object.groupId) - ? GroupID.fromJSON(object.groupId) - : undefined, - state: isSet(object.state) ? group_StateFromJSON(object.state) : 0, - groupSpec: isSet(object.groupSpec) - ? GroupSpec.fromJSON(object.groupSpec) - : undefined, - createdAt: isSet(object.createdAt) - ? 
Long.fromValue(object.createdAt) - : Long.ZERO, - }; - }, - - toJSON(message: Group): unknown { - const obj: any = {}; - if (message.groupId !== undefined) { - obj.groupId = GroupID.toJSON(message.groupId); - } - if (message.state !== 0) { - obj.state = group_StateToJSON(message.state); - } - if (message.groupSpec !== undefined) { - obj.groupSpec = GroupSpec.toJSON(message.groupSpec); - } - if (!message.createdAt.equals(Long.ZERO)) { - obj.createdAt = (message.createdAt || Long.ZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): Group { - return Group.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Group { - const message = createBaseGroup(); - message.groupId = - object.groupId !== undefined && object.groupId !== null - ? GroupID.fromPartial(object.groupId) - : undefined; - message.state = object.state ?? 0; - message.groupSpec = - object.groupSpec !== undefined && object.groupSpec !== null - ? GroupSpec.fromPartial(object.groupSpec) - : undefined; - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? Long.fromValue(object.createdAt) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Group.$type, Group); - -function createBaseResource(): Resource { - return { - $type: "akash.deployment.v1beta1.Resource", - resources: undefined, - count: 0, - price: undefined, - }; -} - -export const Resource = { - $type: "akash.deployment.v1beta1.Resource" as const, - - encode( - message: Resource, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.resources !== undefined) { - ResourceUnits.encode( - message.resources, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.count !== 0) { - writer.uint32(16).uint32(message.count); - } - if (message.price !== undefined) { - Coin.encode(message.price, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Resource { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseResource(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.resources = ResourceUnits.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.count = reader.uint32(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.price = Coin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Resource { - return { - $type: Resource.$type, - resources: isSet(object.resources) - ? ResourceUnits.fromJSON(object.resources) - : undefined, - count: isSet(object.count) ? globalThis.Number(object.count) : 0, - price: isSet(object.price) ? Coin.fromJSON(object.price) : undefined, - }; - }, - - toJSON(message: Resource): unknown { - const obj: any = {}; - if (message.resources !== undefined) { - obj.resources = ResourceUnits.toJSON(message.resources); - } - if (message.count !== 0) { - obj.count = Math.round(message.count); - } - if (message.price !== undefined) { - obj.price = Coin.toJSON(message.price); - } - return obj; - }, - - create(base?: DeepPartial): Resource { - return Resource.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): Resource { - const message = createBaseResource(); - message.resources = - object.resources !== undefined && object.resources !== null - ? ResourceUnits.fromPartial(object.resources) - : undefined; - message.count = object.count ?? 0; - message.price = - object.price !== undefined && object.price !== null - ? Coin.fromPartial(object.price) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(Resource.$type, Resource); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta1/params.ts b/ts/src/generated/akash/deployment/v1beta1/params.ts deleted file mode 100644 index c503ef41..00000000 --- a/ts/src/generated/akash/deployment/v1beta1/params.ts +++ /dev/null @@ -1,121 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** Params defines the parameters for the x/deployment package */ -export interface Params { - $type: "akash.deployment.v1beta1.Params"; - deploymentMinDeposit: Coin | undefined; -} - -function createBaseParams(): Params { - return { - $type: "akash.deployment.v1beta1.Params", - deploymentMinDeposit: undefined, - }; -} - -export const Params = { - $type: "akash.deployment.v1beta1.Params" as const, - - encode( - message: Params, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.deploymentMinDeposit !== undefined) { - Coin.encode( - message.deploymentMinDeposit, - writer.uint32(10).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Params { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseParams(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deploymentMinDeposit = Coin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Params { - return { - $type: Params.$type, - deploymentMinDeposit: isSet(object.deploymentMinDeposit) - ? Coin.fromJSON(object.deploymentMinDeposit) - : undefined, - }; - }, - - toJSON(message: Params): unknown { - const obj: any = {}; - if (message.deploymentMinDeposit !== undefined) { - obj.deploymentMinDeposit = Coin.toJSON(message.deploymentMinDeposit); - } - return obj; - }, - - create(base?: DeepPartial): Params { - return Params.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Params { - const message = createBaseParams(); - message.deploymentMinDeposit = - object.deploymentMinDeposit !== undefined && - object.deploymentMinDeposit !== null - ? 
Coin.fromPartial(object.deploymentMinDeposit) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(Params.$type, Params); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta1/query.ts b/ts/src/generated/akash/deployment/v1beta1/query.ts deleted file mode 100644 index fc02636b..00000000 --- a/ts/src/generated/akash/deployment/v1beta1/query.ts +++ /dev/null @@ -1,705 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Account } from "../../escrow/v1beta1/types"; -import { Deployment, DeploymentFilters, DeploymentID } from "./deployment"; -import { Group, GroupID } from "./group"; - -/** QueryDeploymentsRequest is request type for the Query/Deployments RPC method */ -export interface QueryDeploymentsRequest { - $type: "akash.deployment.v1beta1.QueryDeploymentsRequest"; - filters: DeploymentFilters | undefined; - pagination: PageRequest | undefined; -} - -/** QueryDeploymentsResponse is response type for the Query/Deployments RPC method */ -export interface QueryDeploymentsResponse { - $type: "akash.deployment.v1beta1.QueryDeploymentsResponse"; - deployments: QueryDeploymentResponse[]; - pagination: PageResponse | undefined; -} - -/** QueryDeploymentRequest is request type for the Query/Deployment RPC method */ -export interface QueryDeploymentRequest { - $type: "akash.deployment.v1beta1.QueryDeploymentRequest"; - id: DeploymentID | undefined; -} - -/** QueryDeploymentResponse is response type for the Query/Deployment RPC method */ -export interface QueryDeploymentResponse { - $type: "akash.deployment.v1beta1.QueryDeploymentResponse"; - deployment: Deployment | undefined; - groups: Group[]; - escrowAccount: Account | undefined; -} - -/** QueryGroupRequest is request type for the Query/Group RPC method */ -export interface QueryGroupRequest { - $type: "akash.deployment.v1beta1.QueryGroupRequest"; - id: GroupID | undefined; -} - -/** QueryGroupResponse is response type for the Query/Group RPC method */ -export interface QueryGroupResponse { - $type: "akash.deployment.v1beta1.QueryGroupResponse"; - group: Group | undefined; -} - -function createBaseQueryDeploymentsRequest(): QueryDeploymentsRequest { - return { - $type: "akash.deployment.v1beta1.QueryDeploymentsRequest", - filters: undefined, - pagination: undefined, - }; -} - -export const QueryDeploymentsRequest = { - $type: "akash.deployment.v1beta1.QueryDeploymentsRequest" as const, - - encode( - message: QueryDeploymentsRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.filters !== undefined) { - DeploymentFilters.encode( - message.filters, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode( - input: 
_m0.Reader | Uint8Array, - length?: number, - ): QueryDeploymentsRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryDeploymentsRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.filters = DeploymentFilters.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryDeploymentsRequest { - return { - $type: QueryDeploymentsRequest.$type, - filters: isSet(object.filters) - ? DeploymentFilters.fromJSON(object.filters) - : undefined, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryDeploymentsRequest): unknown { - const obj: any = {}; - if (message.filters !== undefined) { - obj.filters = DeploymentFilters.toJSON(message.filters); - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryDeploymentsRequest { - return QueryDeploymentsRequest.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryDeploymentsRequest { - const message = createBaseQueryDeploymentsRequest(); - message.filters = - object.filters !== undefined && object.filters !== null - ? DeploymentFilters.fromPartial(object.filters) - : undefined; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryDeploymentsRequest.$type, QueryDeploymentsRequest); - -function createBaseQueryDeploymentsResponse(): QueryDeploymentsResponse { - return { - $type: "akash.deployment.v1beta1.QueryDeploymentsResponse", - deployments: [], - pagination: undefined, - }; -} - -export const QueryDeploymentsResponse = { - $type: "akash.deployment.v1beta1.QueryDeploymentsResponse" as const, - - encode( - message: QueryDeploymentsResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.deployments) { - QueryDeploymentResponse.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryDeploymentsResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryDeploymentsResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deployments.push( - QueryDeploymentResponse.decode(reader, reader.uint32()), - ); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryDeploymentsResponse { - return { - $type: QueryDeploymentsResponse.$type, - deployments: globalThis.Array.isArray(object?.deployments) - ? object.deployments.map((e: any) => - QueryDeploymentResponse.fromJSON(e), - ) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryDeploymentsResponse): unknown { - const obj: any = {}; - if (message.deployments?.length) { - obj.deployments = message.deployments.map((e) => - QueryDeploymentResponse.toJSON(e), - ); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create( - base?: DeepPartial, - ): QueryDeploymentsResponse { - return QueryDeploymentsResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryDeploymentsResponse { - const message = createBaseQueryDeploymentsResponse(); - message.deployments = - object.deployments?.map((e) => QueryDeploymentResponse.fromPartial(e)) || - []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - QueryDeploymentsResponse.$type, - QueryDeploymentsResponse, -); - -function createBaseQueryDeploymentRequest(): QueryDeploymentRequest { - return { - $type: "akash.deployment.v1beta1.QueryDeploymentRequest", - id: undefined, - }; -} - -export const QueryDeploymentRequest = { - $type: "akash.deployment.v1beta1.QueryDeploymentRequest" as const, - - encode( - message: QueryDeploymentRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryDeploymentRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryDeploymentRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = DeploymentID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryDeploymentRequest { - return { - $type: QueryDeploymentRequest.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryDeploymentRequest): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = DeploymentID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): QueryDeploymentRequest { - return QueryDeploymentRequest.fromPartial(base ?? 
{}); - }, - fromPartial( - object: DeepPartial, - ): QueryDeploymentRequest { - const message = createBaseQueryDeploymentRequest(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryDeploymentRequest.$type, QueryDeploymentRequest); - -function createBaseQueryDeploymentResponse(): QueryDeploymentResponse { - return { - $type: "akash.deployment.v1beta1.QueryDeploymentResponse", - deployment: undefined, - groups: [], - escrowAccount: undefined, - }; -} - -export const QueryDeploymentResponse = { - $type: "akash.deployment.v1beta1.QueryDeploymentResponse" as const, - - encode( - message: QueryDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.deployment !== undefined) { - Deployment.encode(message.deployment, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.groups) { - Group.encode(v!, writer.uint32(18).fork()).ldelim(); - } - if (message.escrowAccount !== undefined) { - Account.encode(message.escrowAccount, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryDeploymentResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deployment = Deployment.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.groups.push(Group.decode(reader, reader.uint32())); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.escrowAccount = Account.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryDeploymentResponse { - return { - $type: QueryDeploymentResponse.$type, - deployment: isSet(object.deployment) - ? Deployment.fromJSON(object.deployment) - : undefined, - groups: globalThis.Array.isArray(object?.groups) - ? object.groups.map((e: any) => Group.fromJSON(e)) - : [], - escrowAccount: isSet(object.escrowAccount) - ? Account.fromJSON(object.escrowAccount) - : undefined, - }; - }, - - toJSON(message: QueryDeploymentResponse): unknown { - const obj: any = {}; - if (message.deployment !== undefined) { - obj.deployment = Deployment.toJSON(message.deployment); - } - if (message.groups?.length) { - obj.groups = message.groups.map((e) => Group.toJSON(e)); - } - if (message.escrowAccount !== undefined) { - obj.escrowAccount = Account.toJSON(message.escrowAccount); - } - return obj; - }, - - create(base?: DeepPartial): QueryDeploymentResponse { - return QueryDeploymentResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryDeploymentResponse { - const message = createBaseQueryDeploymentResponse(); - message.deployment = - object.deployment !== undefined && object.deployment !== null - ? Deployment.fromPartial(object.deployment) - : undefined; - message.groups = object.groups?.map((e) => Group.fromPartial(e)) || []; - message.escrowAccount = - object.escrowAccount !== undefined && object.escrowAccount !== null - ? 
Account.fromPartial(object.escrowAccount) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryDeploymentResponse.$type, QueryDeploymentResponse); - -function createBaseQueryGroupRequest(): QueryGroupRequest { - return { $type: "akash.deployment.v1beta1.QueryGroupRequest", id: undefined }; -} - -export const QueryGroupRequest = { - $type: "akash.deployment.v1beta1.QueryGroupRequest" as const, - - encode( - message: QueryGroupRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryGroupRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryGroupRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = GroupID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryGroupRequest { - return { - $type: QueryGroupRequest.$type, - id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryGroupRequest): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = GroupID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): QueryGroupRequest { - return QueryGroupRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryGroupRequest { - const message = createBaseQueryGroupRequest(); - message.id = - object.id !== undefined && object.id !== null - ? GroupID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryGroupRequest.$type, QueryGroupRequest); - -function createBaseQueryGroupResponse(): QueryGroupResponse { - return { - $type: "akash.deployment.v1beta1.QueryGroupResponse", - group: undefined, - }; -} - -export const QueryGroupResponse = { - $type: "akash.deployment.v1beta1.QueryGroupResponse" as const, - - encode( - message: QueryGroupResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.group !== undefined) { - Group.encode(message.group, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryGroupResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryGroupResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.group = Group.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryGroupResponse { - return { - $type: QueryGroupResponse.$type, - group: isSet(object.group) ? Group.fromJSON(object.group) : undefined, - }; - }, - - toJSON(message: QueryGroupResponse): unknown { - const obj: any = {}; - if (message.group !== undefined) { - obj.group = Group.toJSON(message.group); - } - return obj; - }, - - create(base?: DeepPartial): QueryGroupResponse { - return QueryGroupResponse.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): QueryGroupResponse { - const message = createBaseQueryGroupResponse(); - message.group = - object.group !== undefined && object.group !== null - ? Group.fromPartial(object.group) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryGroupResponse.$type, QueryGroupResponse); - -/** Query defines the gRPC querier service */ -export interface Query { - /** Deployments queries deployments */ - Deployments( - request: QueryDeploymentsRequest, - ): Promise; - /** Deployment queries deployment details */ - Deployment(request: QueryDeploymentRequest): Promise; - /** Group queries group details */ - Group(request: QueryGroupRequest): Promise; -} - -export const QueryServiceName = "akash.deployment.v1beta1.Query"; -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || QueryServiceName; - this.rpc = rpc; - this.Deployments = this.Deployments.bind(this); - this.Deployment = this.Deployment.bind(this); - this.Group = this.Group.bind(this); - } - Deployments( - request: QueryDeploymentsRequest, - ): Promise { - const data = QueryDeploymentsRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Deployments", data); - return promise.then((data) => - QueryDeploymentsResponse.decode(_m0.Reader.create(data)), - ); - } - - Deployment( - request: QueryDeploymentRequest, - ): Promise { - const data = QueryDeploymentRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Deployment", data); - return promise.then((data) => - QueryDeploymentResponse.decode(_m0.Reader.create(data)), - ); - } - - Group(request: QueryGroupRequest): Promise { - const data = QueryGroupRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Group", data); - return promise.then((data) => - QueryGroupResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta2/authz.ts b/ts/src/generated/akash/deployment/v1beta2/authz.ts deleted file mode 100644 index 37285798..00000000 --- a/ts/src/generated/akash/deployment/v1beta2/authz.ts +++ /dev/null @@ -1,134 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** - * DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from - * the granter's account for a deployment. - */ -export interface DepositDeploymentAuthorization { - $type: "akash.deployment.v1beta2.DepositDeploymentAuthorization"; - /** - * SpendLimit is the amount the grantee is authorized to spend from the granter's account for - * the purpose of deployment. 
- */ - spendLimit: Coin | undefined; -} - -function createBaseDepositDeploymentAuthorization(): DepositDeploymentAuthorization { - return { - $type: "akash.deployment.v1beta2.DepositDeploymentAuthorization", - spendLimit: undefined, - }; -} - -export const DepositDeploymentAuthorization = { - $type: "akash.deployment.v1beta2.DepositDeploymentAuthorization" as const, - - encode( - message: DepositDeploymentAuthorization, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.spendLimit !== undefined) { - Coin.encode(message.spendLimit, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): DepositDeploymentAuthorization { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDepositDeploymentAuthorization(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.spendLimit = Coin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DepositDeploymentAuthorization { - return { - $type: DepositDeploymentAuthorization.$type, - spendLimit: isSet(object.spendLimit) - ? Coin.fromJSON(object.spendLimit) - : undefined, - }; - }, - - toJSON(message: DepositDeploymentAuthorization): unknown { - const obj: any = {}; - if (message.spendLimit !== undefined) { - obj.spendLimit = Coin.toJSON(message.spendLimit); - } - return obj; - }, - - create( - base?: DeepPartial, - ): DepositDeploymentAuthorization { - return DepositDeploymentAuthorization.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): DepositDeploymentAuthorization { - const message = createBaseDepositDeploymentAuthorization(); - message.spendLimit = - object.spendLimit !== undefined && object.spendLimit !== null - ? Coin.fromPartial(object.spendLimit) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - DepositDeploymentAuthorization.$type, - DepositDeploymentAuthorization, -); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta2/deployment.ts b/ts/src/generated/akash/deployment/v1beta2/deployment.ts deleted file mode 100644 index c33b3079..00000000 --- a/ts/src/generated/akash/deployment/v1beta2/deployment.ts +++ /dev/null @@ -1,456 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** DeploymentID stores owner and sequence number */ -export interface DeploymentID { - $type: "akash.deployment.v1beta2.DeploymentID"; - owner: string; - dseq: Long; -} - -/** Deployment stores deploymentID, state and version details */ -export interface Deployment { - $type: "akash.deployment.v1beta2.Deployment"; - deploymentId: DeploymentID | undefined; - state: Deployment_State; - version: Uint8Array; - createdAt: Long; -} - -/** State is an enum which refers to state of deployment */ -export enum Deployment_State { - /** invalid - Prefix should start with 0 in enum. So declaring dummy state */ - invalid = 0, - /** active - DeploymentActive denotes state for deployment active */ - active = 1, - /** closed - DeploymentClosed denotes state for deployment closed */ - closed = 2, - UNRECOGNIZED = -1, -} - -export function deployment_StateFromJSON(object: any): Deployment_State { - switch (object) { - case 0: - case "invalid": - return Deployment_State.invalid; - case 1: - case "active": - return Deployment_State.active; - case 2: - case "closed": - return Deployment_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Deployment_State.UNRECOGNIZED; - } -} - -export function deployment_StateToJSON(object: Deployment_State): string { - switch (object) { - case Deployment_State.invalid: - return "invalid"; - case Deployment_State.active: - return "active"; - case Deployment_State.closed: - return "closed"; - case Deployment_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** DeploymentFilters defines filters used to filter deployments */ -export interface DeploymentFilters { - $type: "akash.deployment.v1beta2.DeploymentFilters"; - owner: string; - dseq: Long; - state: string; -} - -function createBaseDeploymentID(): DeploymentID { - return { - $type: "akash.deployment.v1beta2.DeploymentID", - owner: "", - dseq: Long.UZERO, - }; -} - -export const DeploymentID = { - $type: "akash.deployment.v1beta2.DeploymentID" as const, - - encode( - message: DeploymentID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DeploymentID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseDeploymentID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DeploymentID { - return { - $type: DeploymentID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - }; - }, - - toJSON(message: DeploymentID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): DeploymentID { - return DeploymentID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): DeploymentID { - const message = createBaseDeploymentID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - return message; - }, -}; - -messageTypeRegistry.set(DeploymentID.$type, DeploymentID); - -function createBaseDeployment(): Deployment { - return { - $type: "akash.deployment.v1beta2.Deployment", - deploymentId: undefined, - state: 0, - version: new Uint8Array(0), - createdAt: Long.ZERO, - }; -} - -export const Deployment = { - $type: "akash.deployment.v1beta2.Deployment" as const, - - encode( - message: Deployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.deploymentId !== undefined) { - DeploymentID.encode( - message.deploymentId, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.version.length !== 0) { - writer.uint32(26).bytes(message.version); - } - if (!message.createdAt.equals(Long.ZERO)) { - writer.uint32(32).int64(message.createdAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Deployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deploymentId = DeploymentID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.version = reader.bytes(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.createdAt = reader.int64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Deployment { - return { - $type: Deployment.$type, - deploymentId: isSet(object.deploymentId) - ? DeploymentID.fromJSON(object.deploymentId) - : undefined, - state: isSet(object.state) ? deployment_StateFromJSON(object.state) : 0, - version: isSet(object.version) - ? bytesFromBase64(object.version) - : new Uint8Array(0), - createdAt: isSet(object.createdAt) - ? 
Long.fromValue(object.createdAt) - : Long.ZERO, - }; - }, - - toJSON(message: Deployment): unknown { - const obj: any = {}; - if (message.deploymentId !== undefined) { - obj.deploymentId = DeploymentID.toJSON(message.deploymentId); - } - if (message.state !== 0) { - obj.state = deployment_StateToJSON(message.state); - } - if (message.version.length !== 0) { - obj.version = base64FromBytes(message.version); - } - if (!message.createdAt.equals(Long.ZERO)) { - obj.createdAt = (message.createdAt || Long.ZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): Deployment { - return Deployment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Deployment { - const message = createBaseDeployment(); - message.deploymentId = - object.deploymentId !== undefined && object.deploymentId !== null - ? DeploymentID.fromPartial(object.deploymentId) - : undefined; - message.state = object.state ?? 0; - message.version = object.version ?? new Uint8Array(0); - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? Long.fromValue(object.createdAt) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Deployment.$type, Deployment); - -function createBaseDeploymentFilters(): DeploymentFilters { - return { - $type: "akash.deployment.v1beta2.DeploymentFilters", - owner: "", - dseq: Long.UZERO, - state: "", - }; -} - -export const DeploymentFilters = { - $type: "akash.deployment.v1beta2.DeploymentFilters" as const, - - encode( - message: DeploymentFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.state !== "") { - writer.uint32(26).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DeploymentFilters { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDeploymentFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.state = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DeploymentFilters { - return { - $type: DeploymentFilters.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - state: isSet(object.state) ? globalThis.String(object.state) : "", - }; - }, - - toJSON(message: DeploymentFilters): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.state !== "") { - obj.state = message.state; - } - return obj; - }, - - create(base?: DeepPartial): DeploymentFilters { - return DeploymentFilters.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): DeploymentFilters { - const message = createBaseDeploymentFilters(); - message.owner = object.owner ?? 
""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.state = object.state ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(DeploymentFilters.$type, DeploymentFilters); - -function bytesFromBase64(b64: string): Uint8Array { - if ((globalThis as any).Buffer) { - return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); - } else { - const bin = globalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if ((globalThis as any).Buffer) { - return globalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(globalThis.String.fromCharCode(byte)); - }); - return globalThis.btoa(bin.join("")); - } -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta2/deploymentmsg.ts b/ts/src/generated/akash/deployment/v1beta2/deploymentmsg.ts deleted file mode 100644 index 8b7ff67a..00000000 --- a/ts/src/generated/akash/deployment/v1beta2/deploymentmsg.ts +++ /dev/null @@ -1,788 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { DeploymentID } from "./deployment"; -import { GroupSpec } from "./groupspec"; - -/** MsgCreateDeployment defines an SDK message for creating deployment */ -export interface MsgCreateDeployment { - $type: "akash.deployment.v1beta2.MsgCreateDeployment"; - id: DeploymentID | undefined; - groups: GroupSpec[]; - version: Uint8Array; - deposit: Coin | undefined; - /** Depositor pays for the deposit */ - depositor: string; -} - -/** MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. */ -export interface MsgCreateDeploymentResponse { - $type: "akash.deployment.v1beta2.MsgCreateDeploymentResponse"; -} - -/** MsgDepositDeployment deposits more funds into the deposit account */ -export interface MsgDepositDeployment { - $type: "akash.deployment.v1beta2.MsgDepositDeployment"; - id: DeploymentID | undefined; - amount: Coin | undefined; - /** Depositor pays for the deposit */ - depositor: string; -} - -/** MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. */ -export interface MsgDepositDeploymentResponse { - $type: "akash.deployment.v1beta2.MsgDepositDeploymentResponse"; -} - -/** MsgUpdateDeployment defines an SDK message for updating deployment */ -export interface MsgUpdateDeployment { - $type: "akash.deployment.v1beta2.MsgUpdateDeployment"; - id: DeploymentID | undefined; - version: Uint8Array; -} - -/** MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. 
*/ -export interface MsgUpdateDeploymentResponse { - $type: "akash.deployment.v1beta2.MsgUpdateDeploymentResponse"; -} - -/** MsgCloseDeployment defines an SDK message for closing deployment */ -export interface MsgCloseDeployment { - $type: "akash.deployment.v1beta2.MsgCloseDeployment"; - id: DeploymentID | undefined; -} - -/** MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. */ -export interface MsgCloseDeploymentResponse { - $type: "akash.deployment.v1beta2.MsgCloseDeploymentResponse"; -} - -function createBaseMsgCreateDeployment(): MsgCreateDeployment { - return { - $type: "akash.deployment.v1beta2.MsgCreateDeployment", - id: undefined, - groups: [], - version: new Uint8Array(0), - deposit: undefined, - depositor: "", - }; -} - -export const MsgCreateDeployment = { - $type: "akash.deployment.v1beta2.MsgCreateDeployment" as const, - - encode( - message: MsgCreateDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.groups) { - GroupSpec.encode(v!, writer.uint32(18).fork()).ldelim(); - } - if (message.version.length !== 0) { - writer.uint32(26).bytes(message.version); - } - if (message.deposit !== undefined) { - Coin.encode(message.deposit, writer.uint32(34).fork()).ldelim(); - } - if (message.depositor !== "") { - writer.uint32(42).string(message.depositor); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateDeployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = DeploymentID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.groups.push(GroupSpec.decode(reader, reader.uint32())); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.version = reader.bytes(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.deposit = Coin.decode(reader, reader.uint32()); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.depositor = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCreateDeployment { - return { - $type: MsgCreateDeployment.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - groups: globalThis.Array.isArray(object?.groups) - ? object.groups.map((e: any) => GroupSpec.fromJSON(e)) - : [], - version: isSet(object.version) - ? bytesFromBase64(object.version) - : new Uint8Array(0), - deposit: isSet(object.deposit) - ? Coin.fromJSON(object.deposit) - : undefined, - depositor: isSet(object.depositor) - ? 
globalThis.String(object.depositor) - : "", - }; - }, - - toJSON(message: MsgCreateDeployment): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = DeploymentID.toJSON(message.id); - } - if (message.groups?.length) { - obj.groups = message.groups.map((e) => GroupSpec.toJSON(e)); - } - if (message.version.length !== 0) { - obj.version = base64FromBytes(message.version); - } - if (message.deposit !== undefined) { - obj.deposit = Coin.toJSON(message.deposit); - } - if (message.depositor !== "") { - obj.depositor = message.depositor; - } - return obj; - }, - - create(base?: DeepPartial): MsgCreateDeployment { - return MsgCreateDeployment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCreateDeployment { - const message = createBaseMsgCreateDeployment(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - message.groups = object.groups?.map((e) => GroupSpec.fromPartial(e)) || []; - message.version = object.version ?? new Uint8Array(0); - message.deposit = - object.deposit !== undefined && object.deposit !== null - ? Coin.fromPartial(object.deposit) - : undefined; - message.depositor = object.depositor ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateDeployment.$type, MsgCreateDeployment); - -function createBaseMsgCreateDeploymentResponse(): MsgCreateDeploymentResponse { - return { $type: "akash.deployment.v1beta2.MsgCreateDeploymentResponse" }; -} - -export const MsgCreateDeploymentResponse = { - $type: "akash.deployment.v1beta2.MsgCreateDeploymentResponse" as const, - - encode( - _: MsgCreateDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateDeploymentResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCreateDeploymentResponse { - return { $type: MsgCreateDeploymentResponse.$type }; - }, - - toJSON(_: MsgCreateDeploymentResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgCreateDeploymentResponse { - return MsgCreateDeploymentResponse.fromPartial(base ?? 
{}); - }, - fromPartial( - _: DeepPartial, - ): MsgCreateDeploymentResponse { - const message = createBaseMsgCreateDeploymentResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgCreateDeploymentResponse.$type, - MsgCreateDeploymentResponse, -); - -function createBaseMsgDepositDeployment(): MsgDepositDeployment { - return { - $type: "akash.deployment.v1beta2.MsgDepositDeployment", - id: undefined, - amount: undefined, - depositor: "", - }; -} - -export const MsgDepositDeployment = { - $type: "akash.deployment.v1beta2.MsgDepositDeployment" as const, - - encode( - message: MsgDepositDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - if (message.amount !== undefined) { - Coin.encode(message.amount, writer.uint32(18).fork()).ldelim(); - } - if (message.depositor !== "") { - writer.uint32(26).string(message.depositor); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDepositDeployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDepositDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = DeploymentID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.amount = Coin.decode(reader, reader.uint32()); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.depositor = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgDepositDeployment { - return { - $type: MsgDepositDeployment.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - amount: isSet(object.amount) ? Coin.fromJSON(object.amount) : undefined, - depositor: isSet(object.depositor) - ? globalThis.String(object.depositor) - : "", - }; - }, - - toJSON(message: MsgDepositDeployment): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = DeploymentID.toJSON(message.id); - } - if (message.amount !== undefined) { - obj.amount = Coin.toJSON(message.amount); - } - if (message.depositor !== "") { - obj.depositor = message.depositor; - } - return obj; - }, - - create(base?: DeepPartial): MsgDepositDeployment { - return MsgDepositDeployment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgDepositDeployment { - const message = createBaseMsgDepositDeployment(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - message.amount = - object.amount !== undefined && object.amount !== null - ? Coin.fromPartial(object.amount) - : undefined; - message.depositor = object.depositor ?? 
""; - return message; - }, -}; - -messageTypeRegistry.set(MsgDepositDeployment.$type, MsgDepositDeployment); - -function createBaseMsgDepositDeploymentResponse(): MsgDepositDeploymentResponse { - return { $type: "akash.deployment.v1beta2.MsgDepositDeploymentResponse" }; -} - -export const MsgDepositDeploymentResponse = { - $type: "akash.deployment.v1beta2.MsgDepositDeploymentResponse" as const, - - encode( - _: MsgDepositDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDepositDeploymentResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDepositDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgDepositDeploymentResponse { - return { $type: MsgDepositDeploymentResponse.$type }; - }, - - toJSON(_: MsgDepositDeploymentResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgDepositDeploymentResponse { - return MsgDepositDeploymentResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgDepositDeploymentResponse { - const message = createBaseMsgDepositDeploymentResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgDepositDeploymentResponse.$type, - MsgDepositDeploymentResponse, -); - -function createBaseMsgUpdateDeployment(): MsgUpdateDeployment { - return { - $type: "akash.deployment.v1beta2.MsgUpdateDeployment", - id: undefined, - version: new Uint8Array(0), - }; -} - -export const MsgUpdateDeployment = { - $type: "akash.deployment.v1beta2.MsgUpdateDeployment" as const, - - encode( - message: MsgUpdateDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - if (message.version.length !== 0) { - writer.uint32(26).bytes(message.version); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgUpdateDeployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgUpdateDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = DeploymentID.decode(reader, reader.uint32()); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.version = reader.bytes(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgUpdateDeployment { - return { - $type: MsgUpdateDeployment.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - version: isSet(object.version) - ? 
bytesFromBase64(object.version) - : new Uint8Array(0), - }; - }, - - toJSON(message: MsgUpdateDeployment): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = DeploymentID.toJSON(message.id); - } - if (message.version.length !== 0) { - obj.version = base64FromBytes(message.version); - } - return obj; - }, - - create(base?: DeepPartial): MsgUpdateDeployment { - return MsgUpdateDeployment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgUpdateDeployment { - const message = createBaseMsgUpdateDeployment(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - message.version = object.version ?? new Uint8Array(0); - return message; - }, -}; - -messageTypeRegistry.set(MsgUpdateDeployment.$type, MsgUpdateDeployment); - -function createBaseMsgUpdateDeploymentResponse(): MsgUpdateDeploymentResponse { - return { $type: "akash.deployment.v1beta2.MsgUpdateDeploymentResponse" }; -} - -export const MsgUpdateDeploymentResponse = { - $type: "akash.deployment.v1beta2.MsgUpdateDeploymentResponse" as const, - - encode( - _: MsgUpdateDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgUpdateDeploymentResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgUpdateDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgUpdateDeploymentResponse { - return { $type: MsgUpdateDeploymentResponse.$type }; - }, - - toJSON(_: MsgUpdateDeploymentResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgUpdateDeploymentResponse { - return MsgUpdateDeploymentResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgUpdateDeploymentResponse { - const message = createBaseMsgUpdateDeploymentResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgUpdateDeploymentResponse.$type, - MsgUpdateDeploymentResponse, -); - -function createBaseMsgCloseDeployment(): MsgCloseDeployment { - return { - $type: "akash.deployment.v1beta2.MsgCloseDeployment", - id: undefined, - }; -} - -export const MsgCloseDeployment = { - $type: "akash.deployment.v1beta2.MsgCloseDeployment" as const, - - encode( - message: MsgCloseDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseDeployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCloseDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = DeploymentID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCloseDeployment { - return { - $type: MsgCloseDeployment.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgCloseDeployment): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = DeploymentID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): MsgCloseDeployment { - return MsgCloseDeployment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCloseDeployment { - const message = createBaseMsgCloseDeployment(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseDeployment.$type, MsgCloseDeployment); - -function createBaseMsgCloseDeploymentResponse(): MsgCloseDeploymentResponse { - return { $type: "akash.deployment.v1beta2.MsgCloseDeploymentResponse" }; -} - -export const MsgCloseDeploymentResponse = { - $type: "akash.deployment.v1beta2.MsgCloseDeploymentResponse" as const, - - encode( - _: MsgCloseDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCloseDeploymentResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCloseDeploymentResponse { - return { $type: MsgCloseDeploymentResponse.$type }; - }, - - toJSON(_: MsgCloseDeploymentResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgCloseDeploymentResponse { - return MsgCloseDeploymentResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgCloseDeploymentResponse { - const message = createBaseMsgCloseDeploymentResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgCloseDeploymentResponse.$type, - MsgCloseDeploymentResponse, -); - -function bytesFromBase64(b64: string): Uint8Array { - if ((globalThis as any).Buffer) { - return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); - } else { - const bin = globalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if ((globalThis as any).Buffer) { - return globalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(globalThis.String.fromCharCode(byte)); - }); - return globalThis.btoa(bin.join("")); - } -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? 
string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta2/genesis.ts b/ts/src/generated/akash/deployment/v1beta2/genesis.ts deleted file mode 100644 index 2d59ae25..00000000 --- a/ts/src/generated/akash/deployment/v1beta2/genesis.ts +++ /dev/null @@ -1,242 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Deployment } from "./deployment"; -import { Group } from "./group"; -import { Params } from "./params"; - -/** GenesisDeployment defines the basic genesis state used by deployment module */ -export interface GenesisDeployment { - $type: "akash.deployment.v1beta2.GenesisDeployment"; - deployment: Deployment | undefined; - groups: Group[]; -} - -/** GenesisState stores slice of genesis deployment instance */ -export interface GenesisState { - $type: "akash.deployment.v1beta2.GenesisState"; - deployments: GenesisDeployment[]; - params: Params | undefined; -} - -function createBaseGenesisDeployment(): GenesisDeployment { - return { - $type: "akash.deployment.v1beta2.GenesisDeployment", - deployment: undefined, - groups: [], - }; -} - -export const GenesisDeployment = { - $type: "akash.deployment.v1beta2.GenesisDeployment" as const, - - encode( - message: GenesisDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.deployment !== undefined) { - Deployment.encode(message.deployment, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.groups) { - Group.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisDeployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deployment = Deployment.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.groups.push(Group.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisDeployment { - return { - $type: GenesisDeployment.$type, - deployment: isSet(object.deployment) - ? Deployment.fromJSON(object.deployment) - : undefined, - groups: globalThis.Array.isArray(object?.groups) - ? object.groups.map((e: any) => Group.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GenesisDeployment): unknown { - const obj: any = {}; - if (message.deployment !== undefined) { - obj.deployment = Deployment.toJSON(message.deployment); - } - if (message.groups?.length) { - obj.groups = message.groups.map((e) => Group.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): GenesisDeployment { - return GenesisDeployment.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): GenesisDeployment { - const message = createBaseGenesisDeployment(); - message.deployment = - object.deployment !== undefined && object.deployment !== null - ? Deployment.fromPartial(object.deployment) - : undefined; - message.groups = object.groups?.map((e) => Group.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GenesisDeployment.$type, GenesisDeployment); - -function createBaseGenesisState(): GenesisState { - return { - $type: "akash.deployment.v1beta2.GenesisState", - deployments: [], - params: undefined, - }; -} - -export const GenesisState = { - $type: "akash.deployment.v1beta2.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.deployments) { - GenesisDeployment.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.params !== undefined) { - Params.encode(message.params, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deployments.push( - GenesisDeployment.decode(reader, reader.uint32()), - ); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.params = Params.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - deployments: globalThis.Array.isArray(object?.deployments) - ? object.deployments.map((e: any) => GenesisDeployment.fromJSON(e)) - : [], - params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.deployments?.length) { - obj.deployments = message.deployments.map((e) => - GenesisDeployment.toJSON(e), - ); - } - if (message.params !== undefined) { - obj.params = Params.toJSON(message.params); - } - return obj; - }, - - create(base?: DeepPartial): GenesisState { - return GenesisState.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisState { - const message = createBaseGenesisState(); - message.deployments = - object.deployments?.map((e) => GenesisDeployment.fromPartial(e)) || []; - message.params = - object.params !== undefined && object.params !== null - ? Params.fromPartial(object.params) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta2/group.ts b/ts/src/generated/akash/deployment/v1beta2/group.ts deleted file mode 100644 index c03ad700..00000000 --- a/ts/src/generated/akash/deployment/v1beta2/group.ts +++ /dev/null @@ -1,233 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { GroupID } from "./groupid"; -import { GroupSpec } from "./groupspec"; - -/** Group stores group id, state and specifications of group */ -export interface Group { - $type: "akash.deployment.v1beta2.Group"; - groupId: GroupID | undefined; - state: Group_State; - groupSpec: GroupSpec | undefined; - createdAt: Long; -} - -/** State is an enum which refers to state of group */ -export enum Group_State { - /** invalid - Prefix should start with 0 in enum. So declaring dummy state */ - invalid = 0, - /** open - GroupOpen denotes state for group open */ - open = 1, - /** paused - GroupOrdered denotes state for group ordered */ - paused = 2, - /** insufficient_funds - GroupInsufficientFunds denotes state for group insufficient_funds */ - insufficient_funds = 3, - /** closed - GroupClosed denotes state for group closed */ - closed = 4, - UNRECOGNIZED = -1, -} - -export function group_StateFromJSON(object: any): Group_State { - switch (object) { - case 0: - case "invalid": - return Group_State.invalid; - case 1: - case "open": - return Group_State.open; - case 2: - case "paused": - return Group_State.paused; - case 3: - case "insufficient_funds": - return Group_State.insufficient_funds; - case 4: - case "closed": - return Group_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Group_State.UNRECOGNIZED; - } -} - -export function group_StateToJSON(object: Group_State): string { - switch (object) { - case Group_State.invalid: - return "invalid"; - case Group_State.open: - return "open"; - case Group_State.paused: - return "paused"; - case Group_State.insufficient_funds: - return "insufficient_funds"; - case Group_State.closed: - return "closed"; - case Group_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -function createBaseGroup(): Group { - return { - $type: "akash.deployment.v1beta2.Group", - groupId: undefined, - state: 0, - groupSpec: undefined, - createdAt: Long.ZERO, - }; -} - -export const Group = { - $type: "akash.deployment.v1beta2.Group" as const, - - encode(message: Group, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.groupId !== undefined) { - GroupID.encode(message.groupId, writer.uint32(10).fork()).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.groupSpec !== undefined) { - GroupSpec.encode(message.groupSpec, writer.uint32(26).fork()).ldelim(); - } - if (!message.createdAt.equals(Long.ZERO)) { - writer.uint32(32).int64(message.createdAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Group { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.groupId = GroupID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.groupSpec = GroupSpec.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.createdAt = reader.int64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Group { - return { - $type: Group.$type, - groupId: isSet(object.groupId) - ? GroupID.fromJSON(object.groupId) - : undefined, - state: isSet(object.state) ? group_StateFromJSON(object.state) : 0, - groupSpec: isSet(object.groupSpec) - ? GroupSpec.fromJSON(object.groupSpec) - : undefined, - createdAt: isSet(object.createdAt) - ? Long.fromValue(object.createdAt) - : Long.ZERO, - }; - }, - - toJSON(message: Group): unknown { - const obj: any = {}; - if (message.groupId !== undefined) { - obj.groupId = GroupID.toJSON(message.groupId); - } - if (message.state !== 0) { - obj.state = group_StateToJSON(message.state); - } - if (message.groupSpec !== undefined) { - obj.groupSpec = GroupSpec.toJSON(message.groupSpec); - } - if (!message.createdAt.equals(Long.ZERO)) { - obj.createdAt = (message.createdAt || Long.ZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): Group { - return Group.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Group { - const message = createBaseGroup(); - message.groupId = - object.groupId !== undefined && object.groupId !== null - ? GroupID.fromPartial(object.groupId) - : undefined; - message.state = object.state ?? 0; - message.groupSpec = - object.groupSpec !== undefined && object.groupSpec !== null - ? GroupSpec.fromPartial(object.groupSpec) - : undefined; - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? Long.fromValue(object.createdAt) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Group.$type, Group); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta2/groupid.ts b/ts/src/generated/akash/deployment/v1beta2/groupid.ts deleted file mode 100644 index 90ef6276..00000000 --- a/ts/src/generated/akash/deployment/v1beta2/groupid.ts +++ /dev/null @@ -1,148 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** GroupID stores owner, deployment sequence number and group sequence number */ -export interface GroupID { - $type: "akash.deployment.v1beta2.GroupID"; - owner: string; - dseq: Long; - gseq: number; -} - -function createBaseGroupID(): GroupID { - return { - $type: "akash.deployment.v1beta2.GroupID", - owner: "", - dseq: Long.UZERO, - gseq: 0, - }; -} - -export const GroupID = { - $type: "akash.deployment.v1beta2.GroupID" as const, - - encode( - message: GroupID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GroupID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGroupID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GroupID { - return { - $type: GroupID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - }; - }, - - toJSON(message: GroupID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - return obj; - }, - - create(base?: DeepPartial): GroupID { - return GroupID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GroupID { - const message = createBaseGroupID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(GroupID.$type, GroupID); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta2/groupmsg.ts b/ts/src/generated/akash/deployment/v1beta2/groupmsg.ts deleted file mode 100644 index 998ac0f1..00000000 --- a/ts/src/generated/akash/deployment/v1beta2/groupmsg.ts +++ /dev/null @@ -1,443 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { GroupID } from "./groupid"; - -/** MsgCloseGroup defines SDK message to close a single Group within a Deployment. */ -export interface MsgCloseGroup { - $type: "akash.deployment.v1beta2.MsgCloseGroup"; - id: GroupID | undefined; -} - -/** MsgCloseGroupResponse defines the Msg/CloseGroup response type. */ -export interface MsgCloseGroupResponse { - $type: "akash.deployment.v1beta2.MsgCloseGroupResponse"; -} - -/** MsgPauseGroup defines SDK message to close a single Group within a Deployment. */ -export interface MsgPauseGroup { - $type: "akash.deployment.v1beta2.MsgPauseGroup"; - id: GroupID | undefined; -} - -/** MsgPauseGroupResponse defines the Msg/PauseGroup response type. */ -export interface MsgPauseGroupResponse { - $type: "akash.deployment.v1beta2.MsgPauseGroupResponse"; -} - -/** MsgStartGroup defines SDK message to close a single Group within a Deployment. */ -export interface MsgStartGroup { - $type: "akash.deployment.v1beta2.MsgStartGroup"; - id: GroupID | undefined; -} - -/** MsgStartGroupResponse defines the Msg/StartGroup response type. */ -export interface MsgStartGroupResponse { - $type: "akash.deployment.v1beta2.MsgStartGroupResponse"; -} - -function createBaseMsgCloseGroup(): MsgCloseGroup { - return { $type: "akash.deployment.v1beta2.MsgCloseGroup", id: undefined }; -} - -export const MsgCloseGroup = { - $type: "akash.deployment.v1beta2.MsgCloseGroup" as const, - - encode( - message: MsgCloseGroup, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseGroup { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = GroupID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCloseGroup { - return { - $type: MsgCloseGroup.$type, - id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgCloseGroup): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = GroupID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): MsgCloseGroup { - return MsgCloseGroup.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCloseGroup { - const message = createBaseMsgCloseGroup(); - message.id = - object.id !== undefined && object.id !== null - ? 
GroupID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseGroup.$type, MsgCloseGroup); - -function createBaseMsgCloseGroupResponse(): MsgCloseGroupResponse { - return { $type: "akash.deployment.v1beta2.MsgCloseGroupResponse" }; -} - -export const MsgCloseGroupResponse = { - $type: "akash.deployment.v1beta2.MsgCloseGroupResponse" as const, - - encode( - _: MsgCloseGroupResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCloseGroupResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseGroupResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCloseGroupResponse { - return { $type: MsgCloseGroupResponse.$type }; - }, - - toJSON(_: MsgCloseGroupResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgCloseGroupResponse { - return MsgCloseGroupResponse.fromPartial(base ?? {}); - }, - fromPartial(_: DeepPartial): MsgCloseGroupResponse { - const message = createBaseMsgCloseGroupResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseGroupResponse.$type, MsgCloseGroupResponse); - -function createBaseMsgPauseGroup(): MsgPauseGroup { - return { $type: "akash.deployment.v1beta2.MsgPauseGroup", id: undefined }; -} - -export const MsgPauseGroup = { - $type: "akash.deployment.v1beta2.MsgPauseGroup" as const, - - encode( - message: MsgPauseGroup, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgPauseGroup { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgPauseGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = GroupID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgPauseGroup { - return { - $type: MsgPauseGroup.$type, - id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgPauseGroup): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = GroupID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): MsgPauseGroup { - return MsgPauseGroup.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgPauseGroup { - const message = createBaseMsgPauseGroup(); - message.id = - object.id !== undefined && object.id !== null - ? 
GroupID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgPauseGroup.$type, MsgPauseGroup); - -function createBaseMsgPauseGroupResponse(): MsgPauseGroupResponse { - return { $type: "akash.deployment.v1beta2.MsgPauseGroupResponse" }; -} - -export const MsgPauseGroupResponse = { - $type: "akash.deployment.v1beta2.MsgPauseGroupResponse" as const, - - encode( - _: MsgPauseGroupResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgPauseGroupResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgPauseGroupResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgPauseGroupResponse { - return { $type: MsgPauseGroupResponse.$type }; - }, - - toJSON(_: MsgPauseGroupResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgPauseGroupResponse { - return MsgPauseGroupResponse.fromPartial(base ?? {}); - }, - fromPartial(_: DeepPartial): MsgPauseGroupResponse { - const message = createBaseMsgPauseGroupResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgPauseGroupResponse.$type, MsgPauseGroupResponse); - -function createBaseMsgStartGroup(): MsgStartGroup { - return { $type: "akash.deployment.v1beta2.MsgStartGroup", id: undefined }; -} - -export const MsgStartGroup = { - $type: "akash.deployment.v1beta2.MsgStartGroup" as const, - - encode( - message: MsgStartGroup, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgStartGroup { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgStartGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = GroupID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgStartGroup { - return { - $type: MsgStartGroup.$type, - id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgStartGroup): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = GroupID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): MsgStartGroup { - return MsgStartGroup.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgStartGroup { - const message = createBaseMsgStartGroup(); - message.id = - object.id !== undefined && object.id !== null - ? 
GroupID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgStartGroup.$type, MsgStartGroup); - -function createBaseMsgStartGroupResponse(): MsgStartGroupResponse { - return { $type: "akash.deployment.v1beta2.MsgStartGroupResponse" }; -} - -export const MsgStartGroupResponse = { - $type: "akash.deployment.v1beta2.MsgStartGroupResponse" as const, - - encode( - _: MsgStartGroupResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgStartGroupResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgStartGroupResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgStartGroupResponse { - return { $type: MsgStartGroupResponse.$type }; - }, - - toJSON(_: MsgStartGroupResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgStartGroupResponse { - return MsgStartGroupResponse.fromPartial(base ?? {}); - }, - fromPartial(_: DeepPartial): MsgStartGroupResponse { - const message = createBaseMsgStartGroupResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgStartGroupResponse.$type, MsgStartGroupResponse); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta2/groupspec.ts b/ts/src/generated/akash/deployment/v1beta2/groupspec.ts deleted file mode 100644 index 500efa9b..00000000 --- a/ts/src/generated/akash/deployment/v1beta2/groupspec.ts +++ /dev/null @@ -1,161 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { PlacementRequirements } from "../../base/v1beta2/attribute"; -import { Resource } from "./resource"; - -/** GroupSpec stores group specifications */ -export interface GroupSpec { - $type: "akash.deployment.v1beta2.GroupSpec"; - name: string; - requirements: PlacementRequirements | undefined; - resources: Resource[]; -} - -function createBaseGroupSpec(): GroupSpec { - return { - $type: "akash.deployment.v1beta2.GroupSpec", - name: "", - requirements: undefined, - resources: [], - }; -} - -export const GroupSpec = { - $type: "akash.deployment.v1beta2.GroupSpec" as const, - - encode( - message: GroupSpec, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.requirements !== undefined) { - PlacementRequirements.encode( - message.requirements, - writer.uint32(18).fork(), - ).ldelim(); - } - for (const v of message.resources) { - Resource.encode(v!, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GroupSpec { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGroupSpec(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.requirements = PlacementRequirements.decode( - reader, - reader.uint32(), - ); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.resources.push(Resource.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GroupSpec { - return { - $type: GroupSpec.$type, - name: isSet(object.name) ? globalThis.String(object.name) : "", - requirements: isSet(object.requirements) - ? PlacementRequirements.fromJSON(object.requirements) - : undefined, - resources: globalThis.Array.isArray(object?.resources) - ? object.resources.map((e: any) => Resource.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GroupSpec): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.requirements !== undefined) { - obj.requirements = PlacementRequirements.toJSON(message.requirements); - } - if (message.resources?.length) { - obj.resources = message.resources.map((e) => Resource.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): GroupSpec { - return GroupSpec.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GroupSpec { - const message = createBaseGroupSpec(); - message.name = object.name ?? 
""; - message.requirements = - object.requirements !== undefined && object.requirements !== null - ? PlacementRequirements.fromPartial(object.requirements) - : undefined; - message.resources = - object.resources?.map((e) => Resource.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GroupSpec.$type, GroupSpec); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta2/params.ts b/ts/src/generated/akash/deployment/v1beta2/params.ts deleted file mode 100644 index d9a2cece..00000000 --- a/ts/src/generated/akash/deployment/v1beta2/params.ts +++ /dev/null @@ -1,121 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** Params defines the parameters for the x/deployment package */ -export interface Params { - $type: "akash.deployment.v1beta2.Params"; - deploymentMinDeposit: Coin | undefined; -} - -function createBaseParams(): Params { - return { - $type: "akash.deployment.v1beta2.Params", - deploymentMinDeposit: undefined, - }; -} - -export const Params = { - $type: "akash.deployment.v1beta2.Params" as const, - - encode( - message: Params, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.deploymentMinDeposit !== undefined) { - Coin.encode( - message.deploymentMinDeposit, - writer.uint32(10).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Params { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseParams(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deploymentMinDeposit = Coin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Params { - return { - $type: Params.$type, - deploymentMinDeposit: isSet(object.deploymentMinDeposit) - ? Coin.fromJSON(object.deploymentMinDeposit) - : undefined, - }; - }, - - toJSON(message: Params): unknown { - const obj: any = {}; - if (message.deploymentMinDeposit !== undefined) { - obj.deploymentMinDeposit = Coin.toJSON(message.deploymentMinDeposit); - } - return obj; - }, - - create(base?: DeepPartial): Params { - return Params.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Params { - const message = createBaseParams(); - message.deploymentMinDeposit = - object.deploymentMinDeposit !== undefined && - object.deploymentMinDeposit !== null - ? 
Coin.fromPartial(object.deploymentMinDeposit) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(Params.$type, Params); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta2/query.ts b/ts/src/generated/akash/deployment/v1beta2/query.ts deleted file mode 100644 index 252b5dc3..00000000 --- a/ts/src/generated/akash/deployment/v1beta2/query.ts +++ /dev/null @@ -1,706 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Account } from "../../escrow/v1beta2/types"; -import { Deployment, DeploymentFilters, DeploymentID } from "./deployment"; -import { Group } from "./group"; -import { GroupID } from "./groupid"; - -/** QueryDeploymentsRequest is request type for the Query/Deployments RPC method */ -export interface QueryDeploymentsRequest { - $type: "akash.deployment.v1beta2.QueryDeploymentsRequest"; - filters: DeploymentFilters | undefined; - pagination: PageRequest | undefined; -} - -/** QueryDeploymentsResponse is response type for the Query/Deployments RPC method */ -export interface QueryDeploymentsResponse { - $type: "akash.deployment.v1beta2.QueryDeploymentsResponse"; - deployments: QueryDeploymentResponse[]; - pagination: PageResponse | undefined; -} - -/** QueryDeploymentRequest is request type for the Query/Deployment RPC method */ -export interface QueryDeploymentRequest { - $type: "akash.deployment.v1beta2.QueryDeploymentRequest"; - id: DeploymentID | undefined; -} - -/** QueryDeploymentResponse is response type for the Query/Deployment RPC method */ -export interface QueryDeploymentResponse { - $type: "akash.deployment.v1beta2.QueryDeploymentResponse"; - deployment: Deployment | undefined; - groups: Group[]; - escrowAccount: Account | undefined; -} - -/** QueryGroupRequest is request type for the Query/Group RPC method */ -export interface QueryGroupRequest { - $type: "akash.deployment.v1beta2.QueryGroupRequest"; - id: GroupID | undefined; -} - -/** QueryGroupResponse is response type for the Query/Group RPC method */ -export interface QueryGroupResponse { - $type: "akash.deployment.v1beta2.QueryGroupResponse"; - group: Group | undefined; -} - -function createBaseQueryDeploymentsRequest(): QueryDeploymentsRequest { - return { - $type: "akash.deployment.v1beta2.QueryDeploymentsRequest", - filters: undefined, - pagination: undefined, - }; -} - -export const QueryDeploymentsRequest = { - $type: "akash.deployment.v1beta2.QueryDeploymentsRequest" as const, - - encode( - message: QueryDeploymentsRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.filters !== undefined) { - DeploymentFilters.encode( - message.filters, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; 
- }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryDeploymentsRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryDeploymentsRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.filters = DeploymentFilters.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryDeploymentsRequest { - return { - $type: QueryDeploymentsRequest.$type, - filters: isSet(object.filters) - ? DeploymentFilters.fromJSON(object.filters) - : undefined, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryDeploymentsRequest): unknown { - const obj: any = {}; - if (message.filters !== undefined) { - obj.filters = DeploymentFilters.toJSON(message.filters); - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryDeploymentsRequest { - return QueryDeploymentsRequest.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryDeploymentsRequest { - const message = createBaseQueryDeploymentsRequest(); - message.filters = - object.filters !== undefined && object.filters !== null - ? DeploymentFilters.fromPartial(object.filters) - : undefined; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryDeploymentsRequest.$type, QueryDeploymentsRequest); - -function createBaseQueryDeploymentsResponse(): QueryDeploymentsResponse { - return { - $type: "akash.deployment.v1beta2.QueryDeploymentsResponse", - deployments: [], - pagination: undefined, - }; -} - -export const QueryDeploymentsResponse = { - $type: "akash.deployment.v1beta2.QueryDeploymentsResponse" as const, - - encode( - message: QueryDeploymentsResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.deployments) { - QueryDeploymentResponse.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryDeploymentsResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryDeploymentsResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deployments.push( - QueryDeploymentResponse.decode(reader, reader.uint32()), - ); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryDeploymentsResponse { - return { - $type: QueryDeploymentsResponse.$type, - deployments: globalThis.Array.isArray(object?.deployments) - ? object.deployments.map((e: any) => - QueryDeploymentResponse.fromJSON(e), - ) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryDeploymentsResponse): unknown { - const obj: any = {}; - if (message.deployments?.length) { - obj.deployments = message.deployments.map((e) => - QueryDeploymentResponse.toJSON(e), - ); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create( - base?: DeepPartial, - ): QueryDeploymentsResponse { - return QueryDeploymentsResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryDeploymentsResponse { - const message = createBaseQueryDeploymentsResponse(); - message.deployments = - object.deployments?.map((e) => QueryDeploymentResponse.fromPartial(e)) || - []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - QueryDeploymentsResponse.$type, - QueryDeploymentsResponse, -); - -function createBaseQueryDeploymentRequest(): QueryDeploymentRequest { - return { - $type: "akash.deployment.v1beta2.QueryDeploymentRequest", - id: undefined, - }; -} - -export const QueryDeploymentRequest = { - $type: "akash.deployment.v1beta2.QueryDeploymentRequest" as const, - - encode( - message: QueryDeploymentRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryDeploymentRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryDeploymentRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = DeploymentID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryDeploymentRequest { - return { - $type: QueryDeploymentRequest.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryDeploymentRequest): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = DeploymentID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): QueryDeploymentRequest { - return QueryDeploymentRequest.fromPartial(base ?? 
{}); - }, - fromPartial( - object: DeepPartial, - ): QueryDeploymentRequest { - const message = createBaseQueryDeploymentRequest(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryDeploymentRequest.$type, QueryDeploymentRequest); - -function createBaseQueryDeploymentResponse(): QueryDeploymentResponse { - return { - $type: "akash.deployment.v1beta2.QueryDeploymentResponse", - deployment: undefined, - groups: [], - escrowAccount: undefined, - }; -} - -export const QueryDeploymentResponse = { - $type: "akash.deployment.v1beta2.QueryDeploymentResponse" as const, - - encode( - message: QueryDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.deployment !== undefined) { - Deployment.encode(message.deployment, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.groups) { - Group.encode(v!, writer.uint32(18).fork()).ldelim(); - } - if (message.escrowAccount !== undefined) { - Account.encode(message.escrowAccount, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryDeploymentResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deployment = Deployment.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.groups.push(Group.decode(reader, reader.uint32())); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.escrowAccount = Account.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryDeploymentResponse { - return { - $type: QueryDeploymentResponse.$type, - deployment: isSet(object.deployment) - ? Deployment.fromJSON(object.deployment) - : undefined, - groups: globalThis.Array.isArray(object?.groups) - ? object.groups.map((e: any) => Group.fromJSON(e)) - : [], - escrowAccount: isSet(object.escrowAccount) - ? Account.fromJSON(object.escrowAccount) - : undefined, - }; - }, - - toJSON(message: QueryDeploymentResponse): unknown { - const obj: any = {}; - if (message.deployment !== undefined) { - obj.deployment = Deployment.toJSON(message.deployment); - } - if (message.groups?.length) { - obj.groups = message.groups.map((e) => Group.toJSON(e)); - } - if (message.escrowAccount !== undefined) { - obj.escrowAccount = Account.toJSON(message.escrowAccount); - } - return obj; - }, - - create(base?: DeepPartial): QueryDeploymentResponse { - return QueryDeploymentResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryDeploymentResponse { - const message = createBaseQueryDeploymentResponse(); - message.deployment = - object.deployment !== undefined && object.deployment !== null - ? Deployment.fromPartial(object.deployment) - : undefined; - message.groups = object.groups?.map((e) => Group.fromPartial(e)) || []; - message.escrowAccount = - object.escrowAccount !== undefined && object.escrowAccount !== null - ? 
Account.fromPartial(object.escrowAccount) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryDeploymentResponse.$type, QueryDeploymentResponse); - -function createBaseQueryGroupRequest(): QueryGroupRequest { - return { $type: "akash.deployment.v1beta2.QueryGroupRequest", id: undefined }; -} - -export const QueryGroupRequest = { - $type: "akash.deployment.v1beta2.QueryGroupRequest" as const, - - encode( - message: QueryGroupRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryGroupRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryGroupRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = GroupID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryGroupRequest { - return { - $type: QueryGroupRequest.$type, - id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryGroupRequest): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = GroupID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): QueryGroupRequest { - return QueryGroupRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryGroupRequest { - const message = createBaseQueryGroupRequest(); - message.id = - object.id !== undefined && object.id !== null - ? GroupID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryGroupRequest.$type, QueryGroupRequest); - -function createBaseQueryGroupResponse(): QueryGroupResponse { - return { - $type: "akash.deployment.v1beta2.QueryGroupResponse", - group: undefined, - }; -} - -export const QueryGroupResponse = { - $type: "akash.deployment.v1beta2.QueryGroupResponse" as const, - - encode( - message: QueryGroupResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.group !== undefined) { - Group.encode(message.group, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryGroupResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryGroupResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.group = Group.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryGroupResponse { - return { - $type: QueryGroupResponse.$type, - group: isSet(object.group) ? Group.fromJSON(object.group) : undefined, - }; - }, - - toJSON(message: QueryGroupResponse): unknown { - const obj: any = {}; - if (message.group !== undefined) { - obj.group = Group.toJSON(message.group); - } - return obj; - }, - - create(base?: DeepPartial): QueryGroupResponse { - return QueryGroupResponse.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): QueryGroupResponse { - const message = createBaseQueryGroupResponse(); - message.group = - object.group !== undefined && object.group !== null - ? Group.fromPartial(object.group) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryGroupResponse.$type, QueryGroupResponse); - -/** Query defines the gRPC querier service */ -export interface Query { - /** Deployments queries deployments */ - Deployments( - request: QueryDeploymentsRequest, - ): Promise; - /** Deployment queries deployment details */ - Deployment(request: QueryDeploymentRequest): Promise; - /** Group queries group details */ - Group(request: QueryGroupRequest): Promise; -} - -export const QueryServiceName = "akash.deployment.v1beta2.Query"; -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || QueryServiceName; - this.rpc = rpc; - this.Deployments = this.Deployments.bind(this); - this.Deployment = this.Deployment.bind(this); - this.Group = this.Group.bind(this); - } - Deployments( - request: QueryDeploymentsRequest, - ): Promise { - const data = QueryDeploymentsRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Deployments", data); - return promise.then((data) => - QueryDeploymentsResponse.decode(_m0.Reader.create(data)), - ); - } - - Deployment( - request: QueryDeploymentRequest, - ): Promise { - const data = QueryDeploymentRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Deployment", data); - return promise.then((data) => - QueryDeploymentResponse.decode(_m0.Reader.create(data)), - ); - } - - Group(request: QueryGroupRequest): Promise { - const data = QueryGroupRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Group", data); - return promise.then((data) => - QueryGroupResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta2/resource.ts b/ts/src/generated/akash/deployment/v1beta2/resource.ts deleted file mode 100644 index 48d733e3..00000000 --- a/ts/src/generated/akash/deployment/v1beta2/resource.ts +++ /dev/null @@ -1,158 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { DecCoin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { ResourceUnits } from "../../base/v1beta2/resourceunits"; - -/** Resource stores unit, total count and price of resource */ -export interface Resource { - $type: "akash.deployment.v1beta2.Resource"; - resources: ResourceUnits | undefined; - count: number; - price: DecCoin | undefined; -} - -function createBaseResource(): Resource { - return { - $type: "akash.deployment.v1beta2.Resource", - resources: undefined, - count: 0, - price: undefined, - }; -} - -export const Resource = { - $type: "akash.deployment.v1beta2.Resource" as const, - - encode( - message: Resource, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.resources !== undefined) { - ResourceUnits.encode( - message.resources, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.count !== 0) { - writer.uint32(16).uint32(message.count); - } - if (message.price !== undefined) { - DecCoin.encode(message.price, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Resource { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseResource(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.resources = ResourceUnits.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.count = reader.uint32(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.price = DecCoin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Resource { - return { - $type: Resource.$type, - resources: isSet(object.resources) - ? ResourceUnits.fromJSON(object.resources) - : undefined, - count: isSet(object.count) ? globalThis.Number(object.count) : 0, - price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, - }; - }, - - toJSON(message: Resource): unknown { - const obj: any = {}; - if (message.resources !== undefined) { - obj.resources = ResourceUnits.toJSON(message.resources); - } - if (message.count !== 0) { - obj.count = Math.round(message.count); - } - if (message.price !== undefined) { - obj.price = DecCoin.toJSON(message.price); - } - return obj; - }, - - create(base?: DeepPartial): Resource { - return Resource.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Resource { - const message = createBaseResource(); - message.resources = - object.resources !== undefined && object.resources !== null - ? ResourceUnits.fromPartial(object.resources) - : undefined; - message.count = object.count ?? 
0; - message.price = - object.price !== undefined && object.price !== null - ? DecCoin.fromPartial(object.price) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(Resource.$type, Resource); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta2/service.grpc-js.ts b/ts/src/generated/akash/deployment/v1beta2/service.grpc-js.ts deleted file mode 100644 index 6f39dd42..00000000 --- a/ts/src/generated/akash/deployment/v1beta2/service.grpc-js.ts +++ /dev/null @@ -1,348 +0,0 @@ -/* eslint-disable */ -import { - ChannelCredentials, - Client, - makeGenericClientConstructor, - Metadata, -} from "@grpc/grpc-js"; -import type { - CallOptions, - ClientOptions, - ClientUnaryCall, - handleUnaryCall, - ServiceError, - UntypedServiceImplementation, -} from "@grpc/grpc-js"; -import { - MsgCloseDeployment, - MsgCloseDeploymentResponse, - MsgCreateDeployment, - MsgCreateDeploymentResponse, - MsgDepositDeployment, - MsgDepositDeploymentResponse, - MsgUpdateDeployment, - MsgUpdateDeploymentResponse, -} from "./deploymentmsg"; -import { - MsgCloseGroup, - MsgCloseGroupResponse, - MsgPauseGroup, - MsgPauseGroupResponse, - MsgStartGroup, - MsgStartGroupResponse, -} from "./groupmsg"; - -export const protobufPackage = "akash.deployment.v1beta2"; - -/** Msg defines the deployment Msg service. */ -export type MsgService = typeof MsgService; -export const MsgService = { - /** CreateDeployment defines a method to create new deployment given proper inputs. */ - createDeployment: { - path: "/akash.deployment.v1beta2.Msg/CreateDeployment", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCreateDeployment) => - Buffer.from(MsgCreateDeployment.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCreateDeployment.decode(value), - responseSerialize: (value: MsgCreateDeploymentResponse) => - Buffer.from(MsgCreateDeploymentResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => - MsgCreateDeploymentResponse.decode(value), - }, - /** DepositDeployment deposits more funds into the deployment account */ - depositDeployment: { - path: "/akash.deployment.v1beta2.Msg/DepositDeployment", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgDepositDeployment) => - Buffer.from(MsgDepositDeployment.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgDepositDeployment.decode(value), - responseSerialize: (value: MsgDepositDeploymentResponse) => - Buffer.from(MsgDepositDeploymentResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => - MsgDepositDeploymentResponse.decode(value), - }, - /** UpdateDeployment defines a method to update a deployment given proper inputs. 
*/ - updateDeployment: { - path: "/akash.deployment.v1beta2.Msg/UpdateDeployment", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgUpdateDeployment) => - Buffer.from(MsgUpdateDeployment.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgUpdateDeployment.decode(value), - responseSerialize: (value: MsgUpdateDeploymentResponse) => - Buffer.from(MsgUpdateDeploymentResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => - MsgUpdateDeploymentResponse.decode(value), - }, - /** CloseDeployment defines a method to close a deployment given proper inputs. */ - closeDeployment: { - path: "/akash.deployment.v1beta2.Msg/CloseDeployment", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCloseDeployment) => - Buffer.from(MsgCloseDeployment.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCloseDeployment.decode(value), - responseSerialize: (value: MsgCloseDeploymentResponse) => - Buffer.from(MsgCloseDeploymentResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => - MsgCloseDeploymentResponse.decode(value), - }, - /** CloseGroup defines a method to close a group of a deployment given proper inputs. */ - closeGroup: { - path: "/akash.deployment.v1beta2.Msg/CloseGroup", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCloseGroup) => - Buffer.from(MsgCloseGroup.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCloseGroup.decode(value), - responseSerialize: (value: MsgCloseGroupResponse) => - Buffer.from(MsgCloseGroupResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => MsgCloseGroupResponse.decode(value), - }, - /** PauseGroup defines a method to close a group of a deployment given proper inputs. */ - pauseGroup: { - path: "/akash.deployment.v1beta2.Msg/PauseGroup", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgPauseGroup) => - Buffer.from(MsgPauseGroup.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgPauseGroup.decode(value), - responseSerialize: (value: MsgPauseGroupResponse) => - Buffer.from(MsgPauseGroupResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => MsgPauseGroupResponse.decode(value), - }, - /** StartGroup defines a method to close a group of a deployment given proper inputs. */ - startGroup: { - path: "/akash.deployment.v1beta2.Msg/StartGroup", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgStartGroup) => - Buffer.from(MsgStartGroup.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgStartGroup.decode(value), - responseSerialize: (value: MsgStartGroupResponse) => - Buffer.from(MsgStartGroupResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => MsgStartGroupResponse.decode(value), - }, -} as const; - -export interface MsgServer extends UntypedServiceImplementation { - /** CreateDeployment defines a method to create new deployment given proper inputs. */ - createDeployment: handleUnaryCall< - MsgCreateDeployment, - MsgCreateDeploymentResponse - >; - /** DepositDeployment deposits more funds into the deployment account */ - depositDeployment: handleUnaryCall< - MsgDepositDeployment, - MsgDepositDeploymentResponse - >; - /** UpdateDeployment defines a method to update a deployment given proper inputs. 
*/ - updateDeployment: handleUnaryCall< - MsgUpdateDeployment, - MsgUpdateDeploymentResponse - >; - /** CloseDeployment defines a method to close a deployment given proper inputs. */ - closeDeployment: handleUnaryCall< - MsgCloseDeployment, - MsgCloseDeploymentResponse - >; - /** CloseGroup defines a method to close a group of a deployment given proper inputs. */ - closeGroup: handleUnaryCall; - /** PauseGroup defines a method to close a group of a deployment given proper inputs. */ - pauseGroup: handleUnaryCall; - /** StartGroup defines a method to close a group of a deployment given proper inputs. */ - startGroup: handleUnaryCall; -} - -export interface MsgClient extends Client { - /** CreateDeployment defines a method to create new deployment given proper inputs. */ - createDeployment( - request: MsgCreateDeployment, - callback: ( - error: ServiceError | null, - response: MsgCreateDeploymentResponse, - ) => void, - ): ClientUnaryCall; - createDeployment( - request: MsgCreateDeployment, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCreateDeploymentResponse, - ) => void, - ): ClientUnaryCall; - createDeployment( - request: MsgCreateDeployment, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCreateDeploymentResponse, - ) => void, - ): ClientUnaryCall; - /** DepositDeployment deposits more funds into the deployment account */ - depositDeployment( - request: MsgDepositDeployment, - callback: ( - error: ServiceError | null, - response: MsgDepositDeploymentResponse, - ) => void, - ): ClientUnaryCall; - depositDeployment( - request: MsgDepositDeployment, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgDepositDeploymentResponse, - ) => void, - ): ClientUnaryCall; - depositDeployment( - request: MsgDepositDeployment, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgDepositDeploymentResponse, - ) => void, - ): ClientUnaryCall; - /** UpdateDeployment defines a method to update a deployment given proper inputs. */ - updateDeployment( - request: MsgUpdateDeployment, - callback: ( - error: ServiceError | null, - response: MsgUpdateDeploymentResponse, - ) => void, - ): ClientUnaryCall; - updateDeployment( - request: MsgUpdateDeployment, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgUpdateDeploymentResponse, - ) => void, - ): ClientUnaryCall; - updateDeployment( - request: MsgUpdateDeployment, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgUpdateDeploymentResponse, - ) => void, - ): ClientUnaryCall; - /** CloseDeployment defines a method to close a deployment given proper inputs. */ - closeDeployment( - request: MsgCloseDeployment, - callback: ( - error: ServiceError | null, - response: MsgCloseDeploymentResponse, - ) => void, - ): ClientUnaryCall; - closeDeployment( - request: MsgCloseDeployment, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCloseDeploymentResponse, - ) => void, - ): ClientUnaryCall; - closeDeployment( - request: MsgCloseDeployment, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCloseDeploymentResponse, - ) => void, - ): ClientUnaryCall; - /** CloseGroup defines a method to close a group of a deployment given proper inputs. 
*/ - closeGroup( - request: MsgCloseGroup, - callback: ( - error: ServiceError | null, - response: MsgCloseGroupResponse, - ) => void, - ): ClientUnaryCall; - closeGroup( - request: MsgCloseGroup, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCloseGroupResponse, - ) => void, - ): ClientUnaryCall; - closeGroup( - request: MsgCloseGroup, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCloseGroupResponse, - ) => void, - ): ClientUnaryCall; - /** PauseGroup defines a method to close a group of a deployment given proper inputs. */ - pauseGroup( - request: MsgPauseGroup, - callback: ( - error: ServiceError | null, - response: MsgPauseGroupResponse, - ) => void, - ): ClientUnaryCall; - pauseGroup( - request: MsgPauseGroup, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgPauseGroupResponse, - ) => void, - ): ClientUnaryCall; - pauseGroup( - request: MsgPauseGroup, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgPauseGroupResponse, - ) => void, - ): ClientUnaryCall; - /** StartGroup defines a method to close a group of a deployment given proper inputs. */ - startGroup( - request: MsgStartGroup, - callback: ( - error: ServiceError | null, - response: MsgStartGroupResponse, - ) => void, - ): ClientUnaryCall; - startGroup( - request: MsgStartGroup, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgStartGroupResponse, - ) => void, - ): ClientUnaryCall; - startGroup( - request: MsgStartGroup, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgStartGroupResponse, - ) => void, - ): ClientUnaryCall; -} - -export const MsgClient = makeGenericClientConstructor( - MsgService, - "akash.deployment.v1beta2.Msg", -) as unknown as { - new ( - address: string, - credentials: ChannelCredentials, - options?: Partial, - ): MsgClient; - service: typeof MsgService; - serviceName: string; -}; diff --git a/ts/src/generated/akash/deployment/v1beta2/service.ts b/ts/src/generated/akash/deployment/v1beta2/service.ts deleted file mode 100644 index 568b6335..00000000 --- a/ts/src/generated/akash/deployment/v1beta2/service.ts +++ /dev/null @@ -1,134 +0,0 @@ -/* eslint-disable */ -import _m0 from "protobufjs/minimal"; -import { - MsgCloseDeployment, - MsgCloseDeploymentResponse, - MsgCreateDeployment, - MsgCreateDeploymentResponse, - MsgDepositDeployment, - MsgDepositDeploymentResponse, - MsgUpdateDeployment, - MsgUpdateDeploymentResponse, -} from "./deploymentmsg"; -import { - MsgCloseGroup, - MsgCloseGroupResponse, - MsgPauseGroup, - MsgPauseGroupResponse, - MsgStartGroup, - MsgStartGroupResponse, -} from "./groupmsg"; - -/** Msg defines the deployment Msg service. */ -export interface Msg { - /** CreateDeployment defines a method to create new deployment given proper inputs. */ - CreateDeployment( - request: MsgCreateDeployment, - ): Promise; - /** DepositDeployment deposits more funds into the deployment account */ - DepositDeployment( - request: MsgDepositDeployment, - ): Promise; - /** UpdateDeployment defines a method to update a deployment given proper inputs. */ - UpdateDeployment( - request: MsgUpdateDeployment, - ): Promise; - /** CloseDeployment defines a method to close a deployment given proper inputs. */ - CloseDeployment( - request: MsgCloseDeployment, - ): Promise; - /** CloseGroup defines a method to close a group of a deployment given proper inputs. 
*/ - CloseGroup(request: MsgCloseGroup): Promise; - /** PauseGroup defines a method to close a group of a deployment given proper inputs. */ - PauseGroup(request: MsgPauseGroup): Promise; - /** StartGroup defines a method to close a group of a deployment given proper inputs. */ - StartGroup(request: MsgStartGroup): Promise; -} - -export const MsgServiceName = "akash.deployment.v1beta2.Msg"; -export class MsgClientImpl implements Msg { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || MsgServiceName; - this.rpc = rpc; - this.CreateDeployment = this.CreateDeployment.bind(this); - this.DepositDeployment = this.DepositDeployment.bind(this); - this.UpdateDeployment = this.UpdateDeployment.bind(this); - this.CloseDeployment = this.CloseDeployment.bind(this); - this.CloseGroup = this.CloseGroup.bind(this); - this.PauseGroup = this.PauseGroup.bind(this); - this.StartGroup = this.StartGroup.bind(this); - } - CreateDeployment( - request: MsgCreateDeployment, - ): Promise { - const data = MsgCreateDeployment.encode(request).finish(); - const promise = this.rpc.request(this.service, "CreateDeployment", data); - return promise.then((data) => - MsgCreateDeploymentResponse.decode(_m0.Reader.create(data)), - ); - } - - DepositDeployment( - request: MsgDepositDeployment, - ): Promise { - const data = MsgDepositDeployment.encode(request).finish(); - const promise = this.rpc.request(this.service, "DepositDeployment", data); - return promise.then((data) => - MsgDepositDeploymentResponse.decode(_m0.Reader.create(data)), - ); - } - - UpdateDeployment( - request: MsgUpdateDeployment, - ): Promise { - const data = MsgUpdateDeployment.encode(request).finish(); - const promise = this.rpc.request(this.service, "UpdateDeployment", data); - return promise.then((data) => - MsgUpdateDeploymentResponse.decode(_m0.Reader.create(data)), - ); - } - - CloseDeployment( - request: MsgCloseDeployment, - ): Promise { - const data = MsgCloseDeployment.encode(request).finish(); - const promise = this.rpc.request(this.service, "CloseDeployment", data); - return promise.then((data) => - MsgCloseDeploymentResponse.decode(_m0.Reader.create(data)), - ); - } - - CloseGroup(request: MsgCloseGroup): Promise { - const data = MsgCloseGroup.encode(request).finish(); - const promise = this.rpc.request(this.service, "CloseGroup", data); - return promise.then((data) => - MsgCloseGroupResponse.decode(_m0.Reader.create(data)), - ); - } - - PauseGroup(request: MsgPauseGroup): Promise { - const data = MsgPauseGroup.encode(request).finish(); - const promise = this.rpc.request(this.service, "PauseGroup", data); - return promise.then((data) => - MsgPauseGroupResponse.decode(_m0.Reader.create(data)), - ); - } - - StartGroup(request: MsgStartGroup): Promise { - const data = MsgStartGroup.encode(request).finish(); - const promise = this.rpc.request(this.service, "StartGroup", data); - return promise.then((data) => - MsgStartGroupResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} diff --git a/ts/src/generated/akash/deployment/v1beta3/authz.ts b/ts/src/generated/akash/deployment/v1beta3/authz.ts deleted file mode 100644 index cc14df4d..00000000 --- a/ts/src/generated/akash/deployment/v1beta3/authz.ts +++ /dev/null @@ -1,134 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin } from 
"../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** - * DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from - * the granter's account for a deployment. - */ -export interface DepositDeploymentAuthorization { - $type: "akash.deployment.v1beta3.DepositDeploymentAuthorization"; - /** - * SpendLimit is the amount the grantee is authorized to spend from the granter's account for - * the purpose of deployment. - */ - spendLimit: Coin | undefined; -} - -function createBaseDepositDeploymentAuthorization(): DepositDeploymentAuthorization { - return { - $type: "akash.deployment.v1beta3.DepositDeploymentAuthorization", - spendLimit: undefined, - }; -} - -export const DepositDeploymentAuthorization = { - $type: "akash.deployment.v1beta3.DepositDeploymentAuthorization" as const, - - encode( - message: DepositDeploymentAuthorization, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.spendLimit !== undefined) { - Coin.encode(message.spendLimit, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): DepositDeploymentAuthorization { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDepositDeploymentAuthorization(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.spendLimit = Coin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DepositDeploymentAuthorization { - return { - $type: DepositDeploymentAuthorization.$type, - spendLimit: isSet(object.spendLimit) - ? Coin.fromJSON(object.spendLimit) - : undefined, - }; - }, - - toJSON(message: DepositDeploymentAuthorization): unknown { - const obj: any = {}; - if (message.spendLimit !== undefined) { - obj.spendLimit = Coin.toJSON(message.spendLimit); - } - return obj; - }, - - create( - base?: DeepPartial, - ): DepositDeploymentAuthorization { - return DepositDeploymentAuthorization.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): DepositDeploymentAuthorization { - const message = createBaseDepositDeploymentAuthorization(); - message.spendLimit = - object.spendLimit !== undefined && object.spendLimit !== null - ? Coin.fromPartial(object.spendLimit) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - DepositDeploymentAuthorization.$type, - DepositDeploymentAuthorization, -); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta3/deployment.ts b/ts/src/generated/akash/deployment/v1beta3/deployment.ts deleted file mode 100644 index df69663d..00000000 --- a/ts/src/generated/akash/deployment/v1beta3/deployment.ts +++ /dev/null @@ -1,456 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** DeploymentID stores owner and sequence number */ -export interface DeploymentID { - $type: "akash.deployment.v1beta3.DeploymentID"; - owner: string; - dseq: Long; -} - -/** Deployment stores deploymentID, state and version details */ -export interface Deployment { - $type: "akash.deployment.v1beta3.Deployment"; - deploymentId: DeploymentID | undefined; - state: Deployment_State; - version: Uint8Array; - createdAt: Long; -} - -/** State is an enum which refers to state of deployment */ -export enum Deployment_State { - /** invalid - Prefix should start with 0 in enum. So declaring dummy state */ - invalid = 0, - /** active - DeploymentActive denotes state for deployment active */ - active = 1, - /** closed - DeploymentClosed denotes state for deployment closed */ - closed = 2, - UNRECOGNIZED = -1, -} - -export function deployment_StateFromJSON(object: any): Deployment_State { - switch (object) { - case 0: - case "invalid": - return Deployment_State.invalid; - case 1: - case "active": - return Deployment_State.active; - case 2: - case "closed": - return Deployment_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Deployment_State.UNRECOGNIZED; - } -} - -export function deployment_StateToJSON(object: Deployment_State): string { - switch (object) { - case Deployment_State.invalid: - return "invalid"; - case Deployment_State.active: - return "active"; - case Deployment_State.closed: - return "closed"; - case Deployment_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** DeploymentFilters defines filters used to filter deployments */ -export interface DeploymentFilters { - $type: "akash.deployment.v1beta3.DeploymentFilters"; - owner: string; - dseq: Long; - state: string; -} - -function createBaseDeploymentID(): DeploymentID { - return { - $type: "akash.deployment.v1beta3.DeploymentID", - owner: "", - dseq: Long.UZERO, - }; -} - -export const DeploymentID = { - $type: "akash.deployment.v1beta3.DeploymentID" as const, - - encode( - message: DeploymentID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DeploymentID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseDeploymentID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DeploymentID { - return { - $type: DeploymentID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - }; - }, - - toJSON(message: DeploymentID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): DeploymentID { - return DeploymentID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): DeploymentID { - const message = createBaseDeploymentID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - return message; - }, -}; - -messageTypeRegistry.set(DeploymentID.$type, DeploymentID); - -function createBaseDeployment(): Deployment { - return { - $type: "akash.deployment.v1beta3.Deployment", - deploymentId: undefined, - state: 0, - version: new Uint8Array(0), - createdAt: Long.ZERO, - }; -} - -export const Deployment = { - $type: "akash.deployment.v1beta3.Deployment" as const, - - encode( - message: Deployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.deploymentId !== undefined) { - DeploymentID.encode( - message.deploymentId, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.version.length !== 0) { - writer.uint32(26).bytes(message.version); - } - if (!message.createdAt.equals(Long.ZERO)) { - writer.uint32(32).int64(message.createdAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Deployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deploymentId = DeploymentID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.version = reader.bytes(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.createdAt = reader.int64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Deployment { - return { - $type: Deployment.$type, - deploymentId: isSet(object.deploymentId) - ? DeploymentID.fromJSON(object.deploymentId) - : undefined, - state: isSet(object.state) ? deployment_StateFromJSON(object.state) : 0, - version: isSet(object.version) - ? bytesFromBase64(object.version) - : new Uint8Array(0), - createdAt: isSet(object.createdAt) - ? 
Long.fromValue(object.createdAt) - : Long.ZERO, - }; - }, - - toJSON(message: Deployment): unknown { - const obj: any = {}; - if (message.deploymentId !== undefined) { - obj.deploymentId = DeploymentID.toJSON(message.deploymentId); - } - if (message.state !== 0) { - obj.state = deployment_StateToJSON(message.state); - } - if (message.version.length !== 0) { - obj.version = base64FromBytes(message.version); - } - if (!message.createdAt.equals(Long.ZERO)) { - obj.createdAt = (message.createdAt || Long.ZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): Deployment { - return Deployment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Deployment { - const message = createBaseDeployment(); - message.deploymentId = - object.deploymentId !== undefined && object.deploymentId !== null - ? DeploymentID.fromPartial(object.deploymentId) - : undefined; - message.state = object.state ?? 0; - message.version = object.version ?? new Uint8Array(0); - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? Long.fromValue(object.createdAt) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Deployment.$type, Deployment); - -function createBaseDeploymentFilters(): DeploymentFilters { - return { - $type: "akash.deployment.v1beta3.DeploymentFilters", - owner: "", - dseq: Long.UZERO, - state: "", - }; -} - -export const DeploymentFilters = { - $type: "akash.deployment.v1beta3.DeploymentFilters" as const, - - encode( - message: DeploymentFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.state !== "") { - writer.uint32(26).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DeploymentFilters { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDeploymentFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.state = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DeploymentFilters { - return { - $type: DeploymentFilters.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - state: isSet(object.state) ? globalThis.String(object.state) : "", - }; - }, - - toJSON(message: DeploymentFilters): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.state !== "") { - obj.state = message.state; - } - return obj; - }, - - create(base?: DeepPartial): DeploymentFilters { - return DeploymentFilters.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): DeploymentFilters { - const message = createBaseDeploymentFilters(); - message.owner = object.owner ?? 
""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.state = object.state ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(DeploymentFilters.$type, DeploymentFilters); - -function bytesFromBase64(b64: string): Uint8Array { - if ((globalThis as any).Buffer) { - return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); - } else { - const bin = globalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if ((globalThis as any).Buffer) { - return globalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(globalThis.String.fromCharCode(byte)); - }); - return globalThis.btoa(bin.join("")); - } -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta3/deploymentmsg.ts b/ts/src/generated/akash/deployment/v1beta3/deploymentmsg.ts deleted file mode 100644 index 297e56da..00000000 --- a/ts/src/generated/akash/deployment/v1beta3/deploymentmsg.ts +++ /dev/null @@ -1,788 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { DeploymentID } from "./deployment"; -import { GroupSpec } from "./groupspec"; - -/** MsgCreateDeployment defines an SDK message for creating deployment */ -export interface MsgCreateDeployment { - $type: "akash.deployment.v1beta3.MsgCreateDeployment"; - id: DeploymentID | undefined; - groups: GroupSpec[]; - version: Uint8Array; - deposit: Coin | undefined; - /** Depositor pays for the deposit */ - depositor: string; -} - -/** MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. */ -export interface MsgCreateDeploymentResponse { - $type: "akash.deployment.v1beta3.MsgCreateDeploymentResponse"; -} - -/** MsgDepositDeployment deposits more funds into the deposit account */ -export interface MsgDepositDeployment { - $type: "akash.deployment.v1beta3.MsgDepositDeployment"; - id: DeploymentID | undefined; - amount: Coin | undefined; - /** Depositor pays for the deposit */ - depositor: string; -} - -/** MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. */ -export interface MsgDepositDeploymentResponse { - $type: "akash.deployment.v1beta3.MsgDepositDeploymentResponse"; -} - -/** MsgUpdateDeployment defines an SDK message for updating deployment */ -export interface MsgUpdateDeployment { - $type: "akash.deployment.v1beta3.MsgUpdateDeployment"; - id: DeploymentID | undefined; - version: Uint8Array; -} - -/** MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. 
*/ -export interface MsgUpdateDeploymentResponse { - $type: "akash.deployment.v1beta3.MsgUpdateDeploymentResponse"; -} - -/** MsgCloseDeployment defines an SDK message for closing deployment */ -export interface MsgCloseDeployment { - $type: "akash.deployment.v1beta3.MsgCloseDeployment"; - id: DeploymentID | undefined; -} - -/** MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. */ -export interface MsgCloseDeploymentResponse { - $type: "akash.deployment.v1beta3.MsgCloseDeploymentResponse"; -} - -function createBaseMsgCreateDeployment(): MsgCreateDeployment { - return { - $type: "akash.deployment.v1beta3.MsgCreateDeployment", - id: undefined, - groups: [], - version: new Uint8Array(0), - deposit: undefined, - depositor: "", - }; -} - -export const MsgCreateDeployment = { - $type: "akash.deployment.v1beta3.MsgCreateDeployment" as const, - - encode( - message: MsgCreateDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.groups) { - GroupSpec.encode(v!, writer.uint32(18).fork()).ldelim(); - } - if (message.version.length !== 0) { - writer.uint32(26).bytes(message.version); - } - if (message.deposit !== undefined) { - Coin.encode(message.deposit, writer.uint32(34).fork()).ldelim(); - } - if (message.depositor !== "") { - writer.uint32(42).string(message.depositor); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateDeployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = DeploymentID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.groups.push(GroupSpec.decode(reader, reader.uint32())); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.version = reader.bytes(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.deposit = Coin.decode(reader, reader.uint32()); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.depositor = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCreateDeployment { - return { - $type: MsgCreateDeployment.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - groups: globalThis.Array.isArray(object?.groups) - ? object.groups.map((e: any) => GroupSpec.fromJSON(e)) - : [], - version: isSet(object.version) - ? bytesFromBase64(object.version) - : new Uint8Array(0), - deposit: isSet(object.deposit) - ? Coin.fromJSON(object.deposit) - : undefined, - depositor: isSet(object.depositor) - ? 
globalThis.String(object.depositor) - : "", - }; - }, - - toJSON(message: MsgCreateDeployment): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = DeploymentID.toJSON(message.id); - } - if (message.groups?.length) { - obj.groups = message.groups.map((e) => GroupSpec.toJSON(e)); - } - if (message.version.length !== 0) { - obj.version = base64FromBytes(message.version); - } - if (message.deposit !== undefined) { - obj.deposit = Coin.toJSON(message.deposit); - } - if (message.depositor !== "") { - obj.depositor = message.depositor; - } - return obj; - }, - - create(base?: DeepPartial): MsgCreateDeployment { - return MsgCreateDeployment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCreateDeployment { - const message = createBaseMsgCreateDeployment(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - message.groups = object.groups?.map((e) => GroupSpec.fromPartial(e)) || []; - message.version = object.version ?? new Uint8Array(0); - message.deposit = - object.deposit !== undefined && object.deposit !== null - ? Coin.fromPartial(object.deposit) - : undefined; - message.depositor = object.depositor ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateDeployment.$type, MsgCreateDeployment); - -function createBaseMsgCreateDeploymentResponse(): MsgCreateDeploymentResponse { - return { $type: "akash.deployment.v1beta3.MsgCreateDeploymentResponse" }; -} - -export const MsgCreateDeploymentResponse = { - $type: "akash.deployment.v1beta3.MsgCreateDeploymentResponse" as const, - - encode( - _: MsgCreateDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateDeploymentResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCreateDeploymentResponse { - return { $type: MsgCreateDeploymentResponse.$type }; - }, - - toJSON(_: MsgCreateDeploymentResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgCreateDeploymentResponse { - return MsgCreateDeploymentResponse.fromPartial(base ?? 
{}); - }, - fromPartial( - _: DeepPartial, - ): MsgCreateDeploymentResponse { - const message = createBaseMsgCreateDeploymentResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgCreateDeploymentResponse.$type, - MsgCreateDeploymentResponse, -); - -function createBaseMsgDepositDeployment(): MsgDepositDeployment { - return { - $type: "akash.deployment.v1beta3.MsgDepositDeployment", - id: undefined, - amount: undefined, - depositor: "", - }; -} - -export const MsgDepositDeployment = { - $type: "akash.deployment.v1beta3.MsgDepositDeployment" as const, - - encode( - message: MsgDepositDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - if (message.amount !== undefined) { - Coin.encode(message.amount, writer.uint32(18).fork()).ldelim(); - } - if (message.depositor !== "") { - writer.uint32(26).string(message.depositor); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDepositDeployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDepositDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = DeploymentID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.amount = Coin.decode(reader, reader.uint32()); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.depositor = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgDepositDeployment { - return { - $type: MsgDepositDeployment.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - amount: isSet(object.amount) ? Coin.fromJSON(object.amount) : undefined, - depositor: isSet(object.depositor) - ? globalThis.String(object.depositor) - : "", - }; - }, - - toJSON(message: MsgDepositDeployment): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = DeploymentID.toJSON(message.id); - } - if (message.amount !== undefined) { - obj.amount = Coin.toJSON(message.amount); - } - if (message.depositor !== "") { - obj.depositor = message.depositor; - } - return obj; - }, - - create(base?: DeepPartial): MsgDepositDeployment { - return MsgDepositDeployment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgDepositDeployment { - const message = createBaseMsgDepositDeployment(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - message.amount = - object.amount !== undefined && object.amount !== null - ? Coin.fromPartial(object.amount) - : undefined; - message.depositor = object.depositor ?? 
""; - return message; - }, -}; - -messageTypeRegistry.set(MsgDepositDeployment.$type, MsgDepositDeployment); - -function createBaseMsgDepositDeploymentResponse(): MsgDepositDeploymentResponse { - return { $type: "akash.deployment.v1beta3.MsgDepositDeploymentResponse" }; -} - -export const MsgDepositDeploymentResponse = { - $type: "akash.deployment.v1beta3.MsgDepositDeploymentResponse" as const, - - encode( - _: MsgDepositDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDepositDeploymentResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDepositDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgDepositDeploymentResponse { - return { $type: MsgDepositDeploymentResponse.$type }; - }, - - toJSON(_: MsgDepositDeploymentResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgDepositDeploymentResponse { - return MsgDepositDeploymentResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgDepositDeploymentResponse { - const message = createBaseMsgDepositDeploymentResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgDepositDeploymentResponse.$type, - MsgDepositDeploymentResponse, -); - -function createBaseMsgUpdateDeployment(): MsgUpdateDeployment { - return { - $type: "akash.deployment.v1beta3.MsgUpdateDeployment", - id: undefined, - version: new Uint8Array(0), - }; -} - -export const MsgUpdateDeployment = { - $type: "akash.deployment.v1beta3.MsgUpdateDeployment" as const, - - encode( - message: MsgUpdateDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - if (message.version.length !== 0) { - writer.uint32(26).bytes(message.version); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgUpdateDeployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgUpdateDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = DeploymentID.decode(reader, reader.uint32()); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.version = reader.bytes(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgUpdateDeployment { - return { - $type: MsgUpdateDeployment.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - version: isSet(object.version) - ? 
bytesFromBase64(object.version) - : new Uint8Array(0), - }; - }, - - toJSON(message: MsgUpdateDeployment): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = DeploymentID.toJSON(message.id); - } - if (message.version.length !== 0) { - obj.version = base64FromBytes(message.version); - } - return obj; - }, - - create(base?: DeepPartial): MsgUpdateDeployment { - return MsgUpdateDeployment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgUpdateDeployment { - const message = createBaseMsgUpdateDeployment(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - message.version = object.version ?? new Uint8Array(0); - return message; - }, -}; - -messageTypeRegistry.set(MsgUpdateDeployment.$type, MsgUpdateDeployment); - -function createBaseMsgUpdateDeploymentResponse(): MsgUpdateDeploymentResponse { - return { $type: "akash.deployment.v1beta3.MsgUpdateDeploymentResponse" }; -} - -export const MsgUpdateDeploymentResponse = { - $type: "akash.deployment.v1beta3.MsgUpdateDeploymentResponse" as const, - - encode( - _: MsgUpdateDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgUpdateDeploymentResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgUpdateDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgUpdateDeploymentResponse { - return { $type: MsgUpdateDeploymentResponse.$type }; - }, - - toJSON(_: MsgUpdateDeploymentResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgUpdateDeploymentResponse { - return MsgUpdateDeploymentResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgUpdateDeploymentResponse { - const message = createBaseMsgUpdateDeploymentResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgUpdateDeploymentResponse.$type, - MsgUpdateDeploymentResponse, -); - -function createBaseMsgCloseDeployment(): MsgCloseDeployment { - return { - $type: "akash.deployment.v1beta3.MsgCloseDeployment", - id: undefined, - }; -} - -export const MsgCloseDeployment = { - $type: "akash.deployment.v1beta3.MsgCloseDeployment" as const, - - encode( - message: MsgCloseDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseDeployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCloseDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = DeploymentID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCloseDeployment { - return { - $type: MsgCloseDeployment.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgCloseDeployment): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = DeploymentID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): MsgCloseDeployment { - return MsgCloseDeployment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCloseDeployment { - const message = createBaseMsgCloseDeployment(); - message.id = - object.id !== undefined && object.id !== null - ? DeploymentID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseDeployment.$type, MsgCloseDeployment); - -function createBaseMsgCloseDeploymentResponse(): MsgCloseDeploymentResponse { - return { $type: "akash.deployment.v1beta3.MsgCloseDeploymentResponse" }; -} - -export const MsgCloseDeploymentResponse = { - $type: "akash.deployment.v1beta3.MsgCloseDeploymentResponse" as const, - - encode( - _: MsgCloseDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCloseDeploymentResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCloseDeploymentResponse { - return { $type: MsgCloseDeploymentResponse.$type }; - }, - - toJSON(_: MsgCloseDeploymentResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgCloseDeploymentResponse { - return MsgCloseDeploymentResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgCloseDeploymentResponse { - const message = createBaseMsgCloseDeploymentResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgCloseDeploymentResponse.$type, - MsgCloseDeploymentResponse, -); - -function bytesFromBase64(b64: string): Uint8Array { - if ((globalThis as any).Buffer) { - return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); - } else { - const bin = globalThis.atob(b64); - const arr = new Uint8Array(bin.length); - for (let i = 0; i < bin.length; ++i) { - arr[i] = bin.charCodeAt(i); - } - return arr; - } -} - -function base64FromBytes(arr: Uint8Array): string { - if ((globalThis as any).Buffer) { - return globalThis.Buffer.from(arr).toString("base64"); - } else { - const bin: string[] = []; - arr.forEach((byte) => { - bin.push(globalThis.String.fromCharCode(byte)); - }); - return globalThis.btoa(bin.join("")); - } -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? 
string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta3/genesis.ts b/ts/src/generated/akash/deployment/v1beta3/genesis.ts deleted file mode 100644 index 6fef26d0..00000000 --- a/ts/src/generated/akash/deployment/v1beta3/genesis.ts +++ /dev/null @@ -1,242 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Deployment } from "./deployment"; -import { Group } from "./group"; -import { Params } from "./params"; - -/** GenesisDeployment defines the basic genesis state used by deployment module */ -export interface GenesisDeployment { - $type: "akash.deployment.v1beta3.GenesisDeployment"; - deployment: Deployment | undefined; - groups: Group[]; -} - -/** GenesisState stores slice of genesis deployment instance */ -export interface GenesisState { - $type: "akash.deployment.v1beta3.GenesisState"; - deployments: GenesisDeployment[]; - params: Params | undefined; -} - -function createBaseGenesisDeployment(): GenesisDeployment { - return { - $type: "akash.deployment.v1beta3.GenesisDeployment", - deployment: undefined, - groups: [], - }; -} - -export const GenesisDeployment = { - $type: "akash.deployment.v1beta3.GenesisDeployment" as const, - - encode( - message: GenesisDeployment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.deployment !== undefined) { - Deployment.encode(message.deployment, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.groups) { - Group.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisDeployment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisDeployment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deployment = Deployment.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.groups.push(Group.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisDeployment { - return { - $type: GenesisDeployment.$type, - deployment: isSet(object.deployment) - ? Deployment.fromJSON(object.deployment) - : undefined, - groups: globalThis.Array.isArray(object?.groups) - ? object.groups.map((e: any) => Group.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GenesisDeployment): unknown { - const obj: any = {}; - if (message.deployment !== undefined) { - obj.deployment = Deployment.toJSON(message.deployment); - } - if (message.groups?.length) { - obj.groups = message.groups.map((e) => Group.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): GenesisDeployment { - return GenesisDeployment.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): GenesisDeployment { - const message = createBaseGenesisDeployment(); - message.deployment = - object.deployment !== undefined && object.deployment !== null - ? Deployment.fromPartial(object.deployment) - : undefined; - message.groups = object.groups?.map((e) => Group.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GenesisDeployment.$type, GenesisDeployment); - -function createBaseGenesisState(): GenesisState { - return { - $type: "akash.deployment.v1beta3.GenesisState", - deployments: [], - params: undefined, - }; -} - -export const GenesisState = { - $type: "akash.deployment.v1beta3.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.deployments) { - GenesisDeployment.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.params !== undefined) { - Params.encode(message.params, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deployments.push( - GenesisDeployment.decode(reader, reader.uint32()), - ); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.params = Params.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - deployments: globalThis.Array.isArray(object?.deployments) - ? object.deployments.map((e: any) => GenesisDeployment.fromJSON(e)) - : [], - params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.deployments?.length) { - obj.deployments = message.deployments.map((e) => - GenesisDeployment.toJSON(e), - ); - } - if (message.params !== undefined) { - obj.params = Params.toJSON(message.params); - } - return obj; - }, - - create(base?: DeepPartial): GenesisState { - return GenesisState.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisState { - const message = createBaseGenesisState(); - message.deployments = - object.deployments?.map((e) => GenesisDeployment.fromPartial(e)) || []; - message.params = - object.params !== undefined && object.params !== null - ? Params.fromPartial(object.params) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta3/group.ts b/ts/src/generated/akash/deployment/v1beta3/group.ts deleted file mode 100644 index e8792cca..00000000 --- a/ts/src/generated/akash/deployment/v1beta3/group.ts +++ /dev/null @@ -1,233 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { GroupID } from "./groupid"; -import { GroupSpec } from "./groupspec"; - -/** Group stores group id, state and specifications of group */ -export interface Group { - $type: "akash.deployment.v1beta3.Group"; - groupId: GroupID | undefined; - state: Group_State; - groupSpec: GroupSpec | undefined; - createdAt: Long; -} - -/** State is an enum which refers to state of group */ -export enum Group_State { - /** invalid - Prefix should start with 0 in enum. So declaring dummy state */ - invalid = 0, - /** open - GroupOpen denotes state for group open */ - open = 1, - /** paused - GroupOrdered denotes state for group ordered */ - paused = 2, - /** insufficient_funds - GroupInsufficientFunds denotes state for group insufficient_funds */ - insufficient_funds = 3, - /** closed - GroupClosed denotes state for group closed */ - closed = 4, - UNRECOGNIZED = -1, -} - -export function group_StateFromJSON(object: any): Group_State { - switch (object) { - case 0: - case "invalid": - return Group_State.invalid; - case 1: - case "open": - return Group_State.open; - case 2: - case "paused": - return Group_State.paused; - case 3: - case "insufficient_funds": - return Group_State.insufficient_funds; - case 4: - case "closed": - return Group_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Group_State.UNRECOGNIZED; - } -} - -export function group_StateToJSON(object: Group_State): string { - switch (object) { - case Group_State.invalid: - return "invalid"; - case Group_State.open: - return "open"; - case Group_State.paused: - return "paused"; - case Group_State.insufficient_funds: - return "insufficient_funds"; - case Group_State.closed: - return "closed"; - case Group_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -function createBaseGroup(): Group { - return { - $type: "akash.deployment.v1beta3.Group", - groupId: undefined, - state: 0, - groupSpec: undefined, - createdAt: Long.ZERO, - }; -} - -export const Group = { - $type: "akash.deployment.v1beta3.Group" as const, - - encode(message: Group, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.groupId !== undefined) { - GroupID.encode(message.groupId, writer.uint32(10).fork()).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.groupSpec !== undefined) { - GroupSpec.encode(message.groupSpec, writer.uint32(26).fork()).ldelim(); - } - if (!message.createdAt.equals(Long.ZERO)) { - writer.uint32(32).int64(message.createdAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Group { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.groupId = GroupID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.groupSpec = GroupSpec.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.createdAt = reader.int64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Group { - return { - $type: Group.$type, - groupId: isSet(object.groupId) - ? GroupID.fromJSON(object.groupId) - : undefined, - state: isSet(object.state) ? group_StateFromJSON(object.state) : 0, - groupSpec: isSet(object.groupSpec) - ? GroupSpec.fromJSON(object.groupSpec) - : undefined, - createdAt: isSet(object.createdAt) - ? Long.fromValue(object.createdAt) - : Long.ZERO, - }; - }, - - toJSON(message: Group): unknown { - const obj: any = {}; - if (message.groupId !== undefined) { - obj.groupId = GroupID.toJSON(message.groupId); - } - if (message.state !== 0) { - obj.state = group_StateToJSON(message.state); - } - if (message.groupSpec !== undefined) { - obj.groupSpec = GroupSpec.toJSON(message.groupSpec); - } - if (!message.createdAt.equals(Long.ZERO)) { - obj.createdAt = (message.createdAt || Long.ZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): Group { - return Group.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Group { - const message = createBaseGroup(); - message.groupId = - object.groupId !== undefined && object.groupId !== null - ? GroupID.fromPartial(object.groupId) - : undefined; - message.state = object.state ?? 0; - message.groupSpec = - object.groupSpec !== undefined && object.groupSpec !== null - ? GroupSpec.fromPartial(object.groupSpec) - : undefined; - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? Long.fromValue(object.createdAt) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Group.$type, Group); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta3/groupid.ts b/ts/src/generated/akash/deployment/v1beta3/groupid.ts deleted file mode 100644 index cc55fe67..00000000 --- a/ts/src/generated/akash/deployment/v1beta3/groupid.ts +++ /dev/null @@ -1,148 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** GroupID stores owner, deployment sequence number and group sequence number */ -export interface GroupID { - $type: "akash.deployment.v1beta3.GroupID"; - owner: string; - dseq: Long; - gseq: number; -} - -function createBaseGroupID(): GroupID { - return { - $type: "akash.deployment.v1beta3.GroupID", - owner: "", - dseq: Long.UZERO, - gseq: 0, - }; -} - -export const GroupID = { - $type: "akash.deployment.v1beta3.GroupID" as const, - - encode( - message: GroupID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GroupID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGroupID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GroupID { - return { - $type: GroupID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - }; - }, - - toJSON(message: GroupID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - return obj; - }, - - create(base?: DeepPartial): GroupID { - return GroupID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GroupID { - const message = createBaseGroupID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(GroupID.$type, GroupID); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta3/groupmsg.ts b/ts/src/generated/akash/deployment/v1beta3/groupmsg.ts deleted file mode 100644 index 4b053ca3..00000000 --- a/ts/src/generated/akash/deployment/v1beta3/groupmsg.ts +++ /dev/null @@ -1,443 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { GroupID } from "./groupid"; - -/** MsgCloseGroup defines SDK message to close a single Group within a Deployment. */ -export interface MsgCloseGroup { - $type: "akash.deployment.v1beta3.MsgCloseGroup"; - id: GroupID | undefined; -} - -/** MsgCloseGroupResponse defines the Msg/CloseGroup response type. */ -export interface MsgCloseGroupResponse { - $type: "akash.deployment.v1beta3.MsgCloseGroupResponse"; -} - -/** MsgPauseGroup defines SDK message to close a single Group within a Deployment. */ -export interface MsgPauseGroup { - $type: "akash.deployment.v1beta3.MsgPauseGroup"; - id: GroupID | undefined; -} - -/** MsgPauseGroupResponse defines the Msg/PauseGroup response type. */ -export interface MsgPauseGroupResponse { - $type: "akash.deployment.v1beta3.MsgPauseGroupResponse"; -} - -/** MsgStartGroup defines SDK message to close a single Group within a Deployment. */ -export interface MsgStartGroup { - $type: "akash.deployment.v1beta3.MsgStartGroup"; - id: GroupID | undefined; -} - -/** MsgStartGroupResponse defines the Msg/StartGroup response type. */ -export interface MsgStartGroupResponse { - $type: "akash.deployment.v1beta3.MsgStartGroupResponse"; -} - -function createBaseMsgCloseGroup(): MsgCloseGroup { - return { $type: "akash.deployment.v1beta3.MsgCloseGroup", id: undefined }; -} - -export const MsgCloseGroup = { - $type: "akash.deployment.v1beta3.MsgCloseGroup" as const, - - encode( - message: MsgCloseGroup, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseGroup { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = GroupID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCloseGroup { - return { - $type: MsgCloseGroup.$type, - id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgCloseGroup): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = GroupID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): MsgCloseGroup { - return MsgCloseGroup.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCloseGroup { - const message = createBaseMsgCloseGroup(); - message.id = - object.id !== undefined && object.id !== null - ? 
GroupID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseGroup.$type, MsgCloseGroup); - -function createBaseMsgCloseGroupResponse(): MsgCloseGroupResponse { - return { $type: "akash.deployment.v1beta3.MsgCloseGroupResponse" }; -} - -export const MsgCloseGroupResponse = { - $type: "akash.deployment.v1beta3.MsgCloseGroupResponse" as const, - - encode( - _: MsgCloseGroupResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCloseGroupResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseGroupResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCloseGroupResponse { - return { $type: MsgCloseGroupResponse.$type }; - }, - - toJSON(_: MsgCloseGroupResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgCloseGroupResponse { - return MsgCloseGroupResponse.fromPartial(base ?? {}); - }, - fromPartial(_: DeepPartial): MsgCloseGroupResponse { - const message = createBaseMsgCloseGroupResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseGroupResponse.$type, MsgCloseGroupResponse); - -function createBaseMsgPauseGroup(): MsgPauseGroup { - return { $type: "akash.deployment.v1beta3.MsgPauseGroup", id: undefined }; -} - -export const MsgPauseGroup = { - $type: "akash.deployment.v1beta3.MsgPauseGroup" as const, - - encode( - message: MsgPauseGroup, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgPauseGroup { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgPauseGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = GroupID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgPauseGroup { - return { - $type: MsgPauseGroup.$type, - id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgPauseGroup): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = GroupID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): MsgPauseGroup { - return MsgPauseGroup.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgPauseGroup { - const message = createBaseMsgPauseGroup(); - message.id = - object.id !== undefined && object.id !== null - ? 
GroupID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgPauseGroup.$type, MsgPauseGroup); - -function createBaseMsgPauseGroupResponse(): MsgPauseGroupResponse { - return { $type: "akash.deployment.v1beta3.MsgPauseGroupResponse" }; -} - -export const MsgPauseGroupResponse = { - $type: "akash.deployment.v1beta3.MsgPauseGroupResponse" as const, - - encode( - _: MsgPauseGroupResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgPauseGroupResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgPauseGroupResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgPauseGroupResponse { - return { $type: MsgPauseGroupResponse.$type }; - }, - - toJSON(_: MsgPauseGroupResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgPauseGroupResponse { - return MsgPauseGroupResponse.fromPartial(base ?? {}); - }, - fromPartial(_: DeepPartial): MsgPauseGroupResponse { - const message = createBaseMsgPauseGroupResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgPauseGroupResponse.$type, MsgPauseGroupResponse); - -function createBaseMsgStartGroup(): MsgStartGroup { - return { $type: "akash.deployment.v1beta3.MsgStartGroup", id: undefined }; -} - -export const MsgStartGroup = { - $type: "akash.deployment.v1beta3.MsgStartGroup" as const, - - encode( - message: MsgStartGroup, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgStartGroup { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgStartGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = GroupID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgStartGroup { - return { - $type: MsgStartGroup.$type, - id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: MsgStartGroup): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = GroupID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): MsgStartGroup { - return MsgStartGroup.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgStartGroup { - const message = createBaseMsgStartGroup(); - message.id = - object.id !== undefined && object.id !== null - ? 
GroupID.fromPartial(object.id)
-        : undefined;
-    return message;
-  },
-};
-
-messageTypeRegistry.set(MsgStartGroup.$type, MsgStartGroup);
-
-function createBaseMsgStartGroupResponse(): MsgStartGroupResponse {
-  return { $type: "akash.deployment.v1beta3.MsgStartGroupResponse" };
-}
-
-export const MsgStartGroupResponse = {
-  $type: "akash.deployment.v1beta3.MsgStartGroupResponse" as const,
-
-  encode(
-    _: MsgStartGroupResponse,
-    writer: _m0.Writer = _m0.Writer.create(),
-  ): _m0.Writer {
-    return writer;
-  },
-
-  decode(
-    input: _m0.Reader | Uint8Array,
-    length?: number,
-  ): MsgStartGroupResponse {
-    const reader =
-      input instanceof _m0.Reader ? input : _m0.Reader.create(input);
-    let end = length === undefined ? reader.len : reader.pos + length;
-    const message = createBaseMsgStartGroupResponse();
-    while (reader.pos < end) {
-      const tag = reader.uint32();
-      switch (tag >>> 3) {
-      }
-      if ((tag & 7) === 4 || tag === 0) {
-        break;
-      }
-      reader.skipType(tag & 7);
-    }
-    return message;
-  },
-
-  fromJSON(_: any): MsgStartGroupResponse {
-    return { $type: MsgStartGroupResponse.$type };
-  },
-
-  toJSON(_: MsgStartGroupResponse): unknown {
-    const obj: any = {};
-    return obj;
-  },
-
-  create(base?: DeepPartial<MsgStartGroupResponse>): MsgStartGroupResponse {
-    return MsgStartGroupResponse.fromPartial(base ?? {});
-  },
-  fromPartial(_: DeepPartial<MsgStartGroupResponse>): MsgStartGroupResponse {
-    const message = createBaseMsgStartGroupResponse();
-    return message;
-  },
-};
-
-messageTypeRegistry.set(MsgStartGroupResponse.$type, MsgStartGroupResponse);
-
-type Builtin =
-  | Date
-  | Function
-  | Uint8Array
-  | string
-  | number
-  | boolean
-  | undefined;
-
-type DeepPartial<T> = T extends Builtin
-  ? T
-  : T extends Long
-  ? string | number | Long
-  : T extends globalThis.Array<infer U>
-  ? globalThis.Array<DeepPartial<U>>
-  : T extends ReadonlyArray<infer U>
-  ? ReadonlyArray<DeepPartial<U>>
-  : T extends {}
-  ?
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta3/groupspec.ts b/ts/src/generated/akash/deployment/v1beta3/groupspec.ts deleted file mode 100644 index 34069208..00000000 --- a/ts/src/generated/akash/deployment/v1beta3/groupspec.ts +++ /dev/null @@ -1,161 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { PlacementRequirements } from "../../base/v1beta3/attribute"; -import { ResourceUnit } from "./resourceunit"; - -/** GroupSpec stores group specifications */ -export interface GroupSpec { - $type: "akash.deployment.v1beta3.GroupSpec"; - name: string; - requirements: PlacementRequirements | undefined; - resources: ResourceUnit[]; -} - -function createBaseGroupSpec(): GroupSpec { - return { - $type: "akash.deployment.v1beta3.GroupSpec", - name: "", - requirements: undefined, - resources: [], - }; -} - -export const GroupSpec = { - $type: "akash.deployment.v1beta3.GroupSpec" as const, - - encode( - message: GroupSpec, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.requirements !== undefined) { - PlacementRequirements.encode( - message.requirements, - writer.uint32(18).fork(), - ).ldelim(); - } - for (const v of message.resources) { - ResourceUnit.encode(v!, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GroupSpec { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGroupSpec(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.requirements = PlacementRequirements.decode( - reader, - reader.uint32(), - ); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.resources.push(ResourceUnit.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GroupSpec { - return { - $type: GroupSpec.$type, - name: isSet(object.name) ? globalThis.String(object.name) : "", - requirements: isSet(object.requirements) - ? PlacementRequirements.fromJSON(object.requirements) - : undefined, - resources: globalThis.Array.isArray(object?.resources) - ? object.resources.map((e: any) => ResourceUnit.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GroupSpec): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.requirements !== undefined) { - obj.requirements = PlacementRequirements.toJSON(message.requirements); - } - if (message.resources?.length) { - obj.resources = message.resources.map((e) => ResourceUnit.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): GroupSpec { - return GroupSpec.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GroupSpec { - const message = createBaseGroupSpec(); - message.name = object.name ?? 
""; - message.requirements = - object.requirements !== undefined && object.requirements !== null - ? PlacementRequirements.fromPartial(object.requirements) - : undefined; - message.resources = - object.resources?.map((e) => ResourceUnit.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GroupSpec.$type, GroupSpec); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta3/params.ts b/ts/src/generated/akash/deployment/v1beta3/params.ts deleted file mode 100644 index f1015e64..00000000 --- a/ts/src/generated/akash/deployment/v1beta3/params.ts +++ /dev/null @@ -1,108 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** Params defines the parameters for the x/deployment package */ -export interface Params { - $type: "akash.deployment.v1beta3.Params"; - minDeposits: Coin[]; -} - -function createBaseParams(): Params { - return { $type: "akash.deployment.v1beta3.Params", minDeposits: [] }; -} - -export const Params = { - $type: "akash.deployment.v1beta3.Params" as const, - - encode( - message: Params, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.minDeposits) { - Coin.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Params { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseParams(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.minDeposits.push(Coin.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Params { - return { - $type: Params.$type, - minDeposits: globalThis.Array.isArray(object?.minDeposits) - ? object.minDeposits.map((e: any) => Coin.fromJSON(e)) - : [], - }; - }, - - toJSON(message: Params): unknown { - const obj: any = {}; - if (message.minDeposits?.length) { - obj.minDeposits = message.minDeposits.map((e) => Coin.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): Params { - return Params.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Params { - const message = createBaseParams(); - message.minDeposits = - object.minDeposits?.map((e) => Coin.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(Params.$type, Params); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} diff --git a/ts/src/generated/akash/deployment/v1beta3/query.ts b/ts/src/generated/akash/deployment/v1beta3/query.ts deleted file mode 100644 index e2017c33..00000000 --- a/ts/src/generated/akash/deployment/v1beta3/query.ts +++ /dev/null @@ -1,706 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Account } from "../../escrow/v1beta3/types"; -import { Deployment, DeploymentFilters, DeploymentID } from "./deployment"; -import { Group } from "./group"; -import { GroupID } from "./groupid"; - -/** QueryDeploymentsRequest is request type for the Query/Deployments RPC method */ -export interface QueryDeploymentsRequest { - $type: "akash.deployment.v1beta3.QueryDeploymentsRequest"; - filters: DeploymentFilters | undefined; - pagination: PageRequest | undefined; -} - -/** QueryDeploymentsResponse is response type for the Query/Deployments RPC method */ -export interface QueryDeploymentsResponse { - $type: "akash.deployment.v1beta3.QueryDeploymentsResponse"; - deployments: QueryDeploymentResponse[]; - pagination: PageResponse | undefined; -} - -/** QueryDeploymentRequest is request type for the Query/Deployment RPC method */ -export interface QueryDeploymentRequest { - $type: "akash.deployment.v1beta3.QueryDeploymentRequest"; - id: DeploymentID | undefined; -} - -/** QueryDeploymentResponse is response type for the Query/Deployment RPC method */ -export interface QueryDeploymentResponse { - $type: "akash.deployment.v1beta3.QueryDeploymentResponse"; - deployment: Deployment | undefined; - groups: Group[]; - escrowAccount: Account | undefined; -} - -/** QueryGroupRequest is request type for the Query/Group RPC method */ -export interface QueryGroupRequest { - $type: "akash.deployment.v1beta3.QueryGroupRequest"; - id: GroupID | undefined; -} - -/** QueryGroupResponse is response type for the Query/Group RPC method */ -export interface QueryGroupResponse { - $type: "akash.deployment.v1beta3.QueryGroupResponse"; - group: Group | undefined; -} - -function createBaseQueryDeploymentsRequest(): QueryDeploymentsRequest { - return { - $type: "akash.deployment.v1beta3.QueryDeploymentsRequest", - filters: undefined, - pagination: undefined, - }; -} - -export const QueryDeploymentsRequest = { - $type: "akash.deployment.v1beta3.QueryDeploymentsRequest" as const, - - encode( - message: QueryDeploymentsRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.filters !== undefined) { - DeploymentFilters.encode( - message.filters, - writer.uint32(10).fork(), - ).ldelim(); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryDeploymentsRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryDeploymentsRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.filters = DeploymentFilters.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryDeploymentsRequest { - return { - $type: QueryDeploymentsRequest.$type, - filters: isSet(object.filters) - ? DeploymentFilters.fromJSON(object.filters) - : undefined, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryDeploymentsRequest): unknown { - const obj: any = {}; - if (message.filters !== undefined) { - obj.filters = DeploymentFilters.toJSON(message.filters); - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryDeploymentsRequest { - return QueryDeploymentsRequest.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryDeploymentsRequest { - const message = createBaseQueryDeploymentsRequest(); - message.filters = - object.filters !== undefined && object.filters !== null - ? DeploymentFilters.fromPartial(object.filters) - : undefined; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryDeploymentsRequest.$type, QueryDeploymentsRequest); - -function createBaseQueryDeploymentsResponse(): QueryDeploymentsResponse { - return { - $type: "akash.deployment.v1beta3.QueryDeploymentsResponse", - deployments: [], - pagination: undefined, - }; -} - -export const QueryDeploymentsResponse = { - $type: "akash.deployment.v1beta3.QueryDeploymentsResponse" as const, - - encode( - message: QueryDeploymentsResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.deployments) { - QueryDeploymentResponse.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryDeploymentsResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryDeploymentsResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deployments.push( - QueryDeploymentResponse.decode(reader, reader.uint32()), - ); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryDeploymentsResponse { - return { - $type: QueryDeploymentsResponse.$type, - deployments: globalThis.Array.isArray(object?.deployments) - ? 
object.deployments.map((e: any) => - QueryDeploymentResponse.fromJSON(e), - ) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryDeploymentsResponse): unknown { - const obj: any = {}; - if (message.deployments?.length) { - obj.deployments = message.deployments.map((e) => - QueryDeploymentResponse.toJSON(e), - ); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create( - base?: DeepPartial, - ): QueryDeploymentsResponse { - return QueryDeploymentsResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryDeploymentsResponse { - const message = createBaseQueryDeploymentsResponse(); - message.deployments = - object.deployments?.map((e) => QueryDeploymentResponse.fromPartial(e)) || - []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set( - QueryDeploymentsResponse.$type, - QueryDeploymentsResponse, -); - -function createBaseQueryDeploymentRequest(): QueryDeploymentRequest { - return { - $type: "akash.deployment.v1beta3.QueryDeploymentRequest", - id: undefined, - }; -} - -export const QueryDeploymentRequest = { - $type: "akash.deployment.v1beta3.QueryDeploymentRequest" as const, - - encode( - message: QueryDeploymentRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryDeploymentRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryDeploymentRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = DeploymentID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryDeploymentRequest { - return { - $type: QueryDeploymentRequest.$type, - id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryDeploymentRequest): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = DeploymentID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): QueryDeploymentRequest { - return QueryDeploymentRequest.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryDeploymentRequest { - const message = createBaseQueryDeploymentRequest(); - message.id = - object.id !== undefined && object.id !== null - ? 
DeploymentID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryDeploymentRequest.$type, QueryDeploymentRequest); - -function createBaseQueryDeploymentResponse(): QueryDeploymentResponse { - return { - $type: "akash.deployment.v1beta3.QueryDeploymentResponse", - deployment: undefined, - groups: [], - escrowAccount: undefined, - }; -} - -export const QueryDeploymentResponse = { - $type: "akash.deployment.v1beta3.QueryDeploymentResponse" as const, - - encode( - message: QueryDeploymentResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.deployment !== undefined) { - Deployment.encode(message.deployment, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.groups) { - Group.encode(v!, writer.uint32(18).fork()).ldelim(); - } - if (message.escrowAccount !== undefined) { - Account.encode(message.escrowAccount, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryDeploymentResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryDeploymentResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.deployment = Deployment.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.groups.push(Group.decode(reader, reader.uint32())); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.escrowAccount = Account.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryDeploymentResponse { - return { - $type: QueryDeploymentResponse.$type, - deployment: isSet(object.deployment) - ? Deployment.fromJSON(object.deployment) - : undefined, - groups: globalThis.Array.isArray(object?.groups) - ? object.groups.map((e: any) => Group.fromJSON(e)) - : [], - escrowAccount: isSet(object.escrowAccount) - ? Account.fromJSON(object.escrowAccount) - : undefined, - }; - }, - - toJSON(message: QueryDeploymentResponse): unknown { - const obj: any = {}; - if (message.deployment !== undefined) { - obj.deployment = Deployment.toJSON(message.deployment); - } - if (message.groups?.length) { - obj.groups = message.groups.map((e) => Group.toJSON(e)); - } - if (message.escrowAccount !== undefined) { - obj.escrowAccount = Account.toJSON(message.escrowAccount); - } - return obj; - }, - - create(base?: DeepPartial): QueryDeploymentResponse { - return QueryDeploymentResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryDeploymentResponse { - const message = createBaseQueryDeploymentResponse(); - message.deployment = - object.deployment !== undefined && object.deployment !== null - ? Deployment.fromPartial(object.deployment) - : undefined; - message.groups = object.groups?.map((e) => Group.fromPartial(e)) || []; - message.escrowAccount = - object.escrowAccount !== undefined && object.escrowAccount !== null - ? 
Account.fromPartial(object.escrowAccount) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryDeploymentResponse.$type, QueryDeploymentResponse); - -function createBaseQueryGroupRequest(): QueryGroupRequest { - return { $type: "akash.deployment.v1beta3.QueryGroupRequest", id: undefined }; -} - -export const QueryGroupRequest = { - $type: "akash.deployment.v1beta3.QueryGroupRequest" as const, - - encode( - message: QueryGroupRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryGroupRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryGroupRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = GroupID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryGroupRequest { - return { - $type: QueryGroupRequest.$type, - id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryGroupRequest): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = GroupID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): QueryGroupRequest { - return QueryGroupRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryGroupRequest { - const message = createBaseQueryGroupRequest(); - message.id = - object.id !== undefined && object.id !== null - ? GroupID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryGroupRequest.$type, QueryGroupRequest); - -function createBaseQueryGroupResponse(): QueryGroupResponse { - return { - $type: "akash.deployment.v1beta3.QueryGroupResponse", - group: undefined, - }; -} - -export const QueryGroupResponse = { - $type: "akash.deployment.v1beta3.QueryGroupResponse" as const, - - encode( - message: QueryGroupResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.group !== undefined) { - Group.encode(message.group, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryGroupResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryGroupResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.group = Group.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryGroupResponse { - return { - $type: QueryGroupResponse.$type, - group: isSet(object.group) ? Group.fromJSON(object.group) : undefined, - }; - }, - - toJSON(message: QueryGroupResponse): unknown { - const obj: any = {}; - if (message.group !== undefined) { - obj.group = Group.toJSON(message.group); - } - return obj; - }, - - create(base?: DeepPartial): QueryGroupResponse { - return QueryGroupResponse.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): QueryGroupResponse { - const message = createBaseQueryGroupResponse(); - message.group = - object.group !== undefined && object.group !== null - ? Group.fromPartial(object.group) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryGroupResponse.$type, QueryGroupResponse); - -/** Query defines the gRPC querier service */ -export interface Query { - /** Deployments queries deployments */ - Deployments( - request: QueryDeploymentsRequest, - ): Promise; - /** Deployment queries deployment details */ - Deployment(request: QueryDeploymentRequest): Promise; - /** Group queries group details */ - Group(request: QueryGroupRequest): Promise; -} - -export const QueryServiceName = "akash.deployment.v1beta3.Query"; -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || QueryServiceName; - this.rpc = rpc; - this.Deployments = this.Deployments.bind(this); - this.Deployment = this.Deployment.bind(this); - this.Group = this.Group.bind(this); - } - Deployments( - request: QueryDeploymentsRequest, - ): Promise { - const data = QueryDeploymentsRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Deployments", data); - return promise.then((data) => - QueryDeploymentsResponse.decode(_m0.Reader.create(data)), - ); - } - - Deployment( - request: QueryDeploymentRequest, - ): Promise { - const data = QueryDeploymentRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Deployment", data); - return promise.then((data) => - QueryDeploymentResponse.decode(_m0.Reader.create(data)), - ); - } - - Group(request: QueryGroupRequest): Promise { - const data = QueryGroupRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Group", data); - return promise.then((data) => - QueryGroupResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta3/resourceunit.ts b/ts/src/generated/akash/deployment/v1beta3/resourceunit.ts deleted file mode 100644 index e9dc28e1..00000000 --- a/ts/src/generated/akash/deployment/v1beta3/resourceunit.ts +++ /dev/null @@ -1,155 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { DecCoin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Resources } from "../../base/v1beta3/resources"; - -/** ResourceUnit extends Resources and adds Count along with the Price */ -export interface ResourceUnit { - $type: "akash.deployment.v1beta3.ResourceUnit"; - resource: Resources | undefined; - count: number; - price: DecCoin | undefined; -} - -function createBaseResourceUnit(): ResourceUnit { - return { - $type: "akash.deployment.v1beta3.ResourceUnit", - resource: undefined, - count: 0, - price: undefined, - }; -} - -export const ResourceUnit = { - $type: "akash.deployment.v1beta3.ResourceUnit" as const, - - encode( - message: ResourceUnit, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.resource !== undefined) { - Resources.encode(message.resource, writer.uint32(10).fork()).ldelim(); - } - if (message.count !== 0) { - writer.uint32(16).uint32(message.count); - } - if (message.price !== undefined) { - DecCoin.encode(message.price, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ResourceUnit { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseResourceUnit(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.resource = Resources.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.count = reader.uint32(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.price = DecCoin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ResourceUnit { - return { - $type: ResourceUnit.$type, - resource: isSet(object.resource) - ? Resources.fromJSON(object.resource) - : undefined, - count: isSet(object.count) ? globalThis.Number(object.count) : 0, - price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, - }; - }, - - toJSON(message: ResourceUnit): unknown { - const obj: any = {}; - if (message.resource !== undefined) { - obj.resource = Resources.toJSON(message.resource); - } - if (message.count !== 0) { - obj.count = Math.round(message.count); - } - if (message.price !== undefined) { - obj.price = DecCoin.toJSON(message.price); - } - return obj; - }, - - create(base?: DeepPartial): ResourceUnit { - return ResourceUnit.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): ResourceUnit { - const message = createBaseResourceUnit(); - message.resource = - object.resource !== undefined && object.resource !== null - ? Resources.fromPartial(object.resource) - : undefined; - message.count = object.count ?? 
0; - message.price = - object.price !== undefined && object.price !== null - ? DecCoin.fromPartial(object.price) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(ResourceUnit.$type, ResourceUnit); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/deployment/v1beta3/service.grpc-js.ts b/ts/src/generated/akash/deployment/v1beta3/service.grpc-js.ts deleted file mode 100644 index 646018af..00000000 --- a/ts/src/generated/akash/deployment/v1beta3/service.grpc-js.ts +++ /dev/null @@ -1,348 +0,0 @@ -/* eslint-disable */ -import { - ChannelCredentials, - Client, - makeGenericClientConstructor, - Metadata, -} from "@grpc/grpc-js"; -import type { - CallOptions, - ClientOptions, - ClientUnaryCall, - handleUnaryCall, - ServiceError, - UntypedServiceImplementation, -} from "@grpc/grpc-js"; -import { - MsgCloseDeployment, - MsgCloseDeploymentResponse, - MsgCreateDeployment, - MsgCreateDeploymentResponse, - MsgDepositDeployment, - MsgDepositDeploymentResponse, - MsgUpdateDeployment, - MsgUpdateDeploymentResponse, -} from "./deploymentmsg"; -import { - MsgCloseGroup, - MsgCloseGroupResponse, - MsgPauseGroup, - MsgPauseGroupResponse, - MsgStartGroup, - MsgStartGroupResponse, -} from "./groupmsg"; - -export const protobufPackage = "akash.deployment.v1beta3"; - -/** Msg defines the deployment Msg service. */ -export type MsgService = typeof MsgService; -export const MsgService = { - /** CreateDeployment defines a method to create new deployment given proper inputs. */ - createDeployment: { - path: "/akash.deployment.v1beta3.Msg/CreateDeployment", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCreateDeployment) => - Buffer.from(MsgCreateDeployment.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCreateDeployment.decode(value), - responseSerialize: (value: MsgCreateDeploymentResponse) => - Buffer.from(MsgCreateDeploymentResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => - MsgCreateDeploymentResponse.decode(value), - }, - /** DepositDeployment deposits more funds into the deployment account */ - depositDeployment: { - path: "/akash.deployment.v1beta3.Msg/DepositDeployment", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgDepositDeployment) => - Buffer.from(MsgDepositDeployment.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgDepositDeployment.decode(value), - responseSerialize: (value: MsgDepositDeploymentResponse) => - Buffer.from(MsgDepositDeploymentResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => - MsgDepositDeploymentResponse.decode(value), - }, - /** UpdateDeployment defines a method to update a deployment given proper inputs. 
*/ - updateDeployment: { - path: "/akash.deployment.v1beta3.Msg/UpdateDeployment", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgUpdateDeployment) => - Buffer.from(MsgUpdateDeployment.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgUpdateDeployment.decode(value), - responseSerialize: (value: MsgUpdateDeploymentResponse) => - Buffer.from(MsgUpdateDeploymentResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => - MsgUpdateDeploymentResponse.decode(value), - }, - /** CloseDeployment defines a method to close a deployment given proper inputs. */ - closeDeployment: { - path: "/akash.deployment.v1beta3.Msg/CloseDeployment", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCloseDeployment) => - Buffer.from(MsgCloseDeployment.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCloseDeployment.decode(value), - responseSerialize: (value: MsgCloseDeploymentResponse) => - Buffer.from(MsgCloseDeploymentResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => - MsgCloseDeploymentResponse.decode(value), - }, - /** CloseGroup defines a method to close a group of a deployment given proper inputs. */ - closeGroup: { - path: "/akash.deployment.v1beta3.Msg/CloseGroup", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCloseGroup) => - Buffer.from(MsgCloseGroup.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCloseGroup.decode(value), - responseSerialize: (value: MsgCloseGroupResponse) => - Buffer.from(MsgCloseGroupResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => MsgCloseGroupResponse.decode(value), - }, - /** PauseGroup defines a method to close a group of a deployment given proper inputs. */ - pauseGroup: { - path: "/akash.deployment.v1beta3.Msg/PauseGroup", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgPauseGroup) => - Buffer.from(MsgPauseGroup.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgPauseGroup.decode(value), - responseSerialize: (value: MsgPauseGroupResponse) => - Buffer.from(MsgPauseGroupResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => MsgPauseGroupResponse.decode(value), - }, - /** StartGroup defines a method to close a group of a deployment given proper inputs. */ - startGroup: { - path: "/akash.deployment.v1beta3.Msg/StartGroup", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgStartGroup) => - Buffer.from(MsgStartGroup.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgStartGroup.decode(value), - responseSerialize: (value: MsgStartGroupResponse) => - Buffer.from(MsgStartGroupResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => MsgStartGroupResponse.decode(value), - }, -} as const; - -export interface MsgServer extends UntypedServiceImplementation { - /** CreateDeployment defines a method to create new deployment given proper inputs. */ - createDeployment: handleUnaryCall< - MsgCreateDeployment, - MsgCreateDeploymentResponse - >; - /** DepositDeployment deposits more funds into the deployment account */ - depositDeployment: handleUnaryCall< - MsgDepositDeployment, - MsgDepositDeploymentResponse - >; - /** UpdateDeployment defines a method to update a deployment given proper inputs. 
*/ - updateDeployment: handleUnaryCall< - MsgUpdateDeployment, - MsgUpdateDeploymentResponse - >; - /** CloseDeployment defines a method to close a deployment given proper inputs. */ - closeDeployment: handleUnaryCall< - MsgCloseDeployment, - MsgCloseDeploymentResponse - >; - /** CloseGroup defines a method to close a group of a deployment given proper inputs. */ - closeGroup: handleUnaryCall; - /** PauseGroup defines a method to close a group of a deployment given proper inputs. */ - pauseGroup: handleUnaryCall; - /** StartGroup defines a method to close a group of a deployment given proper inputs. */ - startGroup: handleUnaryCall; -} - -export interface MsgClient extends Client { - /** CreateDeployment defines a method to create new deployment given proper inputs. */ - createDeployment( - request: MsgCreateDeployment, - callback: ( - error: ServiceError | null, - response: MsgCreateDeploymentResponse, - ) => void, - ): ClientUnaryCall; - createDeployment( - request: MsgCreateDeployment, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCreateDeploymentResponse, - ) => void, - ): ClientUnaryCall; - createDeployment( - request: MsgCreateDeployment, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCreateDeploymentResponse, - ) => void, - ): ClientUnaryCall; - /** DepositDeployment deposits more funds into the deployment account */ - depositDeployment( - request: MsgDepositDeployment, - callback: ( - error: ServiceError | null, - response: MsgDepositDeploymentResponse, - ) => void, - ): ClientUnaryCall; - depositDeployment( - request: MsgDepositDeployment, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgDepositDeploymentResponse, - ) => void, - ): ClientUnaryCall; - depositDeployment( - request: MsgDepositDeployment, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgDepositDeploymentResponse, - ) => void, - ): ClientUnaryCall; - /** UpdateDeployment defines a method to update a deployment given proper inputs. */ - updateDeployment( - request: MsgUpdateDeployment, - callback: ( - error: ServiceError | null, - response: MsgUpdateDeploymentResponse, - ) => void, - ): ClientUnaryCall; - updateDeployment( - request: MsgUpdateDeployment, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgUpdateDeploymentResponse, - ) => void, - ): ClientUnaryCall; - updateDeployment( - request: MsgUpdateDeployment, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgUpdateDeploymentResponse, - ) => void, - ): ClientUnaryCall; - /** CloseDeployment defines a method to close a deployment given proper inputs. */ - closeDeployment( - request: MsgCloseDeployment, - callback: ( - error: ServiceError | null, - response: MsgCloseDeploymentResponse, - ) => void, - ): ClientUnaryCall; - closeDeployment( - request: MsgCloseDeployment, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCloseDeploymentResponse, - ) => void, - ): ClientUnaryCall; - closeDeployment( - request: MsgCloseDeployment, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCloseDeploymentResponse, - ) => void, - ): ClientUnaryCall; - /** CloseGroup defines a method to close a group of a deployment given proper inputs. 
*/ - closeGroup( - request: MsgCloseGroup, - callback: ( - error: ServiceError | null, - response: MsgCloseGroupResponse, - ) => void, - ): ClientUnaryCall; - closeGroup( - request: MsgCloseGroup, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCloseGroupResponse, - ) => void, - ): ClientUnaryCall; - closeGroup( - request: MsgCloseGroup, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCloseGroupResponse, - ) => void, - ): ClientUnaryCall; - /** PauseGroup defines a method to close a group of a deployment given proper inputs. */ - pauseGroup( - request: MsgPauseGroup, - callback: ( - error: ServiceError | null, - response: MsgPauseGroupResponse, - ) => void, - ): ClientUnaryCall; - pauseGroup( - request: MsgPauseGroup, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgPauseGroupResponse, - ) => void, - ): ClientUnaryCall; - pauseGroup( - request: MsgPauseGroup, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgPauseGroupResponse, - ) => void, - ): ClientUnaryCall; - /** StartGroup defines a method to close a group of a deployment given proper inputs. */ - startGroup( - request: MsgStartGroup, - callback: ( - error: ServiceError | null, - response: MsgStartGroupResponse, - ) => void, - ): ClientUnaryCall; - startGroup( - request: MsgStartGroup, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgStartGroupResponse, - ) => void, - ): ClientUnaryCall; - startGroup( - request: MsgStartGroup, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgStartGroupResponse, - ) => void, - ): ClientUnaryCall; -} - -export const MsgClient = makeGenericClientConstructor( - MsgService, - "akash.deployment.v1beta3.Msg", -) as unknown as { - new ( - address: string, - credentials: ChannelCredentials, - options?: Partial, - ): MsgClient; - service: typeof MsgService; - serviceName: string; -}; diff --git a/ts/src/generated/akash/deployment/v1beta3/service.ts b/ts/src/generated/akash/deployment/v1beta3/service.ts deleted file mode 100644 index 82f2710a..00000000 --- a/ts/src/generated/akash/deployment/v1beta3/service.ts +++ /dev/null @@ -1,134 +0,0 @@ -/* eslint-disable */ -import _m0 from "protobufjs/minimal"; -import { - MsgCloseDeployment, - MsgCloseDeploymentResponse, - MsgCreateDeployment, - MsgCreateDeploymentResponse, - MsgDepositDeployment, - MsgDepositDeploymentResponse, - MsgUpdateDeployment, - MsgUpdateDeploymentResponse, -} from "./deploymentmsg"; -import { - MsgCloseGroup, - MsgCloseGroupResponse, - MsgPauseGroup, - MsgPauseGroupResponse, - MsgStartGroup, - MsgStartGroupResponse, -} from "./groupmsg"; - -/** Msg defines the deployment Msg service. */ -export interface Msg { - /** CreateDeployment defines a method to create new deployment given proper inputs. */ - CreateDeployment( - request: MsgCreateDeployment, - ): Promise; - /** DepositDeployment deposits more funds into the deployment account */ - DepositDeployment( - request: MsgDepositDeployment, - ): Promise; - /** UpdateDeployment defines a method to update a deployment given proper inputs. */ - UpdateDeployment( - request: MsgUpdateDeployment, - ): Promise; - /** CloseDeployment defines a method to close a deployment given proper inputs. */ - CloseDeployment( - request: MsgCloseDeployment, - ): Promise; - /** CloseGroup defines a method to close a group of a deployment given proper inputs. 
*/ - CloseGroup(request: MsgCloseGroup): Promise; - /** PauseGroup defines a method to close a group of a deployment given proper inputs. */ - PauseGroup(request: MsgPauseGroup): Promise; - /** StartGroup defines a method to close a group of a deployment given proper inputs. */ - StartGroup(request: MsgStartGroup): Promise; -} - -export const MsgServiceName = "akash.deployment.v1beta3.Msg"; -export class MsgClientImpl implements Msg { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || MsgServiceName; - this.rpc = rpc; - this.CreateDeployment = this.CreateDeployment.bind(this); - this.DepositDeployment = this.DepositDeployment.bind(this); - this.UpdateDeployment = this.UpdateDeployment.bind(this); - this.CloseDeployment = this.CloseDeployment.bind(this); - this.CloseGroup = this.CloseGroup.bind(this); - this.PauseGroup = this.PauseGroup.bind(this); - this.StartGroup = this.StartGroup.bind(this); - } - CreateDeployment( - request: MsgCreateDeployment, - ): Promise { - const data = MsgCreateDeployment.encode(request).finish(); - const promise = this.rpc.request(this.service, "CreateDeployment", data); - return promise.then((data) => - MsgCreateDeploymentResponse.decode(_m0.Reader.create(data)), - ); - } - - DepositDeployment( - request: MsgDepositDeployment, - ): Promise { - const data = MsgDepositDeployment.encode(request).finish(); - const promise = this.rpc.request(this.service, "DepositDeployment", data); - return promise.then((data) => - MsgDepositDeploymentResponse.decode(_m0.Reader.create(data)), - ); - } - - UpdateDeployment( - request: MsgUpdateDeployment, - ): Promise { - const data = MsgUpdateDeployment.encode(request).finish(); - const promise = this.rpc.request(this.service, "UpdateDeployment", data); - return promise.then((data) => - MsgUpdateDeploymentResponse.decode(_m0.Reader.create(data)), - ); - } - - CloseDeployment( - request: MsgCloseDeployment, - ): Promise { - const data = MsgCloseDeployment.encode(request).finish(); - const promise = this.rpc.request(this.service, "CloseDeployment", data); - return promise.then((data) => - MsgCloseDeploymentResponse.decode(_m0.Reader.create(data)), - ); - } - - CloseGroup(request: MsgCloseGroup): Promise { - const data = MsgCloseGroup.encode(request).finish(); - const promise = this.rpc.request(this.service, "CloseGroup", data); - return promise.then((data) => - MsgCloseGroupResponse.decode(_m0.Reader.create(data)), - ); - } - - PauseGroup(request: MsgPauseGroup): Promise { - const data = MsgPauseGroup.encode(request).finish(); - const promise = this.rpc.request(this.service, "PauseGroup", data); - return promise.then((data) => - MsgPauseGroupResponse.decode(_m0.Reader.create(data)), - ); - } - - StartGroup(request: MsgStartGroup): Promise { - const data = MsgStartGroup.encode(request).finish(); - const promise = this.rpc.request(this.service, "StartGroup", data); - return promise.then((data) => - MsgStartGroupResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} diff --git a/ts/src/generated/akash/deployment/v1beta4/deploymentmsg.ts b/ts/src/generated/akash/deployment/v1beta4/deploymentmsg.ts new file mode 100644 index 00000000..1dcb702a --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/deploymentmsg.ts @@ -0,0 +1,637 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/deployment/v1beta4/deploymentmsg.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { Coin } from "../../../cosmos/base/v1beta1/coin"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { DeploymentID } from "../v1/deployment"; +import { GroupSpec } from "./groupspec"; + +/** MsgCreateDeployment defines an SDK message for creating deployment */ +export interface MsgCreateDeployment { + $type: "akash.deployment.v1beta4.MsgCreateDeployment"; + id: DeploymentID | undefined; + groups: GroupSpec[]; + hash: Uint8Array; + deposit: Coin | undefined; + /** Depositor pays for the deposit */ + depositor: string; +} + +/** MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. */ +export interface MsgCreateDeploymentResponse { + $type: "akash.deployment.v1beta4.MsgCreateDeploymentResponse"; +} + +/** MsgUpdateDeployment defines an SDK message for updating deployment */ +export interface MsgUpdateDeployment { + $type: "akash.deployment.v1beta4.MsgUpdateDeployment"; + id: DeploymentID | undefined; + hash: Uint8Array; +} + +/** MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. */ +export interface MsgUpdateDeploymentResponse { + $type: "akash.deployment.v1beta4.MsgUpdateDeploymentResponse"; +} + +/** MsgCloseDeployment defines an SDK message for closing deployment */ +export interface MsgCloseDeployment { + $type: "akash.deployment.v1beta4.MsgCloseDeployment"; + id: DeploymentID | undefined; +} + +/** MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. */ +export interface MsgCloseDeploymentResponse { + $type: "akash.deployment.v1beta4.MsgCloseDeploymentResponse"; +} + +function createBaseMsgCreateDeployment(): MsgCreateDeployment { + return { + $type: "akash.deployment.v1beta4.MsgCreateDeployment", + id: undefined, + groups: [], + hash: new Uint8Array(0), + deposit: undefined, + depositor: "", + }; +} + +export const MsgCreateDeployment: MessageFns< + MsgCreateDeployment, + "akash.deployment.v1beta4.MsgCreateDeployment" +> = { + $type: "akash.deployment.v1beta4.MsgCreateDeployment" as const, + + encode( + message: MsgCreateDeployment, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + DeploymentID.encode(message.id, writer.uint32(10).fork()).join(); + } + for (const v of message.groups) { + GroupSpec.encode(v!, writer.uint32(18).fork()).join(); + } + if (message.hash.length !== 0) { + writer.uint32(26).bytes(message.hash); + } + if (message.deposit !== undefined) { + Coin.encode(message.deposit, writer.uint32(34).fork()).join(); + } + if (message.depositor !== "") { + writer.uint32(42).string(message.depositor); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgCreateDeployment { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseMsgCreateDeployment(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = DeploymentID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.groups.push(GroupSpec.decode(reader, reader.uint32())); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.hash = reader.bytes(); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.deposit = Coin.decode(reader, reader.uint32()); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.depositor = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCreateDeployment { + return { + $type: MsgCreateDeployment.$type, + id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, + groups: globalThis.Array.isArray(object?.groups) + ? object.groups.map((e: any) => GroupSpec.fromJSON(e)) + : [], + hash: isSet(object.hash) + ? bytesFromBase64(object.hash) + : new Uint8Array(0), + deposit: isSet(object.deposit) + ? Coin.fromJSON(object.deposit) + : undefined, + depositor: isSet(object.depositor) + ? globalThis.String(object.depositor) + : "", + }; + }, + + toJSON(message: MsgCreateDeployment): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = DeploymentID.toJSON(message.id); + } + if (message.groups?.length) { + obj.groups = message.groups.map((e) => GroupSpec.toJSON(e)); + } + if (message.hash.length !== 0) { + obj.hash = base64FromBytes(message.hash); + } + if (message.deposit !== undefined) { + obj.deposit = Coin.toJSON(message.deposit); + } + if (message.depositor !== "") { + obj.depositor = message.depositor; + } + return obj; + }, + + create(base?: DeepPartial): MsgCreateDeployment { + return MsgCreateDeployment.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgCreateDeployment { + const message = createBaseMsgCreateDeployment(); + message.id = + object.id !== undefined && object.id !== null + ? DeploymentID.fromPartial(object.id) + : undefined; + message.groups = object.groups?.map((e) => GroupSpec.fromPartial(e)) || []; + message.hash = object.hash ?? new Uint8Array(0); + message.deposit = + object.deposit !== undefined && object.deposit !== null + ? Coin.fromPartial(object.deposit) + : undefined; + message.depositor = object.depositor ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(MsgCreateDeployment.$type, MsgCreateDeployment); + +function createBaseMsgCreateDeploymentResponse(): MsgCreateDeploymentResponse { + return { $type: "akash.deployment.v1beta4.MsgCreateDeploymentResponse" }; +} + +export const MsgCreateDeploymentResponse: MessageFns< + MsgCreateDeploymentResponse, + "akash.deployment.v1beta4.MsgCreateDeploymentResponse" +> = { + $type: "akash.deployment.v1beta4.MsgCreateDeploymentResponse" as const, + + encode( + _: MsgCreateDeploymentResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgCreateDeploymentResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseMsgCreateDeploymentResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCreateDeploymentResponse { + return { $type: MsgCreateDeploymentResponse.$type }; + }, + + toJSON(_: MsgCreateDeploymentResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgCreateDeploymentResponse { + return MsgCreateDeploymentResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgCreateDeploymentResponse { + const message = createBaseMsgCreateDeploymentResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgCreateDeploymentResponse.$type, + MsgCreateDeploymentResponse, +); + +function createBaseMsgUpdateDeployment(): MsgUpdateDeployment { + return { + $type: "akash.deployment.v1beta4.MsgUpdateDeployment", + id: undefined, + hash: new Uint8Array(0), + }; +} + +export const MsgUpdateDeployment: MessageFns< + MsgUpdateDeployment, + "akash.deployment.v1beta4.MsgUpdateDeployment" +> = { + $type: "akash.deployment.v1beta4.MsgUpdateDeployment" as const, + + encode( + message: MsgUpdateDeployment, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + DeploymentID.encode(message.id, writer.uint32(10).fork()).join(); + } + if (message.hash.length !== 0) { + writer.uint32(26).bytes(message.hash); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgUpdateDeployment { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgUpdateDeployment(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = DeploymentID.decode(reader, reader.uint32()); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.hash = reader.bytes(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgUpdateDeployment { + return { + $type: MsgUpdateDeployment.$type, + id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, + hash: isSet(object.hash) + ? bytesFromBase64(object.hash) + : new Uint8Array(0), + }; + }, + + toJSON(message: MsgUpdateDeployment): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = DeploymentID.toJSON(message.id); + } + if (message.hash.length !== 0) { + obj.hash = base64FromBytes(message.hash); + } + return obj; + }, + + create(base?: DeepPartial): MsgUpdateDeployment { + return MsgUpdateDeployment.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgUpdateDeployment { + const message = createBaseMsgUpdateDeployment(); + message.id = + object.id !== undefined && object.id !== null + ? DeploymentID.fromPartial(object.id) + : undefined; + message.hash = object.hash ?? 
new Uint8Array(0); + return message; + }, +}; + +messageTypeRegistry.set(MsgUpdateDeployment.$type, MsgUpdateDeployment); + +function createBaseMsgUpdateDeploymentResponse(): MsgUpdateDeploymentResponse { + return { $type: "akash.deployment.v1beta4.MsgUpdateDeploymentResponse" }; +} + +export const MsgUpdateDeploymentResponse: MessageFns< + MsgUpdateDeploymentResponse, + "akash.deployment.v1beta4.MsgUpdateDeploymentResponse" +> = { + $type: "akash.deployment.v1beta4.MsgUpdateDeploymentResponse" as const, + + encode( + _: MsgUpdateDeploymentResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgUpdateDeploymentResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgUpdateDeploymentResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgUpdateDeploymentResponse { + return { $type: MsgUpdateDeploymentResponse.$type }; + }, + + toJSON(_: MsgUpdateDeploymentResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgUpdateDeploymentResponse { + return MsgUpdateDeploymentResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgUpdateDeploymentResponse { + const message = createBaseMsgUpdateDeploymentResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgUpdateDeploymentResponse.$type, + MsgUpdateDeploymentResponse, +); + +function createBaseMsgCloseDeployment(): MsgCloseDeployment { + return { + $type: "akash.deployment.v1beta4.MsgCloseDeployment", + id: undefined, + }; +} + +export const MsgCloseDeployment: MessageFns< + MsgCloseDeployment, + "akash.deployment.v1beta4.MsgCloseDeployment" +> = { + $type: "akash.deployment.v1beta4.MsgCloseDeployment" as const, + + encode( + message: MsgCloseDeployment, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + DeploymentID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgCloseDeployment { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCloseDeployment(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = DeploymentID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCloseDeployment { + return { + $type: MsgCloseDeployment.$type, + id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: MsgCloseDeployment): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = DeploymentID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): MsgCloseDeployment { + return MsgCloseDeployment.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): MsgCloseDeployment { + const message = createBaseMsgCloseDeployment(); + message.id = + object.id !== undefined && object.id !== null + ? DeploymentID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgCloseDeployment.$type, MsgCloseDeployment); + +function createBaseMsgCloseDeploymentResponse(): MsgCloseDeploymentResponse { + return { $type: "akash.deployment.v1beta4.MsgCloseDeploymentResponse" }; +} + +export const MsgCloseDeploymentResponse: MessageFns< + MsgCloseDeploymentResponse, + "akash.deployment.v1beta4.MsgCloseDeploymentResponse" +> = { + $type: "akash.deployment.v1beta4.MsgCloseDeploymentResponse" as const, + + encode( + _: MsgCloseDeploymentResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgCloseDeploymentResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCloseDeploymentResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCloseDeploymentResponse { + return { $type: MsgCloseDeploymentResponse.$type }; + }, + + toJSON(_: MsgCloseDeploymentResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgCloseDeploymentResponse { + return MsgCloseDeploymentResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgCloseDeploymentResponse { + const message = createBaseMsgCloseDeploymentResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgCloseDeploymentResponse.$type, + MsgCloseDeploymentResponse, +); + +function bytesFromBase64(b64: string): Uint8Array { + if ((globalThis as any).Buffer) { + return Uint8Array.from(globalThis.Buffer.from(b64, "base64")); + } else { + const bin = globalThis.atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; + } +} + +function base64FromBytes(arr: Uint8Array): string { + if ((globalThis as any).Buffer) { + return globalThis.Buffer.from(arr).toString("base64"); + } else { + const bin: string[] = []; + arr.forEach((byte) => { + bin.push(globalThis.String.fromCharCode(byte)); + }); + return globalThis.btoa(bin.join("")); + } +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/filters.ts b/ts/src/generated/akash/deployment/v1beta4/filters.ts new file mode 100644 index 00000000..94bd482b --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/filters.ts @@ -0,0 +1,299 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/deployment/v1beta4/filters.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; + +/** DeploymentFilters defines filters used to filter deployments */ +export interface DeploymentFilters { + $type: "akash.deployment.v1beta4.DeploymentFilters"; + owner: string; + dseq: Long; + state: string; +} + +/** GroupFilters defines filters used to filter groups */ +export interface GroupFilters { + $type: "akash.deployment.v1beta4.GroupFilters"; + owner: string; + dseq: Long; + gseq: Long; + state: string; +} + +function createBaseDeploymentFilters(): DeploymentFilters { + return { + $type: "akash.deployment.v1beta4.DeploymentFilters", + owner: "", + dseq: Long.UZERO, + state: "", + }; +} + +export const DeploymentFilters: MessageFns< + DeploymentFilters, + "akash.deployment.v1beta4.DeploymentFilters" +> = { + $type: "akash.deployment.v1beta4.DeploymentFilters" as const, + + encode( + message: DeploymentFilters, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq.toString()); + } + if (message.state !== "") { + writer.uint32(26).string(message.state); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DeploymentFilters { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDeploymentFilters(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = Long.fromString(reader.uint64().toString(), true); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.state = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DeploymentFilters { + return { + $type: DeploymentFilters.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, + state: isSet(object.state) ? 
globalThis.String(object.state) : "", + }; + }, + + toJSON(message: DeploymentFilters): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (!message.dseq.equals(Long.UZERO)) { + obj.dseq = (message.dseq || Long.UZERO).toString(); + } + if (message.state !== "") { + obj.state = message.state; + } + return obj; + }, + + create(base?: DeepPartial): DeploymentFilters { + return DeploymentFilters.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DeploymentFilters { + const message = createBaseDeploymentFilters(); + message.owner = object.owner ?? ""; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? Long.fromValue(object.dseq) + : Long.UZERO; + message.state = object.state ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(DeploymentFilters.$type, DeploymentFilters); + +function createBaseGroupFilters(): GroupFilters { + return { + $type: "akash.deployment.v1beta4.GroupFilters", + owner: "", + dseq: Long.UZERO, + gseq: Long.UZERO, + state: "", + }; +} + +export const GroupFilters: MessageFns< + GroupFilters, + "akash.deployment.v1beta4.GroupFilters" +> = { + $type: "akash.deployment.v1beta4.GroupFilters" as const, + + encode( + message: GroupFilters, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq.toString()); + } + if (!message.gseq.equals(Long.UZERO)) { + writer.uint32(24).uint64(message.gseq.toString()); + } + if (message.state !== "") { + writer.uint32(34).string(message.state); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): GroupFilters { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGroupFilters(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = Long.fromString(reader.uint64().toString(), true); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.gseq = Long.fromString(reader.uint64().toString(), true); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.state = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): GroupFilters { + return { + $type: GroupFilters.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, + gseq: isSet(object.gseq) ? Long.fromValue(object.gseq) : Long.UZERO, + state: isSet(object.state) ? globalThis.String(object.state) : "", + }; + }, + + toJSON(message: GroupFilters): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (!message.dseq.equals(Long.UZERO)) { + obj.dseq = (message.dseq || Long.UZERO).toString(); + } + if (!message.gseq.equals(Long.UZERO)) { + obj.gseq = (message.gseq || Long.UZERO).toString(); + } + if (message.state !== "") { + obj.state = message.state; + } + return obj; + }, + + create(base?: DeepPartial): GroupFilters { + return GroupFilters.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): GroupFilters { + const message = createBaseGroupFilters(); + message.owner = object.owner ?? ""; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? Long.fromValue(object.dseq) + : Long.UZERO; + message.gseq = + object.gseq !== undefined && object.gseq !== null + ? Long.fromValue(object.gseq) + : Long.UZERO; + message.state = object.state ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(GroupFilters.$type, GroupFilters); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/genesis.ts b/ts/src/generated/akash/deployment/v1beta4/genesis.ts new file mode 100644 index 00000000..c24ed52c --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/genesis.ts @@ -0,0 +1,259 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/deployment/v1beta4/genesis.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Deployment } from "../v1/deployment"; +import { Group } from "./group"; +import { Params } from "./params"; + +/** GenesisDeployment defines the basic genesis state used by deployment module */ +export interface GenesisDeployment { + $type: "akash.deployment.v1beta4.GenesisDeployment"; + deployment: Deployment | undefined; + groups: Group[]; +} + +/** GenesisState stores slice of genesis deployment instance */ +export interface GenesisState { + $type: "akash.deployment.v1beta4.GenesisState"; + deployments: GenesisDeployment[]; + params: Params | undefined; +} + +function createBaseGenesisDeployment(): GenesisDeployment { + return { + $type: "akash.deployment.v1beta4.GenesisDeployment", + deployment: undefined, + groups: [], + }; +} + +export const GenesisDeployment: MessageFns< + GenesisDeployment, + "akash.deployment.v1beta4.GenesisDeployment" +> = { + $type: "akash.deployment.v1beta4.GenesisDeployment" as const, + + encode( + message: GenesisDeployment, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.deployment !== undefined) { + Deployment.encode(message.deployment, writer.uint32(10).fork()).join(); + } + for (const v of message.groups) { + Group.encode(v!, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): GenesisDeployment { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseGenesisDeployment(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.deployment = Deployment.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.groups.push(Group.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): GenesisDeployment { + return { + $type: GenesisDeployment.$type, + deployment: isSet(object.deployment) + ? Deployment.fromJSON(object.deployment) + : undefined, + groups: globalThis.Array.isArray(object?.groups) + ? object.groups.map((e: any) => Group.fromJSON(e)) + : [], + }; + }, + + toJSON(message: GenesisDeployment): unknown { + const obj: any = {}; + if (message.deployment !== undefined) { + obj.deployment = Deployment.toJSON(message.deployment); + } + if (message.groups?.length) { + obj.groups = message.groups.map((e) => Group.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): GenesisDeployment { + return GenesisDeployment.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GenesisDeployment { + const message = createBaseGenesisDeployment(); + message.deployment = + object.deployment !== undefined && object.deployment !== null + ? Deployment.fromPartial(object.deployment) + : undefined; + message.groups = object.groups?.map((e) => Group.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(GenesisDeployment.$type, GenesisDeployment); + +function createBaseGenesisState(): GenesisState { + return { + $type: "akash.deployment.v1beta4.GenesisState", + deployments: [], + params: undefined, + }; +} + +export const GenesisState: MessageFns< + GenesisState, + "akash.deployment.v1beta4.GenesisState" +> = { + $type: "akash.deployment.v1beta4.GenesisState" as const, + + encode( + message: GenesisState, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.deployments) { + GenesisDeployment.encode(v!, writer.uint32(10).fork()).join(); + } + if (message.params !== undefined) { + Params.encode(message.params, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): GenesisState { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGenesisState(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.deployments.push( + GenesisDeployment.decode(reader, reader.uint32()), + ); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.params = Params.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): GenesisState { + return { + $type: GenesisState.$type, + deployments: globalThis.Array.isArray(object?.deployments) + ? object.deployments.map((e: any) => GenesisDeployment.fromJSON(e)) + : [], + params: isSet(object.params) ? 
Params.fromJSON(object.params) : undefined, + }; + }, + + toJSON(message: GenesisState): unknown { + const obj: any = {}; + if (message.deployments?.length) { + obj.deployments = message.deployments.map((e) => + GenesisDeployment.toJSON(e), + ); + } + if (message.params !== undefined) { + obj.params = Params.toJSON(message.params); + } + return obj; + }, + + create(base?: DeepPartial): GenesisState { + return GenesisState.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GenesisState { + const message = createBaseGenesisState(); + message.deployments = + object.deployments?.map((e) => GenesisDeployment.fromPartial(e)) || []; + message.params = + object.params !== undefined && object.params !== null + ? Params.fromPartial(object.params) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(GenesisState.$type, GenesisState); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/group.ts b/ts/src/generated/akash/deployment/v1beta4/group.ts new file mode 100644 index 00000000..a2e04f99 --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/group.ts @@ -0,0 +1,245 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/deployment/v1beta4/group.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { GroupID } from "../v1/group"; +import { GroupSpec } from "./groupspec"; + +/** Group stores group id, state and specifications of group */ +export interface Group { + $type: "akash.deployment.v1beta4.Group"; + id: GroupID | undefined; + state: Group_State; + groupSpec: GroupSpec | undefined; + createdAt: Long; +} + +/** State is an enum which refers to state of group */ +export enum Group_State { + /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ + invalid = 0, + /** open - GroupOpen denotes state for group open */ + open = 1, + /** paused - GroupOrdered denotes state for group ordered */ + paused = 2, + /** insufficient_funds - GroupInsufficientFunds denotes state for group insufficient_funds */ + insufficient_funds = 3, + /** closed - GroupClosed denotes state for group closed */ + closed = 4, + UNRECOGNIZED = -1, +} + +export function group_StateFromJSON(object: any): Group_State { + switch (object) { + case 0: + case "invalid": + return Group_State.invalid; + case 1: + case "open": + return Group_State.open; + case 2: + case "paused": + return Group_State.paused; + case 3: + case "insufficient_funds": + return Group_State.insufficient_funds; + case 4: + case "closed": + return Group_State.closed; + case -1: + case "UNRECOGNIZED": + default: + return Group_State.UNRECOGNIZED; + } +} + +export function group_StateToJSON(object: Group_State): string { + switch (object) { + case Group_State.invalid: + return "invalid"; + case Group_State.open: + return "open"; + case Group_State.paused: + return "paused"; + case Group_State.insufficient_funds: + return "insufficient_funds"; + case Group_State.closed: + return "closed"; + case Group_State.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +function createBaseGroup(): Group { + return { + $type: "akash.deployment.v1beta4.Group", + id: undefined, + state: 0, + groupSpec: undefined, + createdAt: Long.ZERO, + }; +} + +export const Group: MessageFns = { + $type: "akash.deployment.v1beta4.Group" as const, + + encode( + message: Group, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + GroupID.encode(message.id, writer.uint32(10).fork()).join(); + } + if (message.state !== 0) { + writer.uint32(16).int32(message.state); + } + if (message.groupSpec !== undefined) { + GroupSpec.encode(message.groupSpec, writer.uint32(26).fork()).join(); + } + if (!message.createdAt.equals(Long.ZERO)) { + writer.uint32(32).int64(message.createdAt.toString()); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Group { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGroup(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = GroupID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.state = reader.int32() as any; + continue; + case 3: + if (tag !== 26) { + break; + } + + message.groupSpec = GroupSpec.decode(reader, reader.uint32()); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.createdAt = Long.fromString(reader.int64().toString()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Group { + return { + $type: Group.$type, + id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, + state: isSet(object.state) ? group_StateFromJSON(object.state) : 0, + groupSpec: isSet(object.groupSpec) + ? GroupSpec.fromJSON(object.groupSpec) + : undefined, + createdAt: isSet(object.createdAt) + ? 
Long.fromValue(object.createdAt) + : Long.ZERO, + }; + }, + + toJSON(message: Group): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = GroupID.toJSON(message.id); + } + if (message.state !== 0) { + obj.state = group_StateToJSON(message.state); + } + if (message.groupSpec !== undefined) { + obj.groupSpec = GroupSpec.toJSON(message.groupSpec); + } + if (!message.createdAt.equals(Long.ZERO)) { + obj.createdAt = (message.createdAt || Long.ZERO).toString(); + } + return obj; + }, + + create(base?: DeepPartial): Group { + return Group.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Group { + const message = createBaseGroup(); + message.id = + object.id !== undefined && object.id !== null + ? GroupID.fromPartial(object.id) + : undefined; + message.state = object.state ?? 0; + message.groupSpec = + object.groupSpec !== undefined && object.groupSpec !== null + ? GroupSpec.fromPartial(object.groupSpec) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? Long.fromValue(object.createdAt) + : Long.ZERO; + return message; + }, +}; + +messageTypeRegistry.set(Group.$type, Group); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/groupmsg.ts b/ts/src/generated/akash/deployment/v1beta4/groupmsg.ts new file mode 100644 index 00000000..e6462e4c --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/groupmsg.ts @@ -0,0 +1,472 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/deployment/v1beta4/groupmsg.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { GroupID } from "../v1/group"; + +/** MsgCloseGroup defines SDK message to close a single Group within a Deployment. */ +export interface MsgCloseGroup { + $type: "akash.deployment.v1beta4.MsgCloseGroup"; + id: GroupID | undefined; +} + +/** MsgCloseGroupResponse defines the Msg/CloseGroup response type. */ +export interface MsgCloseGroupResponse { + $type: "akash.deployment.v1beta4.MsgCloseGroupResponse"; +} + +/** MsgPauseGroup defines SDK message to close a single Group within a Deployment. */ +export interface MsgPauseGroup { + $type: "akash.deployment.v1beta4.MsgPauseGroup"; + id: GroupID | undefined; +} + +/** MsgPauseGroupResponse defines the Msg/PauseGroup response type. */ +export interface MsgPauseGroupResponse { + $type: "akash.deployment.v1beta4.MsgPauseGroupResponse"; +} + +/** MsgStartGroup defines SDK message to close a single Group within a Deployment. 
*/ +export interface MsgStartGroup { + $type: "akash.deployment.v1beta4.MsgStartGroup"; + id: GroupID | undefined; +} + +/** MsgStartGroupResponse defines the Msg/StartGroup response type. */ +export interface MsgStartGroupResponse { + $type: "akash.deployment.v1beta4.MsgStartGroupResponse"; +} + +function createBaseMsgCloseGroup(): MsgCloseGroup { + return { $type: "akash.deployment.v1beta4.MsgCloseGroup", id: undefined }; +} + +export const MsgCloseGroup: MessageFns< + MsgCloseGroup, + "akash.deployment.v1beta4.MsgCloseGroup" +> = { + $type: "akash.deployment.v1beta4.MsgCloseGroup" as const, + + encode( + message: MsgCloseGroup, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + GroupID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): MsgCloseGroup { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCloseGroup(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = GroupID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCloseGroup { + return { + $type: MsgCloseGroup.$type, + id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: MsgCloseGroup): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = GroupID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): MsgCloseGroup { + return MsgCloseGroup.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgCloseGroup { + const message = createBaseMsgCloseGroup(); + message.id = + object.id !== undefined && object.id !== null + ? GroupID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgCloseGroup.$type, MsgCloseGroup); + +function createBaseMsgCloseGroupResponse(): MsgCloseGroupResponse { + return { $type: "akash.deployment.v1beta4.MsgCloseGroupResponse" }; +} + +export const MsgCloseGroupResponse: MessageFns< + MsgCloseGroupResponse, + "akash.deployment.v1beta4.MsgCloseGroupResponse" +> = { + $type: "akash.deployment.v1beta4.MsgCloseGroupResponse" as const, + + encode( + _: MsgCloseGroupResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgCloseGroupResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCloseGroupResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCloseGroupResponse { + return { $type: MsgCloseGroupResponse.$type }; + }, + + toJSON(_: MsgCloseGroupResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgCloseGroupResponse { + return MsgCloseGroupResponse.fromPartial(base ?? 
{}); + }, + fromPartial(_: DeepPartial): MsgCloseGroupResponse { + const message = createBaseMsgCloseGroupResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgCloseGroupResponse.$type, MsgCloseGroupResponse); + +function createBaseMsgPauseGroup(): MsgPauseGroup { + return { $type: "akash.deployment.v1beta4.MsgPauseGroup", id: undefined }; +} + +export const MsgPauseGroup: MessageFns< + MsgPauseGroup, + "akash.deployment.v1beta4.MsgPauseGroup" +> = { + $type: "akash.deployment.v1beta4.MsgPauseGroup" as const, + + encode( + message: MsgPauseGroup, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + GroupID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): MsgPauseGroup { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgPauseGroup(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = GroupID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgPauseGroup { + return { + $type: MsgPauseGroup.$type, + id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: MsgPauseGroup): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = GroupID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): MsgPauseGroup { + return MsgPauseGroup.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgPauseGroup { + const message = createBaseMsgPauseGroup(); + message.id = + object.id !== undefined && object.id !== null + ? GroupID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgPauseGroup.$type, MsgPauseGroup); + +function createBaseMsgPauseGroupResponse(): MsgPauseGroupResponse { + return { $type: "akash.deployment.v1beta4.MsgPauseGroupResponse" }; +} + +export const MsgPauseGroupResponse: MessageFns< + MsgPauseGroupResponse, + "akash.deployment.v1beta4.MsgPauseGroupResponse" +> = { + $type: "akash.deployment.v1beta4.MsgPauseGroupResponse" as const, + + encode( + _: MsgPauseGroupResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgPauseGroupResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgPauseGroupResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgPauseGroupResponse { + return { $type: MsgPauseGroupResponse.$type }; + }, + + toJSON(_: MsgPauseGroupResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgPauseGroupResponse { + return MsgPauseGroupResponse.fromPartial(base ?? 
{}); + }, + fromPartial(_: DeepPartial): MsgPauseGroupResponse { + const message = createBaseMsgPauseGroupResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgPauseGroupResponse.$type, MsgPauseGroupResponse); + +function createBaseMsgStartGroup(): MsgStartGroup { + return { $type: "akash.deployment.v1beta4.MsgStartGroup", id: undefined }; +} + +export const MsgStartGroup: MessageFns< + MsgStartGroup, + "akash.deployment.v1beta4.MsgStartGroup" +> = { + $type: "akash.deployment.v1beta4.MsgStartGroup" as const, + + encode( + message: MsgStartGroup, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + GroupID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): MsgStartGroup { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgStartGroup(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = GroupID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgStartGroup { + return { + $type: MsgStartGroup.$type, + id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: MsgStartGroup): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = GroupID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): MsgStartGroup { + return MsgStartGroup.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgStartGroup { + const message = createBaseMsgStartGroup(); + message.id = + object.id !== undefined && object.id !== null + ? GroupID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgStartGroup.$type, MsgStartGroup); + +function createBaseMsgStartGroupResponse(): MsgStartGroupResponse { + return { $type: "akash.deployment.v1beta4.MsgStartGroupResponse" }; +} + +export const MsgStartGroupResponse: MessageFns< + MsgStartGroupResponse, + "akash.deployment.v1beta4.MsgStartGroupResponse" +> = { + $type: "akash.deployment.v1beta4.MsgStartGroupResponse" as const, + + encode( + _: MsgStartGroupResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgStartGroupResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgStartGroupResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgStartGroupResponse { + return { $type: MsgStartGroupResponse.$type }; + }, + + toJSON(_: MsgStartGroupResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgStartGroupResponse { + return MsgStartGroupResponse.fromPartial(base ?? 
{}); + }, + fromPartial(_: DeepPartial): MsgStartGroupResponse { + const message = createBaseMsgStartGroupResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgStartGroupResponse.$type, MsgStartGroupResponse); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/groupspec.ts b/ts/src/generated/akash/deployment/v1beta4/groupspec.ts new file mode 100644 index 00000000..c3d44f03 --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/groupspec.ts @@ -0,0 +1,175 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/deployment/v1beta4/groupspec.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { PlacementRequirements } from "../../base/attributes/v1/attribute"; +import { ResourceUnit } from "./resourceunit"; + +/** Spec stores group specifications */ +export interface GroupSpec { + $type: "akash.deployment.v1beta4.GroupSpec"; + name: string; + requirements: PlacementRequirements | undefined; + resources: ResourceUnit[]; +} + +function createBaseGroupSpec(): GroupSpec { + return { + $type: "akash.deployment.v1beta4.GroupSpec", + name: "", + requirements: undefined, + resources: [], + }; +} + +export const GroupSpec: MessageFns< + GroupSpec, + "akash.deployment.v1beta4.GroupSpec" +> = { + $type: "akash.deployment.v1beta4.GroupSpec" as const, + + encode( + message: GroupSpec, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.requirements !== undefined) { + PlacementRequirements.encode( + message.requirements, + writer.uint32(18).fork(), + ).join(); + } + for (const v of message.resources) { + ResourceUnit.encode(v!, writer.uint32(26).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): GroupSpec { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseGroupSpec(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.requirements = PlacementRequirements.decode( + reader, + reader.uint32(), + ); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.resources.push(ResourceUnit.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): GroupSpec { + return { + $type: GroupSpec.$type, + name: isSet(object.name) ? globalThis.String(object.name) : "", + requirements: isSet(object.requirements) + ? PlacementRequirements.fromJSON(object.requirements) + : undefined, + resources: globalThis.Array.isArray(object?.resources) + ? object.resources.map((e: any) => ResourceUnit.fromJSON(e)) + : [], + }; + }, + + toJSON(message: GroupSpec): unknown { + const obj: any = {}; + if (message.name !== "") { + obj.name = message.name; + } + if (message.requirements !== undefined) { + obj.requirements = PlacementRequirements.toJSON(message.requirements); + } + if (message.resources?.length) { + obj.resources = message.resources.map((e) => ResourceUnit.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): GroupSpec { + return GroupSpec.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GroupSpec { + const message = createBaseGroupSpec(); + message.name = object.name ?? ""; + message.requirements = + object.requirements !== undefined && object.requirements !== null + ? PlacementRequirements.fromPartial(object.requirements) + : undefined; + message.resources = + object.resources?.map((e) => ResourceUnit.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(GroupSpec.$type, GroupSpec); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/params.ts b/ts/src/generated/akash/deployment/v1beta4/params.ts new file mode 100644 index 00000000..97177780 --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/params.ts @@ -0,0 +1,119 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/deployment/v1beta4/params.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { Coin } from "../../../cosmos/base/v1beta1/coin"; +import { messageTypeRegistry } from "../../../typeRegistry"; + +/** Params defines the parameters for the x/deployment module */ +export interface Params { + $type: "akash.deployment.v1beta4.Params"; + minDeposits: Coin[]; +} + +function createBaseParams(): Params { + return { $type: "akash.deployment.v1beta4.Params", minDeposits: [] }; +} + +export const Params: MessageFns = { + $type: "akash.deployment.v1beta4.Params" as const, + + encode( + message: Params, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.minDeposits) { + Coin.encode(v!, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Params { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.minDeposits.push(Coin.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Params { + return { + $type: Params.$type, + minDeposits: globalThis.Array.isArray(object?.minDeposits) + ? object.minDeposits.map((e: any) => Coin.fromJSON(e)) + : [], + }; + }, + + toJSON(message: Params): unknown { + const obj: any = {}; + if (message.minDeposits?.length) { + obj.minDeposits = message.minDeposits.map((e) => Coin.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): Params { + return Params.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Params { + const message = createBaseParams(); + message.minDeposits = + object.minDeposits?.map((e) => Coin.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Params.$type, Params); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/paramsmsg.ts b/ts/src/generated/akash/deployment/v1beta4/paramsmsg.ts new file mode 100644 index 00000000..9d9631d6 --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/paramsmsg.ts @@ -0,0 +1,227 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/deployment/v1beta4/paramsmsg.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Params } from "./params"; + +/** + * MsgUpdateParams is the Msg/UpdateParams request type. + * + * Since: akash v1.0.0 + */ +export interface MsgUpdateParams { + $type: "akash.deployment.v1beta4.MsgUpdateParams"; + /** authority is the address of the governance account. */ + authority: string; + /** + * params defines the x/deployment parameters to update. + * + * NOTE: All parameters must be supplied. + */ + params: Params | undefined; +} + +/** + * MsgUpdateParamsResponse defines the response structure for executing a + * MsgUpdateParams message. + * + * Since: akash v1.0.0 + */ +export interface MsgUpdateParamsResponse { + $type: "akash.deployment.v1beta4.MsgUpdateParamsResponse"; +} + +function createBaseMsgUpdateParams(): MsgUpdateParams { + return { + $type: "akash.deployment.v1beta4.MsgUpdateParams", + authority: "", + params: undefined, + }; +} + +export const MsgUpdateParams: MessageFns< + MsgUpdateParams, + "akash.deployment.v1beta4.MsgUpdateParams" +> = { + $type: "akash.deployment.v1beta4.MsgUpdateParams" as const, + + encode( + message: MsgUpdateParams, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.authority !== "") { + writer.uint32(10).string(message.authority); + } + if (message.params !== undefined) { + Params.encode(message.params, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): MsgUpdateParams { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgUpdateParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.authority = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.params = Params.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgUpdateParams { + return { + $type: MsgUpdateParams.$type, + authority: isSet(object.authority) + ? globalThis.String(object.authority) + : "", + params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, + }; + }, + + toJSON(message: MsgUpdateParams): unknown { + const obj: any = {}; + if (message.authority !== "") { + obj.authority = message.authority; + } + if (message.params !== undefined) { + obj.params = Params.toJSON(message.params); + } + return obj; + }, + + create(base?: DeepPartial): MsgUpdateParams { + return MsgUpdateParams.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgUpdateParams { + const message = createBaseMsgUpdateParams(); + message.authority = object.authority ?? ""; + message.params = + object.params !== undefined && object.params !== null + ? 
Params.fromPartial(object.params) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgUpdateParams.$type, MsgUpdateParams); + +function createBaseMsgUpdateParamsResponse(): MsgUpdateParamsResponse { + return { $type: "akash.deployment.v1beta4.MsgUpdateParamsResponse" }; +} + +export const MsgUpdateParamsResponse: MessageFns< + MsgUpdateParamsResponse, + "akash.deployment.v1beta4.MsgUpdateParamsResponse" +> = { + $type: "akash.deployment.v1beta4.MsgUpdateParamsResponse" as const, + + encode( + _: MsgUpdateParamsResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgUpdateParamsResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgUpdateParamsResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgUpdateParamsResponse { + return { $type: MsgUpdateParamsResponse.$type }; + }, + + toJSON(_: MsgUpdateParamsResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgUpdateParamsResponse { + return MsgUpdateParamsResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgUpdateParamsResponse { + const message = createBaseMsgUpdateParamsResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgUpdateParamsResponse.$type, MsgUpdateParamsResponse); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/query.ts b/ts/src/generated/akash/deployment/v1beta4/query.ts new file mode 100644 index 00000000..ad6dc67c --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/query.ts @@ -0,0 +1,897 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/deployment/v1beta4/query.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { + PageRequest, + PageResponse, +} from "../../../cosmos/base/query/v1beta1/pagination"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Account } from "../../escrow/v1/account"; +import { Deployment, DeploymentID } from "../v1/deployment"; +import { GroupID } from "../v1/group"; +import { DeploymentFilters } from "./filters"; +import { Group } from "./group"; +import { Params } from "./params"; + +/** QueryDeploymentsRequest is request type for the Query/Deployments RPC method */ +export interface QueryDeploymentsRequest { + $type: "akash.deployment.v1beta4.QueryDeploymentsRequest"; + filters: DeploymentFilters | undefined; + pagination: PageRequest | undefined; +} + +/** QueryDeploymentsResponse is response type for the Query/Deployments RPC method */ +export interface QueryDeploymentsResponse { + $type: "akash.deployment.v1beta4.QueryDeploymentsResponse"; + deployments: QueryDeploymentResponse[]; + pagination: PageResponse | undefined; +} + +/** QueryDeploymentRequest is request type for the Query/Deployment RPC method */ +export interface QueryDeploymentRequest { + $type: "akash.deployment.v1beta4.QueryDeploymentRequest"; + id: DeploymentID | undefined; +} + +/** QueryDeploymentResponse is response type for the Query/Deployment RPC method */ +export interface QueryDeploymentResponse { + $type: "akash.deployment.v1beta4.QueryDeploymentResponse"; + deployment: Deployment | undefined; + groups: Group[]; + escrowAccount: Account | undefined; +} + +/** QueryGroupRequest is request type for the Query/Group RPC method */ +export interface QueryGroupRequest { + $type: "akash.deployment.v1beta4.QueryGroupRequest"; + id: GroupID | undefined; +} + +/** QueryGroupResponse is response type for the Query/Group RPC method */ +export interface QueryGroupResponse { + $type: "akash.deployment.v1beta4.QueryGroupResponse"; + group: Group | undefined; +} + +/** QueryParamsRequest is the request type for the Query/Params RPC method. */ +export interface QueryParamsRequest { + $type: "akash.deployment.v1beta4.QueryParamsRequest"; +} + +/** QueryParamsResponse is the response type for the Query/Params RPC method. */ +export interface QueryParamsResponse { + $type: "akash.deployment.v1beta4.QueryParamsResponse"; + /** params defines the parameters of the module. */ + params: Params | undefined; +} + +function createBaseQueryDeploymentsRequest(): QueryDeploymentsRequest { + return { + $type: "akash.deployment.v1beta4.QueryDeploymentsRequest", + filters: undefined, + pagination: undefined, + }; +} + +export const QueryDeploymentsRequest: MessageFns< + QueryDeploymentsRequest, + "akash.deployment.v1beta4.QueryDeploymentsRequest" +> = { + $type: "akash.deployment.v1beta4.QueryDeploymentsRequest" as const, + + encode( + message: QueryDeploymentsRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.filters !== undefined) { + DeploymentFilters.encode( + message.filters, + writer.uint32(10).fork(), + ).join(); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryDeploymentsRequest { + const reader = + input instanceof BinaryReader ? 
input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryDeploymentsRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.filters = DeploymentFilters.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryDeploymentsRequest { + return { + $type: QueryDeploymentsRequest.$type, + filters: isSet(object.filters) + ? DeploymentFilters.fromJSON(object.filters) + : undefined, + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryDeploymentsRequest): unknown { + const obj: any = {}; + if (message.filters !== undefined) { + obj.filters = DeploymentFilters.toJSON(message.filters); + } + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryDeploymentsRequest { + return QueryDeploymentsRequest.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryDeploymentsRequest { + const message = createBaseQueryDeploymentsRequest(); + message.filters = + object.filters !== undefined && object.filters !== null + ? DeploymentFilters.fromPartial(object.filters) + : undefined; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryDeploymentsRequest.$type, QueryDeploymentsRequest); + +function createBaseQueryDeploymentsResponse(): QueryDeploymentsResponse { + return { + $type: "akash.deployment.v1beta4.QueryDeploymentsResponse", + deployments: [], + pagination: undefined, + }; +} + +export const QueryDeploymentsResponse: MessageFns< + QueryDeploymentsResponse, + "akash.deployment.v1beta4.QueryDeploymentsResponse" +> = { + $type: "akash.deployment.v1beta4.QueryDeploymentsResponse" as const, + + encode( + message: QueryDeploymentsResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.deployments) { + QueryDeploymentResponse.encode(v!, writer.uint32(10).fork()).join(); + } + if (message.pagination !== undefined) { + PageResponse.encode(message.pagination, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryDeploymentsResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryDeploymentsResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.deployments.push( + QueryDeploymentResponse.decode(reader, reader.uint32()), + ); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageResponse.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryDeploymentsResponse { + return { + $type: QueryDeploymentsResponse.$type, + deployments: globalThis.Array.isArray(object?.deployments) + ? object.deployments.map((e: any) => + QueryDeploymentResponse.fromJSON(e), + ) + : [], + pagination: isSet(object.pagination) + ? PageResponse.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryDeploymentsResponse): unknown { + const obj: any = {}; + if (message.deployments?.length) { + obj.deployments = message.deployments.map((e) => + QueryDeploymentResponse.toJSON(e), + ); + } + if (message.pagination !== undefined) { + obj.pagination = PageResponse.toJSON(message.pagination); + } + return obj; + }, + + create( + base?: DeepPartial, + ): QueryDeploymentsResponse { + return QueryDeploymentsResponse.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryDeploymentsResponse { + const message = createBaseQueryDeploymentsResponse(); + message.deployments = + object.deployments?.map((e) => QueryDeploymentResponse.fromPartial(e)) || + []; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageResponse.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + QueryDeploymentsResponse.$type, + QueryDeploymentsResponse, +); + +function createBaseQueryDeploymentRequest(): QueryDeploymentRequest { + return { + $type: "akash.deployment.v1beta4.QueryDeploymentRequest", + id: undefined, + }; +} + +export const QueryDeploymentRequest: MessageFns< + QueryDeploymentRequest, + "akash.deployment.v1beta4.QueryDeploymentRequest" +> = { + $type: "akash.deployment.v1beta4.QueryDeploymentRequest" as const, + + encode( + message: QueryDeploymentRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + DeploymentID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryDeploymentRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryDeploymentRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = DeploymentID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryDeploymentRequest { + return { + $type: QueryDeploymentRequest.$type, + id: isSet(object.id) ? 
DeploymentID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: QueryDeploymentRequest): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = DeploymentID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): QueryDeploymentRequest { + return QueryDeploymentRequest.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryDeploymentRequest { + const message = createBaseQueryDeploymentRequest(); + message.id = + object.id !== undefined && object.id !== null + ? DeploymentID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryDeploymentRequest.$type, QueryDeploymentRequest); + +function createBaseQueryDeploymentResponse(): QueryDeploymentResponse { + return { + $type: "akash.deployment.v1beta4.QueryDeploymentResponse", + deployment: undefined, + groups: [], + escrowAccount: undefined, + }; +} + +export const QueryDeploymentResponse: MessageFns< + QueryDeploymentResponse, + "akash.deployment.v1beta4.QueryDeploymentResponse" +> = { + $type: "akash.deployment.v1beta4.QueryDeploymentResponse" as const, + + encode( + message: QueryDeploymentResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.deployment !== undefined) { + Deployment.encode(message.deployment, writer.uint32(10).fork()).join(); + } + for (const v of message.groups) { + Group.encode(v!, writer.uint32(18).fork()).join(); + } + if (message.escrowAccount !== undefined) { + Account.encode(message.escrowAccount, writer.uint32(26).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryDeploymentResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryDeploymentResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.deployment = Deployment.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.groups.push(Group.decode(reader, reader.uint32())); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.escrowAccount = Account.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryDeploymentResponse { + return { + $type: QueryDeploymentResponse.$type, + deployment: isSet(object.deployment) + ? Deployment.fromJSON(object.deployment) + : undefined, + groups: globalThis.Array.isArray(object?.groups) + ? object.groups.map((e: any) => Group.fromJSON(e)) + : [], + escrowAccount: isSet(object.escrowAccount) + ? Account.fromJSON(object.escrowAccount) + : undefined, + }; + }, + + toJSON(message: QueryDeploymentResponse): unknown { + const obj: any = {}; + if (message.deployment !== undefined) { + obj.deployment = Deployment.toJSON(message.deployment); + } + if (message.groups?.length) { + obj.groups = message.groups.map((e) => Group.toJSON(e)); + } + if (message.escrowAccount !== undefined) { + obj.escrowAccount = Account.toJSON(message.escrowAccount); + } + return obj; + }, + + create(base?: DeepPartial): QueryDeploymentResponse { + return QueryDeploymentResponse.fromPartial(base ?? 
{}); + }, + fromPartial( + object: DeepPartial, + ): QueryDeploymentResponse { + const message = createBaseQueryDeploymentResponse(); + message.deployment = + object.deployment !== undefined && object.deployment !== null + ? Deployment.fromPartial(object.deployment) + : undefined; + message.groups = object.groups?.map((e) => Group.fromPartial(e)) || []; + message.escrowAccount = + object.escrowAccount !== undefined && object.escrowAccount !== null + ? Account.fromPartial(object.escrowAccount) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryDeploymentResponse.$type, QueryDeploymentResponse); + +function createBaseQueryGroupRequest(): QueryGroupRequest { + return { $type: "akash.deployment.v1beta4.QueryGroupRequest", id: undefined }; +} + +export const QueryGroupRequest: MessageFns< + QueryGroupRequest, + "akash.deployment.v1beta4.QueryGroupRequest" +> = { + $type: "akash.deployment.v1beta4.QueryGroupRequest" as const, + + encode( + message: QueryGroupRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + GroupID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): QueryGroupRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryGroupRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = GroupID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryGroupRequest { + return { + $type: QueryGroupRequest.$type, + id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: QueryGroupRequest): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = GroupID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): QueryGroupRequest { + return QueryGroupRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryGroupRequest { + const message = createBaseQueryGroupRequest(); + message.id = + object.id !== undefined && object.id !== null + ? GroupID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryGroupRequest.$type, QueryGroupRequest); + +function createBaseQueryGroupResponse(): QueryGroupResponse { + return { + $type: "akash.deployment.v1beta4.QueryGroupResponse", + group: undefined, + }; +} + +export const QueryGroupResponse: MessageFns< + QueryGroupResponse, + "akash.deployment.v1beta4.QueryGroupResponse" +> = { + $type: "akash.deployment.v1beta4.QueryGroupResponse" as const, + + encode( + message: QueryGroupResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.group !== undefined) { + Group.encode(message.group, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryGroupResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryGroupResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.group = Group.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryGroupResponse { + return { + $type: QueryGroupResponse.$type, + group: isSet(object.group) ? Group.fromJSON(object.group) : undefined, + }; + }, + + toJSON(message: QueryGroupResponse): unknown { + const obj: any = {}; + if (message.group !== undefined) { + obj.group = Group.toJSON(message.group); + } + return obj; + }, + + create(base?: DeepPartial): QueryGroupResponse { + return QueryGroupResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryGroupResponse { + const message = createBaseQueryGroupResponse(); + message.group = + object.group !== undefined && object.group !== null + ? Group.fromPartial(object.group) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryGroupResponse.$type, QueryGroupResponse); + +function createBaseQueryParamsRequest(): QueryParamsRequest { + return { $type: "akash.deployment.v1beta4.QueryParamsRequest" }; +} + +export const QueryParamsRequest: MessageFns< + QueryParamsRequest, + "akash.deployment.v1beta4.QueryParamsRequest" +> = { + $type: "akash.deployment.v1beta4.QueryParamsRequest" as const, + + encode( + _: QueryParamsRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryParamsRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryParamsRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): QueryParamsRequest { + return { $type: QueryParamsRequest.$type }; + }, + + toJSON(_: QueryParamsRequest): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): QueryParamsRequest { + return QueryParamsRequest.fromPartial(base ?? {}); + }, + fromPartial(_: DeepPartial): QueryParamsRequest { + const message = createBaseQueryParamsRequest(); + return message; + }, +}; + +messageTypeRegistry.set(QueryParamsRequest.$type, QueryParamsRequest); + +function createBaseQueryParamsResponse(): QueryParamsResponse { + return { + $type: "akash.deployment.v1beta4.QueryParamsResponse", + params: undefined, + }; +} + +export const QueryParamsResponse: MessageFns< + QueryParamsResponse, + "akash.deployment.v1beta4.QueryParamsResponse" +> = { + $type: "akash.deployment.v1beta4.QueryParamsResponse" as const, + + encode( + message: QueryParamsResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.params !== undefined) { + Params.encode(message.params, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryParamsResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryParamsResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.params = Params.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryParamsResponse { + return { + $type: QueryParamsResponse.$type, + params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, + }; + }, + + toJSON(message: QueryParamsResponse): unknown { + const obj: any = {}; + if (message.params !== undefined) { + obj.params = Params.toJSON(message.params); + } + return obj; + }, + + create(base?: DeepPartial): QueryParamsResponse { + return QueryParamsResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryParamsResponse { + const message = createBaseQueryParamsResponse(); + message.params = + object.params !== undefined && object.params !== null + ? Params.fromPartial(object.params) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryParamsResponse.$type, QueryParamsResponse); + +/** Query defines the gRPC querier service */ +export interface Query { + /** Deployments queries deployments */ + Deployments( + request: QueryDeploymentsRequest, + ): Promise; + /** Deployment queries deployment details */ + Deployment(request: QueryDeploymentRequest): Promise; + /** Group queries group details */ + Group(request: QueryGroupRequest): Promise; + /** Params returns the total set of minting parameters. */ + Params(request: QueryParamsRequest): Promise; +} + +export const QueryServiceName = "akash.deployment.v1beta4.Query"; +export class QueryClientImpl implements Query { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || QueryServiceName; + this.rpc = rpc; + this.Deployments = this.Deployments.bind(this); + this.Deployment = this.Deployment.bind(this); + this.Group = this.Group.bind(this); + this.Params = this.Params.bind(this); + } + Deployments( + request: QueryDeploymentsRequest, + ): Promise { + const data = QueryDeploymentsRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Deployments", data); + return promise.then((data) => + QueryDeploymentsResponse.decode(new BinaryReader(data)), + ); + } + + Deployment( + request: QueryDeploymentRequest, + ): Promise { + const data = QueryDeploymentRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Deployment", data); + return promise.then((data) => + QueryDeploymentResponse.decode(new BinaryReader(data)), + ); + } + + Group(request: QueryGroupRequest): Promise { + const data = QueryGroupRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Group", data); + return promise.then((data) => + QueryGroupResponse.decode(new BinaryReader(data)), + ); + } + + Params(request: QueryParamsRequest): Promise { + const data = QueryParamsRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Params", data); + return promise.then((data) => + QueryParamsResponse.decode(new BinaryReader(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T 
extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/resourceunit.ts b/ts/src/generated/akash/deployment/v1beta4/resourceunit.ts new file mode 100644 index 00000000..3c774f5f --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/resourceunit.ts @@ -0,0 +1,169 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/deployment/v1beta4/resourceunit.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { DecCoin } from "../../../cosmos/base/v1beta1/coin"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Resources } from "../../base/resources/v1beta4/resources"; + +/** ResourceUnit extends Resources and adds Count along with the Price */ +export interface ResourceUnit { + $type: "akash.deployment.v1beta4.ResourceUnit"; + resource: Resources | undefined; + count: number; + price: DecCoin | undefined; +} + +function createBaseResourceUnit(): ResourceUnit { + return { + $type: "akash.deployment.v1beta4.ResourceUnit", + resource: undefined, + count: 0, + price: undefined, + }; +} + +export const ResourceUnit: MessageFns< + ResourceUnit, + "akash.deployment.v1beta4.ResourceUnit" +> = { + $type: "akash.deployment.v1beta4.ResourceUnit" as const, + + encode( + message: ResourceUnit, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.resource !== undefined) { + Resources.encode(message.resource, writer.uint32(10).fork()).join(); + } + if (message.count !== 0) { + writer.uint32(16).uint32(message.count); + } + if (message.price !== undefined) { + DecCoin.encode(message.price, writer.uint32(26).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): ResourceUnit { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseResourceUnit(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.resource = Resources.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.count = reader.uint32(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.price = DecCoin.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): ResourceUnit { + return { + $type: ResourceUnit.$type, + resource: isSet(object.resource) + ? Resources.fromJSON(object.resource) + : undefined, + count: isSet(object.count) ? globalThis.Number(object.count) : 0, + price: isSet(object.price) ? 
DecCoin.fromJSON(object.price) : undefined, + }; + }, + + toJSON(message: ResourceUnit): unknown { + const obj: any = {}; + if (message.resource !== undefined) { + obj.resource = Resources.toJSON(message.resource); + } + if (message.count !== 0) { + obj.count = Math.round(message.count); + } + if (message.price !== undefined) { + obj.price = DecCoin.toJSON(message.price); + } + return obj; + }, + + create(base?: DeepPartial): ResourceUnit { + return ResourceUnit.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): ResourceUnit { + const message = createBaseResourceUnit(); + message.resource = + object.resource !== undefined && object.resource !== null + ? Resources.fromPartial(object.resource) + : undefined; + message.count = object.count ?? 0; + message.price = + object.price !== undefined && object.price !== null + ? DecCoin.fromPartial(object.price) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ResourceUnit.$type, ResourceUnit); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/service.grpc-js.ts b/ts/src/generated/akash/deployment/v1beta4/service.grpc-js.ts new file mode 100644 index 00000000..d5ef0da3 --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/service.grpc-js.ts @@ -0,0 +1,409 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/deployment/v1beta4/service.proto + +/* eslint-disable */ +import { + ChannelCredentials, + Client, + makeGenericClientConstructor, + Metadata, +} from "@grpc/grpc-js"; +import type { + CallOptions, + ClientOptions, + ClientUnaryCall, + handleUnaryCall, + ServiceError, + UntypedServiceImplementation, +} from "@grpc/grpc-js"; +import { MsgDepositDeployment, MsgDepositDeploymentResponse } from "../v1/msg"; +import { + MsgCloseDeployment, + MsgCloseDeploymentResponse, + MsgCreateDeployment, + MsgCreateDeploymentResponse, + MsgUpdateDeployment, + MsgUpdateDeploymentResponse, +} from "./deploymentmsg"; +import { + MsgCloseGroup, + MsgCloseGroupResponse, + MsgPauseGroup, + MsgPauseGroupResponse, + MsgStartGroup, + MsgStartGroupResponse, +} from "./groupmsg"; +import { MsgUpdateParams, MsgUpdateParamsResponse } from "./paramsmsg"; + +export const protobufPackage = "akash.deployment.v1beta4"; + +/** Msg defines the x/deployment Msg service. */ +export type MsgService = typeof MsgService; +export const MsgService = { + /** CreateDeployment defines a method to create new deployment given proper inputs. 
*/ + createDeployment: { + path: "/akash.deployment.v1beta4.Msg/CreateDeployment", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgCreateDeployment) => + Buffer.from(MsgCreateDeployment.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgCreateDeployment.decode(value), + responseSerialize: (value: MsgCreateDeploymentResponse) => + Buffer.from(MsgCreateDeploymentResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgCreateDeploymentResponse.decode(value), + }, + /** DepositDeployment deposits more funds into the deployment account */ + depositDeployment: { + path: "/akash.deployment.v1beta4.Msg/DepositDeployment", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgDepositDeployment) => + Buffer.from(MsgDepositDeployment.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgDepositDeployment.decode(value), + responseSerialize: (value: MsgDepositDeploymentResponse) => + Buffer.from(MsgDepositDeploymentResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgDepositDeploymentResponse.decode(value), + }, + /** UpdateDeployment defines a method to update a deployment given proper inputs. */ + updateDeployment: { + path: "/akash.deployment.v1beta4.Msg/UpdateDeployment", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgUpdateDeployment) => + Buffer.from(MsgUpdateDeployment.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgUpdateDeployment.decode(value), + responseSerialize: (value: MsgUpdateDeploymentResponse) => + Buffer.from(MsgUpdateDeploymentResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgUpdateDeploymentResponse.decode(value), + }, + /** CloseDeployment defines a method to close a deployment given proper inputs. */ + closeDeployment: { + path: "/akash.deployment.v1beta4.Msg/CloseDeployment", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgCloseDeployment) => + Buffer.from(MsgCloseDeployment.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgCloseDeployment.decode(value), + responseSerialize: (value: MsgCloseDeploymentResponse) => + Buffer.from(MsgCloseDeploymentResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgCloseDeploymentResponse.decode(value), + }, + /** CloseGroup defines a method to close a group of a deployment given proper inputs. */ + closeGroup: { + path: "/akash.deployment.v1beta4.Msg/CloseGroup", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgCloseGroup) => + Buffer.from(MsgCloseGroup.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgCloseGroup.decode(value), + responseSerialize: (value: MsgCloseGroupResponse) => + Buffer.from(MsgCloseGroupResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => MsgCloseGroupResponse.decode(value), + }, + /** PauseGroup defines a method to close a group of a deployment given proper inputs. 
*/ + pauseGroup: { + path: "/akash.deployment.v1beta4.Msg/PauseGroup", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgPauseGroup) => + Buffer.from(MsgPauseGroup.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgPauseGroup.decode(value), + responseSerialize: (value: MsgPauseGroupResponse) => + Buffer.from(MsgPauseGroupResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => MsgPauseGroupResponse.decode(value), + }, + /** StartGroup defines a method to close a group of a deployment given proper inputs. */ + startGroup: { + path: "/akash.deployment.v1beta4.Msg/StartGroup", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgStartGroup) => + Buffer.from(MsgStartGroup.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgStartGroup.decode(value), + responseSerialize: (value: MsgStartGroupResponse) => + Buffer.from(MsgStartGroupResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => MsgStartGroupResponse.decode(value), + }, + /** + * UpdateParams defines a governance operation for updating the x/deployment module + * parameters. The authority is hard-coded to the x/gov module account. + * + * Since: akash v1.0.0 + */ + updateParams: { + path: "/akash.deployment.v1beta4.Msg/UpdateParams", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgUpdateParams) => + Buffer.from(MsgUpdateParams.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgUpdateParams.decode(value), + responseSerialize: (value: MsgUpdateParamsResponse) => + Buffer.from(MsgUpdateParamsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgUpdateParamsResponse.decode(value), + }, +} as const; + +export interface MsgServer extends UntypedServiceImplementation { + /** CreateDeployment defines a method to create new deployment given proper inputs. */ + createDeployment: handleUnaryCall< + MsgCreateDeployment, + MsgCreateDeploymentResponse + >; + /** DepositDeployment deposits more funds into the deployment account */ + depositDeployment: handleUnaryCall< + MsgDepositDeployment, + MsgDepositDeploymentResponse + >; + /** UpdateDeployment defines a method to update a deployment given proper inputs. */ + updateDeployment: handleUnaryCall< + MsgUpdateDeployment, + MsgUpdateDeploymentResponse + >; + /** CloseDeployment defines a method to close a deployment given proper inputs. */ + closeDeployment: handleUnaryCall< + MsgCloseDeployment, + MsgCloseDeploymentResponse + >; + /** CloseGroup defines a method to close a group of a deployment given proper inputs. */ + closeGroup: handleUnaryCall; + /** PauseGroup defines a method to close a group of a deployment given proper inputs. */ + pauseGroup: handleUnaryCall; + /** StartGroup defines a method to close a group of a deployment given proper inputs. */ + startGroup: handleUnaryCall; + /** + * UpdateParams defines a governance operation for updating the x/deployment module + * parameters. The authority is hard-coded to the x/gov module account. + * + * Since: akash v1.0.0 + */ + updateParams: handleUnaryCall; +} + +export interface MsgClient extends Client { + /** CreateDeployment defines a method to create new deployment given proper inputs. 
*/ + createDeployment( + request: MsgCreateDeployment, + callback: ( + error: ServiceError | null, + response: MsgCreateDeploymentResponse, + ) => void, + ): ClientUnaryCall; + createDeployment( + request: MsgCreateDeployment, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgCreateDeploymentResponse, + ) => void, + ): ClientUnaryCall; + createDeployment( + request: MsgCreateDeployment, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgCreateDeploymentResponse, + ) => void, + ): ClientUnaryCall; + /** DepositDeployment deposits more funds into the deployment account */ + depositDeployment( + request: MsgDepositDeployment, + callback: ( + error: ServiceError | null, + response: MsgDepositDeploymentResponse, + ) => void, + ): ClientUnaryCall; + depositDeployment( + request: MsgDepositDeployment, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgDepositDeploymentResponse, + ) => void, + ): ClientUnaryCall; + depositDeployment( + request: MsgDepositDeployment, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgDepositDeploymentResponse, + ) => void, + ): ClientUnaryCall; + /** UpdateDeployment defines a method to update a deployment given proper inputs. */ + updateDeployment( + request: MsgUpdateDeployment, + callback: ( + error: ServiceError | null, + response: MsgUpdateDeploymentResponse, + ) => void, + ): ClientUnaryCall; + updateDeployment( + request: MsgUpdateDeployment, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgUpdateDeploymentResponse, + ) => void, + ): ClientUnaryCall; + updateDeployment( + request: MsgUpdateDeployment, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgUpdateDeploymentResponse, + ) => void, + ): ClientUnaryCall; + /** CloseDeployment defines a method to close a deployment given proper inputs. */ + closeDeployment( + request: MsgCloseDeployment, + callback: ( + error: ServiceError | null, + response: MsgCloseDeploymentResponse, + ) => void, + ): ClientUnaryCall; + closeDeployment( + request: MsgCloseDeployment, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgCloseDeploymentResponse, + ) => void, + ): ClientUnaryCall; + closeDeployment( + request: MsgCloseDeployment, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgCloseDeploymentResponse, + ) => void, + ): ClientUnaryCall; + /** CloseGroup defines a method to close a group of a deployment given proper inputs. */ + closeGroup( + request: MsgCloseGroup, + callback: ( + error: ServiceError | null, + response: MsgCloseGroupResponse, + ) => void, + ): ClientUnaryCall; + closeGroup( + request: MsgCloseGroup, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgCloseGroupResponse, + ) => void, + ): ClientUnaryCall; + closeGroup( + request: MsgCloseGroup, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgCloseGroupResponse, + ) => void, + ): ClientUnaryCall; + /** PauseGroup defines a method to close a group of a deployment given proper inputs. 
*/ + pauseGroup( + request: MsgPauseGroup, + callback: ( + error: ServiceError | null, + response: MsgPauseGroupResponse, + ) => void, + ): ClientUnaryCall; + pauseGroup( + request: MsgPauseGroup, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgPauseGroupResponse, + ) => void, + ): ClientUnaryCall; + pauseGroup( + request: MsgPauseGroup, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgPauseGroupResponse, + ) => void, + ): ClientUnaryCall; + /** StartGroup defines a method to close a group of a deployment given proper inputs. */ + startGroup( + request: MsgStartGroup, + callback: ( + error: ServiceError | null, + response: MsgStartGroupResponse, + ) => void, + ): ClientUnaryCall; + startGroup( + request: MsgStartGroup, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgStartGroupResponse, + ) => void, + ): ClientUnaryCall; + startGroup( + request: MsgStartGroup, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgStartGroupResponse, + ) => void, + ): ClientUnaryCall; + /** + * UpdateParams defines a governance operation for updating the x/deployment module + * parameters. The authority is hard-coded to the x/gov module account. + * + * Since: akash v1.0.0 + */ + updateParams( + request: MsgUpdateParams, + callback: ( + error: ServiceError | null, + response: MsgUpdateParamsResponse, + ) => void, + ): ClientUnaryCall; + updateParams( + request: MsgUpdateParams, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgUpdateParamsResponse, + ) => void, + ): ClientUnaryCall; + updateParams( + request: MsgUpdateParams, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgUpdateParamsResponse, + ) => void, + ): ClientUnaryCall; +} + +export const MsgClient = makeGenericClientConstructor( + MsgService, + "akash.deployment.v1beta4.Msg", +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial, + ): MsgClient; + service: typeof MsgService; + serviceName: string; +}; diff --git a/ts/src/generated/akash/deployment/v1beta4/service.ts b/ts/src/generated/akash/deployment/v1beta4/service.ts new file mode 100644 index 00000000..0c535139 --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/service.ts @@ -0,0 +1,156 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/deployment/v1beta4/service.proto + +/* eslint-disable */ +import { BinaryReader } from "@bufbuild/protobuf/wire"; +import { MsgDepositDeployment, MsgDepositDeploymentResponse } from "../v1/msg"; +import { + MsgCloseDeployment, + MsgCloseDeploymentResponse, + MsgCreateDeployment, + MsgCreateDeploymentResponse, + MsgUpdateDeployment, + MsgUpdateDeploymentResponse, +} from "./deploymentmsg"; +import { + MsgCloseGroup, + MsgCloseGroupResponse, + MsgPauseGroup, + MsgPauseGroupResponse, + MsgStartGroup, + MsgStartGroupResponse, +} from "./groupmsg"; +import { MsgUpdateParams, MsgUpdateParamsResponse } from "./paramsmsg"; + +/** Msg defines the x/deployment Msg service. */ +export interface Msg { + /** CreateDeployment defines a method to create new deployment given proper inputs. 
*/ + CreateDeployment( + request: MsgCreateDeployment, + ): Promise; + /** DepositDeployment deposits more funds into the deployment account */ + DepositDeployment( + request: MsgDepositDeployment, + ): Promise; + /** UpdateDeployment defines a method to update a deployment given proper inputs. */ + UpdateDeployment( + request: MsgUpdateDeployment, + ): Promise; + /** CloseDeployment defines a method to close a deployment given proper inputs. */ + CloseDeployment( + request: MsgCloseDeployment, + ): Promise; + /** CloseGroup defines a method to close a group of a deployment given proper inputs. */ + CloseGroup(request: MsgCloseGroup): Promise; + /** PauseGroup defines a method to close a group of a deployment given proper inputs. */ + PauseGroup(request: MsgPauseGroup): Promise; + /** StartGroup defines a method to close a group of a deployment given proper inputs. */ + StartGroup(request: MsgStartGroup): Promise; + /** + * UpdateParams defines a governance operation for updating the x/deployment module + * parameters. The authority is hard-coded to the x/gov module account. + * + * Since: akash v1.0.0 + */ + UpdateParams(request: MsgUpdateParams): Promise; +} + +export const MsgServiceName = "akash.deployment.v1beta4.Msg"; +export class MsgClientImpl implements Msg { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || MsgServiceName; + this.rpc = rpc; + this.CreateDeployment = this.CreateDeployment.bind(this); + this.DepositDeployment = this.DepositDeployment.bind(this); + this.UpdateDeployment = this.UpdateDeployment.bind(this); + this.CloseDeployment = this.CloseDeployment.bind(this); + this.CloseGroup = this.CloseGroup.bind(this); + this.PauseGroup = this.PauseGroup.bind(this); + this.StartGroup = this.StartGroup.bind(this); + this.UpdateParams = this.UpdateParams.bind(this); + } + CreateDeployment( + request: MsgCreateDeployment, + ): Promise { + const data = MsgCreateDeployment.encode(request).finish(); + const promise = this.rpc.request(this.service, "CreateDeployment", data); + return promise.then((data) => + MsgCreateDeploymentResponse.decode(new BinaryReader(data)), + ); + } + + DepositDeployment( + request: MsgDepositDeployment, + ): Promise { + const data = MsgDepositDeployment.encode(request).finish(); + const promise = this.rpc.request(this.service, "DepositDeployment", data); + return promise.then((data) => + MsgDepositDeploymentResponse.decode(new BinaryReader(data)), + ); + } + + UpdateDeployment( + request: MsgUpdateDeployment, + ): Promise { + const data = MsgUpdateDeployment.encode(request).finish(); + const promise = this.rpc.request(this.service, "UpdateDeployment", data); + return promise.then((data) => + MsgUpdateDeploymentResponse.decode(new BinaryReader(data)), + ); + } + + CloseDeployment( + request: MsgCloseDeployment, + ): Promise { + const data = MsgCloseDeployment.encode(request).finish(); + const promise = this.rpc.request(this.service, "CloseDeployment", data); + return promise.then((data) => + MsgCloseDeploymentResponse.decode(new BinaryReader(data)), + ); + } + + CloseGroup(request: MsgCloseGroup): Promise { + const data = MsgCloseGroup.encode(request).finish(); + const promise = this.rpc.request(this.service, "CloseGroup", data); + return promise.then((data) => + MsgCloseGroupResponse.decode(new BinaryReader(data)), + ); + } + + PauseGroup(request: MsgPauseGroup): Promise { + const data = MsgPauseGroup.encode(request).finish(); + const promise = 
this.rpc.request(this.service, "PauseGroup", data); + return promise.then((data) => + MsgPauseGroupResponse.decode(new BinaryReader(data)), + ); + } + + StartGroup(request: MsgStartGroup): Promise { + const data = MsgStartGroup.encode(request).finish(); + const promise = this.rpc.request(this.service, "StartGroup", data); + return promise.then((data) => + MsgStartGroupResponse.decode(new BinaryReader(data)), + ); + } + + UpdateParams(request: MsgUpdateParams): Promise { + const data = MsgUpdateParams.encode(request).finish(); + const promise = this.rpc.request(this.service, "UpdateParams", data); + return promise.then((data) => + MsgUpdateParamsResponse.decode(new BinaryReader(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} diff --git a/ts/src/generated/akash/discovery/v1/akash.ts b/ts/src/generated/akash/discovery/v1/akash.ts index 4fcede6c..fd3cf6f4 100644 --- a/ts/src/generated/akash/discovery/v1/akash.ts +++ b/ts/src/generated/akash/discovery/v1/akash.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/discovery/v1/akash.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; import { ClientInfo } from "./client_info"; @@ -14,19 +20,22 @@ function createBaseAkash(): Akash { return { $type: "akash.discovery.v1.Akash", clientInfo: undefined }; } -export const Akash = { +export const Akash: MessageFns = { $type: "akash.discovery.v1.Akash" as const, - encode(message: Akash, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + encode( + message: Akash, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.clientInfo !== undefined) { - ClientInfo.encode(message.clientInfo, writer.uint32(10).fork()).ldelim(); + ClientInfo.encode(message.clientInfo, writer.uint32(10).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Akash { + decode(input: BinaryReader | Uint8Array, length?: number): Akash { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseAkash(); while (reader.pos < end) { @@ -43,7 +52,7 @@ export const Akash = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -101,11 +110,16 @@ type DeepPartial = T extends Builtin ? 
{ [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/discovery/v1/client_info.ts b/ts/src/generated/akash/discovery/v1/client_info.ts index 51fb45e5..96eca743 100644 --- a/ts/src/generated/akash/discovery/v1/client_info.ts +++ b/ts/src/generated/akash/discovery/v1/client_info.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/discovery/v1/client_info.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; /** ClientInfo akash specific client info */ @@ -13,22 +19,25 @@ function createBaseClientInfo(): ClientInfo { return { $type: "akash.discovery.v1.ClientInfo", apiVersion: "" }; } -export const ClientInfo = { +export const ClientInfo: MessageFns< + ClientInfo, + "akash.discovery.v1.ClientInfo" +> = { $type: "akash.discovery.v1.ClientInfo" as const, encode( message: ClientInfo, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.apiVersion !== "") { writer.uint32(10).string(message.apiVersion); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ClientInfo { + decode(input: BinaryReader | Uint8Array, length?: number): ClientInfo { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseClientInfo(); while (reader.pos < end) { @@ -45,7 +54,7 @@ export const ClientInfo = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -100,11 +109,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/escrow/v1/account.ts b/ts/src/generated/akash/escrow/v1/account.ts new file mode 100644 index 00000000..62717140 --- /dev/null +++ b/ts/src/generated/akash/escrow/v1/account.ts @@ -0,0 +1,331 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
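> Note (not part of the patch): the discovery `akash.ts`/`client_info.ts` hunks above migrate the generated code from `protobufjs/minimal` (`_m0.Writer`/`_m0.Reader`, `ldelim()`, `skipType()`) to `@bufbuild/protobuf/wire` (`BinaryWriter`/`BinaryReader`, `join()`, `skip()`). A minimal sketch of the resulting encode/decode round-trip, assuming the generated module is importable from the path shown in the diff (the relative import path here is illustrative):

```ts
import { BinaryReader } from "@bufbuild/protobuf/wire";
import { ClientInfo } from "./generated/akash/discovery/v1/client_info";

// Encode with the BinaryWriter-based API; finish() yields the wire bytes.
const bytes: Uint8Array = ClientInfo.encode(
  ClientInfo.fromPartial({ apiVersion: "v1" }),
).finish();

// decode() accepts either a Uint8Array or an explicit BinaryReader.
const decoded = ClientInfo.decode(new BinaryReader(bytes));
console.log(ClientInfo.toJSON(decoded)); // { apiVersion: "v1" }
```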
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/escrow/v1/account.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { DecCoin } from "../../../cosmos/base/v1beta1/coin"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { AccountID } from "./accountid"; + +/** Account stores state for an escrow account */ +export interface Account { + $type: "akash.escrow.v1.Account"; + /** unique identifier for this escrow account */ + id: AccountID | undefined; + /** bech32 encoded account address of the owner of this escrow account */ + owner: string; + /** current state of this escrow account */ + state: Account_State; + /** unspent coins received from the owner's wallet */ + balance: DecCoin | undefined; + /** total coins spent by this account */ + transferred: DecCoin | undefined; + /** block height at which this account was last settled */ + settledAt: Long; + /** + * bech32 encoded account address of the depositor. + * If depositor is same as the owner, then any incoming coins are added to the Balance. + * If depositor isn't same as the owner, then any incoming coins are added to the Funds. + */ + depositor: string; + /** + * Funds are unspent coins received from the (non-Owner) Depositor's wallet. + * If there are any funds, they should be spent before spending the Balance. + */ + funds: DecCoin | undefined; +} + +/** State stores state for an escrow account */ +export enum Account_State { + /** invalid - AccountStateInvalid is an invalid state */ + invalid = 0, + /** open - AccountOpen is the state when an account is open */ + open = 1, + /** closed - AccountClosed is the state when an account is closed */ + closed = 2, + /** overdrawn - AccountOverdrawn is the state when an account is overdrawn */ + overdrawn = 3, + UNRECOGNIZED = -1, +} + +export function account_StateFromJSON(object: any): Account_State { + switch (object) { + case 0: + case "invalid": + return Account_State.invalid; + case 1: + case "open": + return Account_State.open; + case 2: + case "closed": + return Account_State.closed; + case 3: + case "overdrawn": + return Account_State.overdrawn; + case -1: + case "UNRECOGNIZED": + default: + return Account_State.UNRECOGNIZED; + } +} + +export function account_StateToJSON(object: Account_State): string { + switch (object) { + case Account_State.invalid: + return "invalid"; + case Account_State.open: + return "open"; + case Account_State.closed: + return "closed"; + case Account_State.overdrawn: + return "overdrawn"; + case Account_State.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +function createBaseAccount(): Account { + return { + $type: "akash.escrow.v1.Account", + id: undefined, + owner: "", + state: 0, + balance: undefined, + transferred: undefined, + settledAt: Long.ZERO, + depositor: "", + funds: undefined, + }; +} + +export const Account: MessageFns = { + $type: "akash.escrow.v1.Account" as const, + + encode( + message: Account, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + AccountID.encode(message.id, writer.uint32(10).fork()).join(); + } + if (message.owner !== "") { + writer.uint32(18).string(message.owner); + } + if (message.state !== 0) { + writer.uint32(24).int32(message.state); + } + if (message.balance !== undefined) { + DecCoin.encode(message.balance, writer.uint32(34).fork()).join(); + } + if (message.transferred !== undefined) { + 
DecCoin.encode(message.transferred, writer.uint32(42).fork()).join(); + } + if (!message.settledAt.equals(Long.ZERO)) { + writer.uint32(48).int64(message.settledAt.toString()); + } + if (message.depositor !== "") { + writer.uint32(58).string(message.depositor); + } + if (message.funds !== undefined) { + DecCoin.encode(message.funds, writer.uint32(66).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Account { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseAccount(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = AccountID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.owner = reader.string(); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.state = reader.int32() as any; + continue; + case 4: + if (tag !== 34) { + break; + } + + message.balance = DecCoin.decode(reader, reader.uint32()); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.transferred = DecCoin.decode(reader, reader.uint32()); + continue; + case 6: + if (tag !== 48) { + break; + } + + message.settledAt = Long.fromString(reader.int64().toString()); + continue; + case 7: + if (tag !== 58) { + break; + } + + message.depositor = reader.string(); + continue; + case 8: + if (tag !== 66) { + break; + } + + message.funds = DecCoin.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Account { + return { + $type: Account.$type, + id: isSet(object.id) ? AccountID.fromJSON(object.id) : undefined, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + state: isSet(object.state) ? account_StateFromJSON(object.state) : 0, + balance: isSet(object.balance) + ? DecCoin.fromJSON(object.balance) + : undefined, + transferred: isSet(object.transferred) + ? DecCoin.fromJSON(object.transferred) + : undefined, + settledAt: isSet(object.settledAt) + ? Long.fromValue(object.settledAt) + : Long.ZERO, + depositor: isSet(object.depositor) + ? globalThis.String(object.depositor) + : "", + funds: isSet(object.funds) ? DecCoin.fromJSON(object.funds) : undefined, + }; + }, + + toJSON(message: Account): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = AccountID.toJSON(message.id); + } + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.state !== 0) { + obj.state = account_StateToJSON(message.state); + } + if (message.balance !== undefined) { + obj.balance = DecCoin.toJSON(message.balance); + } + if (message.transferred !== undefined) { + obj.transferred = DecCoin.toJSON(message.transferred); + } + if (!message.settledAt.equals(Long.ZERO)) { + obj.settledAt = (message.settledAt || Long.ZERO).toString(); + } + if (message.depositor !== "") { + obj.depositor = message.depositor; + } + if (message.funds !== undefined) { + obj.funds = DecCoin.toJSON(message.funds); + } + return obj; + }, + + create(base?: DeepPartial): Account { + return Account.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Account { + const message = createBaseAccount(); + message.id = + object.id !== undefined && object.id !== null + ? AccountID.fromPartial(object.id) + : undefined; + message.owner = object.owner ?? 
""; + message.state = object.state ?? 0; + message.balance = + object.balance !== undefined && object.balance !== null + ? DecCoin.fromPartial(object.balance) + : undefined; + message.transferred = + object.transferred !== undefined && object.transferred !== null + ? DecCoin.fromPartial(object.transferred) + : undefined; + message.settledAt = + object.settledAt !== undefined && object.settledAt !== null + ? Long.fromValue(object.settledAt) + : Long.ZERO; + message.depositor = object.depositor ?? ""; + message.funds = + object.funds !== undefined && object.funds !== null + ? DecCoin.fromPartial(object.funds) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Account.$type, Account); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/escrow/v1/accountid.ts b/ts/src/generated/akash/escrow/v1/accountid.ts new file mode 100644 index 00000000..26277036 --- /dev/null +++ b/ts/src/generated/akash/escrow/v1/accountid.ts @@ -0,0 +1,135 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/escrow/v1/accountid.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; + +/** AccountID is the account identifier */ +export interface AccountID { + $type: "akash.escrow.v1.AccountID"; + scope: string; + xid: string; +} + +function createBaseAccountID(): AccountID { + return { $type: "akash.escrow.v1.AccountID", scope: "", xid: "" }; +} + +export const AccountID: MessageFns = { + $type: "akash.escrow.v1.AccountID" as const, + + encode( + message: AccountID, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.scope !== "") { + writer.uint32(10).string(message.scope); + } + if (message.xid !== "") { + writer.uint32(18).string(message.xid); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): AccountID { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseAccountID(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.scope = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.xid = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): AccountID { + return { + $type: AccountID.$type, + scope: isSet(object.scope) ? globalThis.String(object.scope) : "", + xid: isSet(object.xid) ? 
globalThis.String(object.xid) : "", + }; + }, + + toJSON(message: AccountID): unknown { + const obj: any = {}; + if (message.scope !== "") { + obj.scope = message.scope; + } + if (message.xid !== "") { + obj.xid = message.xid; + } + return obj; + }, + + create(base?: DeepPartial): AccountID { + return AccountID.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): AccountID { + const message = createBaseAccountID(); + message.scope = object.scope ?? ""; + message.xid = object.xid ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(AccountID.$type, AccountID); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/escrow/v1/fractional_payment.ts b/ts/src/generated/akash/escrow/v1/fractional_payment.ts new file mode 100644 index 00000000..8c84c891 --- /dev/null +++ b/ts/src/generated/akash/escrow/v1/fractional_payment.ts @@ -0,0 +1,305 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/escrow/v1/fractional_payment.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { Coin, DecCoin } from "../../../cosmos/base/v1beta1/coin"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { AccountID } from "./accountid"; + +/** Payment stores state for a payment */ +export interface FractionalPayment { + $type: "akash.escrow.v1.FractionalPayment"; + accountId: AccountID | undefined; + paymentId: string; + owner: string; + state: FractionalPayment_State; + rate: DecCoin | undefined; + balance: DecCoin | undefined; + withdrawn: Coin | undefined; +} + +/** State defines payment state */ +export enum FractionalPayment_State { + /** invalid - PaymentStateInvalid is the state when the payment is invalid */ + invalid = 0, + /** open - PaymentStateOpen is the state when the payment is open */ + open = 1, + /** closed - PaymentStateClosed is the state when the payment is closed */ + closed = 2, + /** overdrawn - PaymentStateOverdrawn is the state when the payment is overdrawn */ + overdrawn = 3, + UNRECOGNIZED = -1, +} + +export function fractionalPayment_StateFromJSON( + object: any, +): FractionalPayment_State { + switch (object) { + case 0: + case "invalid": + return FractionalPayment_State.invalid; + case 1: + case "open": + return FractionalPayment_State.open; + case 2: + case "closed": + return FractionalPayment_State.closed; + case 3: + case "overdrawn": + return FractionalPayment_State.overdrawn; + case -1: + case "UNRECOGNIZED": + default: + return FractionalPayment_State.UNRECOGNIZED; + } +} + +export function fractionalPayment_StateToJSON( + object: FractionalPayment_State, +): string { + switch (object) { + case FractionalPayment_State.invalid: + return "invalid"; + 
case FractionalPayment_State.open: + return "open"; + case FractionalPayment_State.closed: + return "closed"; + case FractionalPayment_State.overdrawn: + return "overdrawn"; + case FractionalPayment_State.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +function createBaseFractionalPayment(): FractionalPayment { + return { + $type: "akash.escrow.v1.FractionalPayment", + accountId: undefined, + paymentId: "", + owner: "", + state: 0, + rate: undefined, + balance: undefined, + withdrawn: undefined, + }; +} + +export const FractionalPayment: MessageFns< + FractionalPayment, + "akash.escrow.v1.FractionalPayment" +> = { + $type: "akash.escrow.v1.FractionalPayment" as const, + + encode( + message: FractionalPayment, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.accountId !== undefined) { + AccountID.encode(message.accountId, writer.uint32(10).fork()).join(); + } + if (message.paymentId !== "") { + writer.uint32(18).string(message.paymentId); + } + if (message.owner !== "") { + writer.uint32(26).string(message.owner); + } + if (message.state !== 0) { + writer.uint32(32).int32(message.state); + } + if (message.rate !== undefined) { + DecCoin.encode(message.rate, writer.uint32(42).fork()).join(); + } + if (message.balance !== undefined) { + DecCoin.encode(message.balance, writer.uint32(50).fork()).join(); + } + if (message.withdrawn !== undefined) { + Coin.encode(message.withdrawn, writer.uint32(58).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): FractionalPayment { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseFractionalPayment(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.accountId = AccountID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.paymentId = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.owner = reader.string(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.state = reader.int32() as any; + continue; + case 5: + if (tag !== 42) { + break; + } + + message.rate = DecCoin.decode(reader, reader.uint32()); + continue; + case 6: + if (tag !== 50) { + break; + } + + message.balance = DecCoin.decode(reader, reader.uint32()); + continue; + case 7: + if (tag !== 58) { + break; + } + + message.withdrawn = Coin.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): FractionalPayment { + return { + $type: FractionalPayment.$type, + accountId: isSet(object.accountId) + ? AccountID.fromJSON(object.accountId) + : undefined, + paymentId: isSet(object.paymentId) + ? globalThis.String(object.paymentId) + : "", + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + state: isSet(object.state) + ? fractionalPayment_StateFromJSON(object.state) + : 0, + rate: isSet(object.rate) ? DecCoin.fromJSON(object.rate) : undefined, + balance: isSet(object.balance) + ? DecCoin.fromJSON(object.balance) + : undefined, + withdrawn: isSet(object.withdrawn) + ? 
Coin.fromJSON(object.withdrawn) + : undefined, + }; + }, + + toJSON(message: FractionalPayment): unknown { + const obj: any = {}; + if (message.accountId !== undefined) { + obj.accountId = AccountID.toJSON(message.accountId); + } + if (message.paymentId !== "") { + obj.paymentId = message.paymentId; + } + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.state !== 0) { + obj.state = fractionalPayment_StateToJSON(message.state); + } + if (message.rate !== undefined) { + obj.rate = DecCoin.toJSON(message.rate); + } + if (message.balance !== undefined) { + obj.balance = DecCoin.toJSON(message.balance); + } + if (message.withdrawn !== undefined) { + obj.withdrawn = Coin.toJSON(message.withdrawn); + } + return obj; + }, + + create(base?: DeepPartial): FractionalPayment { + return FractionalPayment.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): FractionalPayment { + const message = createBaseFractionalPayment(); + message.accountId = + object.accountId !== undefined && object.accountId !== null + ? AccountID.fromPartial(object.accountId) + : undefined; + message.paymentId = object.paymentId ?? ""; + message.owner = object.owner ?? ""; + message.state = object.state ?? 0; + message.rate = + object.rate !== undefined && object.rate !== null + ? DecCoin.fromPartial(object.rate) + : undefined; + message.balance = + object.balance !== undefined && object.balance !== null + ? DecCoin.fromPartial(object.balance) + : undefined; + message.withdrawn = + object.withdrawn !== undefined && object.withdrawn !== null + ? Coin.fromPartial(object.withdrawn) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(FractionalPayment.$type, FractionalPayment); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/escrow/v1/genesis.ts b/ts/src/generated/akash/escrow/v1/genesis.ts new file mode 100644 index 00000000..d3f06c4b --- /dev/null +++ b/ts/src/generated/akash/escrow/v1/genesis.ts @@ -0,0 +1,144 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
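> Note (not part of the patch): a minimal usage sketch for the new escrow types above. `fromPartial` fills protobuf defaults for omitted fields, nested messages may be given as plain partial objects, and `settledAt` is an int64 carried as a `Long`; the addresses and ids below are placeholders, not real values.

```ts
import Long from "long";
import { Account, Account_State } from "./generated/akash/escrow/v1/account";

// Build an Account from a partial object; unset fields keep their defaults.
const account = Account.fromPartial({
  id: { scope: "deployment", xid: "example-xid" }, // placeholder AccountID
  owner: "akash1exampleowner",                     // placeholder bech32 address
  state: Account_State.open,
  settledAt: Long.fromNumber(123456),
});

// toJSON renders the state enum by name and the int64 field as a string.
console.log(Account.toJSON(account));
```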
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/escrow/v1/genesis.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Account } from "./account"; +import { FractionalPayment } from "./fractional_payment"; + +/** GenesisState defines the basic genesis state used by the escrow module */ +export interface GenesisState { + $type: "akash.escrow.v1.GenesisState"; + accounts: Account[]; + payments: FractionalPayment[]; +} + +function createBaseGenesisState(): GenesisState { + return { $type: "akash.escrow.v1.GenesisState", accounts: [], payments: [] }; +} + +export const GenesisState: MessageFns< + GenesisState, + "akash.escrow.v1.GenesisState" +> = { + $type: "akash.escrow.v1.GenesisState" as const, + + encode( + message: GenesisState, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.accounts) { + Account.encode(v!, writer.uint32(10).fork()).join(); + } + for (const v of message.payments) { + FractionalPayment.encode(v!, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): GenesisState { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGenesisState(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.accounts.push(Account.decode(reader, reader.uint32())); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.payments.push( + FractionalPayment.decode(reader, reader.uint32()), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): GenesisState { + return { + $type: GenesisState.$type, + accounts: globalThis.Array.isArray(object?.accounts) + ? object.accounts.map((e: any) => Account.fromJSON(e)) + : [], + payments: globalThis.Array.isArray(object?.payments) + ? object.payments.map((e: any) => FractionalPayment.fromJSON(e)) + : [], + }; + }, + + toJSON(message: GenesisState): unknown { + const obj: any = {}; + if (message.accounts?.length) { + obj.accounts = message.accounts.map((e) => Account.toJSON(e)); + } + if (message.payments?.length) { + obj.payments = message.payments.map((e) => FractionalPayment.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): GenesisState { + return GenesisState.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GenesisState { + const message = createBaseGenesisState(); + message.accounts = + object.accounts?.map((e) => Account.fromPartial(e)) || []; + message.payments = + object.payments?.map((e) => FractionalPayment.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(GenesisState.$type, GenesisState); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/escrow/v1/query.ts b/ts/src/generated/akash/escrow/v1/query.ts new file mode 100644 index 00000000..f9226086 --- /dev/null +++ b/ts/src/generated/akash/escrow/v1/query.ts @@ -0,0 +1,653 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/escrow/v1/query.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { + PageRequest, + PageResponse, +} from "../../../cosmos/base/query/v1beta1/pagination"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Account } from "./account"; +import { FractionalPayment } from "./fractional_payment"; + +/** QueryAccountRequest is request type for the Query/Account RPC method */ +export interface QueryAccountsRequest { + $type: "akash.escrow.v1.QueryAccountsRequest"; + scope: string; + xid: string; + owner: string; + state: string; + pagination: PageRequest | undefined; +} + +/** QueryProvidersResponse is response type for the Query/Providers RPC method */ +export interface QueryAccountsResponse { + $type: "akash.escrow.v1.QueryAccountsResponse"; + accounts: Account[]; + pagination: PageResponse | undefined; +} + +/** QueryPaymentRequest is request type for the Query/Payment RPC method */ +export interface QueryPaymentsRequest { + $type: "akash.escrow.v1.QueryPaymentsRequest"; + scope: string; + xid: string; + id: string; + owner: string; + state: string; + pagination: PageRequest | undefined; +} + +/** QueryProvidersResponse is response type for the Query/Providers RPC method */ +export interface QueryPaymentsResponse { + $type: "akash.escrow.v1.QueryPaymentsResponse"; + payments: FractionalPayment[]; + pagination: PageResponse | undefined; +} + +function createBaseQueryAccountsRequest(): QueryAccountsRequest { + return { + $type: "akash.escrow.v1.QueryAccountsRequest", + scope: "", + xid: "", + owner: "", + state: "", + pagination: undefined, + }; +} + +export const QueryAccountsRequest: MessageFns< + QueryAccountsRequest, + "akash.escrow.v1.QueryAccountsRequest" +> = { + $type: "akash.escrow.v1.QueryAccountsRequest" as const, + + encode( + message: QueryAccountsRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.scope !== "") { + writer.uint32(10).string(message.scope); + } + if (message.xid !== "") { + writer.uint32(18).string(message.xid); + } + if (message.owner !== "") { + writer.uint32(26).string(message.owner); + } + if (message.state !== "") { + writer.uint32(34).string(message.state); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(42).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryAccountsRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryAccountsRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.scope = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.xid = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.owner = reader.string(); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.state = reader.string(); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryAccountsRequest { + return { + $type: QueryAccountsRequest.$type, + scope: isSet(object.scope) ? globalThis.String(object.scope) : "", + xid: isSet(object.xid) ? globalThis.String(object.xid) : "", + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + state: isSet(object.state) ? globalThis.String(object.state) : "", + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryAccountsRequest): unknown { + const obj: any = {}; + if (message.scope !== "") { + obj.scope = message.scope; + } + if (message.xid !== "") { + obj.xid = message.xid; + } + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.state !== "") { + obj.state = message.state; + } + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryAccountsRequest { + return QueryAccountsRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryAccountsRequest { + const message = createBaseQueryAccountsRequest(); + message.scope = object.scope ?? ""; + message.xid = object.xid ?? ""; + message.owner = object.owner ?? ""; + message.state = object.state ?? ""; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryAccountsRequest.$type, QueryAccountsRequest); + +function createBaseQueryAccountsResponse(): QueryAccountsResponse { + return { + $type: "akash.escrow.v1.QueryAccountsResponse", + accounts: [], + pagination: undefined, + }; +} + +export const QueryAccountsResponse: MessageFns< + QueryAccountsResponse, + "akash.escrow.v1.QueryAccountsResponse" +> = { + $type: "akash.escrow.v1.QueryAccountsResponse" as const, + + encode( + message: QueryAccountsResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.accounts) { + Account.encode(v!, writer.uint32(10).fork()).join(); + } + if (message.pagination !== undefined) { + PageResponse.encode(message.pagination, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryAccountsResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryAccountsResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.accounts.push(Account.decode(reader, reader.uint32())); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageResponse.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryAccountsResponse { + return { + $type: QueryAccountsResponse.$type, + accounts: globalThis.Array.isArray(object?.accounts) + ? object.accounts.map((e: any) => Account.fromJSON(e)) + : [], + pagination: isSet(object.pagination) + ? PageResponse.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryAccountsResponse): unknown { + const obj: any = {}; + if (message.accounts?.length) { + obj.accounts = message.accounts.map((e) => Account.toJSON(e)); + } + if (message.pagination !== undefined) { + obj.pagination = PageResponse.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryAccountsResponse { + return QueryAccountsResponse.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryAccountsResponse { + const message = createBaseQueryAccountsResponse(); + message.accounts = + object.accounts?.map((e) => Account.fromPartial(e)) || []; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageResponse.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryAccountsResponse.$type, QueryAccountsResponse); + +function createBaseQueryPaymentsRequest(): QueryPaymentsRequest { + return { + $type: "akash.escrow.v1.QueryPaymentsRequest", + scope: "", + xid: "", + id: "", + owner: "", + state: "", + pagination: undefined, + }; +} + +export const QueryPaymentsRequest: MessageFns< + QueryPaymentsRequest, + "akash.escrow.v1.QueryPaymentsRequest" +> = { + $type: "akash.escrow.v1.QueryPaymentsRequest" as const, + + encode( + message: QueryPaymentsRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.scope !== "") { + writer.uint32(10).string(message.scope); + } + if (message.xid !== "") { + writer.uint32(18).string(message.xid); + } + if (message.id !== "") { + writer.uint32(26).string(message.id); + } + if (message.owner !== "") { + writer.uint32(34).string(message.owner); + } + if (message.state !== "") { + writer.uint32(42).string(message.state); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(50).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryPaymentsRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
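+    // Fields whose wire type does not match the expected tag, and field
+    // numbers this version does not know about, fall through to the code
+    // after the switch: a zero tag or an end-group marker ((tag & 7) === 4)
+    // terminates the loop, anything else is discarded with
+    // `reader.skip(tag & 7)`, so unknown fields never abort decoding.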
reader.len : reader.pos + length; + const message = createBaseQueryPaymentsRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.scope = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.xid = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.id = reader.string(); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.owner = reader.string(); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.state = reader.string(); + continue; + case 6: + if (tag !== 50) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryPaymentsRequest { + return { + $type: QueryPaymentsRequest.$type, + scope: isSet(object.scope) ? globalThis.String(object.scope) : "", + xid: isSet(object.xid) ? globalThis.String(object.xid) : "", + id: isSet(object.id) ? globalThis.String(object.id) : "", + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + state: isSet(object.state) ? globalThis.String(object.state) : "", + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryPaymentsRequest): unknown { + const obj: any = {}; + if (message.scope !== "") { + obj.scope = message.scope; + } + if (message.xid !== "") { + obj.xid = message.xid; + } + if (message.id !== "") { + obj.id = message.id; + } + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.state !== "") { + obj.state = message.state; + } + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryPaymentsRequest { + return QueryPaymentsRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryPaymentsRequest { + const message = createBaseQueryPaymentsRequest(); + message.scope = object.scope ?? ""; + message.xid = object.xid ?? ""; + message.id = object.id ?? ""; + message.owner = object.owner ?? ""; + message.state = object.state ?? ""; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryPaymentsRequest.$type, QueryPaymentsRequest); + +function createBaseQueryPaymentsResponse(): QueryPaymentsResponse { + return { + $type: "akash.escrow.v1.QueryPaymentsResponse", + payments: [], + pagination: undefined, + }; +} + +export const QueryPaymentsResponse: MessageFns< + QueryPaymentsResponse, + "akash.escrow.v1.QueryPaymentsResponse" +> = { + $type: "akash.escrow.v1.QueryPaymentsResponse" as const, + + encode( + message: QueryPaymentsResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.payments) { + FractionalPayment.encode(v!, writer.uint32(10).fork()).join(); + } + if (message.pagination !== undefined) { + PageResponse.encode(message.pagination, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryPaymentsResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
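+    // Tags are computed as (fieldNumber << 3) | wireType. Wire type 2
+    // (length-delimited) therefore yields 10 for field 1 (`payments`) and 18
+    // for field 2 (`pagination`), which is why those literals appear both in
+    // encode() above and in the cases below.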
reader.len : reader.pos + length; + const message = createBaseQueryPaymentsResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.payments.push( + FractionalPayment.decode(reader, reader.uint32()), + ); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageResponse.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryPaymentsResponse { + return { + $type: QueryPaymentsResponse.$type, + payments: globalThis.Array.isArray(object?.payments) + ? object.payments.map((e: any) => FractionalPayment.fromJSON(e)) + : [], + pagination: isSet(object.pagination) + ? PageResponse.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryPaymentsResponse): unknown { + const obj: any = {}; + if (message.payments?.length) { + obj.payments = message.payments.map((e) => FractionalPayment.toJSON(e)); + } + if (message.pagination !== undefined) { + obj.pagination = PageResponse.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryPaymentsResponse { + return QueryPaymentsResponse.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryPaymentsResponse { + const message = createBaseQueryPaymentsResponse(); + message.payments = + object.payments?.map((e) => FractionalPayment.fromPartial(e)) || []; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageResponse.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryPaymentsResponse.$type, QueryPaymentsResponse); + +/** Query defines the gRPC querier service */ +export interface Query { + /** + * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + * Accounts queries all accounts + */ + Accounts(request: QueryAccountsRequest): Promise; + /** + * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + * Payments queries all payments + */ + Payments(request: QueryPaymentsRequest): Promise; +} + +export const QueryServiceName = "akash.escrow.v1.Query"; +export class QueryClientImpl implements Query { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || QueryServiceName; + this.rpc = rpc; + this.Accounts = this.Accounts.bind(this); + this.Payments = this.Payments.bind(this); + } + Accounts(request: QueryAccountsRequest): Promise { + const data = QueryAccountsRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Accounts", data); + return promise.then((data) => + QueryAccountsResponse.decode(new BinaryReader(data)), + ); + } + + Payments(request: QueryPaymentsRequest): Promise { + const data = QueryPaymentsRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Payments", data); + return promise.then((data) => + QueryPaymentsResponse.decode(new BinaryReader(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? 
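+// DeepPartial recursively relaxes every field of a message to optional (the
+// `$type` discriminator is excluded), which is what lets create() and
+// fromPartial() accept sparse object literals. A minimal usage sketch for the
+// client defined above, assuming a caller-supplied `send(path, bytes)`
+// transport (hypothetical) that returns the raw protobuf response bytes:
+//
+//   const rpc: Rpc = {
+//     request: (service, method, data) => send(`/${service}/${method}`, data),
+//   };
+//   const client = new QueryClientImpl(rpc);
+//   const res = await client.Accounts(
+//     QueryAccountsRequest.fromPartial({ owner: "akash1..." }),
+//   );
+//   console.log(res.accounts.length);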
globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/escrow/v1beta1/genesis.ts b/ts/src/generated/akash/escrow/v1beta1/genesis.ts deleted file mode 100644 index 0d739c16..00000000 --- a/ts/src/generated/akash/escrow/v1beta1/genesis.ts +++ /dev/null @@ -1,131 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Account, Payment } from "./types"; - -/** GenesisState defines the basic genesis state used by escrow module */ -export interface GenesisState { - $type: "akash.escrow.v1beta1.GenesisState"; - accounts: Account[]; - payments: Payment[]; -} - -function createBaseGenesisState(): GenesisState { - return { - $type: "akash.escrow.v1beta1.GenesisState", - accounts: [], - payments: [], - }; -} - -export const GenesisState = { - $type: "akash.escrow.v1beta1.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.accounts) { - Account.encode(v!, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.payments) { - Payment.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.accounts.push(Account.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.payments.push(Payment.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - accounts: globalThis.Array.isArray(object?.accounts) - ? object.accounts.map((e: any) => Account.fromJSON(e)) - : [], - payments: globalThis.Array.isArray(object?.payments) - ? object.payments.map((e: any) => Payment.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.accounts?.length) { - obj.accounts = message.accounts.map((e) => Account.toJSON(e)); - } - if (message.payments?.length) { - obj.payments = message.payments.map((e) => Payment.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): GenesisState { - return GenesisState.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): GenesisState { - const message = createBaseGenesisState(); - message.accounts = - object.accounts?.map((e) => Account.fromPartial(e)) || []; - message.payments = - object.payments?.map((e) => Payment.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} diff --git a/ts/src/generated/akash/escrow/v1beta1/query.ts b/ts/src/generated/akash/escrow/v1beta1/query.ts deleted file mode 100644 index 063256d7..00000000 --- a/ts/src/generated/akash/escrow/v1beta1/query.ts +++ /dev/null @@ -1,633 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Account, Payment } from "./types"; - -/** QueryAccountRequest is request type for the Query/Account RPC method */ -export interface QueryAccountsRequest { - $type: "akash.escrow.v1beta1.QueryAccountsRequest"; - scope: string; - xid: string; - owner: string; - state: string; - pagination: PageRequest | undefined; -} - -/** QueryProvidersResponse is response type for the Query/Providers RPC method */ -export interface QueryAccountsResponse { - $type: "akash.escrow.v1beta1.QueryAccountsResponse"; - accounts: Account[]; - pagination: PageResponse | undefined; -} - -/** QueryPaymentRequest is request type for the Query/Payment RPC method */ -export interface QueryPaymentsRequest { - $type: "akash.escrow.v1beta1.QueryPaymentsRequest"; - scope: string; - xid: string; - id: string; - owner: string; - state: string; - pagination: PageRequest | undefined; -} - -/** QueryProvidersResponse is response type for the Query/Providers RPC method */ -export interface QueryPaymentsResponse { - $type: "akash.escrow.v1beta1.QueryPaymentsResponse"; - payments: Payment[]; - pagination: PageResponse | undefined; -} - -function createBaseQueryAccountsRequest(): QueryAccountsRequest { - return { - $type: "akash.escrow.v1beta1.QueryAccountsRequest", - scope: "", - xid: "", - owner: "", - state: "", - pagination: undefined, - }; -} - -export const QueryAccountsRequest = { - $type: "akash.escrow.v1beta1.QueryAccountsRequest" as const, - - encode( - message: QueryAccountsRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.scope !== "") { - writer.uint32(10).string(message.scope); - } - if (message.xid !== "") { - writer.uint32(18).string(message.xid); - } - if (message.owner !== "") { - writer.uint32(26).string(message.owner); - } - if (message.state !== "") { - writer.uint32(34).string(message.state); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(42).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryAccountsRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryAccountsRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.scope = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.xid = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.owner = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.state = reader.string(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryAccountsRequest { - return { - $type: QueryAccountsRequest.$type, - scope: isSet(object.scope) ? globalThis.String(object.scope) : "", - xid: isSet(object.xid) ? globalThis.String(object.xid) : "", - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - state: isSet(object.state) ? globalThis.String(object.state) : "", - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryAccountsRequest): unknown { - const obj: any = {}; - if (message.scope !== "") { - obj.scope = message.scope; - } - if (message.xid !== "") { - obj.xid = message.xid; - } - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.state !== "") { - obj.state = message.state; - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryAccountsRequest { - return QueryAccountsRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryAccountsRequest { - const message = createBaseQueryAccountsRequest(); - message.scope = object.scope ?? ""; - message.xid = object.xid ?? ""; - message.owner = object.owner ?? ""; - message.state = object.state ?? ""; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryAccountsRequest.$type, QueryAccountsRequest); - -function createBaseQueryAccountsResponse(): QueryAccountsResponse { - return { - $type: "akash.escrow.v1beta1.QueryAccountsResponse", - accounts: [], - pagination: undefined, - }; -} - -export const QueryAccountsResponse = { - $type: "akash.escrow.v1beta1.QueryAccountsResponse" as const, - - encode( - message: QueryAccountsResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.accounts) { - Account.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryAccountsResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryAccountsResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.accounts.push(Account.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryAccountsResponse { - return { - $type: QueryAccountsResponse.$type, - accounts: globalThis.Array.isArray(object?.accounts) - ? object.accounts.map((e: any) => Account.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryAccountsResponse): unknown { - const obj: any = {}; - if (message.accounts?.length) { - obj.accounts = message.accounts.map((e) => Account.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryAccountsResponse { - return QueryAccountsResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryAccountsResponse { - const message = createBaseQueryAccountsResponse(); - message.accounts = - object.accounts?.map((e) => Account.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryAccountsResponse.$type, QueryAccountsResponse); - -function createBaseQueryPaymentsRequest(): QueryPaymentsRequest { - return { - $type: "akash.escrow.v1beta1.QueryPaymentsRequest", - scope: "", - xid: "", - id: "", - owner: "", - state: "", - pagination: undefined, - }; -} - -export const QueryPaymentsRequest = { - $type: "akash.escrow.v1beta1.QueryPaymentsRequest" as const, - - encode( - message: QueryPaymentsRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.scope !== "") { - writer.uint32(10).string(message.scope); - } - if (message.xid !== "") { - writer.uint32(18).string(message.xid); - } - if (message.id !== "") { - writer.uint32(26).string(message.id); - } - if (message.owner !== "") { - writer.uint32(34).string(message.owner); - } - if (message.state !== "") { - writer.uint32(42).string(message.state); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(50).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryPaymentsRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryPaymentsRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.scope = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.xid = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.id = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.owner = reader.string(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.state = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryPaymentsRequest { - return { - $type: QueryPaymentsRequest.$type, - scope: isSet(object.scope) ? globalThis.String(object.scope) : "", - xid: isSet(object.xid) ? globalThis.String(object.xid) : "", - id: isSet(object.id) ? globalThis.String(object.id) : "", - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - state: isSet(object.state) ? globalThis.String(object.state) : "", - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryPaymentsRequest): unknown { - const obj: any = {}; - if (message.scope !== "") { - obj.scope = message.scope; - } - if (message.xid !== "") { - obj.xid = message.xid; - } - if (message.id !== "") { - obj.id = message.id; - } - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.state !== "") { - obj.state = message.state; - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryPaymentsRequest { - return QueryPaymentsRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryPaymentsRequest { - const message = createBaseQueryPaymentsRequest(); - message.scope = object.scope ?? ""; - message.xid = object.xid ?? ""; - message.id = object.id ?? ""; - message.owner = object.owner ?? ""; - message.state = object.state ?? ""; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryPaymentsRequest.$type, QueryPaymentsRequest); - -function createBaseQueryPaymentsResponse(): QueryPaymentsResponse { - return { - $type: "akash.escrow.v1beta1.QueryPaymentsResponse", - payments: [], - pagination: undefined, - }; -} - -export const QueryPaymentsResponse = { - $type: "akash.escrow.v1beta1.QueryPaymentsResponse" as const, - - encode( - message: QueryPaymentsResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.payments) { - Payment.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryPaymentsResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryPaymentsResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.payments.push(Payment.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryPaymentsResponse { - return { - $type: QueryPaymentsResponse.$type, - payments: globalThis.Array.isArray(object?.payments) - ? object.payments.map((e: any) => Payment.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryPaymentsResponse): unknown { - const obj: any = {}; - if (message.payments?.length) { - obj.payments = message.payments.map((e) => Payment.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryPaymentsResponse { - return QueryPaymentsResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryPaymentsResponse { - const message = createBaseQueryPaymentsResponse(); - message.payments = - object.payments?.map((e) => Payment.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryPaymentsResponse.$type, QueryPaymentsResponse); - -/** Query defines the gRPC querier service */ -export interface Query { - /** - * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - * Accounts queries all accounts - */ - Accounts(request: QueryAccountsRequest): Promise; - /** - * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - * Payments queries all payments - */ - Payments(request: QueryPaymentsRequest): Promise; -} - -export const QueryServiceName = "akash.escrow.v1beta1.Query"; -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || QueryServiceName; - this.rpc = rpc; - this.Accounts = this.Accounts.bind(this); - this.Payments = this.Payments.bind(this); - } - Accounts(request: QueryAccountsRequest): Promise { - const data = QueryAccountsRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Accounts", data); - return promise.then((data) => - QueryAccountsResponse.decode(_m0.Reader.create(data)), - ); - } - - Payments(request: QueryPaymentsRequest): Promise { - const data = QueryPaymentsRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Payments", data); - return promise.then((data) => - QueryPaymentsResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? 
ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/escrow/v1beta1/types.ts b/ts/src/generated/akash/escrow/v1beta1/types.ts deleted file mode 100644 index 1ccf4bfe..00000000 --- a/ts/src/generated/akash/escrow/v1beta1/types.ts +++ /dev/null @@ -1,610 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** AccountID is the account identifier */ -export interface AccountID { - $type: "akash.escrow.v1beta1.AccountID"; - scope: string; - xid: string; -} - -/** Account stores state for an escrow account */ -export interface Account { - $type: "akash.escrow.v1beta1.Account"; - /** unique identifier for this escrow account */ - id: AccountID | undefined; - /** bech32 encoded account address of the owner of this escrow account */ - owner: string; - /** current state of this escrow account */ - state: Account_State; - /** unspent coins received from the owner's wallet */ - balance: Coin | undefined; - /** total coins spent by this account */ - transferred: Coin | undefined; - /** block height at which this account was last settled */ - settledAt: Long; -} - -/** State stores state for an escrow account */ -export enum Account_State { - /** invalid - AccountStateInvalid is an invalid state */ - invalid = 0, - /** open - AccountOpen is the state when an account is open */ - open = 1, - /** closed - AccountClosed is the state when an account is closed */ - closed = 2, - /** overdrawn - AccountOverdrawn is the state when an account is overdrawn */ - overdrawn = 3, - UNRECOGNIZED = -1, -} - -export function account_StateFromJSON(object: any): Account_State { - switch (object) { - case 0: - case "invalid": - return Account_State.invalid; - case 1: - case "open": - return Account_State.open; - case 2: - case "closed": - return Account_State.closed; - case 3: - case "overdrawn": - return Account_State.overdrawn; - case -1: - case "UNRECOGNIZED": - default: - return Account_State.UNRECOGNIZED; - } -} - -export function account_StateToJSON(object: Account_State): string { - switch (object) { - case Account_State.invalid: - return "invalid"; - case Account_State.open: - return "open"; - case Account_State.closed: - return "closed"; - case Account_State.overdrawn: - return "overdrawn"; - case Account_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** Payment stores state for a payment */ -export interface Payment { - $type: "akash.escrow.v1beta1.Payment"; - accountId: AccountID | undefined; - paymentId: string; - owner: string; - state: Payment_State; - rate: Coin | undefined; - balance: Coin | undefined; - withdrawn: Coin | undefined; -} - -/** Payment State */ -export enum Payment_State { - /** invalid - PaymentStateInvalid is the state when the payment is invalid */ - invalid = 0, - /** open - PaymentStateOpen is the state when the payment is open */ - open = 1, - /** closed - PaymentStateClosed is the state when the payment is closed */ - closed = 2, - /** overdrawn - PaymentStateOverdrawn is the state when the payment is overdrawn */ - overdrawn = 3, - UNRECOGNIZED = -1, -} - -export function payment_StateFromJSON(object: any): Payment_State { - switch (object) { - case 0: - case 
"invalid": - return Payment_State.invalid; - case 1: - case "open": - return Payment_State.open; - case 2: - case "closed": - return Payment_State.closed; - case 3: - case "overdrawn": - return Payment_State.overdrawn; - case -1: - case "UNRECOGNIZED": - default: - return Payment_State.UNRECOGNIZED; - } -} - -export function payment_StateToJSON(object: Payment_State): string { - switch (object) { - case Payment_State.invalid: - return "invalid"; - case Payment_State.open: - return "open"; - case Payment_State.closed: - return "closed"; - case Payment_State.overdrawn: - return "overdrawn"; - case Payment_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -function createBaseAccountID(): AccountID { - return { $type: "akash.escrow.v1beta1.AccountID", scope: "", xid: "" }; -} - -export const AccountID = { - $type: "akash.escrow.v1beta1.AccountID" as const, - - encode( - message: AccountID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.scope !== "") { - writer.uint32(10).string(message.scope); - } - if (message.xid !== "") { - writer.uint32(18).string(message.xid); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): AccountID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseAccountID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.scope = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.xid = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): AccountID { - return { - $type: AccountID.$type, - scope: isSet(object.scope) ? globalThis.String(object.scope) : "", - xid: isSet(object.xid) ? globalThis.String(object.xid) : "", - }; - }, - - toJSON(message: AccountID): unknown { - const obj: any = {}; - if (message.scope !== "") { - obj.scope = message.scope; - } - if (message.xid !== "") { - obj.xid = message.xid; - } - return obj; - }, - - create(base?: DeepPartial): AccountID { - return AccountID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): AccountID { - const message = createBaseAccountID(); - message.scope = object.scope ?? ""; - message.xid = object.xid ?? 
""; - return message; - }, -}; - -messageTypeRegistry.set(AccountID.$type, AccountID); - -function createBaseAccount(): Account { - return { - $type: "akash.escrow.v1beta1.Account", - id: undefined, - owner: "", - state: 0, - balance: undefined, - transferred: undefined, - settledAt: Long.ZERO, - }; -} - -export const Account = { - $type: "akash.escrow.v1beta1.Account" as const, - - encode( - message: Account, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - AccountID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - if (message.owner !== "") { - writer.uint32(18).string(message.owner); - } - if (message.state !== 0) { - writer.uint32(24).int32(message.state); - } - if (message.balance !== undefined) { - Coin.encode(message.balance, writer.uint32(34).fork()).ldelim(); - } - if (message.transferred !== undefined) { - Coin.encode(message.transferred, writer.uint32(42).fork()).ldelim(); - } - if (!message.settledAt.equals(Long.ZERO)) { - writer.uint32(48).int64(message.settledAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Account { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseAccount(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = AccountID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.owner = reader.string(); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.state = reader.int32() as any; - continue; - case 4: - if (tag !== 34) { - break; - } - - message.balance = Coin.decode(reader, reader.uint32()); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.transferred = Coin.decode(reader, reader.uint32()); - continue; - case 6: - if (tag !== 48) { - break; - } - - message.settledAt = reader.int64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Account { - return { - $type: Account.$type, - id: isSet(object.id) ? AccountID.fromJSON(object.id) : undefined, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - state: isSet(object.state) ? account_StateFromJSON(object.state) : 0, - balance: isSet(object.balance) - ? Coin.fromJSON(object.balance) - : undefined, - transferred: isSet(object.transferred) - ? Coin.fromJSON(object.transferred) - : undefined, - settledAt: isSet(object.settledAt) - ? Long.fromValue(object.settledAt) - : Long.ZERO, - }; - }, - - toJSON(message: Account): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = AccountID.toJSON(message.id); - } - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.state !== 0) { - obj.state = account_StateToJSON(message.state); - } - if (message.balance !== undefined) { - obj.balance = Coin.toJSON(message.balance); - } - if (message.transferred !== undefined) { - obj.transferred = Coin.toJSON(message.transferred); - } - if (!message.settledAt.equals(Long.ZERO)) { - obj.settledAt = (message.settledAt || Long.ZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): Account { - return Account.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): Account { - const message = createBaseAccount(); - message.id = - object.id !== undefined && object.id !== null - ? AccountID.fromPartial(object.id) - : undefined; - message.owner = object.owner ?? ""; - message.state = object.state ?? 0; - message.balance = - object.balance !== undefined && object.balance !== null - ? Coin.fromPartial(object.balance) - : undefined; - message.transferred = - object.transferred !== undefined && object.transferred !== null - ? Coin.fromPartial(object.transferred) - : undefined; - message.settledAt = - object.settledAt !== undefined && object.settledAt !== null - ? Long.fromValue(object.settledAt) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Account.$type, Account); - -function createBasePayment(): Payment { - return { - $type: "akash.escrow.v1beta1.Payment", - accountId: undefined, - paymentId: "", - owner: "", - state: 0, - rate: undefined, - balance: undefined, - withdrawn: undefined, - }; -} - -export const Payment = { - $type: "akash.escrow.v1beta1.Payment" as const, - - encode( - message: Payment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.accountId !== undefined) { - AccountID.encode(message.accountId, writer.uint32(10).fork()).ldelim(); - } - if (message.paymentId !== "") { - writer.uint32(18).string(message.paymentId); - } - if (message.owner !== "") { - writer.uint32(26).string(message.owner); - } - if (message.state !== 0) { - writer.uint32(32).int32(message.state); - } - if (message.rate !== undefined) { - Coin.encode(message.rate, writer.uint32(42).fork()).ldelim(); - } - if (message.balance !== undefined) { - Coin.encode(message.balance, writer.uint32(50).fork()).ldelim(); - } - if (message.withdrawn !== undefined) { - Coin.encode(message.withdrawn, writer.uint32(58).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Payment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBasePayment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.accountId = AccountID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.paymentId = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.owner = reader.string(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.state = reader.int32() as any; - continue; - case 5: - if (tag !== 42) { - break; - } - - message.rate = Coin.decode(reader, reader.uint32()); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.balance = Coin.decode(reader, reader.uint32()); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.withdrawn = Coin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Payment { - return { - $type: Payment.$type, - accountId: isSet(object.accountId) - ? AccountID.fromJSON(object.accountId) - : undefined, - paymentId: isSet(object.paymentId) - ? globalThis.String(object.paymentId) - : "", - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - state: isSet(object.state) ? payment_StateFromJSON(object.state) : 0, - rate: isSet(object.rate) ? 
Coin.fromJSON(object.rate) : undefined, - balance: isSet(object.balance) - ? Coin.fromJSON(object.balance) - : undefined, - withdrawn: isSet(object.withdrawn) - ? Coin.fromJSON(object.withdrawn) - : undefined, - }; - }, - - toJSON(message: Payment): unknown { - const obj: any = {}; - if (message.accountId !== undefined) { - obj.accountId = AccountID.toJSON(message.accountId); - } - if (message.paymentId !== "") { - obj.paymentId = message.paymentId; - } - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.state !== 0) { - obj.state = payment_StateToJSON(message.state); - } - if (message.rate !== undefined) { - obj.rate = Coin.toJSON(message.rate); - } - if (message.balance !== undefined) { - obj.balance = Coin.toJSON(message.balance); - } - if (message.withdrawn !== undefined) { - obj.withdrawn = Coin.toJSON(message.withdrawn); - } - return obj; - }, - - create(base?: DeepPartial): Payment { - return Payment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Payment { - const message = createBasePayment(); - message.accountId = - object.accountId !== undefined && object.accountId !== null - ? AccountID.fromPartial(object.accountId) - : undefined; - message.paymentId = object.paymentId ?? ""; - message.owner = object.owner ?? ""; - message.state = object.state ?? 0; - message.rate = - object.rate !== undefined && object.rate !== null - ? Coin.fromPartial(object.rate) - : undefined; - message.balance = - object.balance !== undefined && object.balance !== null - ? Coin.fromPartial(object.balance) - : undefined; - message.withdrawn = - object.withdrawn !== undefined && object.withdrawn !== null - ? Coin.fromPartial(object.withdrawn) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(Payment.$type, Payment); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/escrow/v1beta2/genesis.ts b/ts/src/generated/akash/escrow/v1beta2/genesis.ts deleted file mode 100644 index f1cb0462..00000000 --- a/ts/src/generated/akash/escrow/v1beta2/genesis.ts +++ /dev/null @@ -1,133 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Account, FractionalPayment } from "./types"; - -/** GenesisState defines the basic genesis state used by escrow module */ -export interface GenesisState { - $type: "akash.escrow.v1beta2.GenesisState"; - accounts: Account[]; - payments: FractionalPayment[]; -} - -function createBaseGenesisState(): GenesisState { - return { - $type: "akash.escrow.v1beta2.GenesisState", - accounts: [], - payments: [], - }; -} - -export const GenesisState = { - $type: "akash.escrow.v1beta2.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.accounts) { - Account.encode(v!, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.payments) { - FractionalPayment.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.accounts.push(Account.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.payments.push( - FractionalPayment.decode(reader, reader.uint32()), - ); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - accounts: globalThis.Array.isArray(object?.accounts) - ? object.accounts.map((e: any) => Account.fromJSON(e)) - : [], - payments: globalThis.Array.isArray(object?.payments) - ? object.payments.map((e: any) => FractionalPayment.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.accounts?.length) { - obj.accounts = message.accounts.map((e) => Account.toJSON(e)); - } - if (message.payments?.length) { - obj.payments = message.payments.map((e) => FractionalPayment.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): GenesisState { - return GenesisState.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisState { - const message = createBaseGenesisState(); - message.accounts = - object.accounts?.map((e) => Account.fromPartial(e)) || []; - message.payments = - object.payments?.map((e) => FractionalPayment.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? 
globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} diff --git a/ts/src/generated/akash/escrow/v1beta2/query.ts b/ts/src/generated/akash/escrow/v1beta2/query.ts deleted file mode 100644 index 7c4f801e..00000000 --- a/ts/src/generated/akash/escrow/v1beta2/query.ts +++ /dev/null @@ -1,635 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Account, FractionalPayment } from "./types"; - -/** QueryAccountRequest is request type for the Query/Account RPC method */ -export interface QueryAccountsRequest { - $type: "akash.escrow.v1beta2.QueryAccountsRequest"; - scope: string; - xid: string; - owner: string; - state: string; - pagination: PageRequest | undefined; -} - -/** QueryProvidersResponse is response type for the Query/Providers RPC method */ -export interface QueryAccountsResponse { - $type: "akash.escrow.v1beta2.QueryAccountsResponse"; - accounts: Account[]; - pagination: PageResponse | undefined; -} - -/** QueryPaymentRequest is request type for the Query/Payment RPC method */ -export interface QueryPaymentsRequest { - $type: "akash.escrow.v1beta2.QueryPaymentsRequest"; - scope: string; - xid: string; - id: string; - owner: string; - state: string; - pagination: PageRequest | undefined; -} - -/** QueryProvidersResponse is response type for the Query/Providers RPC method */ -export interface QueryPaymentsResponse { - $type: "akash.escrow.v1beta2.QueryPaymentsResponse"; - payments: FractionalPayment[]; - pagination: PageResponse | undefined; -} - -function createBaseQueryAccountsRequest(): QueryAccountsRequest { - return { - $type: "akash.escrow.v1beta2.QueryAccountsRequest", - scope: "", - xid: "", - owner: "", - state: "", - pagination: undefined, - }; -} - -export const QueryAccountsRequest = { - $type: "akash.escrow.v1beta2.QueryAccountsRequest" as const, - - encode( - message: QueryAccountsRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.scope !== "") { - writer.uint32(10).string(message.scope); - } - if (message.xid !== "") { - writer.uint32(18).string(message.xid); - } - if (message.owner !== "") { - writer.uint32(26).string(message.owner); - } - if (message.state !== "") { - writer.uint32(34).string(message.state); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(42).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryAccountsRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryAccountsRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.scope = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.xid = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.owner = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.state = reader.string(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryAccountsRequest { - return { - $type: QueryAccountsRequest.$type, - scope: isSet(object.scope) ? globalThis.String(object.scope) : "", - xid: isSet(object.xid) ? globalThis.String(object.xid) : "", - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - state: isSet(object.state) ? globalThis.String(object.state) : "", - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryAccountsRequest): unknown { - const obj: any = {}; - if (message.scope !== "") { - obj.scope = message.scope; - } - if (message.xid !== "") { - obj.xid = message.xid; - } - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.state !== "") { - obj.state = message.state; - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryAccountsRequest { - return QueryAccountsRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryAccountsRequest { - const message = createBaseQueryAccountsRequest(); - message.scope = object.scope ?? ""; - message.xid = object.xid ?? ""; - message.owner = object.owner ?? ""; - message.state = object.state ?? ""; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryAccountsRequest.$type, QueryAccountsRequest); - -function createBaseQueryAccountsResponse(): QueryAccountsResponse { - return { - $type: "akash.escrow.v1beta2.QueryAccountsResponse", - accounts: [], - pagination: undefined, - }; -} - -export const QueryAccountsResponse = { - $type: "akash.escrow.v1beta2.QueryAccountsResponse" as const, - - encode( - message: QueryAccountsResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.accounts) { - Account.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryAccountsResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryAccountsResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.accounts.push(Account.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryAccountsResponse { - return { - $type: QueryAccountsResponse.$type, - accounts: globalThis.Array.isArray(object?.accounts) - ? object.accounts.map((e: any) => Account.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryAccountsResponse): unknown { - const obj: any = {}; - if (message.accounts?.length) { - obj.accounts = message.accounts.map((e) => Account.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryAccountsResponse { - return QueryAccountsResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryAccountsResponse { - const message = createBaseQueryAccountsResponse(); - message.accounts = - object.accounts?.map((e) => Account.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryAccountsResponse.$type, QueryAccountsResponse); - -function createBaseQueryPaymentsRequest(): QueryPaymentsRequest { - return { - $type: "akash.escrow.v1beta2.QueryPaymentsRequest", - scope: "", - xid: "", - id: "", - owner: "", - state: "", - pagination: undefined, - }; -} - -export const QueryPaymentsRequest = { - $type: "akash.escrow.v1beta2.QueryPaymentsRequest" as const, - - encode( - message: QueryPaymentsRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.scope !== "") { - writer.uint32(10).string(message.scope); - } - if (message.xid !== "") { - writer.uint32(18).string(message.xid); - } - if (message.id !== "") { - writer.uint32(26).string(message.id); - } - if (message.owner !== "") { - writer.uint32(34).string(message.owner); - } - if (message.state !== "") { - writer.uint32(42).string(message.state); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(50).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryPaymentsRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryPaymentsRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.scope = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.xid = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.id = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.owner = reader.string(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.state = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryPaymentsRequest { - return { - $type: QueryPaymentsRequest.$type, - scope: isSet(object.scope) ? globalThis.String(object.scope) : "", - xid: isSet(object.xid) ? globalThis.String(object.xid) : "", - id: isSet(object.id) ? globalThis.String(object.id) : "", - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - state: isSet(object.state) ? globalThis.String(object.state) : "", - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryPaymentsRequest): unknown { - const obj: any = {}; - if (message.scope !== "") { - obj.scope = message.scope; - } - if (message.xid !== "") { - obj.xid = message.xid; - } - if (message.id !== "") { - obj.id = message.id; - } - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.state !== "") { - obj.state = message.state; - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryPaymentsRequest { - return QueryPaymentsRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryPaymentsRequest { - const message = createBaseQueryPaymentsRequest(); - message.scope = object.scope ?? ""; - message.xid = object.xid ?? ""; - message.id = object.id ?? ""; - message.owner = object.owner ?? ""; - message.state = object.state ?? ""; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryPaymentsRequest.$type, QueryPaymentsRequest); - -function createBaseQueryPaymentsResponse(): QueryPaymentsResponse { - return { - $type: "akash.escrow.v1beta2.QueryPaymentsResponse", - payments: [], - pagination: undefined, - }; -} - -export const QueryPaymentsResponse = { - $type: "akash.escrow.v1beta2.QueryPaymentsResponse" as const, - - encode( - message: QueryPaymentsResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.payments) { - FractionalPayment.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryPaymentsResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryPaymentsResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.payments.push( - FractionalPayment.decode(reader, reader.uint32()), - ); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryPaymentsResponse { - return { - $type: QueryPaymentsResponse.$type, - payments: globalThis.Array.isArray(object?.payments) - ? object.payments.map((e: any) => FractionalPayment.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryPaymentsResponse): unknown { - const obj: any = {}; - if (message.payments?.length) { - obj.payments = message.payments.map((e) => FractionalPayment.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryPaymentsResponse { - return QueryPaymentsResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryPaymentsResponse { - const message = createBaseQueryPaymentsResponse(); - message.payments = - object.payments?.map((e) => FractionalPayment.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryPaymentsResponse.$type, QueryPaymentsResponse); - -/** Query defines the gRPC querier service */ -export interface Query { - /** - * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - * Accounts queries all accounts - */ - Accounts(request: QueryAccountsRequest): Promise; - /** - * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - * Payments queries all payments - */ - Payments(request: QueryPaymentsRequest): Promise; -} - -export const QueryServiceName = "akash.escrow.v1beta2.Query"; -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || QueryServiceName; - this.rpc = rpc; - this.Accounts = this.Accounts.bind(this); - this.Payments = this.Payments.bind(this); - } - Accounts(request: QueryAccountsRequest): Promise { - const data = QueryAccountsRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Accounts", data); - return promise.then((data) => - QueryAccountsResponse.decode(_m0.Reader.create(data)), - ); - } - - Payments(request: QueryPaymentsRequest): Promise { - const data = QueryPaymentsRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Payments", data); - return promise.then((data) => - QueryPaymentsResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? 
globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/escrow/v1beta2/types.ts b/ts/src/generated/akash/escrow/v1beta2/types.ts deleted file mode 100644 index 96117b67..00000000 --- a/ts/src/generated/akash/escrow/v1beta2/types.ts +++ /dev/null @@ -1,664 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin, DecCoin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** AccountID is the account identifier */ -export interface AccountID { - $type: "akash.escrow.v1beta2.AccountID"; - scope: string; - xid: string; -} - -/** Account stores state for an escrow account */ -export interface Account { - $type: "akash.escrow.v1beta2.Account"; - /** unique identifier for this escrow account */ - id: AccountID | undefined; - /** bech32 encoded account address of the owner of this escrow account */ - owner: string; - /** current state of this escrow account */ - state: Account_State; - /** unspent coins received from the owner's wallet */ - balance: DecCoin | undefined; - /** total coins spent by this account */ - transferred: DecCoin | undefined; - /** block height at which this account was last settled */ - settledAt: Long; - /** - * bech32 encoded account address of the depositor. - * If depositor is same as the owner, then any incoming coins are added to the Balance. - * If depositor isn't same as the owner, then any incoming coins are added to the Funds. - */ - depositor: string; - /** - * Funds are unspent coins received from the (non-Owner) Depositor's wallet. - * If there are any funds, they should be spent before spending the Balance. 
- */ - funds: DecCoin | undefined; -} - -/** State stores state for an escrow account */ -export enum Account_State { - /** invalid - AccountStateInvalid is an invalid state */ - invalid = 0, - /** open - AccountOpen is the state when an account is open */ - open = 1, - /** closed - AccountClosed is the state when an account is closed */ - closed = 2, - /** overdrawn - AccountOverdrawn is the state when an account is overdrawn */ - overdrawn = 3, - UNRECOGNIZED = -1, -} - -export function account_StateFromJSON(object: any): Account_State { - switch (object) { - case 0: - case "invalid": - return Account_State.invalid; - case 1: - case "open": - return Account_State.open; - case 2: - case "closed": - return Account_State.closed; - case 3: - case "overdrawn": - return Account_State.overdrawn; - case -1: - case "UNRECOGNIZED": - default: - return Account_State.UNRECOGNIZED; - } -} - -export function account_StateToJSON(object: Account_State): string { - switch (object) { - case Account_State.invalid: - return "invalid"; - case Account_State.open: - return "open"; - case Account_State.closed: - return "closed"; - case Account_State.overdrawn: - return "overdrawn"; - case Account_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** Payment stores state for a payment */ -export interface FractionalPayment { - $type: "akash.escrow.v1beta2.FractionalPayment"; - accountId: AccountID | undefined; - paymentId: string; - owner: string; - state: FractionalPayment_State; - rate: DecCoin | undefined; - balance: DecCoin | undefined; - withdrawn: Coin | undefined; -} - -/** Payment State */ -export enum FractionalPayment_State { - /** invalid - PaymentStateInvalid is the state when the payment is invalid */ - invalid = 0, - /** open - PaymentStateOpen is the state when the payment is open */ - open = 1, - /** closed - PaymentStateClosed is the state when the payment is closed */ - closed = 2, - /** overdrawn - PaymentStateOverdrawn is the state when the payment is overdrawn */ - overdrawn = 3, - UNRECOGNIZED = -1, -} - -export function fractionalPayment_StateFromJSON( - object: any, -): FractionalPayment_State { - switch (object) { - case 0: - case "invalid": - return FractionalPayment_State.invalid; - case 1: - case "open": - return FractionalPayment_State.open; - case 2: - case "closed": - return FractionalPayment_State.closed; - case 3: - case "overdrawn": - return FractionalPayment_State.overdrawn; - case -1: - case "UNRECOGNIZED": - default: - return FractionalPayment_State.UNRECOGNIZED; - } -} - -export function fractionalPayment_StateToJSON( - object: FractionalPayment_State, -): string { - switch (object) { - case FractionalPayment_State.invalid: - return "invalid"; - case FractionalPayment_State.open: - return "open"; - case FractionalPayment_State.closed: - return "closed"; - case FractionalPayment_State.overdrawn: - return "overdrawn"; - case FractionalPayment_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -function createBaseAccountID(): AccountID { - return { $type: "akash.escrow.v1beta2.AccountID", scope: "", xid: "" }; -} - -export const AccountID = { - $type: "akash.escrow.v1beta2.AccountID" as const, - - encode( - message: AccountID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.scope !== "") { - writer.uint32(10).string(message.scope); - } - if (message.xid !== "") { - writer.uint32(18).string(message.xid); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): AccountID { - const reader = - 
input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseAccountID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.scope = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.xid = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): AccountID { - return { - $type: AccountID.$type, - scope: isSet(object.scope) ? globalThis.String(object.scope) : "", - xid: isSet(object.xid) ? globalThis.String(object.xid) : "", - }; - }, - - toJSON(message: AccountID): unknown { - const obj: any = {}; - if (message.scope !== "") { - obj.scope = message.scope; - } - if (message.xid !== "") { - obj.xid = message.xid; - } - return obj; - }, - - create(base?: DeepPartial): AccountID { - return AccountID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): AccountID { - const message = createBaseAccountID(); - message.scope = object.scope ?? ""; - message.xid = object.xid ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(AccountID.$type, AccountID); - -function createBaseAccount(): Account { - return { - $type: "akash.escrow.v1beta2.Account", - id: undefined, - owner: "", - state: 0, - balance: undefined, - transferred: undefined, - settledAt: Long.ZERO, - depositor: "", - funds: undefined, - }; -} - -export const Account = { - $type: "akash.escrow.v1beta2.Account" as const, - - encode( - message: Account, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - AccountID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - if (message.owner !== "") { - writer.uint32(18).string(message.owner); - } - if (message.state !== 0) { - writer.uint32(24).int32(message.state); - } - if (message.balance !== undefined) { - DecCoin.encode(message.balance, writer.uint32(34).fork()).ldelim(); - } - if (message.transferred !== undefined) { - DecCoin.encode(message.transferred, writer.uint32(42).fork()).ldelim(); - } - if (!message.settledAt.equals(Long.ZERO)) { - writer.uint32(48).int64(message.settledAt); - } - if (message.depositor !== "") { - writer.uint32(58).string(message.depositor); - } - if (message.funds !== undefined) { - DecCoin.encode(message.funds, writer.uint32(66).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Account { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseAccount(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = AccountID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.owner = reader.string(); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.state = reader.int32() as any; - continue; - case 4: - if (tag !== 34) { - break; - } - - message.balance = DecCoin.decode(reader, reader.uint32()); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.transferred = DecCoin.decode(reader, reader.uint32()); - continue; - case 6: - if (tag !== 48) { - break; - } - - message.settledAt = reader.int64() as Long; - continue; - case 7: - if (tag !== 58) { - break; - } - - message.depositor = reader.string(); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.funds = DecCoin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Account { - return { - $type: Account.$type, - id: isSet(object.id) ? AccountID.fromJSON(object.id) : undefined, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - state: isSet(object.state) ? account_StateFromJSON(object.state) : 0, - balance: isSet(object.balance) - ? DecCoin.fromJSON(object.balance) - : undefined, - transferred: isSet(object.transferred) - ? DecCoin.fromJSON(object.transferred) - : undefined, - settledAt: isSet(object.settledAt) - ? Long.fromValue(object.settledAt) - : Long.ZERO, - depositor: isSet(object.depositor) - ? globalThis.String(object.depositor) - : "", - funds: isSet(object.funds) ? DecCoin.fromJSON(object.funds) : undefined, - }; - }, - - toJSON(message: Account): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = AccountID.toJSON(message.id); - } - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.state !== 0) { - obj.state = account_StateToJSON(message.state); - } - if (message.balance !== undefined) { - obj.balance = DecCoin.toJSON(message.balance); - } - if (message.transferred !== undefined) { - obj.transferred = DecCoin.toJSON(message.transferred); - } - if (!message.settledAt.equals(Long.ZERO)) { - obj.settledAt = (message.settledAt || Long.ZERO).toString(); - } - if (message.depositor !== "") { - obj.depositor = message.depositor; - } - if (message.funds !== undefined) { - obj.funds = DecCoin.toJSON(message.funds); - } - return obj; - }, - - create(base?: DeepPartial): Account { - return Account.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Account { - const message = createBaseAccount(); - message.id = - object.id !== undefined && object.id !== null - ? AccountID.fromPartial(object.id) - : undefined; - message.owner = object.owner ?? ""; - message.state = object.state ?? 0; - message.balance = - object.balance !== undefined && object.balance !== null - ? DecCoin.fromPartial(object.balance) - : undefined; - message.transferred = - object.transferred !== undefined && object.transferred !== null - ? DecCoin.fromPartial(object.transferred) - : undefined; - message.settledAt = - object.settledAt !== undefined && object.settledAt !== null - ? Long.fromValue(object.settledAt) - : Long.ZERO; - message.depositor = object.depositor ?? ""; - message.funds = - object.funds !== undefined && object.funds !== null - ? 
DecCoin.fromPartial(object.funds) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(Account.$type, Account); - -function createBaseFractionalPayment(): FractionalPayment { - return { - $type: "akash.escrow.v1beta2.FractionalPayment", - accountId: undefined, - paymentId: "", - owner: "", - state: 0, - rate: undefined, - balance: undefined, - withdrawn: undefined, - }; -} - -export const FractionalPayment = { - $type: "akash.escrow.v1beta2.FractionalPayment" as const, - - encode( - message: FractionalPayment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.accountId !== undefined) { - AccountID.encode(message.accountId, writer.uint32(10).fork()).ldelim(); - } - if (message.paymentId !== "") { - writer.uint32(18).string(message.paymentId); - } - if (message.owner !== "") { - writer.uint32(26).string(message.owner); - } - if (message.state !== 0) { - writer.uint32(32).int32(message.state); - } - if (message.rate !== undefined) { - DecCoin.encode(message.rate, writer.uint32(42).fork()).ldelim(); - } - if (message.balance !== undefined) { - DecCoin.encode(message.balance, writer.uint32(50).fork()).ldelim(); - } - if (message.withdrawn !== undefined) { - Coin.encode(message.withdrawn, writer.uint32(58).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): FractionalPayment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseFractionalPayment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.accountId = AccountID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.paymentId = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.owner = reader.string(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.state = reader.int32() as any; - continue; - case 5: - if (tag !== 42) { - break; - } - - message.rate = DecCoin.decode(reader, reader.uint32()); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.balance = DecCoin.decode(reader, reader.uint32()); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.withdrawn = Coin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): FractionalPayment { - return { - $type: FractionalPayment.$type, - accountId: isSet(object.accountId) - ? AccountID.fromJSON(object.accountId) - : undefined, - paymentId: isSet(object.paymentId) - ? globalThis.String(object.paymentId) - : "", - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - state: isSet(object.state) - ? fractionalPayment_StateFromJSON(object.state) - : 0, - rate: isSet(object.rate) ? DecCoin.fromJSON(object.rate) : undefined, - balance: isSet(object.balance) - ? DecCoin.fromJSON(object.balance) - : undefined, - withdrawn: isSet(object.withdrawn) - ? 
Coin.fromJSON(object.withdrawn) - : undefined, - }; - }, - - toJSON(message: FractionalPayment): unknown { - const obj: any = {}; - if (message.accountId !== undefined) { - obj.accountId = AccountID.toJSON(message.accountId); - } - if (message.paymentId !== "") { - obj.paymentId = message.paymentId; - } - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.state !== 0) { - obj.state = fractionalPayment_StateToJSON(message.state); - } - if (message.rate !== undefined) { - obj.rate = DecCoin.toJSON(message.rate); - } - if (message.balance !== undefined) { - obj.balance = DecCoin.toJSON(message.balance); - } - if (message.withdrawn !== undefined) { - obj.withdrawn = Coin.toJSON(message.withdrawn); - } - return obj; - }, - - create(base?: DeepPartial): FractionalPayment { - return FractionalPayment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): FractionalPayment { - const message = createBaseFractionalPayment(); - message.accountId = - object.accountId !== undefined && object.accountId !== null - ? AccountID.fromPartial(object.accountId) - : undefined; - message.paymentId = object.paymentId ?? ""; - message.owner = object.owner ?? ""; - message.state = object.state ?? 0; - message.rate = - object.rate !== undefined && object.rate !== null - ? DecCoin.fromPartial(object.rate) - : undefined; - message.balance = - object.balance !== undefined && object.balance !== null - ? DecCoin.fromPartial(object.balance) - : undefined; - message.withdrawn = - object.withdrawn !== undefined && object.withdrawn !== null - ? Coin.fromPartial(object.withdrawn) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(FractionalPayment.$type, FractionalPayment); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/escrow/v1beta3/genesis.ts b/ts/src/generated/akash/escrow/v1beta3/genesis.ts deleted file mode 100644 index 8f02fdfd..00000000 --- a/ts/src/generated/akash/escrow/v1beta3/genesis.ts +++ /dev/null @@ -1,133 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Account, FractionalPayment } from "./types"; - -/** GenesisState defines the basic genesis state used by escrow module */ -export interface GenesisState { - $type: "akash.escrow.v1beta3.GenesisState"; - accounts: Account[]; - payments: FractionalPayment[]; -} - -function createBaseGenesisState(): GenesisState { - return { - $type: "akash.escrow.v1beta3.GenesisState", - accounts: [], - payments: [], - }; -} - -export const GenesisState = { - $type: "akash.escrow.v1beta3.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.accounts) { - Account.encode(v!, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.payments) { - FractionalPayment.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.accounts.push(Account.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.payments.push( - FractionalPayment.decode(reader, reader.uint32()), - ); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - accounts: globalThis.Array.isArray(object?.accounts) - ? object.accounts.map((e: any) => Account.fromJSON(e)) - : [], - payments: globalThis.Array.isArray(object?.payments) - ? object.payments.map((e: any) => FractionalPayment.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.accounts?.length) { - obj.accounts = message.accounts.map((e) => Account.toJSON(e)); - } - if (message.payments?.length) { - obj.payments = message.payments.map((e) => FractionalPayment.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): GenesisState { - return GenesisState.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisState { - const message = createBaseGenesisState(); - message.accounts = - object.accounts?.map((e) => Account.fromPartial(e)) || []; - message.payments = - object.payments?.map((e) => FractionalPayment.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? 
globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} diff --git a/ts/src/generated/akash/escrow/v1beta3/query.ts b/ts/src/generated/akash/escrow/v1beta3/query.ts deleted file mode 100644 index 1e8e49ff..00000000 --- a/ts/src/generated/akash/escrow/v1beta3/query.ts +++ /dev/null @@ -1,635 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Account, FractionalPayment } from "./types"; - -/** QueryAccountRequest is request type for the Query/Account RPC method */ -export interface QueryAccountsRequest { - $type: "akash.escrow.v1beta3.QueryAccountsRequest"; - scope: string; - xid: string; - owner: string; - state: string; - pagination: PageRequest | undefined; -} - -/** QueryProvidersResponse is response type for the Query/Providers RPC method */ -export interface QueryAccountsResponse { - $type: "akash.escrow.v1beta3.QueryAccountsResponse"; - accounts: Account[]; - pagination: PageResponse | undefined; -} - -/** QueryPaymentRequest is request type for the Query/Payment RPC method */ -export interface QueryPaymentsRequest { - $type: "akash.escrow.v1beta3.QueryPaymentsRequest"; - scope: string; - xid: string; - id: string; - owner: string; - state: string; - pagination: PageRequest | undefined; -} - -/** QueryProvidersResponse is response type for the Query/Providers RPC method */ -export interface QueryPaymentsResponse { - $type: "akash.escrow.v1beta3.QueryPaymentsResponse"; - payments: FractionalPayment[]; - pagination: PageResponse | undefined; -} - -function createBaseQueryAccountsRequest(): QueryAccountsRequest { - return { - $type: "akash.escrow.v1beta3.QueryAccountsRequest", - scope: "", - xid: "", - owner: "", - state: "", - pagination: undefined, - }; -} - -export const QueryAccountsRequest = { - $type: "akash.escrow.v1beta3.QueryAccountsRequest" as const, - - encode( - message: QueryAccountsRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.scope !== "") { - writer.uint32(10).string(message.scope); - } - if (message.xid !== "") { - writer.uint32(18).string(message.xid); - } - if (message.owner !== "") { - writer.uint32(26).string(message.owner); - } - if (message.state !== "") { - writer.uint32(34).string(message.state); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(42).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryAccountsRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryAccountsRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.scope = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.xid = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.owner = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.state = reader.string(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryAccountsRequest { - return { - $type: QueryAccountsRequest.$type, - scope: isSet(object.scope) ? globalThis.String(object.scope) : "", - xid: isSet(object.xid) ? globalThis.String(object.xid) : "", - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - state: isSet(object.state) ? globalThis.String(object.state) : "", - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryAccountsRequest): unknown { - const obj: any = {}; - if (message.scope !== "") { - obj.scope = message.scope; - } - if (message.xid !== "") { - obj.xid = message.xid; - } - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.state !== "") { - obj.state = message.state; - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryAccountsRequest { - return QueryAccountsRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryAccountsRequest { - const message = createBaseQueryAccountsRequest(); - message.scope = object.scope ?? ""; - message.xid = object.xid ?? ""; - message.owner = object.owner ?? ""; - message.state = object.state ?? ""; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryAccountsRequest.$type, QueryAccountsRequest); - -function createBaseQueryAccountsResponse(): QueryAccountsResponse { - return { - $type: "akash.escrow.v1beta3.QueryAccountsResponse", - accounts: [], - pagination: undefined, - }; -} - -export const QueryAccountsResponse = { - $type: "akash.escrow.v1beta3.QueryAccountsResponse" as const, - - encode( - message: QueryAccountsResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.accounts) { - Account.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryAccountsResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryAccountsResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.accounts.push(Account.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryAccountsResponse { - return { - $type: QueryAccountsResponse.$type, - accounts: globalThis.Array.isArray(object?.accounts) - ? object.accounts.map((e: any) => Account.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryAccountsResponse): unknown { - const obj: any = {}; - if (message.accounts?.length) { - obj.accounts = message.accounts.map((e) => Account.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryAccountsResponse { - return QueryAccountsResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryAccountsResponse { - const message = createBaseQueryAccountsResponse(); - message.accounts = - object.accounts?.map((e) => Account.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryAccountsResponse.$type, QueryAccountsResponse); - -function createBaseQueryPaymentsRequest(): QueryPaymentsRequest { - return { - $type: "akash.escrow.v1beta3.QueryPaymentsRequest", - scope: "", - xid: "", - id: "", - owner: "", - state: "", - pagination: undefined, - }; -} - -export const QueryPaymentsRequest = { - $type: "akash.escrow.v1beta3.QueryPaymentsRequest" as const, - - encode( - message: QueryPaymentsRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.scope !== "") { - writer.uint32(10).string(message.scope); - } - if (message.xid !== "") { - writer.uint32(18).string(message.xid); - } - if (message.id !== "") { - writer.uint32(26).string(message.id); - } - if (message.owner !== "") { - writer.uint32(34).string(message.owner); - } - if (message.state !== "") { - writer.uint32(42).string(message.state); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(50).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryPaymentsRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryPaymentsRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.scope = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.xid = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.id = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.owner = reader.string(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.state = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryPaymentsRequest { - return { - $type: QueryPaymentsRequest.$type, - scope: isSet(object.scope) ? globalThis.String(object.scope) : "", - xid: isSet(object.xid) ? globalThis.String(object.xid) : "", - id: isSet(object.id) ? globalThis.String(object.id) : "", - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - state: isSet(object.state) ? globalThis.String(object.state) : "", - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryPaymentsRequest): unknown { - const obj: any = {}; - if (message.scope !== "") { - obj.scope = message.scope; - } - if (message.xid !== "") { - obj.xid = message.xid; - } - if (message.id !== "") { - obj.id = message.id; - } - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.state !== "") { - obj.state = message.state; - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryPaymentsRequest { - return QueryPaymentsRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryPaymentsRequest { - const message = createBaseQueryPaymentsRequest(); - message.scope = object.scope ?? ""; - message.xid = object.xid ?? ""; - message.id = object.id ?? ""; - message.owner = object.owner ?? ""; - message.state = object.state ?? ""; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryPaymentsRequest.$type, QueryPaymentsRequest); - -function createBaseQueryPaymentsResponse(): QueryPaymentsResponse { - return { - $type: "akash.escrow.v1beta3.QueryPaymentsResponse", - payments: [], - pagination: undefined, - }; -} - -export const QueryPaymentsResponse = { - $type: "akash.escrow.v1beta3.QueryPaymentsResponse" as const, - - encode( - message: QueryPaymentsResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.payments) { - FractionalPayment.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryPaymentsResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryPaymentsResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.payments.push( - FractionalPayment.decode(reader, reader.uint32()), - ); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryPaymentsResponse { - return { - $type: QueryPaymentsResponse.$type, - payments: globalThis.Array.isArray(object?.payments) - ? object.payments.map((e: any) => FractionalPayment.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryPaymentsResponse): unknown { - const obj: any = {}; - if (message.payments?.length) { - obj.payments = message.payments.map((e) => FractionalPayment.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryPaymentsResponse { - return QueryPaymentsResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryPaymentsResponse { - const message = createBaseQueryPaymentsResponse(); - message.payments = - object.payments?.map((e) => FractionalPayment.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryPaymentsResponse.$type, QueryPaymentsResponse); - -/** Query defines the gRPC querier service */ -export interface Query { - /** - * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - * Accounts queries all accounts - */ - Accounts(request: QueryAccountsRequest): Promise; - /** - * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE - * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME - * Payments queries all payments - */ - Payments(request: QueryPaymentsRequest): Promise; -} - -export const QueryServiceName = "akash.escrow.v1beta3.Query"; -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || QueryServiceName; - this.rpc = rpc; - this.Accounts = this.Accounts.bind(this); - this.Payments = this.Payments.bind(this); - } - Accounts(request: QueryAccountsRequest): Promise { - const data = QueryAccountsRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Accounts", data); - return promise.then((data) => - QueryAccountsResponse.decode(_m0.Reader.create(data)), - ); - } - - Payments(request: QueryPaymentsRequest): Promise { - const data = QueryPaymentsRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Payments", data); - return promise.then((data) => - QueryPaymentsResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? 
globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/escrow/v1beta3/types.ts b/ts/src/generated/akash/escrow/v1beta3/types.ts deleted file mode 100644 index 65e01993..00000000 --- a/ts/src/generated/akash/escrow/v1beta3/types.ts +++ /dev/null @@ -1,664 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin, DecCoin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** AccountID is the account identifier */ -export interface AccountID { - $type: "akash.escrow.v1beta3.AccountID"; - scope: string; - xid: string; -} - -/** Account stores state for an escrow account */ -export interface Account { - $type: "akash.escrow.v1beta3.Account"; - /** unique identifier for this escrow account */ - id: AccountID | undefined; - /** bech32 encoded account address of the owner of this escrow account */ - owner: string; - /** current state of this escrow account */ - state: Account_State; - /** unspent coins received from the owner's wallet */ - balance: DecCoin | undefined; - /** total coins spent by this account */ - transferred: DecCoin | undefined; - /** block height at which this account was last settled */ - settledAt: Long; - /** - * bech32 encoded account address of the depositor. - * If depositor is same as the owner, then any incoming coins are added to the Balance. - * If depositor isn't same as the owner, then any incoming coins are added to the Funds. - */ - depositor: string; - /** - * Funds are unspent coins received from the (non-Owner) Depositor's wallet. - * If there are any funds, they should be spent before spending the Balance. 
- */ - funds: DecCoin | undefined; -} - -/** State stores state for an escrow account */ -export enum Account_State { - /** invalid - AccountStateInvalid is an invalid state */ - invalid = 0, - /** open - AccountOpen is the state when an account is open */ - open = 1, - /** closed - AccountClosed is the state when an account is closed */ - closed = 2, - /** overdrawn - AccountOverdrawn is the state when an account is overdrawn */ - overdrawn = 3, - UNRECOGNIZED = -1, -} - -export function account_StateFromJSON(object: any): Account_State { - switch (object) { - case 0: - case "invalid": - return Account_State.invalid; - case 1: - case "open": - return Account_State.open; - case 2: - case "closed": - return Account_State.closed; - case 3: - case "overdrawn": - return Account_State.overdrawn; - case -1: - case "UNRECOGNIZED": - default: - return Account_State.UNRECOGNIZED; - } -} - -export function account_StateToJSON(object: Account_State): string { - switch (object) { - case Account_State.invalid: - return "invalid"; - case Account_State.open: - return "open"; - case Account_State.closed: - return "closed"; - case Account_State.overdrawn: - return "overdrawn"; - case Account_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** Payment stores state for a payment */ -export interface FractionalPayment { - $type: "akash.escrow.v1beta3.FractionalPayment"; - accountId: AccountID | undefined; - paymentId: string; - owner: string; - state: FractionalPayment_State; - rate: DecCoin | undefined; - balance: DecCoin | undefined; - withdrawn: Coin | undefined; -} - -/** Payment State */ -export enum FractionalPayment_State { - /** invalid - PaymentStateInvalid is the state when the payment is invalid */ - invalid = 0, - /** open - PaymentStateOpen is the state when the payment is open */ - open = 1, - /** closed - PaymentStateClosed is the state when the payment is closed */ - closed = 2, - /** overdrawn - PaymentStateOverdrawn is the state when the payment is overdrawn */ - overdrawn = 3, - UNRECOGNIZED = -1, -} - -export function fractionalPayment_StateFromJSON( - object: any, -): FractionalPayment_State { - switch (object) { - case 0: - case "invalid": - return FractionalPayment_State.invalid; - case 1: - case "open": - return FractionalPayment_State.open; - case 2: - case "closed": - return FractionalPayment_State.closed; - case 3: - case "overdrawn": - return FractionalPayment_State.overdrawn; - case -1: - case "UNRECOGNIZED": - default: - return FractionalPayment_State.UNRECOGNIZED; - } -} - -export function fractionalPayment_StateToJSON( - object: FractionalPayment_State, -): string { - switch (object) { - case FractionalPayment_State.invalid: - return "invalid"; - case FractionalPayment_State.open: - return "open"; - case FractionalPayment_State.closed: - return "closed"; - case FractionalPayment_State.overdrawn: - return "overdrawn"; - case FractionalPayment_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -function createBaseAccountID(): AccountID { - return { $type: "akash.escrow.v1beta3.AccountID", scope: "", xid: "" }; -} - -export const AccountID = { - $type: "akash.escrow.v1beta3.AccountID" as const, - - encode( - message: AccountID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.scope !== "") { - writer.uint32(10).string(message.scope); - } - if (message.xid !== "") { - writer.uint32(18).string(message.xid); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): AccountID { - const reader = - 
input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseAccountID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.scope = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.xid = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): AccountID { - return { - $type: AccountID.$type, - scope: isSet(object.scope) ? globalThis.String(object.scope) : "", - xid: isSet(object.xid) ? globalThis.String(object.xid) : "", - }; - }, - - toJSON(message: AccountID): unknown { - const obj: any = {}; - if (message.scope !== "") { - obj.scope = message.scope; - } - if (message.xid !== "") { - obj.xid = message.xid; - } - return obj; - }, - - create(base?: DeepPartial): AccountID { - return AccountID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): AccountID { - const message = createBaseAccountID(); - message.scope = object.scope ?? ""; - message.xid = object.xid ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(AccountID.$type, AccountID); - -function createBaseAccount(): Account { - return { - $type: "akash.escrow.v1beta3.Account", - id: undefined, - owner: "", - state: 0, - balance: undefined, - transferred: undefined, - settledAt: Long.ZERO, - depositor: "", - funds: undefined, - }; -} - -export const Account = { - $type: "akash.escrow.v1beta3.Account" as const, - - encode( - message: Account, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - AccountID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - if (message.owner !== "") { - writer.uint32(18).string(message.owner); - } - if (message.state !== 0) { - writer.uint32(24).int32(message.state); - } - if (message.balance !== undefined) { - DecCoin.encode(message.balance, writer.uint32(34).fork()).ldelim(); - } - if (message.transferred !== undefined) { - DecCoin.encode(message.transferred, writer.uint32(42).fork()).ldelim(); - } - if (!message.settledAt.equals(Long.ZERO)) { - writer.uint32(48).int64(message.settledAt); - } - if (message.depositor !== "") { - writer.uint32(58).string(message.depositor); - } - if (message.funds !== undefined) { - DecCoin.encode(message.funds, writer.uint32(66).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Account { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseAccount(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = AccountID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.owner = reader.string(); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.state = reader.int32() as any; - continue; - case 4: - if (tag !== 34) { - break; - } - - message.balance = DecCoin.decode(reader, reader.uint32()); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.transferred = DecCoin.decode(reader, reader.uint32()); - continue; - case 6: - if (tag !== 48) { - break; - } - - message.settledAt = reader.int64() as Long; - continue; - case 7: - if (tag !== 58) { - break; - } - - message.depositor = reader.string(); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.funds = DecCoin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Account { - return { - $type: Account.$type, - id: isSet(object.id) ? AccountID.fromJSON(object.id) : undefined, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - state: isSet(object.state) ? account_StateFromJSON(object.state) : 0, - balance: isSet(object.balance) - ? DecCoin.fromJSON(object.balance) - : undefined, - transferred: isSet(object.transferred) - ? DecCoin.fromJSON(object.transferred) - : undefined, - settledAt: isSet(object.settledAt) - ? Long.fromValue(object.settledAt) - : Long.ZERO, - depositor: isSet(object.depositor) - ? globalThis.String(object.depositor) - : "", - funds: isSet(object.funds) ? DecCoin.fromJSON(object.funds) : undefined, - }; - }, - - toJSON(message: Account): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = AccountID.toJSON(message.id); - } - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.state !== 0) { - obj.state = account_StateToJSON(message.state); - } - if (message.balance !== undefined) { - obj.balance = DecCoin.toJSON(message.balance); - } - if (message.transferred !== undefined) { - obj.transferred = DecCoin.toJSON(message.transferred); - } - if (!message.settledAt.equals(Long.ZERO)) { - obj.settledAt = (message.settledAt || Long.ZERO).toString(); - } - if (message.depositor !== "") { - obj.depositor = message.depositor; - } - if (message.funds !== undefined) { - obj.funds = DecCoin.toJSON(message.funds); - } - return obj; - }, - - create(base?: DeepPartial): Account { - return Account.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Account { - const message = createBaseAccount(); - message.id = - object.id !== undefined && object.id !== null - ? AccountID.fromPartial(object.id) - : undefined; - message.owner = object.owner ?? ""; - message.state = object.state ?? 0; - message.balance = - object.balance !== undefined && object.balance !== null - ? DecCoin.fromPartial(object.balance) - : undefined; - message.transferred = - object.transferred !== undefined && object.transferred !== null - ? DecCoin.fromPartial(object.transferred) - : undefined; - message.settledAt = - object.settledAt !== undefined && object.settledAt !== null - ? Long.fromValue(object.settledAt) - : Long.ZERO; - message.depositor = object.depositor ?? ""; - message.funds = - object.funds !== undefined && object.funds !== null - ? 
DecCoin.fromPartial(object.funds) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(Account.$type, Account); - -function createBaseFractionalPayment(): FractionalPayment { - return { - $type: "akash.escrow.v1beta3.FractionalPayment", - accountId: undefined, - paymentId: "", - owner: "", - state: 0, - rate: undefined, - balance: undefined, - withdrawn: undefined, - }; -} - -export const FractionalPayment = { - $type: "akash.escrow.v1beta3.FractionalPayment" as const, - - encode( - message: FractionalPayment, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.accountId !== undefined) { - AccountID.encode(message.accountId, writer.uint32(10).fork()).ldelim(); - } - if (message.paymentId !== "") { - writer.uint32(18).string(message.paymentId); - } - if (message.owner !== "") { - writer.uint32(26).string(message.owner); - } - if (message.state !== 0) { - writer.uint32(32).int32(message.state); - } - if (message.rate !== undefined) { - DecCoin.encode(message.rate, writer.uint32(42).fork()).ldelim(); - } - if (message.balance !== undefined) { - DecCoin.encode(message.balance, writer.uint32(50).fork()).ldelim(); - } - if (message.withdrawn !== undefined) { - Coin.encode(message.withdrawn, writer.uint32(58).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): FractionalPayment { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseFractionalPayment(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.accountId = AccountID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.paymentId = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.owner = reader.string(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.state = reader.int32() as any; - continue; - case 5: - if (tag !== 42) { - break; - } - - message.rate = DecCoin.decode(reader, reader.uint32()); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.balance = DecCoin.decode(reader, reader.uint32()); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.withdrawn = Coin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): FractionalPayment { - return { - $type: FractionalPayment.$type, - accountId: isSet(object.accountId) - ? AccountID.fromJSON(object.accountId) - : undefined, - paymentId: isSet(object.paymentId) - ? globalThis.String(object.paymentId) - : "", - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - state: isSet(object.state) - ? fractionalPayment_StateFromJSON(object.state) - : 0, - rate: isSet(object.rate) ? DecCoin.fromJSON(object.rate) : undefined, - balance: isSet(object.balance) - ? DecCoin.fromJSON(object.balance) - : undefined, - withdrawn: isSet(object.withdrawn) - ? 
Coin.fromJSON(object.withdrawn) - : undefined, - }; - }, - - toJSON(message: FractionalPayment): unknown { - const obj: any = {}; - if (message.accountId !== undefined) { - obj.accountId = AccountID.toJSON(message.accountId); - } - if (message.paymentId !== "") { - obj.paymentId = message.paymentId; - } - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.state !== 0) { - obj.state = fractionalPayment_StateToJSON(message.state); - } - if (message.rate !== undefined) { - obj.rate = DecCoin.toJSON(message.rate); - } - if (message.balance !== undefined) { - obj.balance = DecCoin.toJSON(message.balance); - } - if (message.withdrawn !== undefined) { - obj.withdrawn = Coin.toJSON(message.withdrawn); - } - return obj; - }, - - create(base?: DeepPartial): FractionalPayment { - return FractionalPayment.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): FractionalPayment { - const message = createBaseFractionalPayment(); - message.accountId = - object.accountId !== undefined && object.accountId !== null - ? AccountID.fromPartial(object.accountId) - : undefined; - message.paymentId = object.paymentId ?? ""; - message.owner = object.owner ?? ""; - message.state = object.state ?? 0; - message.rate = - object.rate !== undefined && object.rate !== null - ? DecCoin.fromPartial(object.rate) - : undefined; - message.balance = - object.balance !== undefined && object.balance !== null - ? DecCoin.fromPartial(object.balance) - : undefined; - message.withdrawn = - object.withdrawn !== undefined && object.withdrawn !== null - ? Coin.fromPartial(object.withdrawn) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(FractionalPayment.$type, FractionalPayment); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/gov/v1beta3/genesis.ts b/ts/src/generated/akash/gov/v1beta3/genesis.ts index 85826ef9..d51f6d34 100644 --- a/ts/src/generated/akash/gov/v1beta3/genesis.ts +++ b/ts/src/generated/akash/gov/v1beta3/genesis.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/gov/v1beta3/genesis.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; import { DepositParams } from "./params"; @@ -14,25 +20,28 @@ function createBaseGenesisState(): GenesisState { return { $type: "akash.gov.v1beta3.GenesisState", depositParams: undefined }; } -export const GenesisState = { +export const GenesisState: MessageFns< + GenesisState, + "akash.gov.v1beta3.GenesisState" +> = { $type: "akash.gov.v1beta3.GenesisState" as const, encode( message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.depositParams !== undefined) { DepositParams.encode( message.depositParams, writer.uint32(10).fork(), - ).ldelim(); + ).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { + decode(input: BinaryReader | Uint8Array, length?: number): GenesisState { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseGenesisState(); while (reader.pos < end) { @@ -49,7 +58,7 @@ export const GenesisState = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -107,11 +116,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/gov/v1beta3/params.ts b/ts/src/generated/akash/gov/v1beta3/params.ts index 2de0ffea..b0416117 100644 --- a/ts/src/generated/akash/gov/v1beta3/params.ts +++ b/ts/src/generated/akash/gov/v1beta3/params.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/gov/v1beta3/params.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; /** DepositParams defines the parameters for the x/gov module */ @@ -20,22 +26,25 @@ function createBaseDepositParams(): DepositParams { }; } -export const DepositParams = { +export const DepositParams: MessageFns< + DepositParams, + "akash.gov.v1beta3.DepositParams" +> = { $type: "akash.gov.v1beta3.DepositParams" as const, encode( message: DepositParams, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.minInitialDepositRate.length !== 0) { writer.uint32(10).bytes(message.minInitialDepositRate); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): DepositParams { + decode(input: BinaryReader | Uint8Array, length?: number): DepositParams { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseDepositParams(); while (reader.pos < end) { @@ -52,7 +61,7 @@ export const DepositParams = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -135,11 +144,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/inflation/v1beta2/genesis.ts b/ts/src/generated/akash/inflation/v1beta2/genesis.ts index f2cd269a..2faa279e 100644 --- a/ts/src/generated/akash/inflation/v1beta2/genesis.ts +++ b/ts/src/generated/akash/inflation/v1beta2/genesis.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/inflation/v1beta2/genesis.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; import { Params } from "./params"; @@ -14,22 +20,25 @@ function createBaseGenesisState(): GenesisState { return { $type: "akash.inflation.v1beta2.GenesisState", params: undefined }; } -export const GenesisState = { +export const GenesisState: MessageFns< + GenesisState, + "akash.inflation.v1beta2.GenesisState" +> = { $type: "akash.inflation.v1beta2.GenesisState" as const, encode( message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.params !== undefined) { - Params.encode(message.params, writer.uint32(10).fork()).ldelim(); + Params.encode(message.params, writer.uint32(10).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { + decode(input: BinaryReader | Uint8Array, length?: number): GenesisState { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseGenesisState(); while (reader.pos < end) { @@ -46,7 +55,7 @@ export const GenesisState = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -102,11 +111,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/inflation/v1beta2/params.ts b/ts/src/generated/akash/inflation/v1beta2/params.ts index 01f47b87..c49f6648 100644 --- a/ts/src/generated/akash/inflation/v1beta2/params.ts +++ b/ts/src/generated/akash/inflation/v1beta2/params.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/inflation/v1beta2/params.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; /** Params defines the parameters for the x/deployment package */ @@ -29,13 +35,13 @@ function createBaseParams(): Params { }; } -export const Params = { +export const Params: MessageFns = { $type: "akash.inflation.v1beta2.Params" as const, encode( message: Params, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.inflationDecayFactor !== "") { writer.uint32(10).string(message.inflationDecayFactor); } @@ -48,9 +54,9 @@ export const Params = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Params { + decode(input: BinaryReader | Uint8Array, length?: number): Params { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseParams(); while (reader.pos < end) { @@ -81,7 +87,7 @@ export const Params = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -150,11 +156,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/inflation/v1beta3/genesis.ts b/ts/src/generated/akash/inflation/v1beta3/genesis.ts index 9a850e04..ba8a14a0 100644 --- a/ts/src/generated/akash/inflation/v1beta3/genesis.ts +++ b/ts/src/generated/akash/inflation/v1beta3/genesis.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/inflation/v1beta3/genesis.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; import { Params } from "./params"; @@ -14,22 +20,25 @@ function createBaseGenesisState(): GenesisState { return { $type: "akash.inflation.v1beta3.GenesisState", params: undefined }; } -export const GenesisState = { +export const GenesisState: MessageFns< + GenesisState, + "akash.inflation.v1beta3.GenesisState" +> = { $type: "akash.inflation.v1beta3.GenesisState" as const, encode( message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.params !== undefined) { - Params.encode(message.params, writer.uint32(10).fork()).ldelim(); + Params.encode(message.params, writer.uint32(10).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { + decode(input: BinaryReader | Uint8Array, length?: number): GenesisState { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseGenesisState(); while (reader.pos < end) { @@ -46,7 +55,7 @@ export const GenesisState = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -102,11 +111,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/inflation/v1beta3/params.ts b/ts/src/generated/akash/inflation/v1beta3/params.ts index ac548c25..a6c1c8d4 100644 --- a/ts/src/generated/akash/inflation/v1beta3/params.ts +++ b/ts/src/generated/akash/inflation/v1beta3/params.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/inflation/v1beta3/params.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; /** Params defines the parameters for the x/deployment package */ @@ -29,13 +35,13 @@ function createBaseParams(): Params { }; } -export const Params = { +export const Params: MessageFns = { $type: "akash.inflation.v1beta3.Params" as const, encode( message: Params, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.inflationDecayFactor !== "") { writer.uint32(10).string(message.inflationDecayFactor); } @@ -48,9 +54,9 @@ export const Params = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Params { + decode(input: BinaryReader | Uint8Array, length?: number): Params { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseParams(); while (reader.pos < end) { @@ -81,7 +87,7 @@ export const Params = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -150,11 +156,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/inventory/v1/cluster.ts b/ts/src/generated/akash/inventory/v1/cluster.ts index d379d0fa..9dfc3714 100644 --- a/ts/src/generated/akash/inventory/v1/cluster.ts +++ b/ts/src/generated/akash/inventory/v1/cluster.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/inventory/v1/cluster.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; import { Node } from "./node"; import { Storage } from "./storage"; @@ -16,25 +22,25 @@ function createBaseCluster(): Cluster { return { $type: "akash.inventory.v1.Cluster", nodes: [], storage: [] }; } -export const Cluster = { +export const Cluster: MessageFns = { $type: "akash.inventory.v1.Cluster" as const, encode( message: Cluster, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { for (const v of message.nodes) { - Node.encode(v!, writer.uint32(10).fork()).ldelim(); + Node.encode(v!, writer.uint32(10).fork()).join(); } for (const v of message.storage) { - Storage.encode(v!, writer.uint32(18).fork()).ldelim(); + Storage.encode(v!, writer.uint32(18).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Cluster { + decode(input: BinaryReader | Uint8Array, length?: number): Cluster { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseCluster(); while (reader.pos < end) { @@ -58,7 +64,7 @@ export const Cluster = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -120,7 +126,12 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; } diff --git a/ts/src/generated/akash/inventory/v1/cpu.ts b/ts/src/generated/akash/inventory/v1/cpu.ts index 83ff1f07..c5203395 100644 --- a/ts/src/generated/akash/inventory/v1/cpu.ts +++ b/ts/src/generated/akash/inventory/v1/cpu.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/inventory/v1/cpu.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; import { ResourcePair } from "./resourcepair"; @@ -30,13 +36,13 @@ function createBaseCPUInfo(): CPUInfo { }; } -export const CPUInfo = { +export const CPUInfo: MessageFns = { $type: "akash.inventory.v1.CPUInfo" as const, encode( message: CPUInfo, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.id !== "") { writer.uint32(10).string(message.id); } @@ -52,9 +58,9 @@ export const CPUInfo = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): CPUInfo { + decode(input: BinaryReader | Uint8Array, length?: number): CPUInfo { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? 
input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseCPUInfo(); while (reader.pos < end) { @@ -92,7 +98,7 @@ export const CPUInfo = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -143,22 +149,25 @@ function createBaseCPU(): CPU { return { $type: "akash.inventory.v1.CPU", quantity: undefined, info: [] }; } -export const CPU = { +export const CPU: MessageFns = { $type: "akash.inventory.v1.CPU" as const, - encode(message: CPU, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + encode( + message: CPU, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.quantity !== undefined) { - ResourcePair.encode(message.quantity, writer.uint32(10).fork()).ldelim(); + ResourcePair.encode(message.quantity, writer.uint32(10).fork()).join(); } for (const v of message.info) { - CPUInfo.encode(v!, writer.uint32(18).fork()).ldelim(); + CPUInfo.encode(v!, writer.uint32(18).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): CPU { + decode(input: BinaryReader | Uint8Array, length?: number): CPU { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseCPU(); while (reader.pos < end) { @@ -182,7 +191,7 @@ export const CPU = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -247,11 +256,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/inventory/v1/gpu.ts b/ts/src/generated/akash/inventory/v1/gpu.ts index 5f4bd261..9b65928a 100644 --- a/ts/src/generated/akash/inventory/v1/gpu.ts +++ b/ts/src/generated/akash/inventory/v1/gpu.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/inventory/v1/gpu.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; import { ResourcePair } from "./resourcepair"; @@ -34,13 +40,13 @@ function createBaseGPUInfo(): GPUInfo { }; } -export const GPUInfo = { +export const GPUInfo: MessageFns = { $type: "akash.inventory.v1.GPUInfo" as const, encode( message: GPUInfo, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.vendor !== "") { writer.uint32(10).string(message.vendor); } @@ -62,9 +68,9 @@ export const GPUInfo = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): GPUInfo { + decode(input: BinaryReader | Uint8Array, length?: number): GPUInfo { const reader = - input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseGPUInfo(); while (reader.pos < end) { @@ -116,7 +122,7 @@ export const GPUInfo = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -183,22 +189,25 @@ function createBaseGPU(): GPU { return { $type: "akash.inventory.v1.GPU", quantity: undefined, info: [] }; } -export const GPU = { +export const GPU: MessageFns = { $type: "akash.inventory.v1.GPU" as const, - encode(message: GPU, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + encode( + message: GPU, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.quantity !== undefined) { - ResourcePair.encode(message.quantity, writer.uint32(10).fork()).ldelim(); + ResourcePair.encode(message.quantity, writer.uint32(10).fork()).join(); } for (const v of message.info) { - GPUInfo.encode(v!, writer.uint32(18).fork()).ldelim(); + GPUInfo.encode(v!, writer.uint32(18).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): GPU { + decode(input: BinaryReader | Uint8Array, length?: number): GPU { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseGPU(); while (reader.pos < end) { @@ -222,7 +231,7 @@ export const GPU = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -287,11 +296,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/inventory/v1/memory.ts b/ts/src/generated/akash/inventory/v1/memory.ts index b49a9b44..61dd3926 100644 --- a/ts/src/generated/akash/inventory/v1/memory.ts +++ b/ts/src/generated/akash/inventory/v1/memory.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/inventory/v1/memory.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; import { ResourcePair } from "./resourcepair"; @@ -30,13 +36,16 @@ function createBaseMemoryInfo(): MemoryInfo { }; } -export const MemoryInfo = { +export const MemoryInfo: MessageFns< + MemoryInfo, + "akash.inventory.v1.MemoryInfo" +> = { $type: "akash.inventory.v1.MemoryInfo" as const, encode( message: MemoryInfo, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.vendor !== "") { writer.uint32(10).string(message.vendor); } @@ -52,9 +61,9 @@ export const MemoryInfo = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): MemoryInfo { + decode(input: BinaryReader | Uint8Array, length?: number): MemoryInfo { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseMemoryInfo(); while (reader.pos < end) { @@ -92,7 +101,7 @@ export const MemoryInfo = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -145,25 +154,25 @@ function createBaseMemory(): Memory { return { $type: "akash.inventory.v1.Memory", quantity: undefined, info: [] }; } -export const Memory = { +export const Memory: MessageFns = { $type: "akash.inventory.v1.Memory" as const, encode( message: Memory, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.quantity !== undefined) { - ResourcePair.encode(message.quantity, writer.uint32(10).fork()).ldelim(); + ResourcePair.encode(message.quantity, writer.uint32(10).fork()).join(); } for (const v of message.info) { - MemoryInfo.encode(v!, writer.uint32(18).fork()).ldelim(); + MemoryInfo.encode(v!, writer.uint32(18).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Memory { + decode(input: BinaryReader | Uint8Array, length?: number): Memory { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseMemory(); while (reader.pos < end) { @@ -187,7 +196,7 @@ export const Memory = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -252,11 +261,16 @@ type DeepPartial = T extends Builtin ? 
{ [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/inventory/v1/node.ts b/ts/src/generated/akash/inventory/v1/node.ts index afbf8f60..af79dfb6 100644 --- a/ts/src/generated/akash/inventory/v1/node.ts +++ b/ts/src/generated/akash/inventory/v1/node.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/inventory/v1/node.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; import { NodeResources } from "./resources"; @@ -22,22 +28,25 @@ function createBaseNodeCapabilities(): NodeCapabilities { return { $type: "akash.inventory.v1.NodeCapabilities", storageClasses: [] }; } -export const NodeCapabilities = { +export const NodeCapabilities: MessageFns< + NodeCapabilities, + "akash.inventory.v1.NodeCapabilities" +> = { $type: "akash.inventory.v1.NodeCapabilities" as const, encode( message: NodeCapabilities, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { for (const v of message.storageClasses) { writer.uint32(10).string(v!); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): NodeCapabilities { + decode(input: BinaryReader | Uint8Array, length?: number): NodeCapabilities { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseNodeCapabilities(); while (reader.pos < end) { @@ -54,7 +63,7 @@ export const NodeCapabilities = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -97,31 +106,31 @@ function createBaseNode(): Node { }; } -export const Node = { +export const Node: MessageFns = { $type: "akash.inventory.v1.Node" as const, - encode(message: Node, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + encode( + message: Node, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.name !== "") { writer.uint32(10).string(message.name); } if (message.resources !== undefined) { - NodeResources.encode( - message.resources, - writer.uint32(18).fork(), - ).ldelim(); + NodeResources.encode(message.resources, writer.uint32(18).fork()).join(); } if (message.capabilities !== undefined) { NodeCapabilities.encode( message.capabilities, writer.uint32(26).fork(), - ).ldelim(); + ).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Node { + decode(input: BinaryReader | Uint8Array, length?: number): Node { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseNode(); while (reader.pos < end) { @@ -155,7 +164,7 @@ export const Node = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -228,11 +237,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/inventory/v1/resourcepair.ts b/ts/src/generated/akash/inventory/v1/resourcepair.ts index 43a772fd..ba95c1ab 100644 --- a/ts/src/generated/akash/inventory/v1/resourcepair.ts +++ b/ts/src/generated/akash/inventory/v1/resourcepair.ts @@ -1,9 +1,15 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/inventory/v1/resourcepair.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Quantity } from "../../../k8s.io/apimachinery/pkg/api/resource/generated"; +import { Quantity } from "../../../k8s/io/apimachinery/pkg/api/resource/generated"; import { messageTypeRegistry } from "../../../typeRegistry"; -import { Attribute } from "../../base/v1beta3/attribute"; +import { Attribute } from "../../base/attributes/v1/attribute"; /** ResourcePair to extents resource.Quantity to provide total and available units of the resource */ export interface ResourcePair { @@ -22,28 +28,31 @@ function createBaseResourcePair(): ResourcePair { }; } -export const ResourcePair = { +export const ResourcePair: MessageFns< + ResourcePair, + "akash.inventory.v1.ResourcePair" +> = { $type: "akash.inventory.v1.ResourcePair" as const, encode( message: ResourcePair, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.allocatable !== undefined) { - Quantity.encode(message.allocatable, writer.uint32(10).fork()).ldelim(); + Quantity.encode(message.allocatable, writer.uint32(10).fork()).join(); } if (message.allocated !== undefined) { - Quantity.encode(message.allocated, writer.uint32(18).fork()).ldelim(); + Quantity.encode(message.allocated, writer.uint32(18).fork()).join(); } for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); + Attribute.encode(v!, writer.uint32(26).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ResourcePair { + decode(input: BinaryReader | Uint8Array, length?: number): ResourcePair { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseResourcePair(); while (reader.pos < end) { @@ -74,7 +83,7 @@ export const ResourcePair = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -150,11 +159,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/inventory/v1/resources.ts b/ts/src/generated/akash/inventory/v1/resources.ts index 01d983dd..1b89f6ca 100644 --- a/ts/src/generated/akash/inventory/v1/resources.ts +++ b/ts/src/generated/akash/inventory/v1/resources.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/inventory/v1/resources.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; import { CPU } from "./cpu"; import { GPU } from "./gpu"; @@ -30,46 +36,49 @@ function createBaseNodeResources(): NodeResources { }; } -export const NodeResources = { +export const NodeResources: MessageFns< + NodeResources, + "akash.inventory.v1.NodeResources" +> = { $type: "akash.inventory.v1.NodeResources" as const, encode( message: NodeResources, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.cpu !== undefined) { - CPU.encode(message.cpu, writer.uint32(10).fork()).ldelim(); + CPU.encode(message.cpu, writer.uint32(10).fork()).join(); } if (message.memory !== undefined) { - Memory.encode(message.memory, writer.uint32(18).fork()).ldelim(); + Memory.encode(message.memory, writer.uint32(18).fork()).join(); } if (message.gpu !== undefined) { - GPU.encode(message.gpu, writer.uint32(26).fork()).ldelim(); + GPU.encode(message.gpu, writer.uint32(26).fork()).join(); } if (message.ephemeralStorage !== undefined) { ResourcePair.encode( message.ephemeralStorage, writer.uint32(34).fork(), - ).ldelim(); + ).join(); } if (message.volumesAttached !== undefined) { ResourcePair.encode( message.volumesAttached, writer.uint32(42).fork(), - ).ldelim(); + ).join(); } if (message.volumesMounted !== undefined) { ResourcePair.encode( message.volumesMounted, writer.uint32(50).fork(), - ).ldelim(); + ).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): NodeResources { + decode(input: BinaryReader | Uint8Array, length?: number): NodeResources { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseNodeResources(); while (reader.pos < end) { @@ -127,7 +136,7 @@ export const NodeResources = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -229,11 +238,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/inventory/v1/service.grpc-js.ts b/ts/src/generated/akash/inventory/v1/service.grpc-js.ts index 3aa1eef0..e62058ec 100644 --- a/ts/src/generated/akash/inventory/v1/service.grpc-js.ts +++ b/ts/src/generated/akash/inventory/v1/service.grpc-js.ts @@ -1,3 +1,9 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/inventory/v1/service.proto + /* eslint-disable */ import { ChannelCredentials, diff --git a/ts/src/generated/akash/inventory/v1/service.ts b/ts/src/generated/akash/inventory/v1/service.ts index db566ae2..04b6c3bb 100644 --- a/ts/src/generated/akash/inventory/v1/service.ts +++ b/ts/src/generated/akash/inventory/v1/service.ts @@ -1,5 +1,11 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/inventory/v1/service.proto + /* eslint-disable */ -import _m0 from "protobufjs/minimal"; +import { BinaryReader } from "@bufbuild/protobuf/wire"; import { Observable } from "rxjs"; import { map } from "rxjs/operators"; import { Empty } from "../../../google/protobuf/empty"; @@ -35,7 +41,7 @@ export class NodeRPCClientImpl implements NodeRPC { QueryNode(request: Empty): Promise { const data = Empty.encode(request).finish(); const promise = this.rpc.request(this.service, "QueryNode", data); - return promise.then((data) => Node.decode(_m0.Reader.create(data))); + return promise.then((data) => Node.decode(new BinaryReader(data))); } StreamNode(request: Empty): Observable { @@ -45,7 +51,7 @@ export class NodeRPCClientImpl implements NodeRPC { "StreamNode", data, ); - return result.pipe(map((data) => Node.decode(_m0.Reader.create(data)))); + return result.pipe(map((data) => Node.decode(new BinaryReader(data)))); } } @@ -78,7 +84,7 @@ export class ClusterRPCClientImpl implements ClusterRPC { QueryCluster(request: Empty): Promise { const data = Empty.encode(request).finish(); const promise = this.rpc.request(this.service, "QueryCluster", data); - return promise.then((data) => Cluster.decode(_m0.Reader.create(data))); + return promise.then((data) => Cluster.decode(new BinaryReader(data))); } StreamCluster(request: Empty): Observable { @@ -88,7 +94,7 @@ export class ClusterRPCClientImpl implements ClusterRPC { "StreamCluster", data, ); - return result.pipe(map((data) => Cluster.decode(_m0.Reader.create(data)))); + return result.pipe(map((data) => Cluster.decode(new BinaryReader(data)))); } } diff --git a/ts/src/generated/akash/inventory/v1/storage.ts b/ts/src/generated/akash/inventory/v1/storage.ts index 8ba8c86b..8def570b 100644 --- 
a/ts/src/generated/akash/inventory/v1/storage.ts +++ b/ts/src/generated/akash/inventory/v1/storage.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/inventory/v1/storage.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; import { ResourcePair } from "./resourcepair"; @@ -22,13 +28,16 @@ function createBaseStorageInfo(): StorageInfo { return { $type: "akash.inventory.v1.StorageInfo", class: "", iops: "" }; } -export const StorageInfo = { +export const StorageInfo: MessageFns< + StorageInfo, + "akash.inventory.v1.StorageInfo" +> = { $type: "akash.inventory.v1.StorageInfo" as const, encode( message: StorageInfo, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.class !== "") { writer.uint32(10).string(message.class); } @@ -38,9 +47,9 @@ export const StorageInfo = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): StorageInfo { + decode(input: BinaryReader | Uint8Array, length?: number): StorageInfo { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseStorageInfo(); while (reader.pos < end) { @@ -64,7 +73,7 @@ export const StorageInfo = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -109,25 +118,25 @@ function createBaseStorage(): Storage { }; } -export const Storage = { +export const Storage: MessageFns = { $type: "akash.inventory.v1.Storage" as const, encode( message: Storage, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.quantity !== undefined) { - ResourcePair.encode(message.quantity, writer.uint32(10).fork()).ldelim(); + ResourcePair.encode(message.quantity, writer.uint32(10).fork()).join(); } if (message.info !== undefined) { - StorageInfo.encode(message.info, writer.uint32(18).fork()).ldelim(); + StorageInfo.encode(message.info, writer.uint32(18).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Storage { + decode(input: BinaryReader | Uint8Array, length?: number): Storage { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseStorage(); while (reader.pos < end) { @@ -151,7 +160,7 @@ export const Storage = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -217,11 +226,16 @@ type DeepPartial = T extends Builtin ? 
{ [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/manifest/v2beta1/group.ts b/ts/src/generated/akash/manifest/v2beta1/group.ts deleted file mode 100644 index acc9aad6..00000000 --- a/ts/src/generated/akash/manifest/v2beta1/group.ts +++ /dev/null @@ -1,125 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Service } from "./service"; - -/** Group store name and list of services */ -export interface Group { - $type: "akash.manifest.v2beta1.Group"; - name: string; - services: Service[]; -} - -function createBaseGroup(): Group { - return { $type: "akash.manifest.v2beta1.Group", name: "", services: [] }; -} - -export const Group = { - $type: "akash.manifest.v2beta1.Group" as const, - - encode(message: Group, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - for (const v of message.services) { - Service.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Group { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.services.push(Service.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Group { - return { - $type: Group.$type, - name: isSet(object.name) ? globalThis.String(object.name) : "", - services: globalThis.Array.isArray(object?.services) - ? object.services.map((e: any) => Service.fromJSON(e)) - : [], - }; - }, - - toJSON(message: Group): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.services?.length) { - obj.services = message.services.map((e) => Service.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): Group { - return Group.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Group { - const message = createBaseGroup(); - message.name = object.name ?? ""; - message.services = - object.services?.map((e) => Service.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(Group.$type, Group); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/manifest/v2beta1/httpoptions.ts b/ts/src/generated/akash/manifest/v2beta1/httpoptions.ts deleted file mode 100644 index 33b17968..00000000 --- a/ts/src/generated/akash/manifest/v2beta1/httpoptions.ts +++ /dev/null @@ -1,218 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** ServiceExposeHTTPOptions */ -export interface ServiceExposeHTTPOptions { - $type: "akash.manifest.v2beta1.ServiceExposeHTTPOptions"; - maxBodySize: number; - readTimeout: number; - sendTimeout: number; - nextTries: number; - nextTimeout: number; - nextCases: string[]; -} - -function createBaseServiceExposeHTTPOptions(): ServiceExposeHTTPOptions { - return { - $type: "akash.manifest.v2beta1.ServiceExposeHTTPOptions", - maxBodySize: 0, - readTimeout: 0, - sendTimeout: 0, - nextTries: 0, - nextTimeout: 0, - nextCases: [], - }; -} - -export const ServiceExposeHTTPOptions = { - $type: "akash.manifest.v2beta1.ServiceExposeHTTPOptions" as const, - - encode( - message: ServiceExposeHTTPOptions, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.maxBodySize !== 0) { - writer.uint32(8).uint32(message.maxBodySize); - } - if (message.readTimeout !== 0) { - writer.uint32(16).uint32(message.readTimeout); - } - if (message.sendTimeout !== 0) { - writer.uint32(24).uint32(message.sendTimeout); - } - if (message.nextTries !== 0) { - writer.uint32(32).uint32(message.nextTries); - } - if (message.nextTimeout !== 0) { - writer.uint32(40).uint32(message.nextTimeout); - } - for (const v of message.nextCases) { - writer.uint32(50).string(v!); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): ServiceExposeHTTPOptions { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseServiceExposeHTTPOptions(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.maxBodySize = reader.uint32(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.readTimeout = reader.uint32(); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.sendTimeout = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.nextTries = reader.uint32(); - continue; - case 5: - if (tag !== 40) { - break; - } - - message.nextTimeout = reader.uint32(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.nextCases.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ServiceExposeHTTPOptions { - return { - $type: ServiceExposeHTTPOptions.$type, - maxBodySize: isSet(object.maxBodySize) - ? globalThis.Number(object.maxBodySize) - : 0, - readTimeout: isSet(object.readTimeout) - ? globalThis.Number(object.readTimeout) - : 0, - sendTimeout: isSet(object.sendTimeout) - ? globalThis.Number(object.sendTimeout) - : 0, - nextTries: isSet(object.nextTries) - ? globalThis.Number(object.nextTries) - : 0, - nextTimeout: isSet(object.nextTimeout) - ? 
globalThis.Number(object.nextTimeout) - : 0, - nextCases: globalThis.Array.isArray(object?.nextCases) - ? object.nextCases.map((e: any) => globalThis.String(e)) - : [], - }; - }, - - toJSON(message: ServiceExposeHTTPOptions): unknown { - const obj: any = {}; - if (message.maxBodySize !== 0) { - obj.maxBodySize = Math.round(message.maxBodySize); - } - if (message.readTimeout !== 0) { - obj.readTimeout = Math.round(message.readTimeout); - } - if (message.sendTimeout !== 0) { - obj.sendTimeout = Math.round(message.sendTimeout); - } - if (message.nextTries !== 0) { - obj.nextTries = Math.round(message.nextTries); - } - if (message.nextTimeout !== 0) { - obj.nextTimeout = Math.round(message.nextTimeout); - } - if (message.nextCases?.length) { - obj.nextCases = message.nextCases; - } - return obj; - }, - - create( - base?: DeepPartial, - ): ServiceExposeHTTPOptions { - return ServiceExposeHTTPOptions.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): ServiceExposeHTTPOptions { - const message = createBaseServiceExposeHTTPOptions(); - message.maxBodySize = object.maxBodySize ?? 0; - message.readTimeout = object.readTimeout ?? 0; - message.sendTimeout = object.sendTimeout ?? 0; - message.nextTries = object.nextTries ?? 0; - message.nextTimeout = object.nextTimeout ?? 0; - message.nextCases = object.nextCases?.map((e) => e) || []; - return message; - }, -}; - -messageTypeRegistry.set( - ServiceExposeHTTPOptions.$type, - ServiceExposeHTTPOptions, -); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/manifest/v2beta1/service.grpc-js.ts b/ts/src/generated/akash/manifest/v2beta1/service.grpc-js.ts deleted file mode 100644 index 91f55c18..00000000 --- a/ts/src/generated/akash/manifest/v2beta1/service.grpc-js.ts +++ /dev/null @@ -1,463 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { ResourceUnits } from "../../base/v1beta2/resourceunits"; -import { ServiceExpose } from "./serviceexpose"; - -export const protobufPackage = "akash.manifest.v2beta1"; - -/** StorageParams */ -export interface StorageParams { - $type: "akash.manifest.v2beta1.StorageParams"; - name: string; - mount: string; - readOnly: boolean; -} - -/** ServiceParams */ -export interface ServiceParams { - $type: "akash.manifest.v2beta1.ServiceParams"; - storage: StorageParams[]; -} - -/** Service stores name, image, args, env, unit, count and expose list of service */ -export interface Service { - $type: "akash.manifest.v2beta1.Service"; - name: string; - image: string; - command: string[]; - args: string[]; - env: string[]; - resources: ResourceUnits | undefined; - count: number; - expose: ServiceExpose[]; - params: ServiceParams | undefined; -} - -function createBaseStorageParams(): StorageParams { - return { - $type: "akash.manifest.v2beta1.StorageParams", - name: "", - mount: "", - readOnly: false, - }; -} - -export const StorageParams = { - $type: "akash.manifest.v2beta1.StorageParams" as const, - - encode( - message: StorageParams, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.mount !== "") { - writer.uint32(18).string(message.mount); - } - if (message.readOnly !== false) { - writer.uint32(24).bool(message.readOnly); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): StorageParams { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseStorageParams(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.mount = reader.string(); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.readOnly = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): StorageParams { - return { - $type: StorageParams.$type, - name: isSet(object.name) ? globalThis.String(object.name) : "", - mount: isSet(object.mount) ? globalThis.String(object.mount) : "", - readOnly: isSet(object.readOnly) - ? 
globalThis.Boolean(object.readOnly) - : false, - }; - }, - - toJSON(message: StorageParams): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.mount !== "") { - obj.mount = message.mount; - } - if (message.readOnly !== false) { - obj.readOnly = message.readOnly; - } - return obj; - }, - - create(base?: DeepPartial): StorageParams { - return StorageParams.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): StorageParams { - const message = createBaseStorageParams(); - message.name = object.name ?? ""; - message.mount = object.mount ?? ""; - message.readOnly = object.readOnly ?? false; - return message; - }, -}; - -messageTypeRegistry.set(StorageParams.$type, StorageParams); - -function createBaseServiceParams(): ServiceParams { - return { $type: "akash.manifest.v2beta1.ServiceParams", storage: [] }; -} - -export const ServiceParams = { - $type: "akash.manifest.v2beta1.ServiceParams" as const, - - encode( - message: ServiceParams, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.storage) { - StorageParams.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceParams { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseServiceParams(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.storage.push(StorageParams.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ServiceParams { - return { - $type: ServiceParams.$type, - storage: globalThis.Array.isArray(object?.storage) - ? object.storage.map((e: any) => StorageParams.fromJSON(e)) - : [], - }; - }, - - toJSON(message: ServiceParams): unknown { - const obj: any = {}; - if (message.storage?.length) { - obj.storage = message.storage.map((e) => StorageParams.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): ServiceParams { - return ServiceParams.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): ServiceParams { - const message = createBaseServiceParams(); - message.storage = - object.storage?.map((e) => StorageParams.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(ServiceParams.$type, ServiceParams); - -function createBaseService(): Service { - return { - $type: "akash.manifest.v2beta1.Service", - name: "", - image: "", - command: [], - args: [], - env: [], - resources: undefined, - count: 0, - expose: [], - params: undefined, - }; -} - -export const Service = { - $type: "akash.manifest.v2beta1.Service" as const, - - encode( - message: Service, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.image !== "") { - writer.uint32(18).string(message.image); - } - for (const v of message.command) { - writer.uint32(26).string(v!); - } - for (const v of message.args) { - writer.uint32(34).string(v!); - } - for (const v of message.env) { - writer.uint32(42).string(v!); - } - if (message.resources !== undefined) { - ResourceUnits.encode( - message.resources, - writer.uint32(50).fork(), - ).ldelim(); - } - if (message.count !== 0) { - writer.uint32(56).uint32(message.count); - } - for (const v of message.expose) { - ServiceExpose.encode(v!, writer.uint32(66).fork()).ldelim(); - } - if (message.params !== undefined) { - ServiceParams.encode(message.params, writer.uint32(74).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Service { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseService(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.image = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.command.push(reader.string()); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.args.push(reader.string()); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.env.push(reader.string()); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.resources = ResourceUnits.decode(reader, reader.uint32()); - continue; - case 7: - if (tag !== 56) { - break; - } - - message.count = reader.uint32(); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.expose.push(ServiceExpose.decode(reader, reader.uint32())); - continue; - case 9: - if (tag !== 74) { - break; - } - - message.params = ServiceParams.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Service { - return { - $type: Service.$type, - name: isSet(object.name) ? globalThis.String(object.name) : "", - image: isSet(object.image) ? globalThis.String(object.image) : "", - command: globalThis.Array.isArray(object?.command) - ? object.command.map((e: any) => globalThis.String(e)) - : [], - args: globalThis.Array.isArray(object?.args) - ? object.args.map((e: any) => globalThis.String(e)) - : [], - env: globalThis.Array.isArray(object?.env) - ? object.env.map((e: any) => globalThis.String(e)) - : [], - resources: isSet(object.resources) - ? 
ResourceUnits.fromJSON(object.resources) - : undefined, - count: isSet(object.count) ? globalThis.Number(object.count) : 0, - expose: globalThis.Array.isArray(object?.expose) - ? object.expose.map((e: any) => ServiceExpose.fromJSON(e)) - : [], - params: isSet(object.params) - ? ServiceParams.fromJSON(object.params) - : undefined, - }; - }, - - toJSON(message: Service): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.image !== "") { - obj.image = message.image; - } - if (message.command?.length) { - obj.command = message.command; - } - if (message.args?.length) { - obj.args = message.args; - } - if (message.env?.length) { - obj.env = message.env; - } - if (message.resources !== undefined) { - obj.resources = ResourceUnits.toJSON(message.resources); - } - if (message.count !== 0) { - obj.count = Math.round(message.count); - } - if (message.expose?.length) { - obj.expose = message.expose.map((e) => ServiceExpose.toJSON(e)); - } - if (message.params !== undefined) { - obj.params = ServiceParams.toJSON(message.params); - } - return obj; - }, - - create(base?: DeepPartial): Service { - return Service.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Service { - const message = createBaseService(); - message.name = object.name ?? ""; - message.image = object.image ?? ""; - message.command = object.command?.map((e) => e) || []; - message.args = object.args?.map((e) => e) || []; - message.env = object.env?.map((e) => e) || []; - message.resources = - object.resources !== undefined && object.resources !== null - ? ResourceUnits.fromPartial(object.resources) - : undefined; - message.count = object.count ?? 0; - message.expose = - object.expose?.map((e) => ServiceExpose.fromPartial(e)) || []; - message.params = - object.params !== undefined && object.params !== null - ? ServiceParams.fromPartial(object.params) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(Service.$type, Service); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/manifest/v2beta1/service.ts b/ts/src/generated/akash/manifest/v2beta1/service.ts deleted file mode 100644 index 059865b9..00000000 --- a/ts/src/generated/akash/manifest/v2beta1/service.ts +++ /dev/null @@ -1,461 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { ResourceUnits } from "../../base/v1beta2/resourceunits"; -import { ServiceExpose } from "./serviceexpose"; - -/** StorageParams */ -export interface StorageParams { - $type: "akash.manifest.v2beta1.StorageParams"; - name: string; - mount: string; - readOnly: boolean; -} - -/** ServiceParams */ -export interface ServiceParams { - $type: "akash.manifest.v2beta1.ServiceParams"; - storage: StorageParams[]; -} - -/** Service stores name, image, args, env, unit, count and expose list of service */ -export interface Service { - $type: "akash.manifest.v2beta1.Service"; - name: string; - image: string; - command: string[]; - args: string[]; - env: string[]; - resources: ResourceUnits | undefined; - count: number; - expose: ServiceExpose[]; - params: ServiceParams | undefined; -} - -function createBaseStorageParams(): StorageParams { - return { - $type: "akash.manifest.v2beta1.StorageParams", - name: "", - mount: "", - readOnly: false, - }; -} - -export const StorageParams = { - $type: "akash.manifest.v2beta1.StorageParams" as const, - - encode( - message: StorageParams, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.mount !== "") { - writer.uint32(18).string(message.mount); - } - if (message.readOnly !== false) { - writer.uint32(24).bool(message.readOnly); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): StorageParams { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseStorageParams(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.mount = reader.string(); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.readOnly = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): StorageParams { - return { - $type: StorageParams.$type, - name: isSet(object.name) ? globalThis.String(object.name) : "", - mount: isSet(object.mount) ? globalThis.String(object.mount) : "", - readOnly: isSet(object.readOnly) - ? globalThis.Boolean(object.readOnly) - : false, - }; - }, - - toJSON(message: StorageParams): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.mount !== "") { - obj.mount = message.mount; - } - if (message.readOnly !== false) { - obj.readOnly = message.readOnly; - } - return obj; - }, - - create(base?: DeepPartial): StorageParams { - return StorageParams.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): StorageParams { - const message = createBaseStorageParams(); - message.name = object.name ?? ""; - message.mount = object.mount ?? ""; - message.readOnly = object.readOnly ?? false; - return message; - }, -}; - -messageTypeRegistry.set(StorageParams.$type, StorageParams); - -function createBaseServiceParams(): ServiceParams { - return { $type: "akash.manifest.v2beta1.ServiceParams", storage: [] }; -} - -export const ServiceParams = { - $type: "akash.manifest.v2beta1.ServiceParams" as const, - - encode( - message: ServiceParams, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.storage) { - StorageParams.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceParams { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseServiceParams(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.storage.push(StorageParams.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ServiceParams { - return { - $type: ServiceParams.$type, - storage: globalThis.Array.isArray(object?.storage) - ? object.storage.map((e: any) => StorageParams.fromJSON(e)) - : [], - }; - }, - - toJSON(message: ServiceParams): unknown { - const obj: any = {}; - if (message.storage?.length) { - obj.storage = message.storage.map((e) => StorageParams.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): ServiceParams { - return ServiceParams.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): ServiceParams { - const message = createBaseServiceParams(); - message.storage = - object.storage?.map((e) => StorageParams.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(ServiceParams.$type, ServiceParams); - -function createBaseService(): Service { - return { - $type: "akash.manifest.v2beta1.Service", - name: "", - image: "", - command: [], - args: [], - env: [], - resources: undefined, - count: 0, - expose: [], - params: undefined, - }; -} - -export const Service = { - $type: "akash.manifest.v2beta1.Service" as const, - - encode( - message: Service, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.image !== "") { - writer.uint32(18).string(message.image); - } - for (const v of message.command) { - writer.uint32(26).string(v!); - } - for (const v of message.args) { - writer.uint32(34).string(v!); - } - for (const v of message.env) { - writer.uint32(42).string(v!); - } - if (message.resources !== undefined) { - ResourceUnits.encode( - message.resources, - writer.uint32(50).fork(), - ).ldelim(); - } - if (message.count !== 0) { - writer.uint32(56).uint32(message.count); - } - for (const v of message.expose) { - ServiceExpose.encode(v!, writer.uint32(66).fork()).ldelim(); - } - if (message.params !== undefined) { - ServiceParams.encode(message.params, writer.uint32(74).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Service { - const reader = - input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseService(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.image = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.command.push(reader.string()); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.args.push(reader.string()); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.env.push(reader.string()); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.resources = ResourceUnits.decode(reader, reader.uint32()); - continue; - case 7: - if (tag !== 56) { - break; - } - - message.count = reader.uint32(); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.expose.push(ServiceExpose.decode(reader, reader.uint32())); - continue; - case 9: - if (tag !== 74) { - break; - } - - message.params = ServiceParams.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Service { - return { - $type: Service.$type, - name: isSet(object.name) ? globalThis.String(object.name) : "", - image: isSet(object.image) ? globalThis.String(object.image) : "", - command: globalThis.Array.isArray(object?.command) - ? object.command.map((e: any) => globalThis.String(e)) - : [], - args: globalThis.Array.isArray(object?.args) - ? object.args.map((e: any) => globalThis.String(e)) - : [], - env: globalThis.Array.isArray(object?.env) - ? object.env.map((e: any) => globalThis.String(e)) - : [], - resources: isSet(object.resources) - ? ResourceUnits.fromJSON(object.resources) - : undefined, - count: isSet(object.count) ? globalThis.Number(object.count) : 0, - expose: globalThis.Array.isArray(object?.expose) - ? object.expose.map((e: any) => ServiceExpose.fromJSON(e)) - : [], - params: isSet(object.params) - ? ServiceParams.fromJSON(object.params) - : undefined, - }; - }, - - toJSON(message: Service): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.image !== "") { - obj.image = message.image; - } - if (message.command?.length) { - obj.command = message.command; - } - if (message.args?.length) { - obj.args = message.args; - } - if (message.env?.length) { - obj.env = message.env; - } - if (message.resources !== undefined) { - obj.resources = ResourceUnits.toJSON(message.resources); - } - if (message.count !== 0) { - obj.count = Math.round(message.count); - } - if (message.expose?.length) { - obj.expose = message.expose.map((e) => ServiceExpose.toJSON(e)); - } - if (message.params !== undefined) { - obj.params = ServiceParams.toJSON(message.params); - } - return obj; - }, - - create(base?: DeepPartial): Service { - return Service.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Service { - const message = createBaseService(); - message.name = object.name ?? ""; - message.image = object.image ?? ""; - message.command = object.command?.map((e) => e) || []; - message.args = object.args?.map((e) => e) || []; - message.env = object.env?.map((e) => e) || []; - message.resources = - object.resources !== undefined && object.resources !== null - ? ResourceUnits.fromPartial(object.resources) - : undefined; - message.count = object.count ?? 
0; - message.expose = - object.expose?.map((e) => ServiceExpose.fromPartial(e)) || []; - message.params = - object.params !== undefined && object.params !== null - ? ServiceParams.fromPartial(object.params) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(Service.$type, Service); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/manifest/v2beta1/serviceexpose.ts b/ts/src/generated/akash/manifest/v2beta1/serviceexpose.ts deleted file mode 100644 index b6892989..00000000 --- a/ts/src/generated/akash/manifest/v2beta1/serviceexpose.ts +++ /dev/null @@ -1,269 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { ServiceExposeHTTPOptions } from "./httpoptions"; - -/** ServiceExpose stores exposed ports and hosts details */ -export interface ServiceExpose { - $type: "akash.manifest.v2beta1.ServiceExpose"; - /** port on the container */ - port: number; - /** port on the service definition */ - externalPort: number; - proto: string; - service: string; - global: boolean; - hosts: string[]; - httpOptions: ServiceExposeHTTPOptions | undefined; - /** The name of the IP address associated with this, if any */ - ip: string; - /** The sequence number of the associated endpoint in the on-chain data */ - endpointSequenceNumber: number; -} - -function createBaseServiceExpose(): ServiceExpose { - return { - $type: "akash.manifest.v2beta1.ServiceExpose", - port: 0, - externalPort: 0, - proto: "", - service: "", - global: false, - hosts: [], - httpOptions: undefined, - ip: "", - endpointSequenceNumber: 0, - }; -} - -export const ServiceExpose = { - $type: "akash.manifest.v2beta1.ServiceExpose" as const, - - encode( - message: ServiceExpose, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.port !== 0) { - writer.uint32(8).uint32(message.port); - } - if (message.externalPort !== 0) { - writer.uint32(16).uint32(message.externalPort); - } - if (message.proto !== "") { - writer.uint32(26).string(message.proto); - } - if (message.service !== "") { - writer.uint32(34).string(message.service); - } - if (message.global !== false) { - writer.uint32(40).bool(message.global); - } - for (const v of message.hosts) { - writer.uint32(50).string(v!); - } - if (message.httpOptions !== undefined) { - ServiceExposeHTTPOptions.encode( - message.httpOptions, - writer.uint32(58).fork(), - ).ldelim(); - } - if (message.ip !== "") { - writer.uint32(66).string(message.ip); - } - if (message.endpointSequenceNumber !== 0) { - writer.uint32(72).uint32(message.endpointSequenceNumber); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceExpose { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseServiceExpose(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.port = reader.uint32(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.externalPort = reader.uint32(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.proto = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.service = reader.string(); - continue; - case 5: - if (tag !== 40) { - break; - } - - message.global = reader.bool(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.hosts.push(reader.string()); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.httpOptions = ServiceExposeHTTPOptions.decode( - reader, - reader.uint32(), - ); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.ip = reader.string(); - continue; - case 9: - if (tag !== 72) { - break; - } - - message.endpointSequenceNumber = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ServiceExpose { - return { - $type: ServiceExpose.$type, - port: isSet(object.port) ? globalThis.Number(object.port) : 0, - externalPort: isSet(object.externalPort) - ? globalThis.Number(object.externalPort) - : 0, - proto: isSet(object.proto) ? globalThis.String(object.proto) : "", - service: isSet(object.service) ? globalThis.String(object.service) : "", - global: isSet(object.global) ? globalThis.Boolean(object.global) : false, - hosts: globalThis.Array.isArray(object?.hosts) - ? object.hosts.map((e: any) => globalThis.String(e)) - : [], - httpOptions: isSet(object.httpOptions) - ? ServiceExposeHTTPOptions.fromJSON(object.httpOptions) - : undefined, - ip: isSet(object.ip) ? globalThis.String(object.ip) : "", - endpointSequenceNumber: isSet(object.endpointSequenceNumber) - ? globalThis.Number(object.endpointSequenceNumber) - : 0, - }; - }, - - toJSON(message: ServiceExpose): unknown { - const obj: any = {}; - if (message.port !== 0) { - obj.port = Math.round(message.port); - } - if (message.externalPort !== 0) { - obj.externalPort = Math.round(message.externalPort); - } - if (message.proto !== "") { - obj.proto = message.proto; - } - if (message.service !== "") { - obj.service = message.service; - } - if (message.global !== false) { - obj.global = message.global; - } - if (message.hosts?.length) { - obj.hosts = message.hosts; - } - if (message.httpOptions !== undefined) { - obj.httpOptions = ServiceExposeHTTPOptions.toJSON(message.httpOptions); - } - if (message.ip !== "") { - obj.ip = message.ip; - } - if (message.endpointSequenceNumber !== 0) { - obj.endpointSequenceNumber = Math.round(message.endpointSequenceNumber); - } - return obj; - }, - - create(base?: DeepPartial): ServiceExpose { - return ServiceExpose.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): ServiceExpose { - const message = createBaseServiceExpose(); - message.port = object.port ?? 0; - message.externalPort = object.externalPort ?? 0; - message.proto = object.proto ?? ""; - message.service = object.service ?? ""; - message.global = object.global ?? false; - message.hosts = object.hosts?.map((e) => e) || []; - message.httpOptions = - object.httpOptions !== undefined && object.httpOptions !== null - ? ServiceExposeHTTPOptions.fromPartial(object.httpOptions) - : undefined; - message.ip = object.ip ?? 
""; - message.endpointSequenceNumber = object.endpointSequenceNumber ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(ServiceExpose.$type, ServiceExpose); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/manifest/v2beta2/group.ts b/ts/src/generated/akash/manifest/v2beta2/group.ts deleted file mode 100644 index bff3a44c..00000000 --- a/ts/src/generated/akash/manifest/v2beta2/group.ts +++ /dev/null @@ -1,125 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Service } from "./service"; - -/** Group store name and list of services */ -export interface Group { - $type: "akash.manifest.v2beta2.Group"; - name: string; - services: Service[]; -} - -function createBaseGroup(): Group { - return { $type: "akash.manifest.v2beta2.Group", name: "", services: [] }; -} - -export const Group = { - $type: "akash.manifest.v2beta2.Group" as const, - - encode(message: Group, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - for (const v of message.services) { - Service.encode(v!, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Group { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGroup(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.services.push(Service.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Group { - return { - $type: Group.$type, - name: isSet(object.name) ? globalThis.String(object.name) : "", - services: globalThis.Array.isArray(object?.services) - ? object.services.map((e: any) => Service.fromJSON(e)) - : [], - }; - }, - - toJSON(message: Group): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.services?.length) { - obj.services = message.services.map((e) => Service.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): Group { - return Group.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Group { - const message = createBaseGroup(); - message.name = object.name ?? ""; - message.services = - object.services?.map((e) => Service.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(Group.$type, Group); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? 
globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/manifest/v2beta2/httpoptions.ts b/ts/src/generated/akash/manifest/v2beta2/httpoptions.ts deleted file mode 100644 index e677424d..00000000 --- a/ts/src/generated/akash/manifest/v2beta2/httpoptions.ts +++ /dev/null @@ -1,218 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** ServiceExposeHTTPOptions */ -export interface ServiceExposeHTTPOptions { - $type: "akash.manifest.v2beta2.ServiceExposeHTTPOptions"; - maxBodySize: number; - readTimeout: number; - sendTimeout: number; - nextTries: number; - nextTimeout: number; - nextCases: string[]; -} - -function createBaseServiceExposeHTTPOptions(): ServiceExposeHTTPOptions { - return { - $type: "akash.manifest.v2beta2.ServiceExposeHTTPOptions", - maxBodySize: 0, - readTimeout: 0, - sendTimeout: 0, - nextTries: 0, - nextTimeout: 0, - nextCases: [], - }; -} - -export const ServiceExposeHTTPOptions = { - $type: "akash.manifest.v2beta2.ServiceExposeHTTPOptions" as const, - - encode( - message: ServiceExposeHTTPOptions, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.maxBodySize !== 0) { - writer.uint32(8).uint32(message.maxBodySize); - } - if (message.readTimeout !== 0) { - writer.uint32(16).uint32(message.readTimeout); - } - if (message.sendTimeout !== 0) { - writer.uint32(24).uint32(message.sendTimeout); - } - if (message.nextTries !== 0) { - writer.uint32(32).uint32(message.nextTries); - } - if (message.nextTimeout !== 0) { - writer.uint32(40).uint32(message.nextTimeout); - } - for (const v of message.nextCases) { - writer.uint32(50).string(v!); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): ServiceExposeHTTPOptions { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseServiceExposeHTTPOptions(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.maxBodySize = reader.uint32(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.readTimeout = reader.uint32(); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.sendTimeout = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.nextTries = reader.uint32(); - continue; - case 5: - if (tag !== 40) { - break; - } - - message.nextTimeout = reader.uint32(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.nextCases.push(reader.string()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ServiceExposeHTTPOptions { - return { - $type: ServiceExposeHTTPOptions.$type, - maxBodySize: isSet(object.maxBodySize) - ? globalThis.Number(object.maxBodySize) - : 0, - readTimeout: isSet(object.readTimeout) - ? globalThis.Number(object.readTimeout) - : 0, - sendTimeout: isSet(object.sendTimeout) - ? globalThis.Number(object.sendTimeout) - : 0, - nextTries: isSet(object.nextTries) - ? 
globalThis.Number(object.nextTries) - : 0, - nextTimeout: isSet(object.nextTimeout) - ? globalThis.Number(object.nextTimeout) - : 0, - nextCases: globalThis.Array.isArray(object?.nextCases) - ? object.nextCases.map((e: any) => globalThis.String(e)) - : [], - }; - }, - - toJSON(message: ServiceExposeHTTPOptions): unknown { - const obj: any = {}; - if (message.maxBodySize !== 0) { - obj.maxBodySize = Math.round(message.maxBodySize); - } - if (message.readTimeout !== 0) { - obj.readTimeout = Math.round(message.readTimeout); - } - if (message.sendTimeout !== 0) { - obj.sendTimeout = Math.round(message.sendTimeout); - } - if (message.nextTries !== 0) { - obj.nextTries = Math.round(message.nextTries); - } - if (message.nextTimeout !== 0) { - obj.nextTimeout = Math.round(message.nextTimeout); - } - if (message.nextCases?.length) { - obj.nextCases = message.nextCases; - } - return obj; - }, - - create( - base?: DeepPartial, - ): ServiceExposeHTTPOptions { - return ServiceExposeHTTPOptions.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): ServiceExposeHTTPOptions { - const message = createBaseServiceExposeHTTPOptions(); - message.maxBodySize = object.maxBodySize ?? 0; - message.readTimeout = object.readTimeout ?? 0; - message.sendTimeout = object.sendTimeout ?? 0; - message.nextTries = object.nextTries ?? 0; - message.nextTimeout = object.nextTimeout ?? 0; - message.nextCases = object.nextCases?.map((e) => e) || []; - return message; - }, -}; - -messageTypeRegistry.set( - ServiceExposeHTTPOptions.$type, - ServiceExposeHTTPOptions, -); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/manifest/v2beta2/service.grpc-js.ts b/ts/src/generated/akash/manifest/v2beta2/service.grpc-js.ts deleted file mode 100644 index a475eda9..00000000 --- a/ts/src/generated/akash/manifest/v2beta2/service.grpc-js.ts +++ /dev/null @@ -1,625 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Resources } from "../../base/v1beta3/resources"; -import { ServiceExpose } from "./serviceexpose"; - -export const protobufPackage = "akash.manifest.v2beta2"; - -/** StorageParams */ -export interface StorageParams { - $type: "akash.manifest.v2beta2.StorageParams"; - name: string; - mount: string; - readOnly: boolean; -} - -/** ServiceParams */ -export interface ServiceParams { - $type: "akash.manifest.v2beta2.ServiceParams"; - storage: StorageParams[]; -} - -/** Credentials to fetch image from registry */ -export interface ServiceImageCredentials { - $type: "akash.manifest.v2beta2.ServiceImageCredentials"; - host: string; - email: string; - username: string; - password: string; -} - -/** Service stores name, image, args, env, unit, count and expose list of service */ -export interface Service { - $type: "akash.manifest.v2beta2.Service"; - name: string; - image: string; - command: string[]; - args: string[]; - env: string[]; - resources: Resources | undefined; - count: number; - expose: ServiceExpose[]; - params: ServiceParams | undefined; - credentials: ServiceImageCredentials | undefined; -} - -function createBaseStorageParams(): StorageParams { - return { - $type: "akash.manifest.v2beta2.StorageParams", - name: "", - mount: "", - readOnly: false, - }; -} - -export const StorageParams = { - $type: "akash.manifest.v2beta2.StorageParams" as const, - - encode( - message: StorageParams, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.mount !== "") { - writer.uint32(18).string(message.mount); - } - if (message.readOnly !== false) { - writer.uint32(24).bool(message.readOnly); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): StorageParams { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseStorageParams(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.mount = reader.string(); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.readOnly = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): StorageParams { - return { - $type: StorageParams.$type, - name: isSet(object.name) ? globalThis.String(object.name) : "", - mount: isSet(object.mount) ? globalThis.String(object.mount) : "", - readOnly: isSet(object.readOnly) - ? 
globalThis.Boolean(object.readOnly) - : false, - }; - }, - - toJSON(message: StorageParams): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.mount !== "") { - obj.mount = message.mount; - } - if (message.readOnly !== false) { - obj.readOnly = message.readOnly; - } - return obj; - }, - - create(base?: DeepPartial): StorageParams { - return StorageParams.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): StorageParams { - const message = createBaseStorageParams(); - message.name = object.name ?? ""; - message.mount = object.mount ?? ""; - message.readOnly = object.readOnly ?? false; - return message; - }, -}; - -messageTypeRegistry.set(StorageParams.$type, StorageParams); - -function createBaseServiceParams(): ServiceParams { - return { $type: "akash.manifest.v2beta2.ServiceParams", storage: [] }; -} - -export const ServiceParams = { - $type: "akash.manifest.v2beta2.ServiceParams" as const, - - encode( - message: ServiceParams, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.storage) { - StorageParams.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceParams { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseServiceParams(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.storage.push(StorageParams.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ServiceParams { - return { - $type: ServiceParams.$type, - storage: globalThis.Array.isArray(object?.storage) - ? object.storage.map((e: any) => StorageParams.fromJSON(e)) - : [], - }; - }, - - toJSON(message: ServiceParams): unknown { - const obj: any = {}; - if (message.storage?.length) { - obj.storage = message.storage.map((e) => StorageParams.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): ServiceParams { - return ServiceParams.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): ServiceParams { - const message = createBaseServiceParams(); - message.storage = - object.storage?.map((e) => StorageParams.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(ServiceParams.$type, ServiceParams); - -function createBaseServiceImageCredentials(): ServiceImageCredentials { - return { - $type: "akash.manifest.v2beta2.ServiceImageCredentials", - host: "", - email: "", - username: "", - password: "", - }; -} - -export const ServiceImageCredentials = { - $type: "akash.manifest.v2beta2.ServiceImageCredentials" as const, - - encode( - message: ServiceImageCredentials, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.host !== "") { - writer.uint32(10).string(message.host); - } - if (message.email !== "") { - writer.uint32(18).string(message.email); - } - if (message.username !== "") { - writer.uint32(26).string(message.username); - } - if (message.password !== "") { - writer.uint32(34).string(message.password); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): ServiceImageCredentials { - const reader = - input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseServiceImageCredentials(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.host = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.email = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.username = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.password = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ServiceImageCredentials { - return { - $type: ServiceImageCredentials.$type, - host: isSet(object.host) ? globalThis.String(object.host) : "", - email: isSet(object.email) ? globalThis.String(object.email) : "", - username: isSet(object.username) - ? globalThis.String(object.username) - : "", - password: isSet(object.password) - ? globalThis.String(object.password) - : "", - }; - }, - - toJSON(message: ServiceImageCredentials): unknown { - const obj: any = {}; - if (message.host !== "") { - obj.host = message.host; - } - if (message.email !== "") { - obj.email = message.email; - } - if (message.username !== "") { - obj.username = message.username; - } - if (message.password !== "") { - obj.password = message.password; - } - return obj; - }, - - create(base?: DeepPartial): ServiceImageCredentials { - return ServiceImageCredentials.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): ServiceImageCredentials { - const message = createBaseServiceImageCredentials(); - message.host = object.host ?? ""; - message.email = object.email ?? ""; - message.username = object.username ?? ""; - message.password = object.password ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(ServiceImageCredentials.$type, ServiceImageCredentials); - -function createBaseService(): Service { - return { - $type: "akash.manifest.v2beta2.Service", - name: "", - image: "", - command: [], - args: [], - env: [], - resources: undefined, - count: 0, - expose: [], - params: undefined, - credentials: undefined, - }; -} - -export const Service = { - $type: "akash.manifest.v2beta2.Service" as const, - - encode( - message: Service, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.image !== "") { - writer.uint32(18).string(message.image); - } - for (const v of message.command) { - writer.uint32(26).string(v!); - } - for (const v of message.args) { - writer.uint32(34).string(v!); - } - for (const v of message.env) { - writer.uint32(42).string(v!); - } - if (message.resources !== undefined) { - Resources.encode(message.resources, writer.uint32(50).fork()).ldelim(); - } - if (message.count !== 0) { - writer.uint32(56).uint32(message.count); - } - for (const v of message.expose) { - ServiceExpose.encode(v!, writer.uint32(66).fork()).ldelim(); - } - if (message.params !== undefined) { - ServiceParams.encode(message.params, writer.uint32(74).fork()).ldelim(); - } - if (message.credentials !== undefined) { - ServiceImageCredentials.encode( - message.credentials, - writer.uint32(82).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Service { - const reader = - input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseService(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.image = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.command.push(reader.string()); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.args.push(reader.string()); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.env.push(reader.string()); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.resources = Resources.decode(reader, reader.uint32()); - continue; - case 7: - if (tag !== 56) { - break; - } - - message.count = reader.uint32(); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.expose.push(ServiceExpose.decode(reader, reader.uint32())); - continue; - case 9: - if (tag !== 74) { - break; - } - - message.params = ServiceParams.decode(reader, reader.uint32()); - continue; - case 10: - if (tag !== 82) { - break; - } - - message.credentials = ServiceImageCredentials.decode( - reader, - reader.uint32(), - ); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Service { - return { - $type: Service.$type, - name: isSet(object.name) ? globalThis.String(object.name) : "", - image: isSet(object.image) ? globalThis.String(object.image) : "", - command: globalThis.Array.isArray(object?.command) - ? object.command.map((e: any) => globalThis.String(e)) - : [], - args: globalThis.Array.isArray(object?.args) - ? object.args.map((e: any) => globalThis.String(e)) - : [], - env: globalThis.Array.isArray(object?.env) - ? object.env.map((e: any) => globalThis.String(e)) - : [], - resources: isSet(object.resources) - ? Resources.fromJSON(object.resources) - : undefined, - count: isSet(object.count) ? globalThis.Number(object.count) : 0, - expose: globalThis.Array.isArray(object?.expose) - ? object.expose.map((e: any) => ServiceExpose.fromJSON(e)) - : [], - params: isSet(object.params) - ? ServiceParams.fromJSON(object.params) - : undefined, - credentials: isSet(object.credentials) - ? ServiceImageCredentials.fromJSON(object.credentials) - : undefined, - }; - }, - - toJSON(message: Service): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.image !== "") { - obj.image = message.image; - } - if (message.command?.length) { - obj.command = message.command; - } - if (message.args?.length) { - obj.args = message.args; - } - if (message.env?.length) { - obj.env = message.env; - } - if (message.resources !== undefined) { - obj.resources = Resources.toJSON(message.resources); - } - if (message.count !== 0) { - obj.count = Math.round(message.count); - } - if (message.expose?.length) { - obj.expose = message.expose.map((e) => ServiceExpose.toJSON(e)); - } - if (message.params !== undefined) { - obj.params = ServiceParams.toJSON(message.params); - } - if (message.credentials !== undefined) { - obj.credentials = ServiceImageCredentials.toJSON(message.credentials); - } - return obj; - }, - - create(base?: DeepPartial): Service { - return Service.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Service { - const message = createBaseService(); - message.name = object.name ?? 
""; - message.image = object.image ?? ""; - message.command = object.command?.map((e) => e) || []; - message.args = object.args?.map((e) => e) || []; - message.env = object.env?.map((e) => e) || []; - message.resources = - object.resources !== undefined && object.resources !== null - ? Resources.fromPartial(object.resources) - : undefined; - message.count = object.count ?? 0; - message.expose = - object.expose?.map((e) => ServiceExpose.fromPartial(e)) || []; - message.params = - object.params !== undefined && object.params !== null - ? ServiceParams.fromPartial(object.params) - : undefined; - message.credentials = - object.credentials !== undefined && object.credentials !== null - ? ServiceImageCredentials.fromPartial(object.credentials) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(Service.$type, Service); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -export type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/manifest/v2beta2/service.ts b/ts/src/generated/akash/manifest/v2beta2/service.ts deleted file mode 100644 index 996319e3..00000000 --- a/ts/src/generated/akash/manifest/v2beta2/service.ts +++ /dev/null @@ -1,623 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Resources } from "../../base/v1beta3/resources"; -import { ServiceExpose } from "./serviceexpose"; - -/** StorageParams */ -export interface StorageParams { - $type: "akash.manifest.v2beta2.StorageParams"; - name: string; - mount: string; - readOnly: boolean; -} - -/** ServiceParams */ -export interface ServiceParams { - $type: "akash.manifest.v2beta2.ServiceParams"; - storage: StorageParams[]; -} - -/** Credentials to fetch image from registry */ -export interface ServiceImageCredentials { - $type: "akash.manifest.v2beta2.ServiceImageCredentials"; - host: string; - email: string; - username: string; - password: string; -} - -/** Service stores name, image, args, env, unit, count and expose list of service */ -export interface Service { - $type: "akash.manifest.v2beta2.Service"; - name: string; - image: string; - command: string[]; - args: string[]; - env: string[]; - resources: Resources | undefined; - count: number; - expose: ServiceExpose[]; - params: ServiceParams | undefined; - credentials: ServiceImageCredentials | undefined; -} - -function createBaseStorageParams(): StorageParams { - return { - $type: "akash.manifest.v2beta2.StorageParams", - name: "", - mount: "", - readOnly: false, - }; -} - -export const StorageParams = { - $type: "akash.manifest.v2beta2.StorageParams" as const, - - encode( - message: StorageParams, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.mount !== "") { - writer.uint32(18).string(message.mount); - } - if (message.readOnly !== false) { - writer.uint32(24).bool(message.readOnly); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): StorageParams { - 
const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseStorageParams(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.mount = reader.string(); - continue; - case 3: - if (tag !== 24) { - break; - } - - message.readOnly = reader.bool(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): StorageParams { - return { - $type: StorageParams.$type, - name: isSet(object.name) ? globalThis.String(object.name) : "", - mount: isSet(object.mount) ? globalThis.String(object.mount) : "", - readOnly: isSet(object.readOnly) - ? globalThis.Boolean(object.readOnly) - : false, - }; - }, - - toJSON(message: StorageParams): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.mount !== "") { - obj.mount = message.mount; - } - if (message.readOnly !== false) { - obj.readOnly = message.readOnly; - } - return obj; - }, - - create(base?: DeepPartial): StorageParams { - return StorageParams.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): StorageParams { - const message = createBaseStorageParams(); - message.name = object.name ?? ""; - message.mount = object.mount ?? ""; - message.readOnly = object.readOnly ?? false; - return message; - }, -}; - -messageTypeRegistry.set(StorageParams.$type, StorageParams); - -function createBaseServiceParams(): ServiceParams { - return { $type: "akash.manifest.v2beta2.ServiceParams", storage: [] }; -} - -export const ServiceParams = { - $type: "akash.manifest.v2beta2.ServiceParams" as const, - - encode( - message: ServiceParams, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.storage) { - StorageParams.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceParams { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseServiceParams(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.storage.push(StorageParams.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ServiceParams { - return { - $type: ServiceParams.$type, - storage: globalThis.Array.isArray(object?.storage) - ? object.storage.map((e: any) => StorageParams.fromJSON(e)) - : [], - }; - }, - - toJSON(message: ServiceParams): unknown { - const obj: any = {}; - if (message.storage?.length) { - obj.storage = message.storage.map((e) => StorageParams.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): ServiceParams { - return ServiceParams.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): ServiceParams { - const message = createBaseServiceParams(); - message.storage = - object.storage?.map((e) => StorageParams.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(ServiceParams.$type, ServiceParams); - -function createBaseServiceImageCredentials(): ServiceImageCredentials { - return { - $type: "akash.manifest.v2beta2.ServiceImageCredentials", - host: "", - email: "", - username: "", - password: "", - }; -} - -export const ServiceImageCredentials = { - $type: "akash.manifest.v2beta2.ServiceImageCredentials" as const, - - encode( - message: ServiceImageCredentials, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.host !== "") { - writer.uint32(10).string(message.host); - } - if (message.email !== "") { - writer.uint32(18).string(message.email); - } - if (message.username !== "") { - writer.uint32(26).string(message.username); - } - if (message.password !== "") { - writer.uint32(34).string(message.password); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): ServiceImageCredentials { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseServiceImageCredentials(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.host = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.email = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.username = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.password = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ServiceImageCredentials { - return { - $type: ServiceImageCredentials.$type, - host: isSet(object.host) ? globalThis.String(object.host) : "", - email: isSet(object.email) ? globalThis.String(object.email) : "", - username: isSet(object.username) - ? globalThis.String(object.username) - : "", - password: isSet(object.password) - ? globalThis.String(object.password) - : "", - }; - }, - - toJSON(message: ServiceImageCredentials): unknown { - const obj: any = {}; - if (message.host !== "") { - obj.host = message.host; - } - if (message.email !== "") { - obj.email = message.email; - } - if (message.username !== "") { - obj.username = message.username; - } - if (message.password !== "") { - obj.password = message.password; - } - return obj; - }, - - create(base?: DeepPartial): ServiceImageCredentials { - return ServiceImageCredentials.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): ServiceImageCredentials { - const message = createBaseServiceImageCredentials(); - message.host = object.host ?? ""; - message.email = object.email ?? ""; - message.username = object.username ?? ""; - message.password = object.password ?? 
""; - return message; - }, -}; - -messageTypeRegistry.set(ServiceImageCredentials.$type, ServiceImageCredentials); - -function createBaseService(): Service { - return { - $type: "akash.manifest.v2beta2.Service", - name: "", - image: "", - command: [], - args: [], - env: [], - resources: undefined, - count: 0, - expose: [], - params: undefined, - credentials: undefined, - }; -} - -export const Service = { - $type: "akash.manifest.v2beta2.Service" as const, - - encode( - message: Service, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.name !== "") { - writer.uint32(10).string(message.name); - } - if (message.image !== "") { - writer.uint32(18).string(message.image); - } - for (const v of message.command) { - writer.uint32(26).string(v!); - } - for (const v of message.args) { - writer.uint32(34).string(v!); - } - for (const v of message.env) { - writer.uint32(42).string(v!); - } - if (message.resources !== undefined) { - Resources.encode(message.resources, writer.uint32(50).fork()).ldelim(); - } - if (message.count !== 0) { - writer.uint32(56).uint32(message.count); - } - for (const v of message.expose) { - ServiceExpose.encode(v!, writer.uint32(66).fork()).ldelim(); - } - if (message.params !== undefined) { - ServiceParams.encode(message.params, writer.uint32(74).fork()).ldelim(); - } - if (message.credentials !== undefined) { - ServiceImageCredentials.encode( - message.credentials, - writer.uint32(82).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Service { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseService(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.name = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.image = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.command.push(reader.string()); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.args.push(reader.string()); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.env.push(reader.string()); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.resources = Resources.decode(reader, reader.uint32()); - continue; - case 7: - if (tag !== 56) { - break; - } - - message.count = reader.uint32(); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.expose.push(ServiceExpose.decode(reader, reader.uint32())); - continue; - case 9: - if (tag !== 74) { - break; - } - - message.params = ServiceParams.decode(reader, reader.uint32()); - continue; - case 10: - if (tag !== 82) { - break; - } - - message.credentials = ServiceImageCredentials.decode( - reader, - reader.uint32(), - ); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Service { - return { - $type: Service.$type, - name: isSet(object.name) ? globalThis.String(object.name) : "", - image: isSet(object.image) ? globalThis.String(object.image) : "", - command: globalThis.Array.isArray(object?.command) - ? object.command.map((e: any) => globalThis.String(e)) - : [], - args: globalThis.Array.isArray(object?.args) - ? object.args.map((e: any) => globalThis.String(e)) - : [], - env: globalThis.Array.isArray(object?.env) - ? 
object.env.map((e: any) => globalThis.String(e)) - : [], - resources: isSet(object.resources) - ? Resources.fromJSON(object.resources) - : undefined, - count: isSet(object.count) ? globalThis.Number(object.count) : 0, - expose: globalThis.Array.isArray(object?.expose) - ? object.expose.map((e: any) => ServiceExpose.fromJSON(e)) - : [], - params: isSet(object.params) - ? ServiceParams.fromJSON(object.params) - : undefined, - credentials: isSet(object.credentials) - ? ServiceImageCredentials.fromJSON(object.credentials) - : undefined, - }; - }, - - toJSON(message: Service): unknown { - const obj: any = {}; - if (message.name !== "") { - obj.name = message.name; - } - if (message.image !== "") { - obj.image = message.image; - } - if (message.command?.length) { - obj.command = message.command; - } - if (message.args?.length) { - obj.args = message.args; - } - if (message.env?.length) { - obj.env = message.env; - } - if (message.resources !== undefined) { - obj.resources = Resources.toJSON(message.resources); - } - if (message.count !== 0) { - obj.count = Math.round(message.count); - } - if (message.expose?.length) { - obj.expose = message.expose.map((e) => ServiceExpose.toJSON(e)); - } - if (message.params !== undefined) { - obj.params = ServiceParams.toJSON(message.params); - } - if (message.credentials !== undefined) { - obj.credentials = ServiceImageCredentials.toJSON(message.credentials); - } - return obj; - }, - - create(base?: DeepPartial): Service { - return Service.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Service { - const message = createBaseService(); - message.name = object.name ?? ""; - message.image = object.image ?? ""; - message.command = object.command?.map((e) => e) || []; - message.args = object.args?.map((e) => e) || []; - message.env = object.env?.map((e) => e) || []; - message.resources = - object.resources !== undefined && object.resources !== null - ? Resources.fromPartial(object.resources) - : undefined; - message.count = object.count ?? 0; - message.expose = - object.expose?.map((e) => ServiceExpose.fromPartial(e)) || []; - message.params = - object.params !== undefined && object.params !== null - ? ServiceParams.fromPartial(object.params) - : undefined; - message.credentials = - object.credentials !== undefined && object.credentials !== null - ? ServiceImageCredentials.fromPartial(object.credentials) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(Service.$type, Service); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/manifest/v2beta2/serviceexpose.ts b/ts/src/generated/akash/manifest/v2beta2/serviceexpose.ts deleted file mode 100644 index 5e510bb6..00000000 --- a/ts/src/generated/akash/manifest/v2beta2/serviceexpose.ts +++ /dev/null @@ -1,269 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { ServiceExposeHTTPOptions } from "./httpoptions"; - -/** ServiceExpose stores exposed ports and hosts details */ -export interface ServiceExpose { - $type: "akash.manifest.v2beta2.ServiceExpose"; - /** port on the container */ - port: number; - /** port on the service definition */ - externalPort: number; - proto: string; - service: string; - global: boolean; - hosts: string[]; - httpOptions: ServiceExposeHTTPOptions | undefined; - /** The name of the IP address associated with this, if any */ - ip: string; - /** The sequence number of the associated endpoint in the on-chain data */ - endpointSequenceNumber: number; -} - -function createBaseServiceExpose(): ServiceExpose { - return { - $type: "akash.manifest.v2beta2.ServiceExpose", - port: 0, - externalPort: 0, - proto: "", - service: "", - global: false, - hosts: [], - httpOptions: undefined, - ip: "", - endpointSequenceNumber: 0, - }; -} - -export const ServiceExpose = { - $type: "akash.manifest.v2beta2.ServiceExpose" as const, - - encode( - message: ServiceExpose, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.port !== 0) { - writer.uint32(8).uint32(message.port); - } - if (message.externalPort !== 0) { - writer.uint32(16).uint32(message.externalPort); - } - if (message.proto !== "") { - writer.uint32(26).string(message.proto); - } - if (message.service !== "") { - writer.uint32(34).string(message.service); - } - if (message.global !== false) { - writer.uint32(40).bool(message.global); - } - for (const v of message.hosts) { - writer.uint32(50).string(v!); - } - if (message.httpOptions !== undefined) { - ServiceExposeHTTPOptions.encode( - message.httpOptions, - writer.uint32(58).fork(), - ).ldelim(); - } - if (message.ip !== "") { - writer.uint32(66).string(message.ip); - } - if (message.endpointSequenceNumber !== 0) { - writer.uint32(72).uint32(message.endpointSequenceNumber); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceExpose { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseServiceExpose(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.port = reader.uint32(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.externalPort = reader.uint32(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.proto = reader.string(); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.service = reader.string(); - continue; - case 5: - if (tag !== 40) { - break; - } - - message.global = reader.bool(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.hosts.push(reader.string()); - continue; - case 7: - if (tag !== 58) { - break; - } - - message.httpOptions = ServiceExposeHTTPOptions.decode( - reader, - reader.uint32(), - ); - continue; - case 8: - if (tag !== 66) { - break; - } - - message.ip = reader.string(); - continue; - case 9: - if (tag !== 72) { - break; - } - - message.endpointSequenceNumber = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ServiceExpose { - return { - $type: ServiceExpose.$type, - port: isSet(object.port) ? globalThis.Number(object.port) : 0, - externalPort: isSet(object.externalPort) - ? globalThis.Number(object.externalPort) - : 0, - proto: isSet(object.proto) ? globalThis.String(object.proto) : "", - service: isSet(object.service) ? globalThis.String(object.service) : "", - global: isSet(object.global) ? globalThis.Boolean(object.global) : false, - hosts: globalThis.Array.isArray(object?.hosts) - ? object.hosts.map((e: any) => globalThis.String(e)) - : [], - httpOptions: isSet(object.httpOptions) - ? ServiceExposeHTTPOptions.fromJSON(object.httpOptions) - : undefined, - ip: isSet(object.ip) ? globalThis.String(object.ip) : "", - endpointSequenceNumber: isSet(object.endpointSequenceNumber) - ? globalThis.Number(object.endpointSequenceNumber) - : 0, - }; - }, - - toJSON(message: ServiceExpose): unknown { - const obj: any = {}; - if (message.port !== 0) { - obj.port = Math.round(message.port); - } - if (message.externalPort !== 0) { - obj.externalPort = Math.round(message.externalPort); - } - if (message.proto !== "") { - obj.proto = message.proto; - } - if (message.service !== "") { - obj.service = message.service; - } - if (message.global !== false) { - obj.global = message.global; - } - if (message.hosts?.length) { - obj.hosts = message.hosts; - } - if (message.httpOptions !== undefined) { - obj.httpOptions = ServiceExposeHTTPOptions.toJSON(message.httpOptions); - } - if (message.ip !== "") { - obj.ip = message.ip; - } - if (message.endpointSequenceNumber !== 0) { - obj.endpointSequenceNumber = Math.round(message.endpointSequenceNumber); - } - return obj; - }, - - create(base?: DeepPartial): ServiceExpose { - return ServiceExpose.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): ServiceExpose { - const message = createBaseServiceExpose(); - message.port = object.port ?? 0; - message.externalPort = object.externalPort ?? 0; - message.proto = object.proto ?? ""; - message.service = object.service ?? ""; - message.global = object.global ?? false; - message.hosts = object.hosts?.map((e) => e) || []; - message.httpOptions = - object.httpOptions !== undefined && object.httpOptions !== null - ? ServiceExposeHTTPOptions.fromPartial(object.httpOptions) - : undefined; - message.ip = object.ip ?? 
""; - message.endpointSequenceNumber = object.endpointSequenceNumber ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(ServiceExpose.$type, ServiceExpose); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/manifest/v2beta3/group.ts b/ts/src/generated/akash/manifest/v2beta3/group.ts new file mode 100644 index 00000000..3ce619e5 --- /dev/null +++ b/ts/src/generated/akash/manifest/v2beta3/group.ts @@ -0,0 +1,139 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/manifest/v2beta3/group.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Service } from "./service"; + +/** Group store name and list of services */ +export interface Group { + $type: "akash.manifest.v2beta3.Group"; + name: string; + services: Service[]; +} + +function createBaseGroup(): Group { + return { $type: "akash.manifest.v2beta3.Group", name: "", services: [] }; +} + +export const Group: MessageFns = { + $type: "akash.manifest.v2beta3.Group" as const, + + encode( + message: Group, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + for (const v of message.services) { + Service.encode(v!, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Group { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGroup(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.services.push(Service.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Group { + return { + $type: Group.$type, + name: isSet(object.name) ? globalThis.String(object.name) : "", + services: globalThis.Array.isArray(object?.services) + ? object.services.map((e: any) => Service.fromJSON(e)) + : [], + }; + }, + + toJSON(message: Group): unknown { + const obj: any = {}; + if (message.name !== "") { + obj.name = message.name; + } + if (message.services?.length) { + obj.services = message.services.map((e) => Service.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): Group { + return Group.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Group { + const message = createBaseGroup(); + message.name = object.name ?? 
""; + message.services = + object.services?.map((e) => Service.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Group.$type, Group); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/manifest/v2beta3/httpoptions.ts b/ts/src/generated/akash/manifest/v2beta3/httpoptions.ts new file mode 100644 index 00000000..21cd8391 --- /dev/null +++ b/ts/src/generated/akash/manifest/v2beta3/httpoptions.ts @@ -0,0 +1,232 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/manifest/v2beta3/httpoptions.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; + +/** ServiceExposeHTTPOptions */ +export interface ServiceExposeHTTPOptions { + $type: "akash.manifest.v2beta3.ServiceExposeHTTPOptions"; + maxBodySize: number; + readTimeout: number; + sendTimeout: number; + nextTries: number; + nextTimeout: number; + nextCases: string[]; +} + +function createBaseServiceExposeHTTPOptions(): ServiceExposeHTTPOptions { + return { + $type: "akash.manifest.v2beta3.ServiceExposeHTTPOptions", + maxBodySize: 0, + readTimeout: 0, + sendTimeout: 0, + nextTries: 0, + nextTimeout: 0, + nextCases: [], + }; +} + +export const ServiceExposeHTTPOptions: MessageFns< + ServiceExposeHTTPOptions, + "akash.manifest.v2beta3.ServiceExposeHTTPOptions" +> = { + $type: "akash.manifest.v2beta3.ServiceExposeHTTPOptions" as const, + + encode( + message: ServiceExposeHTTPOptions, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.maxBodySize !== 0) { + writer.uint32(8).uint32(message.maxBodySize); + } + if (message.readTimeout !== 0) { + writer.uint32(16).uint32(message.readTimeout); + } + if (message.sendTimeout !== 0) { + writer.uint32(24).uint32(message.sendTimeout); + } + if (message.nextTries !== 0) { + writer.uint32(32).uint32(message.nextTries); + } + if (message.nextTimeout !== 0) { + writer.uint32(40).uint32(message.nextTimeout); + } + for (const v of message.nextCases) { + writer.uint32(50).string(v!); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): ServiceExposeHTTPOptions { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseServiceExposeHTTPOptions(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 8) { + break; + } + + message.maxBodySize = reader.uint32(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.readTimeout = reader.uint32(); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.sendTimeout = reader.uint32(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.nextTries = reader.uint32(); + continue; + case 5: + if (tag !== 40) { + break; + } + + message.nextTimeout = reader.uint32(); + continue; + case 6: + if (tag !== 50) { + break; + } + + message.nextCases.push(reader.string()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): ServiceExposeHTTPOptions { + return { + $type: ServiceExposeHTTPOptions.$type, + maxBodySize: isSet(object.maxBodySize) + ? globalThis.Number(object.maxBodySize) + : 0, + readTimeout: isSet(object.readTimeout) + ? globalThis.Number(object.readTimeout) + : 0, + sendTimeout: isSet(object.sendTimeout) + ? globalThis.Number(object.sendTimeout) + : 0, + nextTries: isSet(object.nextTries) + ? globalThis.Number(object.nextTries) + : 0, + nextTimeout: isSet(object.nextTimeout) + ? globalThis.Number(object.nextTimeout) + : 0, + nextCases: globalThis.Array.isArray(object?.nextCases) + ? object.nextCases.map((e: any) => globalThis.String(e)) + : [], + }; + }, + + toJSON(message: ServiceExposeHTTPOptions): unknown { + const obj: any = {}; + if (message.maxBodySize !== 0) { + obj.maxBodySize = Math.round(message.maxBodySize); + } + if (message.readTimeout !== 0) { + obj.readTimeout = Math.round(message.readTimeout); + } + if (message.sendTimeout !== 0) { + obj.sendTimeout = Math.round(message.sendTimeout); + } + if (message.nextTries !== 0) { + obj.nextTries = Math.round(message.nextTries); + } + if (message.nextTimeout !== 0) { + obj.nextTimeout = Math.round(message.nextTimeout); + } + if (message.nextCases?.length) { + obj.nextCases = message.nextCases; + } + return obj; + }, + + create( + base?: DeepPartial, + ): ServiceExposeHTTPOptions { + return ServiceExposeHTTPOptions.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): ServiceExposeHTTPOptions { + const message = createBaseServiceExposeHTTPOptions(); + message.maxBodySize = object.maxBodySize ?? 0; + message.readTimeout = object.readTimeout ?? 0; + message.sendTimeout = object.sendTimeout ?? 0; + message.nextTries = object.nextTries ?? 0; + message.nextTimeout = object.nextTimeout ?? 0; + message.nextCases = object.nextCases?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + ServiceExposeHTTPOptions.$type, + ServiceExposeHTTPOptions, +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/manifest/v2beta3/service.grpc-js.ts b/ts/src/generated/akash/manifest/v2beta3/service.grpc-js.ts new file mode 100644 index 00000000..30226465 --- /dev/null +++ b/ts/src/generated/akash/manifest/v2beta3/service.grpc-js.ts @@ -0,0 +1,671 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/manifest/v2beta3/service.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Resources } from "../../base/resources/v1beta4/resources"; +import { ServiceExpose } from "./serviceexpose"; + +export const protobufPackage = "akash.manifest.v2beta3"; + +/** StorageParams */ +export interface StorageParams { + $type: "akash.manifest.v2beta3.StorageParams"; + name: string; + mount: string; + readOnly: boolean; +} + +/** ServiceParams */ +export interface ServiceParams { + $type: "akash.manifest.v2beta3.ServiceParams"; + storage: StorageParams[]; + credentials: ImageCredentials | undefined; +} + +/** Credentials to fetch image from registry */ +export interface ImageCredentials { + $type: "akash.manifest.v2beta3.ImageCredentials"; + host: string; + email: string; + username: string; + password: string; +} + +/** Service stores name, image, args, env, unit, count and expose list of service */ +export interface Service { + $type: "akash.manifest.v2beta3.Service"; + name: string; + image: string; + command: string[]; + args: string[]; + env: string[]; + resources: Resources | undefined; + count: number; + expose: ServiceExpose[]; + params: ServiceParams | undefined; + credentials: ImageCredentials | undefined; +} + +function createBaseStorageParams(): StorageParams { + return { + $type: "akash.manifest.v2beta3.StorageParams", + name: "", + mount: "", + readOnly: false, + }; +} + +export const StorageParams: MessageFns< + StorageParams, + "akash.manifest.v2beta3.StorageParams" +> = { + $type: "akash.manifest.v2beta3.StorageParams" as const, + + encode( + message: StorageParams, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.mount !== "") { + writer.uint32(18).string(message.mount); + } + if (message.readOnly !== false) { + writer.uint32(24).bool(message.readOnly); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): StorageParams { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseStorageParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.mount = reader.string(); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.readOnly = reader.bool(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): StorageParams { + return { + $type: StorageParams.$type, + name: isSet(object.name) ? globalThis.String(object.name) : "", + mount: isSet(object.mount) ? globalThis.String(object.mount) : "", + readOnly: isSet(object.readOnly) + ? globalThis.Boolean(object.readOnly) + : false, + }; + }, + + toJSON(message: StorageParams): unknown { + const obj: any = {}; + if (message.name !== "") { + obj.name = message.name; + } + if (message.mount !== "") { + obj.mount = message.mount; + } + if (message.readOnly !== false) { + obj.readOnly = message.readOnly; + } + return obj; + }, + + create(base?: DeepPartial): StorageParams { + return StorageParams.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): StorageParams { + const message = createBaseStorageParams(); + message.name = object.name ?? ""; + message.mount = object.mount ?? ""; + message.readOnly = object.readOnly ?? false; + return message; + }, +}; + +messageTypeRegistry.set(StorageParams.$type, StorageParams); + +function createBaseServiceParams(): ServiceParams { + return { + $type: "akash.manifest.v2beta3.ServiceParams", + storage: [], + credentials: undefined, + }; +} + +export const ServiceParams: MessageFns< + ServiceParams, + "akash.manifest.v2beta3.ServiceParams" +> = { + $type: "akash.manifest.v2beta3.ServiceParams" as const, + + encode( + message: ServiceParams, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.storage) { + StorageParams.encode(v!, writer.uint32(10).fork()).join(); + } + if (message.credentials !== undefined) { + ImageCredentials.encode( + message.credentials, + writer.uint32(82).fork(), + ).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): ServiceParams { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseServiceParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.storage.push(StorageParams.decode(reader, reader.uint32())); + continue; + case 10: + if (tag !== 82) { + break; + } + + message.credentials = ImageCredentials.decode( + reader, + reader.uint32(), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): ServiceParams { + return { + $type: ServiceParams.$type, + storage: globalThis.Array.isArray(object?.storage) + ? object.storage.map((e: any) => StorageParams.fromJSON(e)) + : [], + credentials: isSet(object.credentials) + ? 
ImageCredentials.fromJSON(object.credentials) + : undefined, + }; + }, + + toJSON(message: ServiceParams): unknown { + const obj: any = {}; + if (message.storage?.length) { + obj.storage = message.storage.map((e) => StorageParams.toJSON(e)); + } + if (message.credentials !== undefined) { + obj.credentials = ImageCredentials.toJSON(message.credentials); + } + return obj; + }, + + create(base?: DeepPartial): ServiceParams { + return ServiceParams.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): ServiceParams { + const message = createBaseServiceParams(); + message.storage = + object.storage?.map((e) => StorageParams.fromPartial(e)) || []; + message.credentials = + object.credentials !== undefined && object.credentials !== null + ? ImageCredentials.fromPartial(object.credentials) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ServiceParams.$type, ServiceParams); + +function createBaseImageCredentials(): ImageCredentials { + return { + $type: "akash.manifest.v2beta3.ImageCredentials", + host: "", + email: "", + username: "", + password: "", + }; +} + +export const ImageCredentials: MessageFns< + ImageCredentials, + "akash.manifest.v2beta3.ImageCredentials" +> = { + $type: "akash.manifest.v2beta3.ImageCredentials" as const, + + encode( + message: ImageCredentials, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.host !== "") { + writer.uint32(10).string(message.host); + } + if (message.email !== "") { + writer.uint32(18).string(message.email); + } + if (message.username !== "") { + writer.uint32(26).string(message.username); + } + if (message.password !== "") { + writer.uint32(34).string(message.password); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): ImageCredentials { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseImageCredentials(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.host = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.email = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.username = reader.string(); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.password = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): ImageCredentials { + return { + $type: ImageCredentials.$type, + host: isSet(object.host) ? globalThis.String(object.host) : "", + email: isSet(object.email) ? globalThis.String(object.email) : "", + username: isSet(object.username) + ? globalThis.String(object.username) + : "", + password: isSet(object.password) + ? globalThis.String(object.password) + : "", + }; + }, + + toJSON(message: ImageCredentials): unknown { + const obj: any = {}; + if (message.host !== "") { + obj.host = message.host; + } + if (message.email !== "") { + obj.email = message.email; + } + if (message.username !== "") { + obj.username = message.username; + } + if (message.password !== "") { + obj.password = message.password; + } + return obj; + }, + + create(base?: DeepPartial): ImageCredentials { + return ImageCredentials.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): ImageCredentials { + const message = createBaseImageCredentials(); + message.host = object.host ?? ""; + message.email = object.email ?? ""; + message.username = object.username ?? ""; + message.password = object.password ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ImageCredentials.$type, ImageCredentials); + +function createBaseService(): Service { + return { + $type: "akash.manifest.v2beta3.Service", + name: "", + image: "", + command: [], + args: [], + env: [], + resources: undefined, + count: 0, + expose: [], + params: undefined, + credentials: undefined, + }; +} + +export const Service: MessageFns = { + $type: "akash.manifest.v2beta3.Service" as const, + + encode( + message: Service, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.image !== "") { + writer.uint32(18).string(message.image); + } + for (const v of message.command) { + writer.uint32(26).string(v!); + } + for (const v of message.args) { + writer.uint32(34).string(v!); + } + for (const v of message.env) { + writer.uint32(42).string(v!); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(50).fork()).join(); + } + if (message.count !== 0) { + writer.uint32(56).uint32(message.count); + } + for (const v of message.expose) { + ServiceExpose.encode(v!, writer.uint32(66).fork()).join(); + } + if (message.params !== undefined) { + ServiceParams.encode(message.params, writer.uint32(74).fork()).join(); + } + if (message.credentials !== undefined) { + ImageCredentials.encode( + message.credentials, + writer.uint32(82).fork(), + ).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Service { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseService(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.image = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.command.push(reader.string()); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.args.push(reader.string()); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.env.push(reader.string()); + continue; + case 6: + if (tag !== 50) { + break; + } + + message.resources = Resources.decode(reader, reader.uint32()); + continue; + case 7: + if (tag !== 56) { + break; + } + + message.count = reader.uint32(); + continue; + case 8: + if (tag !== 66) { + break; + } + + message.expose.push(ServiceExpose.decode(reader, reader.uint32())); + continue; + case 9: + if (tag !== 74) { + break; + } + + message.params = ServiceParams.decode(reader, reader.uint32()); + continue; + case 10: + if (tag !== 82) { + break; + } + + message.credentials = ImageCredentials.decode( + reader, + reader.uint32(), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Service { + return { + $type: Service.$type, + name: isSet(object.name) ? globalThis.String(object.name) : "", + image: isSet(object.image) ? globalThis.String(object.image) : "", + command: globalThis.Array.isArray(object?.command) + ? 
object.command.map((e: any) => globalThis.String(e)) + : [], + args: globalThis.Array.isArray(object?.args) + ? object.args.map((e: any) => globalThis.String(e)) + : [], + env: globalThis.Array.isArray(object?.env) + ? object.env.map((e: any) => globalThis.String(e)) + : [], + resources: isSet(object.resources) + ? Resources.fromJSON(object.resources) + : undefined, + count: isSet(object.count) ? globalThis.Number(object.count) : 0, + expose: globalThis.Array.isArray(object?.expose) + ? object.expose.map((e: any) => ServiceExpose.fromJSON(e)) + : [], + params: isSet(object.params) + ? ServiceParams.fromJSON(object.params) + : undefined, + credentials: isSet(object.credentials) + ? ImageCredentials.fromJSON(object.credentials) + : undefined, + }; + }, + + toJSON(message: Service): unknown { + const obj: any = {}; + if (message.name !== "") { + obj.name = message.name; + } + if (message.image !== "") { + obj.image = message.image; + } + if (message.command?.length) { + obj.command = message.command; + } + if (message.args?.length) { + obj.args = message.args; + } + if (message.env?.length) { + obj.env = message.env; + } + if (message.resources !== undefined) { + obj.resources = Resources.toJSON(message.resources); + } + if (message.count !== 0) { + obj.count = Math.round(message.count); + } + if (message.expose?.length) { + obj.expose = message.expose.map((e) => ServiceExpose.toJSON(e)); + } + if (message.params !== undefined) { + obj.params = ServiceParams.toJSON(message.params); + } + if (message.credentials !== undefined) { + obj.credentials = ImageCredentials.toJSON(message.credentials); + } + return obj; + }, + + create(base?: DeepPartial): Service { + return Service.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Service { + const message = createBaseService(); + message.name = object.name ?? ""; + message.image = object.image ?? ""; + message.command = object.command?.map((e) => e) || []; + message.args = object.args?.map((e) => e) || []; + message.env = object.env?.map((e) => e) || []; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + message.count = object.count ?? 0; + message.expose = + object.expose?.map((e) => ServiceExpose.fromPartial(e)) || []; + message.params = + object.params !== undefined && object.params !== null + ? ServiceParams.fromPartial(object.params) + : undefined; + message.credentials = + object.credentials !== undefined && object.credentials !== null + ? ImageCredentials.fromPartial(object.credentials) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Service.$type, Service); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +export interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/manifest/v2beta3/service.ts b/ts/src/generated/akash/manifest/v2beta3/service.ts new file mode 100644 index 00000000..9bb0fb4b --- /dev/null +++ b/ts/src/generated/akash/manifest/v2beta3/service.ts @@ -0,0 +1,669 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/manifest/v2beta3/service.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Resources } from "../../base/resources/v1beta4/resources"; +import { ServiceExpose } from "./serviceexpose"; + +/** StorageParams */ +export interface StorageParams { + $type: "akash.manifest.v2beta3.StorageParams"; + name: string; + mount: string; + readOnly: boolean; +} + +/** ServiceParams */ +export interface ServiceParams { + $type: "akash.manifest.v2beta3.ServiceParams"; + storage: StorageParams[]; + credentials: ImageCredentials | undefined; +} + +/** Credentials to fetch image from registry */ +export interface ImageCredentials { + $type: "akash.manifest.v2beta3.ImageCredentials"; + host: string; + email: string; + username: string; + password: string; +} + +/** Service stores name, image, args, env, unit, count and expose list of service */ +export interface Service { + $type: "akash.manifest.v2beta3.Service"; + name: string; + image: string; + command: string[]; + args: string[]; + env: string[]; + resources: Resources | undefined; + count: number; + expose: ServiceExpose[]; + params: ServiceParams | undefined; + credentials: ImageCredentials | undefined; +} + +function createBaseStorageParams(): StorageParams { + return { + $type: "akash.manifest.v2beta3.StorageParams", + name: "", + mount: "", + readOnly: false, + }; +} + +export const StorageParams: MessageFns< + StorageParams, + "akash.manifest.v2beta3.StorageParams" +> = { + $type: "akash.manifest.v2beta3.StorageParams" as const, + + encode( + message: StorageParams, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.mount !== "") { + writer.uint32(18).string(message.mount); + } + if (message.readOnly !== false) { + writer.uint32(24).bool(message.readOnly); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): StorageParams { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseStorageParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.mount = reader.string(); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.readOnly = reader.bool(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): StorageParams { + return { + $type: StorageParams.$type, + name: isSet(object.name) ? globalThis.String(object.name) : "", + mount: isSet(object.mount) ? globalThis.String(object.mount) : "", + readOnly: isSet(object.readOnly) + ? globalThis.Boolean(object.readOnly) + : false, + }; + }, + + toJSON(message: StorageParams): unknown { + const obj: any = {}; + if (message.name !== "") { + obj.name = message.name; + } + if (message.mount !== "") { + obj.mount = message.mount; + } + if (message.readOnly !== false) { + obj.readOnly = message.readOnly; + } + return obj; + }, + + create(base?: DeepPartial): StorageParams { + return StorageParams.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): StorageParams { + const message = createBaseStorageParams(); + message.name = object.name ?? ""; + message.mount = object.mount ?? ""; + message.readOnly = object.readOnly ?? false; + return message; + }, +}; + +messageTypeRegistry.set(StorageParams.$type, StorageParams); + +function createBaseServiceParams(): ServiceParams { + return { + $type: "akash.manifest.v2beta3.ServiceParams", + storage: [], + credentials: undefined, + }; +} + +export const ServiceParams: MessageFns< + ServiceParams, + "akash.manifest.v2beta3.ServiceParams" +> = { + $type: "akash.manifest.v2beta3.ServiceParams" as const, + + encode( + message: ServiceParams, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.storage) { + StorageParams.encode(v!, writer.uint32(10).fork()).join(); + } + if (message.credentials !== undefined) { + ImageCredentials.encode( + message.credentials, + writer.uint32(82).fork(), + ).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): ServiceParams { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseServiceParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.storage.push(StorageParams.decode(reader, reader.uint32())); + continue; + case 10: + if (tag !== 82) { + break; + } + + message.credentials = ImageCredentials.decode( + reader, + reader.uint32(), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): ServiceParams { + return { + $type: ServiceParams.$type, + storage: globalThis.Array.isArray(object?.storage) + ? object.storage.map((e: any) => StorageParams.fromJSON(e)) + : [], + credentials: isSet(object.credentials) + ? 
ImageCredentials.fromJSON(object.credentials) + : undefined, + }; + }, + + toJSON(message: ServiceParams): unknown { + const obj: any = {}; + if (message.storage?.length) { + obj.storage = message.storage.map((e) => StorageParams.toJSON(e)); + } + if (message.credentials !== undefined) { + obj.credentials = ImageCredentials.toJSON(message.credentials); + } + return obj; + }, + + create(base?: DeepPartial): ServiceParams { + return ServiceParams.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): ServiceParams { + const message = createBaseServiceParams(); + message.storage = + object.storage?.map((e) => StorageParams.fromPartial(e)) || []; + message.credentials = + object.credentials !== undefined && object.credentials !== null + ? ImageCredentials.fromPartial(object.credentials) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ServiceParams.$type, ServiceParams); + +function createBaseImageCredentials(): ImageCredentials { + return { + $type: "akash.manifest.v2beta3.ImageCredentials", + host: "", + email: "", + username: "", + password: "", + }; +} + +export const ImageCredentials: MessageFns< + ImageCredentials, + "akash.manifest.v2beta3.ImageCredentials" +> = { + $type: "akash.manifest.v2beta3.ImageCredentials" as const, + + encode( + message: ImageCredentials, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.host !== "") { + writer.uint32(10).string(message.host); + } + if (message.email !== "") { + writer.uint32(18).string(message.email); + } + if (message.username !== "") { + writer.uint32(26).string(message.username); + } + if (message.password !== "") { + writer.uint32(34).string(message.password); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): ImageCredentials { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseImageCredentials(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.host = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.email = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.username = reader.string(); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.password = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): ImageCredentials { + return { + $type: ImageCredentials.$type, + host: isSet(object.host) ? globalThis.String(object.host) : "", + email: isSet(object.email) ? globalThis.String(object.email) : "", + username: isSet(object.username) + ? globalThis.String(object.username) + : "", + password: isSet(object.password) + ? globalThis.String(object.password) + : "", + }; + }, + + toJSON(message: ImageCredentials): unknown { + const obj: any = {}; + if (message.host !== "") { + obj.host = message.host; + } + if (message.email !== "") { + obj.email = message.email; + } + if (message.username !== "") { + obj.username = message.username; + } + if (message.password !== "") { + obj.password = message.password; + } + return obj; + }, + + create(base?: DeepPartial): ImageCredentials { + return ImageCredentials.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): ImageCredentials { + const message = createBaseImageCredentials(); + message.host = object.host ?? ""; + message.email = object.email ?? ""; + message.username = object.username ?? ""; + message.password = object.password ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(ImageCredentials.$type, ImageCredentials); + +function createBaseService(): Service { + return { + $type: "akash.manifest.v2beta3.Service", + name: "", + image: "", + command: [], + args: [], + env: [], + resources: undefined, + count: 0, + expose: [], + params: undefined, + credentials: undefined, + }; +} + +export const Service: MessageFns = { + $type: "akash.manifest.v2beta3.Service" as const, + + encode( + message: Service, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.image !== "") { + writer.uint32(18).string(message.image); + } + for (const v of message.command) { + writer.uint32(26).string(v!); + } + for (const v of message.args) { + writer.uint32(34).string(v!); + } + for (const v of message.env) { + writer.uint32(42).string(v!); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(50).fork()).join(); + } + if (message.count !== 0) { + writer.uint32(56).uint32(message.count); + } + for (const v of message.expose) { + ServiceExpose.encode(v!, writer.uint32(66).fork()).join(); + } + if (message.params !== undefined) { + ServiceParams.encode(message.params, writer.uint32(74).fork()).join(); + } + if (message.credentials !== undefined) { + ImageCredentials.encode( + message.credentials, + writer.uint32(82).fork(), + ).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Service { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseService(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.image = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.command.push(reader.string()); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.args.push(reader.string()); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.env.push(reader.string()); + continue; + case 6: + if (tag !== 50) { + break; + } + + message.resources = Resources.decode(reader, reader.uint32()); + continue; + case 7: + if (tag !== 56) { + break; + } + + message.count = reader.uint32(); + continue; + case 8: + if (tag !== 66) { + break; + } + + message.expose.push(ServiceExpose.decode(reader, reader.uint32())); + continue; + case 9: + if (tag !== 74) { + break; + } + + message.params = ServiceParams.decode(reader, reader.uint32()); + continue; + case 10: + if (tag !== 82) { + break; + } + + message.credentials = ImageCredentials.decode( + reader, + reader.uint32(), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Service { + return { + $type: Service.$type, + name: isSet(object.name) ? globalThis.String(object.name) : "", + image: isSet(object.image) ? globalThis.String(object.image) : "", + command: globalThis.Array.isArray(object?.command) + ? 
object.command.map((e: any) => globalThis.String(e)) + : [], + args: globalThis.Array.isArray(object?.args) + ? object.args.map((e: any) => globalThis.String(e)) + : [], + env: globalThis.Array.isArray(object?.env) + ? object.env.map((e: any) => globalThis.String(e)) + : [], + resources: isSet(object.resources) + ? Resources.fromJSON(object.resources) + : undefined, + count: isSet(object.count) ? globalThis.Number(object.count) : 0, + expose: globalThis.Array.isArray(object?.expose) + ? object.expose.map((e: any) => ServiceExpose.fromJSON(e)) + : [], + params: isSet(object.params) + ? ServiceParams.fromJSON(object.params) + : undefined, + credentials: isSet(object.credentials) + ? ImageCredentials.fromJSON(object.credentials) + : undefined, + }; + }, + + toJSON(message: Service): unknown { + const obj: any = {}; + if (message.name !== "") { + obj.name = message.name; + } + if (message.image !== "") { + obj.image = message.image; + } + if (message.command?.length) { + obj.command = message.command; + } + if (message.args?.length) { + obj.args = message.args; + } + if (message.env?.length) { + obj.env = message.env; + } + if (message.resources !== undefined) { + obj.resources = Resources.toJSON(message.resources); + } + if (message.count !== 0) { + obj.count = Math.round(message.count); + } + if (message.expose?.length) { + obj.expose = message.expose.map((e) => ServiceExpose.toJSON(e)); + } + if (message.params !== undefined) { + obj.params = ServiceParams.toJSON(message.params); + } + if (message.credentials !== undefined) { + obj.credentials = ImageCredentials.toJSON(message.credentials); + } + return obj; + }, + + create(base?: DeepPartial): Service { + return Service.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Service { + const message = createBaseService(); + message.name = object.name ?? ""; + message.image = object.image ?? ""; + message.command = object.command?.map((e) => e) || []; + message.args = object.args?.map((e) => e) || []; + message.env = object.env?.map((e) => e) || []; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + message.count = object.count ?? 0; + message.expose = + object.expose?.map((e) => ServiceExpose.fromPartial(e)) || []; + message.params = + object.params !== undefined && object.params !== null + ? ServiceParams.fromPartial(object.params) + : undefined; + message.credentials = + object.credentials !== undefined && object.credentials !== null + ? ImageCredentials.fromPartial(object.credentials) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Service.$type, Service); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/manifest/v2beta3/serviceexpose.ts b/ts/src/generated/akash/manifest/v2beta3/serviceexpose.ts new file mode 100644 index 00000000..ce628705 --- /dev/null +++ b/ts/src/generated/akash/manifest/v2beta3/serviceexpose.ts @@ -0,0 +1,283 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/manifest/v2beta3/serviceexpose.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { ServiceExposeHTTPOptions } from "./httpoptions"; + +/** ServiceExpose stores exposed ports and hosts details */ +export interface ServiceExpose { + $type: "akash.manifest.v2beta3.ServiceExpose"; + /** port on the container */ + port: number; + /** port on the service definition */ + externalPort: number; + proto: string; + service: string; + global: boolean; + hosts: string[]; + httpOptions: ServiceExposeHTTPOptions | undefined; + /** The name of the IP address associated with this, if any */ + ip: string; + /** The sequence number of the associated endpoint in the on-chain data */ + endpointSequenceNumber: number; +} + +function createBaseServiceExpose(): ServiceExpose { + return { + $type: "akash.manifest.v2beta3.ServiceExpose", + port: 0, + externalPort: 0, + proto: "", + service: "", + global: false, + hosts: [], + httpOptions: undefined, + ip: "", + endpointSequenceNumber: 0, + }; +} + +export const ServiceExpose: MessageFns< + ServiceExpose, + "akash.manifest.v2beta3.ServiceExpose" +> = { + $type: "akash.manifest.v2beta3.ServiceExpose" as const, + + encode( + message: ServiceExpose, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.port !== 0) { + writer.uint32(8).uint32(message.port); + } + if (message.externalPort !== 0) { + writer.uint32(16).uint32(message.externalPort); + } + if (message.proto !== "") { + writer.uint32(26).string(message.proto); + } + if (message.service !== "") { + writer.uint32(34).string(message.service); + } + if (message.global !== false) { + writer.uint32(40).bool(message.global); + } + for (const v of message.hosts) { + writer.uint32(50).string(v!); + } + if (message.httpOptions !== undefined) { + ServiceExposeHTTPOptions.encode( + message.httpOptions, + writer.uint32(58).fork(), + ).join(); + } + if (message.ip !== "") { + writer.uint32(66).string(message.ip); + } + if (message.endpointSequenceNumber !== 0) { + writer.uint32(72).uint32(message.endpointSequenceNumber); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): ServiceExpose { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseServiceExpose(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 8) { + break; + } + + message.port = reader.uint32(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.externalPort = reader.uint32(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.proto = reader.string(); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.service = reader.string(); + continue; + case 5: + if (tag !== 40) { + break; + } + + message.global = reader.bool(); + continue; + case 6: + if (tag !== 50) { + break; + } + + message.hosts.push(reader.string()); + continue; + case 7: + if (tag !== 58) { + break; + } + + message.httpOptions = ServiceExposeHTTPOptions.decode( + reader, + reader.uint32(), + ); + continue; + case 8: + if (tag !== 66) { + break; + } + + message.ip = reader.string(); + continue; + case 9: + if (tag !== 72) { + break; + } + + message.endpointSequenceNumber = reader.uint32(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): ServiceExpose { + return { + $type: ServiceExpose.$type, + port: isSet(object.port) ? globalThis.Number(object.port) : 0, + externalPort: isSet(object.externalPort) + ? globalThis.Number(object.externalPort) + : 0, + proto: isSet(object.proto) ? globalThis.String(object.proto) : "", + service: isSet(object.service) ? globalThis.String(object.service) : "", + global: isSet(object.global) ? globalThis.Boolean(object.global) : false, + hosts: globalThis.Array.isArray(object?.hosts) + ? object.hosts.map((e: any) => globalThis.String(e)) + : [], + httpOptions: isSet(object.httpOptions) + ? ServiceExposeHTTPOptions.fromJSON(object.httpOptions) + : undefined, + ip: isSet(object.ip) ? globalThis.String(object.ip) : "", + endpointSequenceNumber: isSet(object.endpointSequenceNumber) + ? globalThis.Number(object.endpointSequenceNumber) + : 0, + }; + }, + + toJSON(message: ServiceExpose): unknown { + const obj: any = {}; + if (message.port !== 0) { + obj.port = Math.round(message.port); + } + if (message.externalPort !== 0) { + obj.externalPort = Math.round(message.externalPort); + } + if (message.proto !== "") { + obj.proto = message.proto; + } + if (message.service !== "") { + obj.service = message.service; + } + if (message.global !== false) { + obj.global = message.global; + } + if (message.hosts?.length) { + obj.hosts = message.hosts; + } + if (message.httpOptions !== undefined) { + obj.httpOptions = ServiceExposeHTTPOptions.toJSON(message.httpOptions); + } + if (message.ip !== "") { + obj.ip = message.ip; + } + if (message.endpointSequenceNumber !== 0) { + obj.endpointSequenceNumber = Math.round(message.endpointSequenceNumber); + } + return obj; + }, + + create(base?: DeepPartial): ServiceExpose { + return ServiceExpose.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): ServiceExpose { + const message = createBaseServiceExpose(); + message.port = object.port ?? 0; + message.externalPort = object.externalPort ?? 0; + message.proto = object.proto ?? ""; + message.service = object.service ?? ""; + message.global = object.global ?? false; + message.hosts = object.hosts?.map((e) => e) || []; + message.httpOptions = + object.httpOptions !== undefined && object.httpOptions !== null + ? ServiceExposeHTTPOptions.fromPartial(object.httpOptions) + : undefined; + message.ip = object.ip ?? 
""; + message.endpointSequenceNumber = object.endpointSequenceNumber ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(ServiceExpose.$type, ServiceExpose); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/market/v1/bid.ts b/ts/src/generated/akash/market/v1/bid.ts new file mode 100644 index 00000000..757e19cb --- /dev/null +++ b/ts/src/generated/akash/market/v1/bid.ts @@ -0,0 +1,198 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/market/v1/bid.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; + +/** + * BidID stores owner and all other seq numbers + * A successful bid becomes a Lease(ID). + */ +export interface BidID { + $type: "akash.market.v1.BidID"; + owner: string; + dseq: Long; + gseq: number; + oseq: number; + provider: string; +} + +function createBaseBidID(): BidID { + return { + $type: "akash.market.v1.BidID", + owner: "", + dseq: Long.UZERO, + gseq: 0, + oseq: 0, + provider: "", + }; +} + +export const BidID: MessageFns = { + $type: "akash.market.v1.BidID" as const, + + encode( + message: BidID, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq.toString()); + } + if (message.gseq !== 0) { + writer.uint32(24).uint32(message.gseq); + } + if (message.oseq !== 0) { + writer.uint32(32).uint32(message.oseq); + } + if (message.provider !== "") { + writer.uint32(42).string(message.provider); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): BidID { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseBidID(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = Long.fromString(reader.uint64().toString(), true); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.gseq = reader.uint32(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.oseq = reader.uint32(); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.provider = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): BidID { + return { + $type: BidID.$type, + owner: isSet(object.owner) ? 
globalThis.String(object.owner) : "",
+      dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO,
+      gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0,
+      oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0,
+      provider: isSet(object.provider)
+        ? globalThis.String(object.provider)
+        : "",
+    };
+  },
+
+  toJSON(message: BidID): unknown {
+    const obj: any = {};
+    if (message.owner !== "") {
+      obj.owner = message.owner;
+    }
+    if (!message.dseq.equals(Long.UZERO)) {
+      obj.dseq = (message.dseq || Long.UZERO).toString();
+    }
+    if (message.gseq !== 0) {
+      obj.gseq = Math.round(message.gseq);
+    }
+    if (message.oseq !== 0) {
+      obj.oseq = Math.round(message.oseq);
+    }
+    if (message.provider !== "") {
+      obj.provider = message.provider;
+    }
+    return obj;
+  },
+
+  create(base?: DeepPartial<BidID>): BidID {
+    return BidID.fromPartial(base ?? {});
+  },
+  fromPartial(object: DeepPartial<BidID>): BidID {
+    const message = createBaseBidID();
+    message.owner = object.owner ?? "";
+    message.dseq =
+      object.dseq !== undefined && object.dseq !== null
+        ? Long.fromValue(object.dseq)
+        : Long.UZERO;
+    message.gseq = object.gseq ?? 0;
+    message.oseq = object.oseq ?? 0;
+    message.provider = object.provider ?? "";
+    return message;
+  },
+};
+
+messageTypeRegistry.set(BidID.$type, BidID);
+
+type Builtin =
+  | Date
+  | Function
+  | Uint8Array
+  | string
+  | number
+  | boolean
+  | undefined;
+
+type DeepPartial<T> = T extends Builtin
+  ? T
+  : T extends Long
+    ? string | number | Long
+    : T extends globalThis.Array<infer U>
+      ? globalThis.Array<DeepPartial<U>>
+      : T extends ReadonlyArray<infer U>
+        ? ReadonlyArray<DeepPartial<U>>
+        : T extends {}
+          ? { [K in Exclude<keyof T, "$type">]?: DeepPartial<T[K]> }
+          : Partial<T>;
+
+function isSet(value: any): boolean {
+  return value !== null && value !== undefined;
+}
+
+interface MessageFns<T, V extends string> {
+  readonly $type: V;
+  encode(message: T, writer?: BinaryWriter): BinaryWriter;
+  decode(input: BinaryReader | Uint8Array, length?: number): T;
+  fromJSON(object: any): T;
+  toJSON(message: T): unknown;
+  create(base?: DeepPartial<T>): T;
+  fromPartial(object: DeepPartial<T>): T;
+}
diff --git a/ts/src/generated/akash/market/v1/event.ts b/ts/src/generated/akash/market/v1/event.ts
new file mode 100644
index 00000000..7810e912
--- /dev/null
+++ b/ts/src/generated/akash/market/v1/event.ts
@@ -0,0 +1,575 @@
+// Code generated by protoc-gen-ts_proto. DO NOT EDIT.
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/market/v1/event.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { DecCoin } from "../../../cosmos/base/v1beta1/coin"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { BidID } from "./bid"; +import { LeaseID } from "./lease"; +import { OrderID } from "./order"; + +/** EventOrderCreated */ +export interface EventOrderCreated { + $type: "akash.market.v1.EventOrderCreated"; + id: OrderID | undefined; +} + +/** EventOrderClosed */ +export interface EventOrderClosed { + $type: "akash.market.v1.EventOrderClosed"; + id: OrderID | undefined; +} + +/** EventBidCreated */ +export interface EventBidCreated { + $type: "akash.market.v1.EventBidCreated"; + id: BidID | undefined; + price: DecCoin | undefined; +} + +/** EventBidClosed */ +export interface EventBidClosed { + $type: "akash.market.v1.EventBidClosed"; + id: BidID | undefined; +} + +/** EventLeaseCreated */ +export interface EventLeaseCreated { + $type: "akash.market.v1.EventLeaseCreated"; + id: LeaseID | undefined; + price: DecCoin | undefined; +} + +/** EventLeaseClosed */ +export interface EventLeaseClosed { + $type: "akash.market.v1.EventLeaseClosed"; + id: LeaseID | undefined; +} + +function createBaseEventOrderCreated(): EventOrderCreated { + return { $type: "akash.market.v1.EventOrderCreated", id: undefined }; +} + +export const EventOrderCreated: MessageFns< + EventOrderCreated, + "akash.market.v1.EventOrderCreated" +> = { + $type: "akash.market.v1.EventOrderCreated" as const, + + encode( + message: EventOrderCreated, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + OrderID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): EventOrderCreated { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseEventOrderCreated(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = OrderID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventOrderCreated { + return { + $type: EventOrderCreated.$type, + id: isSet(object.id) ? OrderID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: EventOrderCreated): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = OrderID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): EventOrderCreated { + return EventOrderCreated.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): EventOrderCreated { + const message = createBaseEventOrderCreated(); + message.id = + object.id !== undefined && object.id !== null + ? 
OrderID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(EventOrderCreated.$type, EventOrderCreated); + +function createBaseEventOrderClosed(): EventOrderClosed { + return { $type: "akash.market.v1.EventOrderClosed", id: undefined }; +} + +export const EventOrderClosed: MessageFns< + EventOrderClosed, + "akash.market.v1.EventOrderClosed" +> = { + $type: "akash.market.v1.EventOrderClosed" as const, + + encode( + message: EventOrderClosed, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + OrderID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): EventOrderClosed { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseEventOrderClosed(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = OrderID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventOrderClosed { + return { + $type: EventOrderClosed.$type, + id: isSet(object.id) ? OrderID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: EventOrderClosed): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = OrderID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): EventOrderClosed { + return EventOrderClosed.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): EventOrderClosed { + const message = createBaseEventOrderClosed(); + message.id = + object.id !== undefined && object.id !== null + ? OrderID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(EventOrderClosed.$type, EventOrderClosed); + +function createBaseEventBidCreated(): EventBidCreated { + return { + $type: "akash.market.v1.EventBidCreated", + id: undefined, + price: undefined, + }; +} + +export const EventBidCreated: MessageFns< + EventBidCreated, + "akash.market.v1.EventBidCreated" +> = { + $type: "akash.market.v1.EventBidCreated" as const, + + encode( + message: EventBidCreated, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + BidID.encode(message.id, writer.uint32(10).fork()).join(); + } + if (message.price !== undefined) { + DecCoin.encode(message.price, writer.uint32(26).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): EventBidCreated { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseEventBidCreated(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = BidID.decode(reader, reader.uint32()); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.price = DecCoin.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventBidCreated { + return { + $type: EventBidCreated.$type, + id: isSet(object.id) ? BidID.fromJSON(object.id) : undefined, + price: isSet(object.price) ? 
DecCoin.fromJSON(object.price) : undefined, + }; + }, + + toJSON(message: EventBidCreated): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = BidID.toJSON(message.id); + } + if (message.price !== undefined) { + obj.price = DecCoin.toJSON(message.price); + } + return obj; + }, + + create(base?: DeepPartial): EventBidCreated { + return EventBidCreated.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): EventBidCreated { + const message = createBaseEventBidCreated(); + message.id = + object.id !== undefined && object.id !== null + ? BidID.fromPartial(object.id) + : undefined; + message.price = + object.price !== undefined && object.price !== null + ? DecCoin.fromPartial(object.price) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(EventBidCreated.$type, EventBidCreated); + +function createBaseEventBidClosed(): EventBidClosed { + return { $type: "akash.market.v1.EventBidClosed", id: undefined }; +} + +export const EventBidClosed: MessageFns< + EventBidClosed, + "akash.market.v1.EventBidClosed" +> = { + $type: "akash.market.v1.EventBidClosed" as const, + + encode( + message: EventBidClosed, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + BidID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): EventBidClosed { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseEventBidClosed(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = BidID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventBidClosed { + return { + $type: EventBidClosed.$type, + id: isSet(object.id) ? BidID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: EventBidClosed): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = BidID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): EventBidClosed { + return EventBidClosed.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): EventBidClosed { + const message = createBaseEventBidClosed(); + message.id = + object.id !== undefined && object.id !== null + ? BidID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(EventBidClosed.$type, EventBidClosed); + +function createBaseEventLeaseCreated(): EventLeaseCreated { + return { + $type: "akash.market.v1.EventLeaseCreated", + id: undefined, + price: undefined, + }; +} + +export const EventLeaseCreated: MessageFns< + EventLeaseCreated, + "akash.market.v1.EventLeaseCreated" +> = { + $type: "akash.market.v1.EventLeaseCreated" as const, + + encode( + message: EventLeaseCreated, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + LeaseID.encode(message.id, writer.uint32(10).fork()).join(); + } + if (message.price !== undefined) { + DecCoin.encode(message.price, writer.uint32(26).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): EventLeaseCreated { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseEventLeaseCreated(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = LeaseID.decode(reader, reader.uint32()); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.price = DecCoin.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventLeaseCreated { + return { + $type: EventLeaseCreated.$type, + id: isSet(object.id) ? LeaseID.fromJSON(object.id) : undefined, + price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, + }; + }, + + toJSON(message: EventLeaseCreated): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = LeaseID.toJSON(message.id); + } + if (message.price !== undefined) { + obj.price = DecCoin.toJSON(message.price); + } + return obj; + }, + + create(base?: DeepPartial): EventLeaseCreated { + return EventLeaseCreated.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): EventLeaseCreated { + const message = createBaseEventLeaseCreated(); + message.id = + object.id !== undefined && object.id !== null + ? LeaseID.fromPartial(object.id) + : undefined; + message.price = + object.price !== undefined && object.price !== null + ? DecCoin.fromPartial(object.price) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(EventLeaseCreated.$type, EventLeaseCreated); + +function createBaseEventLeaseClosed(): EventLeaseClosed { + return { $type: "akash.market.v1.EventLeaseClosed", id: undefined }; +} + +export const EventLeaseClosed: MessageFns< + EventLeaseClosed, + "akash.market.v1.EventLeaseClosed" +> = { + $type: "akash.market.v1.EventLeaseClosed" as const, + + encode( + message: EventLeaseClosed, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + LeaseID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): EventLeaseClosed { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseEventLeaseClosed(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = LeaseID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventLeaseClosed { + return { + $type: EventLeaseClosed.$type, + id: isSet(object.id) ? LeaseID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: EventLeaseClosed): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = LeaseID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): EventLeaseClosed { + return EventLeaseClosed.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): EventLeaseClosed { + const message = createBaseEventLeaseClosed(); + message.id = + object.id !== undefined && object.id !== null + ? 
LeaseID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(EventLeaseClosed.$type, EventLeaseClosed); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/market/v1/filters.ts b/ts/src/generated/akash/market/v1/filters.ts new file mode 100644 index 00000000..74d57e88 --- /dev/null +++ b/ts/src/generated/akash/market/v1/filters.ts @@ -0,0 +1,215 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/market/v1/filters.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; + +/** LeaseFilters defines flags for lease list filter */ +export interface LeaseFilters { + $type: "akash.market.v1.LeaseFilters"; + owner: string; + dseq: Long; + gseq: number; + oseq: number; + provider: string; + state: string; +} + +function createBaseLeaseFilters(): LeaseFilters { + return { + $type: "akash.market.v1.LeaseFilters", + owner: "", + dseq: Long.UZERO, + gseq: 0, + oseq: 0, + provider: "", + state: "", + }; +} + +export const LeaseFilters: MessageFns< + LeaseFilters, + "akash.market.v1.LeaseFilters" +> = { + $type: "akash.market.v1.LeaseFilters" as const, + + encode( + message: LeaseFilters, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq.toString()); + } + if (message.gseq !== 0) { + writer.uint32(24).uint32(message.gseq); + } + if (message.oseq !== 0) { + writer.uint32(32).uint32(message.oseq); + } + if (message.provider !== "") { + writer.uint32(42).string(message.provider); + } + if (message.state !== "") { + writer.uint32(50).string(message.state); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): LeaseFilters { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseLeaseFilters(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = Long.fromString(reader.uint64().toString(), true); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.gseq = reader.uint32(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.oseq = reader.uint32(); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.provider = reader.string(); + continue; + case 6: + if (tag !== 50) { + break; + } + + message.state = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): LeaseFilters { + return { + $type: LeaseFilters.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, + gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, + oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, + provider: isSet(object.provider) + ? globalThis.String(object.provider) + : "", + state: isSet(object.state) ? globalThis.String(object.state) : "", + }; + }, + + toJSON(message: LeaseFilters): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (!message.dseq.equals(Long.UZERO)) { + obj.dseq = (message.dseq || Long.UZERO).toString(); + } + if (message.gseq !== 0) { + obj.gseq = Math.round(message.gseq); + } + if (message.oseq !== 0) { + obj.oseq = Math.round(message.oseq); + } + if (message.provider !== "") { + obj.provider = message.provider; + } + if (message.state !== "") { + obj.state = message.state; + } + return obj; + }, + + create(base?: DeepPartial): LeaseFilters { + return LeaseFilters.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): LeaseFilters { + const message = createBaseLeaseFilters(); + message.owner = object.owner ?? ""; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? Long.fromValue(object.dseq) + : Long.UZERO; + message.gseq = object.gseq ?? 0; + message.oseq = object.oseq ?? 0; + message.provider = object.provider ?? ""; + message.state = object.state ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(LeaseFilters.$type, LeaseFilters); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/market/v1/lease.ts b/ts/src/generated/akash/market/v1/lease.ts new file mode 100644 index 00000000..12e44c79 --- /dev/null +++ b/ts/src/generated/akash/market/v1/lease.ts @@ -0,0 +1,407 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/market/v1/lease.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { DecCoin } from "../../../cosmos/base/v1beta1/coin"; +import { messageTypeRegistry } from "../../../typeRegistry"; + +/** LeaseID stores bid details of lease */ +export interface LeaseID { + $type: "akash.market.v1.LeaseID"; + owner: string; + dseq: Long; + gseq: number; + oseq: number; + provider: string; +} + +/** Lease stores LeaseID, state of lease and price */ +export interface Lease { + $type: "akash.market.v1.Lease"; + id: LeaseID | undefined; + state: Lease_State; + price: DecCoin | undefined; + createdAt: Long; + closedOn: Long; +} + +/** State is an enum which refers to state of lease */ +export enum Lease_State { + /** invalid - Prefix should start with 0 in enum. So declaring dummy state */ + invalid = 0, + /** active - LeaseActive denotes state for lease active */ + active = 1, + /** insufficient_funds - LeaseInsufficientFunds denotes state for lease insufficient_funds */ + insufficient_funds = 2, + /** closed - LeaseClosed denotes state for lease closed */ + closed = 3, + UNRECOGNIZED = -1, +} + +export function lease_StateFromJSON(object: any): Lease_State { + switch (object) { + case 0: + case "invalid": + return Lease_State.invalid; + case 1: + case "active": + return Lease_State.active; + case 2: + case "insufficient_funds": + return Lease_State.insufficient_funds; + case 3: + case "closed": + return Lease_State.closed; + case -1: + case "UNRECOGNIZED": + default: + return Lease_State.UNRECOGNIZED; + } +} + +export function lease_StateToJSON(object: Lease_State): string { + switch (object) { + case Lease_State.invalid: + return "invalid"; + case Lease_State.active: + return "active"; + case Lease_State.insufficient_funds: + return "insufficient_funds"; + case Lease_State.closed: + return "closed"; + case Lease_State.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +function createBaseLeaseID(): LeaseID { + return { + $type: "akash.market.v1.LeaseID", + owner: "", + dseq: Long.UZERO, + gseq: 0, + oseq: 0, + provider: "", + }; +} + +export const LeaseID: MessageFns = { + $type: "akash.market.v1.LeaseID" as const, + + encode( + message: LeaseID, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq.toString()); + } + if (message.gseq !== 0) { + writer.uint32(24).uint32(message.gseq); + } + if (message.oseq !== 0) { + writer.uint32(32).uint32(message.oseq); + } + if (message.provider !== "") { + writer.uint32(42).string(message.provider); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): LeaseID { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseLeaseID(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = Long.fromString(reader.uint64().toString(), true); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.gseq = reader.uint32(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.oseq = reader.uint32(); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.provider = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): LeaseID { + return { + $type: LeaseID.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, + gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, + oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, + provider: isSet(object.provider) + ? globalThis.String(object.provider) + : "", + }; + }, + + toJSON(message: LeaseID): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (!message.dseq.equals(Long.UZERO)) { + obj.dseq = (message.dseq || Long.UZERO).toString(); + } + if (message.gseq !== 0) { + obj.gseq = Math.round(message.gseq); + } + if (message.oseq !== 0) { + obj.oseq = Math.round(message.oseq); + } + if (message.provider !== "") { + obj.provider = message.provider; + } + return obj; + }, + + create(base?: DeepPartial): LeaseID { + return LeaseID.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): LeaseID { + const message = createBaseLeaseID(); + message.owner = object.owner ?? ""; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? Long.fromValue(object.dseq) + : Long.UZERO; + message.gseq = object.gseq ?? 0; + message.oseq = object.oseq ?? 0; + message.provider = object.provider ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(LeaseID.$type, LeaseID); + +function createBaseLease(): Lease { + return { + $type: "akash.market.v1.Lease", + id: undefined, + state: 0, + price: undefined, + createdAt: Long.ZERO, + closedOn: Long.ZERO, + }; +} + +export const Lease: MessageFns = { + $type: "akash.market.v1.Lease" as const, + + encode( + message: Lease, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + LeaseID.encode(message.id, writer.uint32(10).fork()).join(); + } + if (message.state !== 0) { + writer.uint32(16).int32(message.state); + } + if (message.price !== undefined) { + DecCoin.encode(message.price, writer.uint32(26).fork()).join(); + } + if (!message.createdAt.equals(Long.ZERO)) { + writer.uint32(32).int64(message.createdAt.toString()); + } + if (!message.closedOn.equals(Long.ZERO)) { + writer.uint32(40).int64(message.closedOn.toString()); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Lease { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseLease(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = LeaseID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.state = reader.int32() as any; + continue; + case 3: + if (tag !== 26) { + break; + } + + message.price = DecCoin.decode(reader, reader.uint32()); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.createdAt = Long.fromString(reader.int64().toString()); + continue; + case 5: + if (tag !== 40) { + break; + } + + message.closedOn = Long.fromString(reader.int64().toString()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Lease { + return { + $type: Lease.$type, + id: isSet(object.id) ? LeaseID.fromJSON(object.id) : undefined, + state: isSet(object.state) ? lease_StateFromJSON(object.state) : 0, + price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, + createdAt: isSet(object.createdAt) + ? Long.fromValue(object.createdAt) + : Long.ZERO, + closedOn: isSet(object.closedOn) + ? Long.fromValue(object.closedOn) + : Long.ZERO, + }; + }, + + toJSON(message: Lease): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = LeaseID.toJSON(message.id); + } + if (message.state !== 0) { + obj.state = lease_StateToJSON(message.state); + } + if (message.price !== undefined) { + obj.price = DecCoin.toJSON(message.price); + } + if (!message.createdAt.equals(Long.ZERO)) { + obj.createdAt = (message.createdAt || Long.ZERO).toString(); + } + if (!message.closedOn.equals(Long.ZERO)) { + obj.closedOn = (message.closedOn || Long.ZERO).toString(); + } + return obj; + }, + + create(base?: DeepPartial): Lease { + return Lease.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Lease { + const message = createBaseLease(); + message.id = + object.id !== undefined && object.id !== null + ? LeaseID.fromPartial(object.id) + : undefined; + message.state = object.state ?? 0; + message.price = + object.price !== undefined && object.price !== null + ? DecCoin.fromPartial(object.price) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? Long.fromValue(object.createdAt) + : Long.ZERO; + message.closedOn = + object.closedOn !== undefined && object.closedOn !== null + ? Long.fromValue(object.closedOn) + : Long.ZERO; + return message; + }, +}; + +messageTypeRegistry.set(Lease.$type, Lease); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude<keyof T, "$type">]?: DeepPartial<T[K]> }
+          : Partial<T>;
+
+function isSet(value: any): boolean {
+  return value !== null && value !== undefined;
+}
+
+interface MessageFns<T, V extends string> {
+  readonly $type: V;
+  encode(message: T, writer?: BinaryWriter): BinaryWriter;
+  decode(input: BinaryReader | Uint8Array, length?: number): T;
+  fromJSON(object: any): T;
+  toJSON(message: T): unknown;
+  create(base?: DeepPartial<T>): T;
+  fromPartial(object: DeepPartial<T>): T;
+}
diff --git a/ts/src/generated/akash/market/v1/order.ts b/ts/src/generated/akash/market/v1/order.ts
new file mode 100644
index 00000000..8fca1483
--- /dev/null
+++ b/ts/src/generated/akash/market/v1/order.ts
@@ -0,0 +1,176 @@
+// Code generated by protoc-gen-ts_proto. DO NOT EDIT.
+// versions:
+// protoc-gen-ts_proto v2.2.0
+// protoc unknown
+// source: akash/market/v1/order.proto
+
+/* eslint-disable */
+import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire";
+import Long from "long";
+import { messageTypeRegistry } from "../../../typeRegistry";
+
+/** OrderID stores owner and all other seq numbers */
+export interface OrderID {
+  $type: "akash.market.v1.OrderID";
+  owner: string;
+  dseq: Long;
+  gseq: number;
+  oseq: number;
+}
+
+function createBaseOrderID(): OrderID {
+  return {
+    $type: "akash.market.v1.OrderID",
+    owner: "",
+    dseq: Long.UZERO,
+    gseq: 0,
+    oseq: 0,
+  };
+}
+
+export const OrderID: MessageFns<OrderID, "akash.market.v1.OrderID"> = {
+  $type: "akash.market.v1.OrderID" as const,
+
+  encode(
+    message: OrderID,
+    writer: BinaryWriter = new BinaryWriter(),
+  ): BinaryWriter {
+    if (message.owner !== "") {
+      writer.uint32(10).string(message.owner);
+    }
+    if (!message.dseq.equals(Long.UZERO)) {
+      writer.uint32(16).uint64(message.dseq.toString());
+    }
+    if (message.gseq !== 0) {
+      writer.uint32(24).uint32(message.gseq);
+    }
+    if (message.oseq !== 0) {
+      writer.uint32(32).uint32(message.oseq);
+    }
+    return writer;
+  },
+
+  decode(input: BinaryReader | Uint8Array, length?: number): OrderID {
+    const reader =
+      input instanceof BinaryReader ? input : new BinaryReader(input);
+    let end = length === undefined ? reader.len : reader.pos + length;
+    const message = createBaseOrderID();
+    while (reader.pos < end) {
+      const tag = reader.uint32();
+      switch (tag >>> 3) {
+        case 1:
+          if (tag !== 10) {
+            break;
+          }
+
+          message.owner = reader.string();
+          continue;
+        case 2:
+          if (tag !== 16) {
+            break;
+          }
+
+          message.dseq = Long.fromString(reader.uint64().toString(), true);
+          continue;
+        case 3:
+          if (tag !== 24) {
+            break;
+          }
+
+          message.gseq = reader.uint32();
+          continue;
+        case 4:
+          if (tag !== 32) {
+            break;
+          }
+
+          message.oseq = reader.uint32();
+          continue;
+      }
+      if ((tag & 7) === 4 || tag === 0) {
+        break;
+      }
+      reader.skip(tag & 7);
+    }
+    return message;
+  },
+
+  fromJSON(object: any): OrderID {
+    return {
+      $type: OrderID.$type,
+      owner: isSet(object.owner) ? globalThis.String(object.owner) : "",
+      dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO,
+      gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0,
+      oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0,
+    };
+  },
+
+  toJSON(message: OrderID): unknown {
+    const obj: any = {};
+    if (message.owner !== "") {
+      obj.owner = message.owner;
+    }
+    if (!message.dseq.equals(Long.UZERO)) {
+      obj.dseq = (message.dseq || Long.UZERO).toString();
+    }
+    if (message.gseq !== 0) {
+      obj.gseq = Math.round(message.gseq);
+    }
+    if (message.oseq !== 0) {
+      obj.oseq = Math.round(message.oseq);
+    }
+    return obj;
+  },
+
+  create(base?: DeepPartial<OrderID>): OrderID {
+    return OrderID.fromPartial(base ??
{}); + }, + fromPartial(object: DeepPartial): OrderID { + const message = createBaseOrderID(); + message.owner = object.owner ?? ""; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? Long.fromValue(object.dseq) + : Long.UZERO; + message.gseq = object.gseq ?? 0; + message.oseq = object.oseq ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(OrderID.$type, OrderID); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/market/v1beta2/bid.ts b/ts/src/generated/akash/market/v1beta2/bid.ts deleted file mode 100644 index 4ee18b9c..00000000 --- a/ts/src/generated/akash/market/v1beta2/bid.ts +++ /dev/null @@ -1,879 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin, DecCoin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { OrderID } from "./order"; - -/** MsgCreateBid defines an SDK message for creating Bid */ -export interface MsgCreateBid { - $type: "akash.market.v1beta2.MsgCreateBid"; - order: OrderID | undefined; - provider: string; - price: DecCoin | undefined; - deposit: Coin | undefined; -} - -/** MsgCreateBidResponse defines the Msg/CreateBid response type. */ -export interface MsgCreateBidResponse { - $type: "akash.market.v1beta2.MsgCreateBidResponse"; -} - -/** MsgCloseBid defines an SDK message for closing bid */ -export interface MsgCloseBid { - $type: "akash.market.v1beta2.MsgCloseBid"; - bidId: BidID | undefined; -} - -/** MsgCloseBidResponse defines the Msg/CloseBid response type. */ -export interface MsgCloseBidResponse { - $type: "akash.market.v1beta2.MsgCloseBidResponse"; -} - -/** - * BidID stores owner and all other seq numbers - * A successful bid becomes a Lease(ID). - */ -export interface BidID { - $type: "akash.market.v1beta2.BidID"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - provider: string; -} - -/** Bid stores BidID, state of bid and price */ -export interface Bid { - $type: "akash.market.v1beta2.Bid"; - bidId: BidID | undefined; - state: Bid_State; - price: DecCoin | undefined; - createdAt: Long; -} - -/** State is an enum which refers to state of bid */ -export enum Bid_State { - /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ - invalid = 0, - /** open - BidOpen denotes state for bid open */ - open = 1, - /** active - BidMatched denotes state for bid open */ - active = 2, - /** lost - BidLost denotes state for bid lost */ - lost = 3, - /** closed - BidClosed denotes state for bid closed */ - closed = 4, - UNRECOGNIZED = -1, -} - -export function bid_StateFromJSON(object: any): Bid_State { - switch (object) { - case 0: - case "invalid": - return Bid_State.invalid; - case 1: - case "open": - return Bid_State.open; - case 2: - case "active": - return Bid_State.active; - case 3: - case "lost": - return Bid_State.lost; - case 4: - case "closed": - return Bid_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Bid_State.UNRECOGNIZED; - } -} - -export function bid_StateToJSON(object: Bid_State): string { - switch (object) { - case Bid_State.invalid: - return "invalid"; - case Bid_State.open: - return "open"; - case Bid_State.active: - return "active"; - case Bid_State.lost: - return "lost"; - case Bid_State.closed: - return "closed"; - case Bid_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** BidFilters defines flags for bid list filter */ -export interface BidFilters { - $type: "akash.market.v1beta2.BidFilters"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - provider: string; - state: string; -} - -function createBaseMsgCreateBid(): MsgCreateBid { - return { - $type: "akash.market.v1beta2.MsgCreateBid", - order: undefined, - provider: "", - price: undefined, - deposit: undefined, - }; -} - -export const MsgCreateBid = { - $type: "akash.market.v1beta2.MsgCreateBid" as const, - - encode( - message: MsgCreateBid, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.order !== undefined) { - OrderID.encode(message.order, writer.uint32(10).fork()).ldelim(); - } - if (message.provider !== "") { - writer.uint32(18).string(message.provider); - } - if (message.price !== undefined) { - DecCoin.encode(message.price, writer.uint32(26).fork()).ldelim(); - } - if (message.deposit !== undefined) { - Coin.encode(message.deposit, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateBid { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateBid(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.order = OrderID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.provider = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.price = DecCoin.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.deposit = Coin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCreateBid { - return { - $type: MsgCreateBid.$type, - order: isSet(object.order) ? OrderID.fromJSON(object.order) : undefined, - provider: isSet(object.provider) - ? globalThis.String(object.provider) - : "", - price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, - deposit: isSet(object.deposit) - ? 
Coin.fromJSON(object.deposit) - : undefined, - }; - }, - - toJSON(message: MsgCreateBid): unknown { - const obj: any = {}; - if (message.order !== undefined) { - obj.order = OrderID.toJSON(message.order); - } - if (message.provider !== "") { - obj.provider = message.provider; - } - if (message.price !== undefined) { - obj.price = DecCoin.toJSON(message.price); - } - if (message.deposit !== undefined) { - obj.deposit = Coin.toJSON(message.deposit); - } - return obj; - }, - - create(base?: DeepPartial): MsgCreateBid { - return MsgCreateBid.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCreateBid { - const message = createBaseMsgCreateBid(); - message.order = - object.order !== undefined && object.order !== null - ? OrderID.fromPartial(object.order) - : undefined; - message.provider = object.provider ?? ""; - message.price = - object.price !== undefined && object.price !== null - ? DecCoin.fromPartial(object.price) - : undefined; - message.deposit = - object.deposit !== undefined && object.deposit !== null - ? Coin.fromPartial(object.deposit) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateBid.$type, MsgCreateBid); - -function createBaseMsgCreateBidResponse(): MsgCreateBidResponse { - return { $type: "akash.market.v1beta2.MsgCreateBidResponse" }; -} - -export const MsgCreateBidResponse = { - $type: "akash.market.v1beta2.MsgCreateBidResponse" as const, - - encode( - _: MsgCreateBidResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateBidResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateBidResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCreateBidResponse { - return { $type: MsgCreateBidResponse.$type }; - }, - - toJSON(_: MsgCreateBidResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgCreateBidResponse { - return MsgCreateBidResponse.fromPartial(base ?? {}); - }, - fromPartial(_: DeepPartial): MsgCreateBidResponse { - const message = createBaseMsgCreateBidResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateBidResponse.$type, MsgCreateBidResponse); - -function createBaseMsgCloseBid(): MsgCloseBid { - return { $type: "akash.market.v1beta2.MsgCloseBid", bidId: undefined }; -} - -export const MsgCloseBid = { - $type: "akash.market.v1beta2.MsgCloseBid" as const, - - encode( - message: MsgCloseBid, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bidId !== undefined) { - BidID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseBid { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCloseBid(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bidId = BidID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCloseBid { - return { - $type: MsgCloseBid.$type, - bidId: isSet(object.bidId) ? BidID.fromJSON(object.bidId) : undefined, - }; - }, - - toJSON(message: MsgCloseBid): unknown { - const obj: any = {}; - if (message.bidId !== undefined) { - obj.bidId = BidID.toJSON(message.bidId); - } - return obj; - }, - - create(base?: DeepPartial): MsgCloseBid { - return MsgCloseBid.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCloseBid { - const message = createBaseMsgCloseBid(); - message.bidId = - object.bidId !== undefined && object.bidId !== null - ? BidID.fromPartial(object.bidId) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseBid.$type, MsgCloseBid); - -function createBaseMsgCloseBidResponse(): MsgCloseBidResponse { - return { $type: "akash.market.v1beta2.MsgCloseBidResponse" }; -} - -export const MsgCloseBidResponse = { - $type: "akash.market.v1beta2.MsgCloseBidResponse" as const, - - encode( - _: MsgCloseBidResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseBidResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseBidResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCloseBidResponse { - return { $type: MsgCloseBidResponse.$type }; - }, - - toJSON(_: MsgCloseBidResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgCloseBidResponse { - return MsgCloseBidResponse.fromPartial(base ?? {}); - }, - fromPartial(_: DeepPartial): MsgCloseBidResponse { - const message = createBaseMsgCloseBidResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseBidResponse.$type, MsgCloseBidResponse); - -function createBaseBidID(): BidID { - return { - $type: "akash.market.v1beta2.BidID", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - provider: "", - }; -} - -export const BidID = { - $type: "akash.market.v1beta2.BidID" as const, - - encode(message: BidID, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.provider !== "") { - writer.uint32(42).string(message.provider); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): BidID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseBidID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.provider = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): BidID { - return { - $type: BidID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, - provider: isSet(object.provider) - ? globalThis.String(object.provider) - : "", - }; - }, - - toJSON(message: BidID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - if (message.provider !== "") { - obj.provider = message.provider; - } - return obj; - }, - - create(base?: DeepPartial): BidID { - return BidID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): BidID { - const message = createBaseBidID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.provider = object.provider ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(BidID.$type, BidID); - -function createBaseBid(): Bid { - return { - $type: "akash.market.v1beta2.Bid", - bidId: undefined, - state: 0, - price: undefined, - createdAt: Long.ZERO, - }; -} - -export const Bid = { - $type: "akash.market.v1beta2.Bid" as const, - - encode(message: Bid, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.bidId !== undefined) { - BidID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.price !== undefined) { - DecCoin.encode(message.price, writer.uint32(26).fork()).ldelim(); - } - if (!message.createdAt.equals(Long.ZERO)) { - writer.uint32(32).int64(message.createdAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Bid { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseBid(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bidId = BidID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.price = DecCoin.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.createdAt = reader.int64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Bid { - return { - $type: Bid.$type, - bidId: isSet(object.bidId) ? BidID.fromJSON(object.bidId) : undefined, - state: isSet(object.state) ? bid_StateFromJSON(object.state) : 0, - price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, - createdAt: isSet(object.createdAt) - ? Long.fromValue(object.createdAt) - : Long.ZERO, - }; - }, - - toJSON(message: Bid): unknown { - const obj: any = {}; - if (message.bidId !== undefined) { - obj.bidId = BidID.toJSON(message.bidId); - } - if (message.state !== 0) { - obj.state = bid_StateToJSON(message.state); - } - if (message.price !== undefined) { - obj.price = DecCoin.toJSON(message.price); - } - if (!message.createdAt.equals(Long.ZERO)) { - obj.createdAt = (message.createdAt || Long.ZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): Bid { - return Bid.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Bid { - const message = createBaseBid(); - message.bidId = - object.bidId !== undefined && object.bidId !== null - ? BidID.fromPartial(object.bidId) - : undefined; - message.state = object.state ?? 0; - message.price = - object.price !== undefined && object.price !== null - ? DecCoin.fromPartial(object.price) - : undefined; - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? Long.fromValue(object.createdAt) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Bid.$type, Bid); - -function createBaseBidFilters(): BidFilters { - return { - $type: "akash.market.v1beta2.BidFilters", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - provider: "", - state: "", - }; -} - -export const BidFilters = { - $type: "akash.market.v1beta2.BidFilters" as const, - - encode( - message: BidFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.provider !== "") { - writer.uint32(42).string(message.provider); - } - if (message.state !== "") { - writer.uint32(50).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): BidFilters { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseBidFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.provider = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.state = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): BidFilters { - return { - $type: BidFilters.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, - provider: isSet(object.provider) - ? globalThis.String(object.provider) - : "", - state: isSet(object.state) ? globalThis.String(object.state) : "", - }; - }, - - toJSON(message: BidFilters): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - if (message.provider !== "") { - obj.provider = message.provider; - } - if (message.state !== "") { - obj.state = message.state; - } - return obj; - }, - - create(base?: DeepPartial): BidFilters { - return BidFilters.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): BidFilters { - const message = createBaseBidFilters(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.provider = object.provider ?? ""; - message.state = object.state ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(BidFilters.$type, BidFilters); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta2/genesis.ts b/ts/src/generated/akash/market/v1beta2/genesis.ts deleted file mode 100644 index 08dfb568..00000000 --- a/ts/src/generated/akash/market/v1beta2/genesis.ts +++ /dev/null @@ -1,155 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Lease } from "./lease"; -import { Order } from "./order"; -import { Params } from "./params"; - -/** GenesisState defines the basic genesis state used by market module */ -export interface GenesisState { - $type: "akash.market.v1beta2.GenesisState"; - orders: Order[]; - leases: Lease[]; - params: Params | undefined; -} - -function createBaseGenesisState(): GenesisState { - return { - $type: "akash.market.v1beta2.GenesisState", - orders: [], - leases: [], - params: undefined, - }; -} - -export const GenesisState = { - $type: "akash.market.v1beta2.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.orders) { - Order.encode(v!, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.leases) { - Lease.encode(v!, writer.uint32(18).fork()).ldelim(); - } - if (message.params !== undefined) { - Params.encode(message.params, writer.uint32(26).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.orders.push(Order.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.leases.push(Lease.decode(reader, reader.uint32())); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.params = Params.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - orders: globalThis.Array.isArray(object?.orders) - ? object.orders.map((e: any) => Order.fromJSON(e)) - : [], - leases: globalThis.Array.isArray(object?.leases) - ? object.leases.map((e: any) => Lease.fromJSON(e)) - : [], - params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.orders?.length) { - obj.orders = message.orders.map((e) => Order.toJSON(e)); - } - if (message.leases?.length) { - obj.leases = message.leases.map((e) => Lease.toJSON(e)); - } - if (message.params !== undefined) { - obj.params = Params.toJSON(message.params); - } - return obj; - }, - - create(base?: DeepPartial): GenesisState { - return GenesisState.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): GenesisState { - const message = createBaseGenesisState(); - message.orders = object.orders?.map((e) => Order.fromPartial(e)) || []; - message.leases = object.leases?.map((e) => Lease.fromPartial(e)) || []; - message.params = - object.params !== undefined && object.params !== null - ? Params.fromPartial(object.params) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta2/lease.ts b/ts/src/generated/akash/market/v1beta2/lease.ts deleted file mode 100644 index b22182b3..00000000 --- a/ts/src/generated/akash/market/v1beta2/lease.ts +++ /dev/null @@ -1,980 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { DecCoin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { BidID } from "./bid"; - -/** LeaseID stores bid details of lease */ -export interface LeaseID { - $type: "akash.market.v1beta2.LeaseID"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - provider: string; -} - -/** Lease stores LeaseID, state of lease and price */ -export interface Lease { - $type: "akash.market.v1beta2.Lease"; - leaseId: LeaseID | undefined; - state: Lease_State; - price: DecCoin | undefined; - createdAt: Long; - closedOn: Long; -} - -/** State is an enum which refers to state of lease */ -export enum Lease_State { - /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ - invalid = 0, - /** active - LeaseActive denotes state for lease active */ - active = 1, - /** insufficient_funds - LeaseInsufficientFunds denotes state for lease insufficient_funds */ - insufficient_funds = 2, - /** closed - LeaseClosed denotes state for lease closed */ - closed = 3, - UNRECOGNIZED = -1, -} - -export function lease_StateFromJSON(object: any): Lease_State { - switch (object) { - case 0: - case "invalid": - return Lease_State.invalid; - case 1: - case "active": - return Lease_State.active; - case 2: - case "insufficient_funds": - return Lease_State.insufficient_funds; - case 3: - case "closed": - return Lease_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Lease_State.UNRECOGNIZED; - } -} - -export function lease_StateToJSON(object: Lease_State): string { - switch (object) { - case Lease_State.invalid: - return "invalid"; - case Lease_State.active: - return "active"; - case Lease_State.insufficient_funds: - return "insufficient_funds"; - case Lease_State.closed: - return "closed"; - case Lease_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** LeaseFilters defines flags for lease list filter */ -export interface LeaseFilters { - $type: "akash.market.v1beta2.LeaseFilters"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - provider: string; - state: string; -} - -/** MsgCreateLease is sent to create a lease */ -export interface MsgCreateLease { - $type: "akash.market.v1beta2.MsgCreateLease"; - bidId: BidID | undefined; -} - -/** MsgCreateLeaseResponse is the response from creating a lease */ -export interface MsgCreateLeaseResponse { - $type: "akash.market.v1beta2.MsgCreateLeaseResponse"; -} - -/** MsgWithdrawLease defines an SDK message for closing bid */ -export interface MsgWithdrawLease { - $type: "akash.market.v1beta2.MsgWithdrawLease"; - bidId: LeaseID | undefined; -} - -/** MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. */ -export interface MsgWithdrawLeaseResponse { - $type: "akash.market.v1beta2.MsgWithdrawLeaseResponse"; -} - -/** MsgCloseLease defines an SDK message for closing order */ -export interface MsgCloseLease { - $type: "akash.market.v1beta2.MsgCloseLease"; - leaseId: LeaseID | undefined; -} - -/** MsgCloseLeaseResponse defines the Msg/CloseLease response type. */ -export interface MsgCloseLeaseResponse { - $type: "akash.market.v1beta2.MsgCloseLeaseResponse"; -} - -function createBaseLeaseID(): LeaseID { - return { - $type: "akash.market.v1beta2.LeaseID", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - provider: "", - }; -} - -export const LeaseID = { - $type: "akash.market.v1beta2.LeaseID" as const, - - encode( - message: LeaseID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.provider !== "") { - writer.uint32(42).string(message.provider); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): LeaseID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseLeaseID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.provider = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): LeaseID { - return { - $type: LeaseID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, - provider: isSet(object.provider) - ? globalThis.String(object.provider) - : "", - }; - }, - - toJSON(message: LeaseID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - if (message.provider !== "") { - obj.provider = message.provider; - } - return obj; - }, - - create(base?: DeepPartial): LeaseID { - return LeaseID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): LeaseID { - const message = createBaseLeaseID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.provider = object.provider ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(LeaseID.$type, LeaseID); - -function createBaseLease(): Lease { - return { - $type: "akash.market.v1beta2.Lease", - leaseId: undefined, - state: 0, - price: undefined, - createdAt: Long.ZERO, - closedOn: Long.ZERO, - }; -} - -export const Lease = { - $type: "akash.market.v1beta2.Lease" as const, - - encode(message: Lease, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.leaseId !== undefined) { - LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.price !== undefined) { - DecCoin.encode(message.price, writer.uint32(26).fork()).ldelim(); - } - if (!message.createdAt.equals(Long.ZERO)) { - writer.uint32(32).int64(message.createdAt); - } - if (!message.closedOn.equals(Long.ZERO)) { - writer.uint32(40).int64(message.closedOn); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Lease { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseLease(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.leaseId = LeaseID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.price = DecCoin.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.createdAt = reader.int64() as Long; - continue; - case 5: - if (tag !== 40) { - break; - } - - message.closedOn = reader.int64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Lease { - return { - $type: Lease.$type, - leaseId: isSet(object.leaseId) - ? LeaseID.fromJSON(object.leaseId) - : undefined, - state: isSet(object.state) ? lease_StateFromJSON(object.state) : 0, - price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, - createdAt: isSet(object.createdAt) - ? Long.fromValue(object.createdAt) - : Long.ZERO, - closedOn: isSet(object.closedOn) - ? Long.fromValue(object.closedOn) - : Long.ZERO, - }; - }, - - toJSON(message: Lease): unknown { - const obj: any = {}; - if (message.leaseId !== undefined) { - obj.leaseId = LeaseID.toJSON(message.leaseId); - } - if (message.state !== 0) { - obj.state = lease_StateToJSON(message.state); - } - if (message.price !== undefined) { - obj.price = DecCoin.toJSON(message.price); - } - if (!message.createdAt.equals(Long.ZERO)) { - obj.createdAt = (message.createdAt || Long.ZERO).toString(); - } - if (!message.closedOn.equals(Long.ZERO)) { - obj.closedOn = (message.closedOn || Long.ZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): Lease { - return Lease.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Lease { - const message = createBaseLease(); - message.leaseId = - object.leaseId !== undefined && object.leaseId !== null - ? LeaseID.fromPartial(object.leaseId) - : undefined; - message.state = object.state ?? 0; - message.price = - object.price !== undefined && object.price !== null - ? DecCoin.fromPartial(object.price) - : undefined; - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? Long.fromValue(object.createdAt) - : Long.ZERO; - message.closedOn = - object.closedOn !== undefined && object.closedOn !== null - ? 
Long.fromValue(object.closedOn) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Lease.$type, Lease); - -function createBaseLeaseFilters(): LeaseFilters { - return { - $type: "akash.market.v1beta2.LeaseFilters", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - provider: "", - state: "", - }; -} - -export const LeaseFilters = { - $type: "akash.market.v1beta2.LeaseFilters" as const, - - encode( - message: LeaseFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.provider !== "") { - writer.uint32(42).string(message.provider); - } - if (message.state !== "") { - writer.uint32(50).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): LeaseFilters { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseLeaseFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.provider = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.state = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): LeaseFilters { - return { - $type: LeaseFilters.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, - provider: isSet(object.provider) - ? globalThis.String(object.provider) - : "", - state: isSet(object.state) ? globalThis.String(object.state) : "", - }; - }, - - toJSON(message: LeaseFilters): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - if (message.provider !== "") { - obj.provider = message.provider; - } - if (message.state !== "") { - obj.state = message.state; - } - return obj; - }, - - create(base?: DeepPartial): LeaseFilters { - return LeaseFilters.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): LeaseFilters { - const message = createBaseLeaseFilters(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.provider = object.provider ?? 
""; - message.state = object.state ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(LeaseFilters.$type, LeaseFilters); - -function createBaseMsgCreateLease(): MsgCreateLease { - return { $type: "akash.market.v1beta2.MsgCreateLease", bidId: undefined }; -} - -export const MsgCreateLease = { - $type: "akash.market.v1beta2.MsgCreateLease" as const, - - encode( - message: MsgCreateLease, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bidId !== undefined) { - BidID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateLease { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateLease(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bidId = BidID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCreateLease { - return { - $type: MsgCreateLease.$type, - bidId: isSet(object.bidId) ? BidID.fromJSON(object.bidId) : undefined, - }; - }, - - toJSON(message: MsgCreateLease): unknown { - const obj: any = {}; - if (message.bidId !== undefined) { - obj.bidId = BidID.toJSON(message.bidId); - } - return obj; - }, - - create(base?: DeepPartial): MsgCreateLease { - return MsgCreateLease.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCreateLease { - const message = createBaseMsgCreateLease(); - message.bidId = - object.bidId !== undefined && object.bidId !== null - ? BidID.fromPartial(object.bidId) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateLease.$type, MsgCreateLease); - -function createBaseMsgCreateLeaseResponse(): MsgCreateLeaseResponse { - return { $type: "akash.market.v1beta2.MsgCreateLeaseResponse" }; -} - -export const MsgCreateLeaseResponse = { - $type: "akash.market.v1beta2.MsgCreateLeaseResponse" as const, - - encode( - _: MsgCreateLeaseResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateLeaseResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateLeaseResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCreateLeaseResponse { - return { $type: MsgCreateLeaseResponse.$type }; - }, - - toJSON(_: MsgCreateLeaseResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgCreateLeaseResponse { - return MsgCreateLeaseResponse.fromPartial(base ?? 
{}); - }, - fromPartial(_: DeepPartial): MsgCreateLeaseResponse { - const message = createBaseMsgCreateLeaseResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateLeaseResponse.$type, MsgCreateLeaseResponse); - -function createBaseMsgWithdrawLease(): MsgWithdrawLease { - return { $type: "akash.market.v1beta2.MsgWithdrawLease", bidId: undefined }; -} - -export const MsgWithdrawLease = { - $type: "akash.market.v1beta2.MsgWithdrawLease" as const, - - encode( - message: MsgWithdrawLease, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bidId !== undefined) { - LeaseID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgWithdrawLease { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgWithdrawLease(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bidId = LeaseID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgWithdrawLease { - return { - $type: MsgWithdrawLease.$type, - bidId: isSet(object.bidId) ? LeaseID.fromJSON(object.bidId) : undefined, - }; - }, - - toJSON(message: MsgWithdrawLease): unknown { - const obj: any = {}; - if (message.bidId !== undefined) { - obj.bidId = LeaseID.toJSON(message.bidId); - } - return obj; - }, - - create(base?: DeepPartial): MsgWithdrawLease { - return MsgWithdrawLease.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgWithdrawLease { - const message = createBaseMsgWithdrawLease(); - message.bidId = - object.bidId !== undefined && object.bidId !== null - ? LeaseID.fromPartial(object.bidId) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgWithdrawLease.$type, MsgWithdrawLease); - -function createBaseMsgWithdrawLeaseResponse(): MsgWithdrawLeaseResponse { - return { $type: "akash.market.v1beta2.MsgWithdrawLeaseResponse" }; -} - -export const MsgWithdrawLeaseResponse = { - $type: "akash.market.v1beta2.MsgWithdrawLeaseResponse" as const, - - encode( - _: MsgWithdrawLeaseResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgWithdrawLeaseResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgWithdrawLeaseResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgWithdrawLeaseResponse { - return { $type: MsgWithdrawLeaseResponse.$type }; - }, - - toJSON(_: MsgWithdrawLeaseResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgWithdrawLeaseResponse { - return MsgWithdrawLeaseResponse.fromPartial(base ?? 
{}); - }, - fromPartial( - _: DeepPartial, - ): MsgWithdrawLeaseResponse { - const message = createBaseMsgWithdrawLeaseResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgWithdrawLeaseResponse.$type, - MsgWithdrawLeaseResponse, -); - -function createBaseMsgCloseLease(): MsgCloseLease { - return { $type: "akash.market.v1beta2.MsgCloseLease", leaseId: undefined }; -} - -export const MsgCloseLease = { - $type: "akash.market.v1beta2.MsgCloseLease" as const, - - encode( - message: MsgCloseLease, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.leaseId !== undefined) { - LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseLease { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseLease(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.leaseId = LeaseID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCloseLease { - return { - $type: MsgCloseLease.$type, - leaseId: isSet(object.leaseId) - ? LeaseID.fromJSON(object.leaseId) - : undefined, - }; - }, - - toJSON(message: MsgCloseLease): unknown { - const obj: any = {}; - if (message.leaseId !== undefined) { - obj.leaseId = LeaseID.toJSON(message.leaseId); - } - return obj; - }, - - create(base?: DeepPartial): MsgCloseLease { - return MsgCloseLease.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCloseLease { - const message = createBaseMsgCloseLease(); - message.leaseId = - object.leaseId !== undefined && object.leaseId !== null - ? LeaseID.fromPartial(object.leaseId) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseLease.$type, MsgCloseLease); - -function createBaseMsgCloseLeaseResponse(): MsgCloseLeaseResponse { - return { $type: "akash.market.v1beta2.MsgCloseLeaseResponse" }; -} - -export const MsgCloseLeaseResponse = { - $type: "akash.market.v1beta2.MsgCloseLeaseResponse" as const, - - encode( - _: MsgCloseLeaseResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCloseLeaseResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseLeaseResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCloseLeaseResponse { - return { $type: MsgCloseLeaseResponse.$type }; - }, - - toJSON(_: MsgCloseLeaseResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgCloseLeaseResponse { - return MsgCloseLeaseResponse.fromPartial(base ?? 
{}); - }, - fromPartial(_: DeepPartial): MsgCloseLeaseResponse { - const message = createBaseMsgCloseLeaseResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseLeaseResponse.$type, MsgCloseLeaseResponse); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta2/order.ts b/ts/src/generated/akash/market/v1beta2/order.ts deleted file mode 100644 index d77c7251..00000000 --- a/ts/src/generated/akash/market/v1beta2/order.ts +++ /dev/null @@ -1,502 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { GroupSpec } from "../../deployment/v1beta2/groupspec"; - -/** OrderID stores owner and all other seq numbers */ -export interface OrderID { - $type: "akash.market.v1beta2.OrderID"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; -} - -/** Order stores orderID, state of order and other details */ -export interface Order { - $type: "akash.market.v1beta2.Order"; - orderId: OrderID | undefined; - state: Order_State; - spec: GroupSpec | undefined; - createdAt: Long; -} - -/** State is an enum which refers to state of order */ -export enum Order_State { - /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ - invalid = 0, - /** open - OrderOpen denotes state for order open */ - open = 1, - /** active - OrderMatched denotes state for order matched */ - active = 2, - /** closed - OrderClosed denotes state for order lost */ - closed = 3, - UNRECOGNIZED = -1, -} - -export function order_StateFromJSON(object: any): Order_State { - switch (object) { - case 0: - case "invalid": - return Order_State.invalid; - case 1: - case "open": - return Order_State.open; - case 2: - case "active": - return Order_State.active; - case 3: - case "closed": - return Order_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Order_State.UNRECOGNIZED; - } -} - -export function order_StateToJSON(object: Order_State): string { - switch (object) { - case Order_State.invalid: - return "invalid"; - case Order_State.open: - return "open"; - case Order_State.active: - return "active"; - case Order_State.closed: - return "closed"; - case Order_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** OrderFilters defines flags for order list filter */ -export interface OrderFilters { - $type: "akash.market.v1beta2.OrderFilters"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - state: string; -} - -function createBaseOrderID(): OrderID { - return { - $type: "akash.market.v1beta2.OrderID", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - }; -} - -export const OrderID = { - $type: "akash.market.v1beta2.OrderID" as const, - - encode( - message: OrderID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): OrderID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseOrderID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): OrderID { - return { - $type: OrderID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? 
globalThis.Number(object.oseq) : 0, - }; - }, - - toJSON(message: OrderID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - return obj; - }, - - create(base?: DeepPartial): OrderID { - return OrderID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): OrderID { - const message = createBaseOrderID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(OrderID.$type, OrderID); - -function createBaseOrder(): Order { - return { - $type: "akash.market.v1beta2.Order", - orderId: undefined, - state: 0, - spec: undefined, - createdAt: Long.ZERO, - }; -} - -export const Order = { - $type: "akash.market.v1beta2.Order" as const, - - encode(message: Order, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.orderId !== undefined) { - OrderID.encode(message.orderId, writer.uint32(10).fork()).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.spec !== undefined) { - GroupSpec.encode(message.spec, writer.uint32(26).fork()).ldelim(); - } - if (!message.createdAt.equals(Long.ZERO)) { - writer.uint32(32).int64(message.createdAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Order { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseOrder(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.orderId = OrderID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.spec = GroupSpec.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.createdAt = reader.int64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Order { - return { - $type: Order.$type, - orderId: isSet(object.orderId) - ? OrderID.fromJSON(object.orderId) - : undefined, - state: isSet(object.state) ? order_StateFromJSON(object.state) : 0, - spec: isSet(object.spec) ? GroupSpec.fromJSON(object.spec) : undefined, - createdAt: isSet(object.createdAt) - ? Long.fromValue(object.createdAt) - : Long.ZERO, - }; - }, - - toJSON(message: Order): unknown { - const obj: any = {}; - if (message.orderId !== undefined) { - obj.orderId = OrderID.toJSON(message.orderId); - } - if (message.state !== 0) { - obj.state = order_StateToJSON(message.state); - } - if (message.spec !== undefined) { - obj.spec = GroupSpec.toJSON(message.spec); - } - if (!message.createdAt.equals(Long.ZERO)) { - obj.createdAt = (message.createdAt || Long.ZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): Order { - return Order.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): Order { - const message = createBaseOrder(); - message.orderId = - object.orderId !== undefined && object.orderId !== null - ? OrderID.fromPartial(object.orderId) - : undefined; - message.state = object.state ?? 0; - message.spec = - object.spec !== undefined && object.spec !== null - ? GroupSpec.fromPartial(object.spec) - : undefined; - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? Long.fromValue(object.createdAt) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Order.$type, Order); - -function createBaseOrderFilters(): OrderFilters { - return { - $type: "akash.market.v1beta2.OrderFilters", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - state: "", - }; -} - -export const OrderFilters = { - $type: "akash.market.v1beta2.OrderFilters" as const, - - encode( - message: OrderFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.state !== "") { - writer.uint32(42).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): OrderFilters { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseOrderFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.state = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): OrderFilters { - return { - $type: OrderFilters.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, - state: isSet(object.state) ? globalThis.String(object.state) : "", - }; - }, - - toJSON(message: OrderFilters): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - if (message.state !== "") { - obj.state = message.state; - } - return obj; - }, - - create(base?: DeepPartial): OrderFilters { - return OrderFilters.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): OrderFilters { - const message = createBaseOrderFilters(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? 
Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.state = object.state ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(OrderFilters.$type, OrderFilters); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta2/params.ts b/ts/src/generated/akash/market/v1beta2/params.ts deleted file mode 100644 index b2f82530..00000000 --- a/ts/src/generated/akash/market/v1beta2/params.ts +++ /dev/null @@ -1,136 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** Params is the params for the x/market module */ -export interface Params { - $type: "akash.market.v1beta2.Params"; - bidMinDeposit: Coin | undefined; - orderMaxBids: number; -} - -function createBaseParams(): Params { - return { - $type: "akash.market.v1beta2.Params", - bidMinDeposit: undefined, - orderMaxBids: 0, - }; -} - -export const Params = { - $type: "akash.market.v1beta2.Params" as const, - - encode( - message: Params, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bidMinDeposit !== undefined) { - Coin.encode(message.bidMinDeposit, writer.uint32(10).fork()).ldelim(); - } - if (message.orderMaxBids !== 0) { - writer.uint32(16).uint32(message.orderMaxBids); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Params { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseParams(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bidMinDeposit = Coin.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.orderMaxBids = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Params { - return { - $type: Params.$type, - bidMinDeposit: isSet(object.bidMinDeposit) - ? Coin.fromJSON(object.bidMinDeposit) - : undefined, - orderMaxBids: isSet(object.orderMaxBids) - ? globalThis.Number(object.orderMaxBids) - : 0, - }; - }, - - toJSON(message: Params): unknown { - const obj: any = {}; - if (message.bidMinDeposit !== undefined) { - obj.bidMinDeposit = Coin.toJSON(message.bidMinDeposit); - } - if (message.orderMaxBids !== 0) { - obj.orderMaxBids = Math.round(message.orderMaxBids); - } - return obj; - }, - - create(base?: DeepPartial): Params { - return Params.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Params { - const message = createBaseParams(); - message.bidMinDeposit = - object.bidMinDeposit !== undefined && object.bidMinDeposit !== null - ? 
Coin.fromPartial(object.bidMinDeposit) - : undefined; - message.orderMaxBids = object.orderMaxBids ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(Params.$type, Params); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta2/query.ts b/ts/src/generated/akash/market/v1beta2/query.ts deleted file mode 100644 index 6d783907..00000000 --- a/ts/src/generated/akash/market/v1beta2/query.ts +++ /dev/null @@ -1,1275 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Account, FractionalPayment } from "../../escrow/v1beta2/types"; -import { Bid, BidFilters, BidID } from "./bid"; -import { Lease, LeaseFilters, LeaseID } from "./lease"; -import { Order, OrderFilters, OrderID } from "./order"; - -/** QueryOrdersRequest is request type for the Query/Orders RPC method */ -export interface QueryOrdersRequest { - $type: "akash.market.v1beta2.QueryOrdersRequest"; - filters: OrderFilters | undefined; - pagination: PageRequest | undefined; -} - -/** QueryOrdersResponse is response type for the Query/Orders RPC method */ -export interface QueryOrdersResponse { - $type: "akash.market.v1beta2.QueryOrdersResponse"; - orders: Order[]; - pagination: PageResponse | undefined; -} - -/** QueryOrderRequest is request type for the Query/Order RPC method */ -export interface QueryOrderRequest { - $type: "akash.market.v1beta2.QueryOrderRequest"; - id: OrderID | undefined; -} - -/** QueryOrderResponse is response type for the Query/Order RPC method */ -export interface QueryOrderResponse { - $type: "akash.market.v1beta2.QueryOrderResponse"; - order: Order | undefined; -} - -/** QueryBidsRequest is request type for the Query/Bids RPC method */ -export interface QueryBidsRequest { - $type: "akash.market.v1beta2.QueryBidsRequest"; - filters: BidFilters | undefined; - pagination: PageRequest | undefined; -} - -/** QueryBidsResponse is response type for the Query/Bids RPC method */ -export interface QueryBidsResponse { - $type: "akash.market.v1beta2.QueryBidsResponse"; - bids: QueryBidResponse[]; - pagination: PageResponse | undefined; -} - -/** QueryBidRequest is request type for the Query/Bid RPC method */ -export interface QueryBidRequest { - $type: "akash.market.v1beta2.QueryBidRequest"; - id: BidID | undefined; -} - -/** QueryBidResponse is response type for the Query/Bid RPC method */ -export interface QueryBidResponse { - $type: "akash.market.v1beta2.QueryBidResponse"; - bid: Bid | undefined; - escrowAccount: Account | undefined; -} - -/** QueryLeasesRequest is request type for the Query/Leases RPC method */ -export interface QueryLeasesRequest { - $type: "akash.market.v1beta2.QueryLeasesRequest"; - filters: LeaseFilters | undefined; - pagination: PageRequest | undefined; -} - -/** QueryLeasesResponse is response type for the Query/Leases RPC method */ -export interface 
QueryLeasesResponse { - $type: "akash.market.v1beta2.QueryLeasesResponse"; - leases: QueryLeaseResponse[]; - pagination: PageResponse | undefined; -} - -/** QueryLeaseRequest is request type for the Query/Lease RPC method */ -export interface QueryLeaseRequest { - $type: "akash.market.v1beta2.QueryLeaseRequest"; - id: LeaseID | undefined; -} - -/** QueryLeaseResponse is response type for the Query/Lease RPC method */ -export interface QueryLeaseResponse { - $type: "akash.market.v1beta2.QueryLeaseResponse"; - lease: Lease | undefined; - escrowPayment: FractionalPayment | undefined; -} - -function createBaseQueryOrdersRequest(): QueryOrdersRequest { - return { - $type: "akash.market.v1beta2.QueryOrdersRequest", - filters: undefined, - pagination: undefined, - }; -} - -export const QueryOrdersRequest = { - $type: "akash.market.v1beta2.QueryOrdersRequest" as const, - - encode( - message: QueryOrdersRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.filters !== undefined) { - OrderFilters.encode(message.filters, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryOrdersRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryOrdersRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.filters = OrderFilters.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryOrdersRequest { - return { - $type: QueryOrdersRequest.$type, - filters: isSet(object.filters) - ? OrderFilters.fromJSON(object.filters) - : undefined, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryOrdersRequest): unknown { - const obj: any = {}; - if (message.filters !== undefined) { - obj.filters = OrderFilters.toJSON(message.filters); - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryOrdersRequest { - return QueryOrdersRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryOrdersRequest { - const message = createBaseQueryOrdersRequest(); - message.filters = - object.filters !== undefined && object.filters !== null - ? OrderFilters.fromPartial(object.filters) - : undefined; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? 
PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryOrdersRequest.$type, QueryOrdersRequest); - -function createBaseQueryOrdersResponse(): QueryOrdersResponse { - return { - $type: "akash.market.v1beta2.QueryOrdersResponse", - orders: [], - pagination: undefined, - }; -} - -export const QueryOrdersResponse = { - $type: "akash.market.v1beta2.QueryOrdersResponse" as const, - - encode( - message: QueryOrdersResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.orders) { - Order.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryOrdersResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryOrdersResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.orders.push(Order.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryOrdersResponse { - return { - $type: QueryOrdersResponse.$type, - orders: globalThis.Array.isArray(object?.orders) - ? object.orders.map((e: any) => Order.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryOrdersResponse): unknown { - const obj: any = {}; - if (message.orders?.length) { - obj.orders = message.orders.map((e) => Order.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryOrdersResponse { - return QueryOrdersResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryOrdersResponse { - const message = createBaseQueryOrdersResponse(); - message.orders = object.orders?.map((e) => Order.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryOrdersResponse.$type, QueryOrdersResponse); - -function createBaseQueryOrderRequest(): QueryOrderRequest { - return { $type: "akash.market.v1beta2.QueryOrderRequest", id: undefined }; -} - -export const QueryOrderRequest = { - $type: "akash.market.v1beta2.QueryOrderRequest" as const, - - encode( - message: QueryOrderRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - OrderID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryOrderRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryOrderRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = OrderID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryOrderRequest { - return { - $type: QueryOrderRequest.$type, - id: isSet(object.id) ? OrderID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryOrderRequest): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = OrderID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): QueryOrderRequest { - return QueryOrderRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryOrderRequest { - const message = createBaseQueryOrderRequest(); - message.id = - object.id !== undefined && object.id !== null - ? OrderID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryOrderRequest.$type, QueryOrderRequest); - -function createBaseQueryOrderResponse(): QueryOrderResponse { - return { $type: "akash.market.v1beta2.QueryOrderResponse", order: undefined }; -} - -export const QueryOrderResponse = { - $type: "akash.market.v1beta2.QueryOrderResponse" as const, - - encode( - message: QueryOrderResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.order !== undefined) { - Order.encode(message.order, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryOrderResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryOrderResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.order = Order.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryOrderResponse { - return { - $type: QueryOrderResponse.$type, - order: isSet(object.order) ? Order.fromJSON(object.order) : undefined, - }; - }, - - toJSON(message: QueryOrderResponse): unknown { - const obj: any = {}; - if (message.order !== undefined) { - obj.order = Order.toJSON(message.order); - } - return obj; - }, - - create(base?: DeepPartial): QueryOrderResponse { - return QueryOrderResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryOrderResponse { - const message = createBaseQueryOrderResponse(); - message.order = - object.order !== undefined && object.order !== null - ? 
Order.fromPartial(object.order) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryOrderResponse.$type, QueryOrderResponse); - -function createBaseQueryBidsRequest(): QueryBidsRequest { - return { - $type: "akash.market.v1beta2.QueryBidsRequest", - filters: undefined, - pagination: undefined, - }; -} - -export const QueryBidsRequest = { - $type: "akash.market.v1beta2.QueryBidsRequest" as const, - - encode( - message: QueryBidsRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.filters !== undefined) { - BidFilters.encode(message.filters, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryBidsRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryBidsRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.filters = BidFilters.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryBidsRequest { - return { - $type: QueryBidsRequest.$type, - filters: isSet(object.filters) - ? BidFilters.fromJSON(object.filters) - : undefined, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryBidsRequest): unknown { - const obj: any = {}; - if (message.filters !== undefined) { - obj.filters = BidFilters.toJSON(message.filters); - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryBidsRequest { - return QueryBidsRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryBidsRequest { - const message = createBaseQueryBidsRequest(); - message.filters = - object.filters !== undefined && object.filters !== null - ? BidFilters.fromPartial(object.filters) - : undefined; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryBidsRequest.$type, QueryBidsRequest); - -function createBaseQueryBidsResponse(): QueryBidsResponse { - return { - $type: "akash.market.v1beta2.QueryBidsResponse", - bids: [], - pagination: undefined, - }; -} - -export const QueryBidsResponse = { - $type: "akash.market.v1beta2.QueryBidsResponse" as const, - - encode( - message: QueryBidsResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.bids) { - QueryBidResponse.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryBidsResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryBidsResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bids.push(QueryBidResponse.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryBidsResponse { - return { - $type: QueryBidsResponse.$type, - bids: globalThis.Array.isArray(object?.bids) - ? object.bids.map((e: any) => QueryBidResponse.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryBidsResponse): unknown { - const obj: any = {}; - if (message.bids?.length) { - obj.bids = message.bids.map((e) => QueryBidResponse.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryBidsResponse { - return QueryBidsResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryBidsResponse { - const message = createBaseQueryBidsResponse(); - message.bids = - object.bids?.map((e) => QueryBidResponse.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryBidsResponse.$type, QueryBidsResponse); - -function createBaseQueryBidRequest(): QueryBidRequest { - return { $type: "akash.market.v1beta2.QueryBidRequest", id: undefined }; -} - -export const QueryBidRequest = { - $type: "akash.market.v1beta2.QueryBidRequest" as const, - - encode( - message: QueryBidRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - BidID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryBidRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryBidRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = BidID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryBidRequest { - return { - $type: QueryBidRequest.$type, - id: isSet(object.id) ? BidID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryBidRequest): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = BidID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): QueryBidRequest { - return QueryBidRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryBidRequest { - const message = createBaseQueryBidRequest(); - message.id = - object.id !== undefined && object.id !== null - ? 
BidID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryBidRequest.$type, QueryBidRequest); - -function createBaseQueryBidResponse(): QueryBidResponse { - return { - $type: "akash.market.v1beta2.QueryBidResponse", - bid: undefined, - escrowAccount: undefined, - }; -} - -export const QueryBidResponse = { - $type: "akash.market.v1beta2.QueryBidResponse" as const, - - encode( - message: QueryBidResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bid !== undefined) { - Bid.encode(message.bid, writer.uint32(10).fork()).ldelim(); - } - if (message.escrowAccount !== undefined) { - Account.encode(message.escrowAccount, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryBidResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryBidResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bid = Bid.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.escrowAccount = Account.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryBidResponse { - return { - $type: QueryBidResponse.$type, - bid: isSet(object.bid) ? Bid.fromJSON(object.bid) : undefined, - escrowAccount: isSet(object.escrowAccount) - ? Account.fromJSON(object.escrowAccount) - : undefined, - }; - }, - - toJSON(message: QueryBidResponse): unknown { - const obj: any = {}; - if (message.bid !== undefined) { - obj.bid = Bid.toJSON(message.bid); - } - if (message.escrowAccount !== undefined) { - obj.escrowAccount = Account.toJSON(message.escrowAccount); - } - return obj; - }, - - create(base?: DeepPartial): QueryBidResponse { - return QueryBidResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryBidResponse { - const message = createBaseQueryBidResponse(); - message.bid = - object.bid !== undefined && object.bid !== null - ? Bid.fromPartial(object.bid) - : undefined; - message.escrowAccount = - object.escrowAccount !== undefined && object.escrowAccount !== null - ? Account.fromPartial(object.escrowAccount) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryBidResponse.$type, QueryBidResponse); - -function createBaseQueryLeasesRequest(): QueryLeasesRequest { - return { - $type: "akash.market.v1beta2.QueryLeasesRequest", - filters: undefined, - pagination: undefined, - }; -} - -export const QueryLeasesRequest = { - $type: "akash.market.v1beta2.QueryLeasesRequest" as const, - - encode( - message: QueryLeasesRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.filters !== undefined) { - LeaseFilters.encode(message.filters, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryLeasesRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryLeasesRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.filters = LeaseFilters.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryLeasesRequest { - return { - $type: QueryLeasesRequest.$type, - filters: isSet(object.filters) - ? LeaseFilters.fromJSON(object.filters) - : undefined, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryLeasesRequest): unknown { - const obj: any = {}; - if (message.filters !== undefined) { - obj.filters = LeaseFilters.toJSON(message.filters); - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryLeasesRequest { - return QueryLeasesRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryLeasesRequest { - const message = createBaseQueryLeasesRequest(); - message.filters = - object.filters !== undefined && object.filters !== null - ? LeaseFilters.fromPartial(object.filters) - : undefined; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryLeasesRequest.$type, QueryLeasesRequest); - -function createBaseQueryLeasesResponse(): QueryLeasesResponse { - return { - $type: "akash.market.v1beta2.QueryLeasesResponse", - leases: [], - pagination: undefined, - }; -} - -export const QueryLeasesResponse = { - $type: "akash.market.v1beta2.QueryLeasesResponse" as const, - - encode( - message: QueryLeasesResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.leases) { - QueryLeaseResponse.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryLeasesResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryLeasesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.leases.push( - QueryLeaseResponse.decode(reader, reader.uint32()), - ); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryLeasesResponse { - return { - $type: QueryLeasesResponse.$type, - leases: globalThis.Array.isArray(object?.leases) - ? object.leases.map((e: any) => QueryLeaseResponse.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? 
PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryLeasesResponse): unknown { - const obj: any = {}; - if (message.leases?.length) { - obj.leases = message.leases.map((e) => QueryLeaseResponse.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryLeasesResponse { - return QueryLeasesResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryLeasesResponse { - const message = createBaseQueryLeasesResponse(); - message.leases = - object.leases?.map((e) => QueryLeaseResponse.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryLeasesResponse.$type, QueryLeasesResponse); - -function createBaseQueryLeaseRequest(): QueryLeaseRequest { - return { $type: "akash.market.v1beta2.QueryLeaseRequest", id: undefined }; -} - -export const QueryLeaseRequest = { - $type: "akash.market.v1beta2.QueryLeaseRequest" as const, - - encode( - message: QueryLeaseRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - LeaseID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryLeaseRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryLeaseRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = LeaseID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryLeaseRequest { - return { - $type: QueryLeaseRequest.$type, - id: isSet(object.id) ? LeaseID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryLeaseRequest): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = LeaseID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): QueryLeaseRequest { - return QueryLeaseRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryLeaseRequest { - const message = createBaseQueryLeaseRequest(); - message.id = - object.id !== undefined && object.id !== null - ? LeaseID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryLeaseRequest.$type, QueryLeaseRequest); - -function createBaseQueryLeaseResponse(): QueryLeaseResponse { - return { - $type: "akash.market.v1beta2.QueryLeaseResponse", - lease: undefined, - escrowPayment: undefined, - }; -} - -export const QueryLeaseResponse = { - $type: "akash.market.v1beta2.QueryLeaseResponse" as const, - - encode( - message: QueryLeaseResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.lease !== undefined) { - Lease.encode(message.lease, writer.uint32(10).fork()).ldelim(); - } - if (message.escrowPayment !== undefined) { - FractionalPayment.encode( - message.escrowPayment, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryLeaseResponse { - const reader = - input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryLeaseResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.lease = Lease.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.escrowPayment = FractionalPayment.decode( - reader, - reader.uint32(), - ); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryLeaseResponse { - return { - $type: QueryLeaseResponse.$type, - lease: isSet(object.lease) ? Lease.fromJSON(object.lease) : undefined, - escrowPayment: isSet(object.escrowPayment) - ? FractionalPayment.fromJSON(object.escrowPayment) - : undefined, - }; - }, - - toJSON(message: QueryLeaseResponse): unknown { - const obj: any = {}; - if (message.lease !== undefined) { - obj.lease = Lease.toJSON(message.lease); - } - if (message.escrowPayment !== undefined) { - obj.escrowPayment = FractionalPayment.toJSON(message.escrowPayment); - } - return obj; - }, - - create(base?: DeepPartial): QueryLeaseResponse { - return QueryLeaseResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryLeaseResponse { - const message = createBaseQueryLeaseResponse(); - message.lease = - object.lease !== undefined && object.lease !== null - ? Lease.fromPartial(object.lease) - : undefined; - message.escrowPayment = - object.escrowPayment !== undefined && object.escrowPayment !== null - ? FractionalPayment.fromPartial(object.escrowPayment) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryLeaseResponse.$type, QueryLeaseResponse); - -/** Query defines the gRPC querier service */ -export interface Query { - /** Orders queries orders with filters */ - Orders(request: QueryOrdersRequest): Promise; - /** Order queries order details */ - Order(request: QueryOrderRequest): Promise; - /** Bids queries bids with filters */ - Bids(request: QueryBidsRequest): Promise; - /** Bid queries bid details */ - Bid(request: QueryBidRequest): Promise; - /** Leases queries leases with filters */ - Leases(request: QueryLeasesRequest): Promise; - /** Lease queries lease details */ - Lease(request: QueryLeaseRequest): Promise; -} - -export const QueryServiceName = "akash.market.v1beta2.Query"; -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || QueryServiceName; - this.rpc = rpc; - this.Orders = this.Orders.bind(this); - this.Order = this.Order.bind(this); - this.Bids = this.Bids.bind(this); - this.Bid = this.Bid.bind(this); - this.Leases = this.Leases.bind(this); - this.Lease = this.Lease.bind(this); - } - Orders(request: QueryOrdersRequest): Promise { - const data = QueryOrdersRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Orders", data); - return promise.then((data) => - QueryOrdersResponse.decode(_m0.Reader.create(data)), - ); - } - - Order(request: QueryOrderRequest): Promise { - const data = QueryOrderRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Order", data); - return promise.then((data) => - QueryOrderResponse.decode(_m0.Reader.create(data)), - ); - } - - Bids(request: QueryBidsRequest): Promise { - const data = 
QueryBidsRequest.encode(request).finish();
-    const promise = this.rpc.request(this.service, "Bids", data);
-    return promise.then((data) =>
-      QueryBidsResponse.decode(_m0.Reader.create(data)),
-    );
-  }
-
-  Bid(request: QueryBidRequest): Promise<QueryBidResponse> {
-    const data = QueryBidRequest.encode(request).finish();
-    const promise = this.rpc.request(this.service, "Bid", data);
-    return promise.then((data) =>
-      QueryBidResponse.decode(_m0.Reader.create(data)),
-    );
-  }
-
-  Leases(request: QueryLeasesRequest): Promise<QueryLeasesResponse> {
-    const data = QueryLeasesRequest.encode(request).finish();
-    const promise = this.rpc.request(this.service, "Leases", data);
-    return promise.then((data) =>
-      QueryLeasesResponse.decode(_m0.Reader.create(data)),
-    );
-  }
-
-  Lease(request: QueryLeaseRequest): Promise<QueryLeaseResponse> {
-    const data = QueryLeaseRequest.encode(request).finish();
-    const promise = this.rpc.request(this.service, "Lease", data);
-    return promise.then((data) =>
-      QueryLeaseResponse.decode(_m0.Reader.create(data)),
-    );
-  }
-}
-
-interface Rpc {
-  request(
-    service: string,
-    method: string,
-    data: Uint8Array,
-  ): Promise<Uint8Array>;
-}
-
-type Builtin =
-  | Date
-  | Function
-  | Uint8Array
-  | string
-  | number
-  | boolean
-  | undefined;
-
-type DeepPartial<T> = T extends Builtin
-  ? T
-  : T extends Long
-    ? string | number | Long
-    : T extends globalThis.Array<infer U>
-      ? globalThis.Array<DeepPartial<U>>
-      : T extends ReadonlyArray<infer U>
-        ? ReadonlyArray<DeepPartial<U>>
-        : T extends {}
-          ? { [K in Exclude<keyof T, "$type">]?: DeepPartial<T[K]> }
-          : Partial<T>;
-
-if (_m0.util.Long !== Long) {
-  _m0.util.Long = Long as any;
-  _m0.configure();
-}
-
-function isSet(value: any): boolean {
-  return value !== null && value !== undefined;
-}
diff --git a/ts/src/generated/akash/market/v1beta2/service.grpc-js.ts b/ts/src/generated/akash/market/v1beta2/service.grpc-js.ts
deleted file mode 100644
index b4f80934..00000000
--- a/ts/src/generated/akash/market/v1beta2/service.grpc-js.ts
+++ /dev/null
@@ -1,252 +0,0 @@
-/* eslint-disable */
-import {
-  ChannelCredentials,
-  Client,
-  makeGenericClientConstructor,
-  Metadata,
-} from "@grpc/grpc-js";
-import type {
-  CallOptions,
-  ClientOptions,
-  ClientUnaryCall,
-  handleUnaryCall,
-  ServiceError,
-  UntypedServiceImplementation,
-} from "@grpc/grpc-js";
-import {
-  MsgCloseBid,
-  MsgCloseBidResponse,
-  MsgCreateBid,
-  MsgCreateBidResponse,
-} from "./bid";
-import {
-  MsgCloseLease,
-  MsgCloseLeaseResponse,
-  MsgCreateLease,
-  MsgCreateLeaseResponse,
-  MsgWithdrawLease,
-  MsgWithdrawLeaseResponse,
-} from "./lease";
-
-export const protobufPackage = "akash.market.v1beta2";
-
-/** Msg defines the market Msg service */
-export type MsgService = typeof MsgService;
-export const MsgService = {
-  /** CreateBid defines a method to create a bid given proper inputs. */
-  createBid: {
-    path: "/akash.market.v1beta2.Msg/CreateBid",
-    requestStream: false,
-    responseStream: false,
-    requestSerialize: (value: MsgCreateBid) =>
-      Buffer.from(MsgCreateBid.encode(value).finish()),
-    requestDeserialize: (value: Buffer) => MsgCreateBid.decode(value),
-    responseSerialize: (value: MsgCreateBidResponse) =>
-      Buffer.from(MsgCreateBidResponse.encode(value).finish()),
-    responseDeserialize: (value: Buffer) => MsgCreateBidResponse.decode(value),
-  },
-  /** CloseBid defines a method to close a bid given proper inputs.
*/ - closeBid: { - path: "/akash.market.v1beta2.Msg/CloseBid", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCloseBid) => - Buffer.from(MsgCloseBid.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCloseBid.decode(value), - responseSerialize: (value: MsgCloseBidResponse) => - Buffer.from(MsgCloseBidResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => MsgCloseBidResponse.decode(value), - }, - /** WithdrawLease withdraws accrued funds from the lease payment */ - withdrawLease: { - path: "/akash.market.v1beta2.Msg/WithdrawLease", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgWithdrawLease) => - Buffer.from(MsgWithdrawLease.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgWithdrawLease.decode(value), - responseSerialize: (value: MsgWithdrawLeaseResponse) => - Buffer.from(MsgWithdrawLeaseResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => - MsgWithdrawLeaseResponse.decode(value), - }, - /** CreateLease creates a new lease */ - createLease: { - path: "/akash.market.v1beta2.Msg/CreateLease", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCreateLease) => - Buffer.from(MsgCreateLease.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCreateLease.decode(value), - responseSerialize: (value: MsgCreateLeaseResponse) => - Buffer.from(MsgCreateLeaseResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => - MsgCreateLeaseResponse.decode(value), - }, - /** CloseLease defines a method to close an order given proper inputs. */ - closeLease: { - path: "/akash.market.v1beta2.Msg/CloseLease", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCloseLease) => - Buffer.from(MsgCloseLease.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCloseLease.decode(value), - responseSerialize: (value: MsgCloseLeaseResponse) => - Buffer.from(MsgCloseLeaseResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => MsgCloseLeaseResponse.decode(value), - }, -} as const; - -export interface MsgServer extends UntypedServiceImplementation { - /** CreateBid defines a method to create a bid given proper inputs. */ - createBid: handleUnaryCall; - /** CloseBid defines a method to close a bid given proper inputs. */ - closeBid: handleUnaryCall; - /** WithdrawLease withdraws accrued funds from the lease payment */ - withdrawLease: handleUnaryCall; - /** CreateLease creates a new lease */ - createLease: handleUnaryCall; - /** CloseLease defines a method to close an order given proper inputs. */ - closeLease: handleUnaryCall; -} - -export interface MsgClient extends Client { - /** CreateBid defines a method to create a bid given proper inputs. */ - createBid( - request: MsgCreateBid, - callback: ( - error: ServiceError | null, - response: MsgCreateBidResponse, - ) => void, - ): ClientUnaryCall; - createBid( - request: MsgCreateBid, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCreateBidResponse, - ) => void, - ): ClientUnaryCall; - createBid( - request: MsgCreateBid, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCreateBidResponse, - ) => void, - ): ClientUnaryCall; - /** CloseBid defines a method to close a bid given proper inputs. 
*/ - closeBid( - request: MsgCloseBid, - callback: ( - error: ServiceError | null, - response: MsgCloseBidResponse, - ) => void, - ): ClientUnaryCall; - closeBid( - request: MsgCloseBid, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCloseBidResponse, - ) => void, - ): ClientUnaryCall; - closeBid( - request: MsgCloseBid, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCloseBidResponse, - ) => void, - ): ClientUnaryCall; - /** WithdrawLease withdraws accrued funds from the lease payment */ - withdrawLease( - request: MsgWithdrawLease, - callback: ( - error: ServiceError | null, - response: MsgWithdrawLeaseResponse, - ) => void, - ): ClientUnaryCall; - withdrawLease( - request: MsgWithdrawLease, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgWithdrawLeaseResponse, - ) => void, - ): ClientUnaryCall; - withdrawLease( - request: MsgWithdrawLease, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgWithdrawLeaseResponse, - ) => void, - ): ClientUnaryCall; - /** CreateLease creates a new lease */ - createLease( - request: MsgCreateLease, - callback: ( - error: ServiceError | null, - response: MsgCreateLeaseResponse, - ) => void, - ): ClientUnaryCall; - createLease( - request: MsgCreateLease, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCreateLeaseResponse, - ) => void, - ): ClientUnaryCall; - createLease( - request: MsgCreateLease, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCreateLeaseResponse, - ) => void, - ): ClientUnaryCall; - /** CloseLease defines a method to close an order given proper inputs. */ - closeLease( - request: MsgCloseLease, - callback: ( - error: ServiceError | null, - response: MsgCloseLeaseResponse, - ) => void, - ): ClientUnaryCall; - closeLease( - request: MsgCloseLease, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCloseLeaseResponse, - ) => void, - ): ClientUnaryCall; - closeLease( - request: MsgCloseLease, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCloseLeaseResponse, - ) => void, - ): ClientUnaryCall; -} - -export const MsgClient = makeGenericClientConstructor( - MsgService, - "akash.market.v1beta2.Msg", -) as unknown as { - new ( - address: string, - credentials: ChannelCredentials, - options?: Partial, - ): MsgClient; - service: typeof MsgService; - serviceName: string; -}; diff --git a/ts/src/generated/akash/market/v1beta2/service.ts b/ts/src/generated/akash/market/v1beta2/service.ts deleted file mode 100644 index e0935c2c..00000000 --- a/ts/src/generated/akash/market/v1beta2/service.ts +++ /dev/null @@ -1,92 +0,0 @@ -/* eslint-disable */ -import _m0 from "protobufjs/minimal"; -import { - MsgCloseBid, - MsgCloseBidResponse, - MsgCreateBid, - MsgCreateBidResponse, -} from "./bid"; -import { - MsgCloseLease, - MsgCloseLeaseResponse, - MsgCreateLease, - MsgCreateLeaseResponse, - MsgWithdrawLease, - MsgWithdrawLeaseResponse, -} from "./lease"; - -/** Msg defines the market Msg service */ -export interface Msg { - /** CreateBid defines a method to create a bid given proper inputs. */ - CreateBid(request: MsgCreateBid): Promise; - /** CloseBid defines a method to close a bid given proper inputs. 
*/ - CloseBid(request: MsgCloseBid): Promise; - /** WithdrawLease withdraws accrued funds from the lease payment */ - WithdrawLease(request: MsgWithdrawLease): Promise; - /** CreateLease creates a new lease */ - CreateLease(request: MsgCreateLease): Promise; - /** CloseLease defines a method to close an order given proper inputs. */ - CloseLease(request: MsgCloseLease): Promise; -} - -export const MsgServiceName = "akash.market.v1beta2.Msg"; -export class MsgClientImpl implements Msg { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || MsgServiceName; - this.rpc = rpc; - this.CreateBid = this.CreateBid.bind(this); - this.CloseBid = this.CloseBid.bind(this); - this.WithdrawLease = this.WithdrawLease.bind(this); - this.CreateLease = this.CreateLease.bind(this); - this.CloseLease = this.CloseLease.bind(this); - } - CreateBid(request: MsgCreateBid): Promise { - const data = MsgCreateBid.encode(request).finish(); - const promise = this.rpc.request(this.service, "CreateBid", data); - return promise.then((data) => - MsgCreateBidResponse.decode(_m0.Reader.create(data)), - ); - } - - CloseBid(request: MsgCloseBid): Promise { - const data = MsgCloseBid.encode(request).finish(); - const promise = this.rpc.request(this.service, "CloseBid", data); - return promise.then((data) => - MsgCloseBidResponse.decode(_m0.Reader.create(data)), - ); - } - - WithdrawLease(request: MsgWithdrawLease): Promise { - const data = MsgWithdrawLease.encode(request).finish(); - const promise = this.rpc.request(this.service, "WithdrawLease", data); - return promise.then((data) => - MsgWithdrawLeaseResponse.decode(_m0.Reader.create(data)), - ); - } - - CreateLease(request: MsgCreateLease): Promise { - const data = MsgCreateLease.encode(request).finish(); - const promise = this.rpc.request(this.service, "CreateLease", data); - return promise.then((data) => - MsgCreateLeaseResponse.decode(_m0.Reader.create(data)), - ); - } - - CloseLease(request: MsgCloseLease): Promise { - const data = MsgCloseLease.encode(request).finish(); - const promise = this.rpc.request(this.service, "CloseLease", data); - return promise.then((data) => - MsgCloseLeaseResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} diff --git a/ts/src/generated/akash/market/v1beta3/bid.ts b/ts/src/generated/akash/market/v1beta3/bid.ts deleted file mode 100644 index ac659f56..00000000 --- a/ts/src/generated/akash/market/v1beta3/bid.ts +++ /dev/null @@ -1,879 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin, DecCoin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { OrderID } from "./order"; - -/** MsgCreateBid defines an SDK message for creating Bid */ -export interface MsgCreateBid { - $type: "akash.market.v1beta3.MsgCreateBid"; - order: OrderID | undefined; - provider: string; - price: DecCoin | undefined; - deposit: Coin | undefined; -} - -/** MsgCreateBidResponse defines the Msg/CreateBid response type. */ -export interface MsgCreateBidResponse { - $type: "akash.market.v1beta3.MsgCreateBidResponse"; -} - -/** MsgCloseBid defines an SDK message for closing bid */ -export interface MsgCloseBid { - $type: "akash.market.v1beta3.MsgCloseBid"; - bidId: BidID | undefined; -} - -/** MsgCloseBidResponse defines the Msg/CloseBid response type. 
*/ -export interface MsgCloseBidResponse { - $type: "akash.market.v1beta3.MsgCloseBidResponse"; -} - -/** - * BidID stores owner and all other seq numbers - * A successful bid becomes a Lease(ID). - */ -export interface BidID { - $type: "akash.market.v1beta3.BidID"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - provider: string; -} - -/** Bid stores BidID, state of bid and price */ -export interface Bid { - $type: "akash.market.v1beta3.Bid"; - bidId: BidID | undefined; - state: Bid_State; - price: DecCoin | undefined; - createdAt: Long; -} - -/** State is an enum which refers to state of bid */ -export enum Bid_State { - /** invalid - Prefix should start with 0 in enum. So declaring dummy state */ - invalid = 0, - /** open - BidOpen denotes state for bid open */ - open = 1, - /** active - BidMatched denotes state for bid open */ - active = 2, - /** lost - BidLost denotes state for bid lost */ - lost = 3, - /** closed - BidClosed denotes state for bid closed */ - closed = 4, - UNRECOGNIZED = -1, -} - -export function bid_StateFromJSON(object: any): Bid_State { - switch (object) { - case 0: - case "invalid": - return Bid_State.invalid; - case 1: - case "open": - return Bid_State.open; - case 2: - case "active": - return Bid_State.active; - case 3: - case "lost": - return Bid_State.lost; - case 4: - case "closed": - return Bid_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Bid_State.UNRECOGNIZED; - } -} - -export function bid_StateToJSON(object: Bid_State): string { - switch (object) { - case Bid_State.invalid: - return "invalid"; - case Bid_State.open: - return "open"; - case Bid_State.active: - return "active"; - case Bid_State.lost: - return "lost"; - case Bid_State.closed: - return "closed"; - case Bid_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** BidFilters defines flags for bid list filter */ -export interface BidFilters { - $type: "akash.market.v1beta3.BidFilters"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - provider: string; - state: string; -} - -function createBaseMsgCreateBid(): MsgCreateBid { - return { - $type: "akash.market.v1beta3.MsgCreateBid", - order: undefined, - provider: "", - price: undefined, - deposit: undefined, - }; -} - -export const MsgCreateBid = { - $type: "akash.market.v1beta3.MsgCreateBid" as const, - - encode( - message: MsgCreateBid, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.order !== undefined) { - OrderID.encode(message.order, writer.uint32(10).fork()).ldelim(); - } - if (message.provider !== "") { - writer.uint32(18).string(message.provider); - } - if (message.price !== undefined) { - DecCoin.encode(message.price, writer.uint32(26).fork()).ldelim(); - } - if (message.deposit !== undefined) { - Coin.encode(message.deposit, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateBid { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCreateBid(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.order = OrderID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.provider = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.price = DecCoin.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.deposit = Coin.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCreateBid { - return { - $type: MsgCreateBid.$type, - order: isSet(object.order) ? OrderID.fromJSON(object.order) : undefined, - provider: isSet(object.provider) - ? globalThis.String(object.provider) - : "", - price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, - deposit: isSet(object.deposit) - ? Coin.fromJSON(object.deposit) - : undefined, - }; - }, - - toJSON(message: MsgCreateBid): unknown { - const obj: any = {}; - if (message.order !== undefined) { - obj.order = OrderID.toJSON(message.order); - } - if (message.provider !== "") { - obj.provider = message.provider; - } - if (message.price !== undefined) { - obj.price = DecCoin.toJSON(message.price); - } - if (message.deposit !== undefined) { - obj.deposit = Coin.toJSON(message.deposit); - } - return obj; - }, - - create(base?: DeepPartial): MsgCreateBid { - return MsgCreateBid.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCreateBid { - const message = createBaseMsgCreateBid(); - message.order = - object.order !== undefined && object.order !== null - ? OrderID.fromPartial(object.order) - : undefined; - message.provider = object.provider ?? ""; - message.price = - object.price !== undefined && object.price !== null - ? DecCoin.fromPartial(object.price) - : undefined; - message.deposit = - object.deposit !== undefined && object.deposit !== null - ? Coin.fromPartial(object.deposit) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateBid.$type, MsgCreateBid); - -function createBaseMsgCreateBidResponse(): MsgCreateBidResponse { - return { $type: "akash.market.v1beta3.MsgCreateBidResponse" }; -} - -export const MsgCreateBidResponse = { - $type: "akash.market.v1beta3.MsgCreateBidResponse" as const, - - encode( - _: MsgCreateBidResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateBidResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateBidResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCreateBidResponse { - return { $type: MsgCreateBidResponse.$type }; - }, - - toJSON(_: MsgCreateBidResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgCreateBidResponse { - return MsgCreateBidResponse.fromPartial(base ?? 
{}); - }, - fromPartial(_: DeepPartial): MsgCreateBidResponse { - const message = createBaseMsgCreateBidResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateBidResponse.$type, MsgCreateBidResponse); - -function createBaseMsgCloseBid(): MsgCloseBid { - return { $type: "akash.market.v1beta3.MsgCloseBid", bidId: undefined }; -} - -export const MsgCloseBid = { - $type: "akash.market.v1beta3.MsgCloseBid" as const, - - encode( - message: MsgCloseBid, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bidId !== undefined) { - BidID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseBid { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseBid(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bidId = BidID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCloseBid { - return { - $type: MsgCloseBid.$type, - bidId: isSet(object.bidId) ? BidID.fromJSON(object.bidId) : undefined, - }; - }, - - toJSON(message: MsgCloseBid): unknown { - const obj: any = {}; - if (message.bidId !== undefined) { - obj.bidId = BidID.toJSON(message.bidId); - } - return obj; - }, - - create(base?: DeepPartial): MsgCloseBid { - return MsgCloseBid.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCloseBid { - const message = createBaseMsgCloseBid(); - message.bidId = - object.bidId !== undefined && object.bidId !== null - ? BidID.fromPartial(object.bidId) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseBid.$type, MsgCloseBid); - -function createBaseMsgCloseBidResponse(): MsgCloseBidResponse { - return { $type: "akash.market.v1beta3.MsgCloseBidResponse" }; -} - -export const MsgCloseBidResponse = { - $type: "akash.market.v1beta3.MsgCloseBidResponse" as const, - - encode( - _: MsgCloseBidResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseBidResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseBidResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCloseBidResponse { - return { $type: MsgCloseBidResponse.$type }; - }, - - toJSON(_: MsgCloseBidResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgCloseBidResponse { - return MsgCloseBidResponse.fromPartial(base ?? 
{}); - }, - fromPartial(_: DeepPartial): MsgCloseBidResponse { - const message = createBaseMsgCloseBidResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseBidResponse.$type, MsgCloseBidResponse); - -function createBaseBidID(): BidID { - return { - $type: "akash.market.v1beta3.BidID", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - provider: "", - }; -} - -export const BidID = { - $type: "akash.market.v1beta3.BidID" as const, - - encode(message: BidID, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.provider !== "") { - writer.uint32(42).string(message.provider); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): BidID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseBidID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.provider = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): BidID { - return { - $type: BidID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, - provider: isSet(object.provider) - ? globalThis.String(object.provider) - : "", - }; - }, - - toJSON(message: BidID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - if (message.provider !== "") { - obj.provider = message.provider; - } - return obj; - }, - - create(base?: DeepPartial): BidID { - return BidID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): BidID { - const message = createBaseBidID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.provider = object.provider ?? 
""; - return message; - }, -}; - -messageTypeRegistry.set(BidID.$type, BidID); - -function createBaseBid(): Bid { - return { - $type: "akash.market.v1beta3.Bid", - bidId: undefined, - state: 0, - price: undefined, - createdAt: Long.ZERO, - }; -} - -export const Bid = { - $type: "akash.market.v1beta3.Bid" as const, - - encode(message: Bid, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.bidId !== undefined) { - BidID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.price !== undefined) { - DecCoin.encode(message.price, writer.uint32(26).fork()).ldelim(); - } - if (!message.createdAt.equals(Long.ZERO)) { - writer.uint32(32).int64(message.createdAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Bid { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseBid(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bidId = BidID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.price = DecCoin.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.createdAt = reader.int64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Bid { - return { - $type: Bid.$type, - bidId: isSet(object.bidId) ? BidID.fromJSON(object.bidId) : undefined, - state: isSet(object.state) ? bid_StateFromJSON(object.state) : 0, - price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, - createdAt: isSet(object.createdAt) - ? Long.fromValue(object.createdAt) - : Long.ZERO, - }; - }, - - toJSON(message: Bid): unknown { - const obj: any = {}; - if (message.bidId !== undefined) { - obj.bidId = BidID.toJSON(message.bidId); - } - if (message.state !== 0) { - obj.state = bid_StateToJSON(message.state); - } - if (message.price !== undefined) { - obj.price = DecCoin.toJSON(message.price); - } - if (!message.createdAt.equals(Long.ZERO)) { - obj.createdAt = (message.createdAt || Long.ZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): Bid { - return Bid.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Bid { - const message = createBaseBid(); - message.bidId = - object.bidId !== undefined && object.bidId !== null - ? BidID.fromPartial(object.bidId) - : undefined; - message.state = object.state ?? 0; - message.price = - object.price !== undefined && object.price !== null - ? DecCoin.fromPartial(object.price) - : undefined; - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? 
Long.fromValue(object.createdAt) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Bid.$type, Bid); - -function createBaseBidFilters(): BidFilters { - return { - $type: "akash.market.v1beta3.BidFilters", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - provider: "", - state: "", - }; -} - -export const BidFilters = { - $type: "akash.market.v1beta3.BidFilters" as const, - - encode( - message: BidFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.provider !== "") { - writer.uint32(42).string(message.provider); - } - if (message.state !== "") { - writer.uint32(50).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): BidFilters { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseBidFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.provider = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.state = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): BidFilters { - return { - $type: BidFilters.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, - provider: isSet(object.provider) - ? globalThis.String(object.provider) - : "", - state: isSet(object.state) ? globalThis.String(object.state) : "", - }; - }, - - toJSON(message: BidFilters): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - if (message.provider !== "") { - obj.provider = message.provider; - } - if (message.state !== "") { - obj.state = message.state; - } - return obj; - }, - - create(base?: DeepPartial): BidFilters { - return BidFilters.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): BidFilters { - const message = createBaseBidFilters(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.provider = object.provider ?? ""; - message.state = object.state ?? 
""; - return message; - }, -}; - -messageTypeRegistry.set(BidFilters.$type, BidFilters); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta3/genesis.ts b/ts/src/generated/akash/market/v1beta3/genesis.ts deleted file mode 100644 index 21dd74e0..00000000 --- a/ts/src/generated/akash/market/v1beta3/genesis.ts +++ /dev/null @@ -1,175 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Bid } from "./bid"; -import { Lease } from "./lease"; -import { Order } from "./order"; -import { Params } from "./params"; - -/** GenesisState defines the basic genesis state used by market module */ -export interface GenesisState { - $type: "akash.market.v1beta3.GenesisState"; - params: Params | undefined; - orders: Order[]; - leases: Lease[]; - bids: Bid[]; -} - -function createBaseGenesisState(): GenesisState { - return { - $type: "akash.market.v1beta3.GenesisState", - params: undefined, - orders: [], - leases: [], - bids: [], - }; -} - -export const GenesisState = { - $type: "akash.market.v1beta3.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.params !== undefined) { - Params.encode(message.params, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.orders) { - Order.encode(v!, writer.uint32(18).fork()).ldelim(); - } - for (const v of message.leases) { - Lease.encode(v!, writer.uint32(26).fork()).ldelim(); - } - for (const v of message.bids) { - Bid.encode(v!, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.params = Params.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.orders.push(Order.decode(reader, reader.uint32())); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.leases.push(Lease.decode(reader, reader.uint32())); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.bids.push(Bid.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, - orders: globalThis.Array.isArray(object?.orders) - ? object.orders.map((e: any) => Order.fromJSON(e)) - : [], - leases: globalThis.Array.isArray(object?.leases) - ? object.leases.map((e: any) => Lease.fromJSON(e)) - : [], - bids: globalThis.Array.isArray(object?.bids) - ? 
object.bids.map((e: any) => Bid.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.params !== undefined) { - obj.params = Params.toJSON(message.params); - } - if (message.orders?.length) { - obj.orders = message.orders.map((e) => Order.toJSON(e)); - } - if (message.leases?.length) { - obj.leases = message.leases.map((e) => Lease.toJSON(e)); - } - if (message.bids?.length) { - obj.bids = message.bids.map((e) => Bid.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): GenesisState { - return GenesisState.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisState { - const message = createBaseGenesisState(); - message.params = - object.params !== undefined && object.params !== null - ? Params.fromPartial(object.params) - : undefined; - message.orders = object.orders?.map((e) => Order.fromPartial(e)) || []; - message.leases = object.leases?.map((e) => Lease.fromPartial(e)) || []; - message.bids = object.bids?.map((e) => Bid.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta3/lease.ts b/ts/src/generated/akash/market/v1beta3/lease.ts deleted file mode 100644 index 8bd3b809..00000000 --- a/ts/src/generated/akash/market/v1beta3/lease.ts +++ /dev/null @@ -1,980 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { DecCoin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { BidID } from "./bid"; - -/** LeaseID stores bid details of lease */ -export interface LeaseID { - $type: "akash.market.v1beta3.LeaseID"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - provider: string; -} - -/** Lease stores LeaseID, state of lease and price */ -export interface Lease { - $type: "akash.market.v1beta3.Lease"; - leaseId: LeaseID | undefined; - state: Lease_State; - price: DecCoin | undefined; - createdAt: Long; - closedOn: Long; -} - -/** State is an enum which refers to state of lease */ -export enum Lease_State { - /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ - invalid = 0, - /** active - LeaseActive denotes state for lease active */ - active = 1, - /** insufficient_funds - LeaseInsufficientFunds denotes state for lease insufficient_funds */ - insufficient_funds = 2, - /** closed - LeaseClosed denotes state for lease closed */ - closed = 3, - UNRECOGNIZED = -1, -} - -export function lease_StateFromJSON(object: any): Lease_State { - switch (object) { - case 0: - case "invalid": - return Lease_State.invalid; - case 1: - case "active": - return Lease_State.active; - case 2: - case "insufficient_funds": - return Lease_State.insufficient_funds; - case 3: - case "closed": - return Lease_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Lease_State.UNRECOGNIZED; - } -} - -export function lease_StateToJSON(object: Lease_State): string { - switch (object) { - case Lease_State.invalid: - return "invalid"; - case Lease_State.active: - return "active"; - case Lease_State.insufficient_funds: - return "insufficient_funds"; - case Lease_State.closed: - return "closed"; - case Lease_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** LeaseFilters defines flags for lease list filter */ -export interface LeaseFilters { - $type: "akash.market.v1beta3.LeaseFilters"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - provider: string; - state: string; -} - -/** MsgCreateLease is sent to create a lease */ -export interface MsgCreateLease { - $type: "akash.market.v1beta3.MsgCreateLease"; - bidId: BidID | undefined; -} - -/** MsgCreateLeaseResponse is the response from creating a lease */ -export interface MsgCreateLeaseResponse { - $type: "akash.market.v1beta3.MsgCreateLeaseResponse"; -} - -/** MsgWithdrawLease defines an SDK message for closing bid */ -export interface MsgWithdrawLease { - $type: "akash.market.v1beta3.MsgWithdrawLease"; - bidId: LeaseID | undefined; -} - -/** MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. */ -export interface MsgWithdrawLeaseResponse { - $type: "akash.market.v1beta3.MsgWithdrawLeaseResponse"; -} - -/** MsgCloseLease defines an SDK message for closing order */ -export interface MsgCloseLease { - $type: "akash.market.v1beta3.MsgCloseLease"; - leaseId: LeaseID | undefined; -} - -/** MsgCloseLeaseResponse defines the Msg/CloseLease response type. */ -export interface MsgCloseLeaseResponse { - $type: "akash.market.v1beta3.MsgCloseLeaseResponse"; -} - -function createBaseLeaseID(): LeaseID { - return { - $type: "akash.market.v1beta3.LeaseID", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - provider: "", - }; -} - -export const LeaseID = { - $type: "akash.market.v1beta3.LeaseID" as const, - - encode( - message: LeaseID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.provider !== "") { - writer.uint32(42).string(message.provider); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): LeaseID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseLeaseID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.provider = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): LeaseID { - return { - $type: LeaseID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, - provider: isSet(object.provider) - ? globalThis.String(object.provider) - : "", - }; - }, - - toJSON(message: LeaseID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - if (message.provider !== "") { - obj.provider = message.provider; - } - return obj; - }, - - create(base?: DeepPartial): LeaseID { - return LeaseID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): LeaseID { - const message = createBaseLeaseID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.provider = object.provider ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(LeaseID.$type, LeaseID); - -function createBaseLease(): Lease { - return { - $type: "akash.market.v1beta3.Lease", - leaseId: undefined, - state: 0, - price: undefined, - createdAt: Long.ZERO, - closedOn: Long.ZERO, - }; -} - -export const Lease = { - $type: "akash.market.v1beta3.Lease" as const, - - encode(message: Lease, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.leaseId !== undefined) { - LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.price !== undefined) { - DecCoin.encode(message.price, writer.uint32(26).fork()).ldelim(); - } - if (!message.createdAt.equals(Long.ZERO)) { - writer.uint32(32).int64(message.createdAt); - } - if (!message.closedOn.equals(Long.ZERO)) { - writer.uint32(40).int64(message.closedOn); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Lease { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseLease(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.leaseId = LeaseID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.price = DecCoin.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.createdAt = reader.int64() as Long; - continue; - case 5: - if (tag !== 40) { - break; - } - - message.closedOn = reader.int64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Lease { - return { - $type: Lease.$type, - leaseId: isSet(object.leaseId) - ? LeaseID.fromJSON(object.leaseId) - : undefined, - state: isSet(object.state) ? lease_StateFromJSON(object.state) : 0, - price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, - createdAt: isSet(object.createdAt) - ? Long.fromValue(object.createdAt) - : Long.ZERO, - closedOn: isSet(object.closedOn) - ? Long.fromValue(object.closedOn) - : Long.ZERO, - }; - }, - - toJSON(message: Lease): unknown { - const obj: any = {}; - if (message.leaseId !== undefined) { - obj.leaseId = LeaseID.toJSON(message.leaseId); - } - if (message.state !== 0) { - obj.state = lease_StateToJSON(message.state); - } - if (message.price !== undefined) { - obj.price = DecCoin.toJSON(message.price); - } - if (!message.createdAt.equals(Long.ZERO)) { - obj.createdAt = (message.createdAt || Long.ZERO).toString(); - } - if (!message.closedOn.equals(Long.ZERO)) { - obj.closedOn = (message.closedOn || Long.ZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): Lease { - return Lease.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Lease { - const message = createBaseLease(); - message.leaseId = - object.leaseId !== undefined && object.leaseId !== null - ? LeaseID.fromPartial(object.leaseId) - : undefined; - message.state = object.state ?? 0; - message.price = - object.price !== undefined && object.price !== null - ? DecCoin.fromPartial(object.price) - : undefined; - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? Long.fromValue(object.createdAt) - : Long.ZERO; - message.closedOn = - object.closedOn !== undefined && object.closedOn !== null - ? 
Long.fromValue(object.closedOn) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Lease.$type, Lease); - -function createBaseLeaseFilters(): LeaseFilters { - return { - $type: "akash.market.v1beta3.LeaseFilters", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - provider: "", - state: "", - }; -} - -export const LeaseFilters = { - $type: "akash.market.v1beta3.LeaseFilters" as const, - - encode( - message: LeaseFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.provider !== "") { - writer.uint32(42).string(message.provider); - } - if (message.state !== "") { - writer.uint32(50).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): LeaseFilters { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseLeaseFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.provider = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.state = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): LeaseFilters { - return { - $type: LeaseFilters.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, - provider: isSet(object.provider) - ? globalThis.String(object.provider) - : "", - state: isSet(object.state) ? globalThis.String(object.state) : "", - }; - }, - - toJSON(message: LeaseFilters): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - if (message.provider !== "") { - obj.provider = message.provider; - } - if (message.state !== "") { - obj.state = message.state; - } - return obj; - }, - - create(base?: DeepPartial): LeaseFilters { - return LeaseFilters.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): LeaseFilters { - const message = createBaseLeaseFilters(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.provider = object.provider ?? 
""; - message.state = object.state ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(LeaseFilters.$type, LeaseFilters); - -function createBaseMsgCreateLease(): MsgCreateLease { - return { $type: "akash.market.v1beta3.MsgCreateLease", bidId: undefined }; -} - -export const MsgCreateLease = { - $type: "akash.market.v1beta3.MsgCreateLease" as const, - - encode( - message: MsgCreateLease, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bidId !== undefined) { - BidID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateLease { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateLease(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bidId = BidID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCreateLease { - return { - $type: MsgCreateLease.$type, - bidId: isSet(object.bidId) ? BidID.fromJSON(object.bidId) : undefined, - }; - }, - - toJSON(message: MsgCreateLease): unknown { - const obj: any = {}; - if (message.bidId !== undefined) { - obj.bidId = BidID.toJSON(message.bidId); - } - return obj; - }, - - create(base?: DeepPartial): MsgCreateLease { - return MsgCreateLease.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCreateLease { - const message = createBaseMsgCreateLease(); - message.bidId = - object.bidId !== undefined && object.bidId !== null - ? BidID.fromPartial(object.bidId) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateLease.$type, MsgCreateLease); - -function createBaseMsgCreateLeaseResponse(): MsgCreateLeaseResponse { - return { $type: "akash.market.v1beta3.MsgCreateLeaseResponse" }; -} - -export const MsgCreateLeaseResponse = { - $type: "akash.market.v1beta3.MsgCreateLeaseResponse" as const, - - encode( - _: MsgCreateLeaseResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateLeaseResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateLeaseResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCreateLeaseResponse { - return { $type: MsgCreateLeaseResponse.$type }; - }, - - toJSON(_: MsgCreateLeaseResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgCreateLeaseResponse { - return MsgCreateLeaseResponse.fromPartial(base ?? 
{}); - }, - fromPartial(_: DeepPartial): MsgCreateLeaseResponse { - const message = createBaseMsgCreateLeaseResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateLeaseResponse.$type, MsgCreateLeaseResponse); - -function createBaseMsgWithdrawLease(): MsgWithdrawLease { - return { $type: "akash.market.v1beta3.MsgWithdrawLease", bidId: undefined }; -} - -export const MsgWithdrawLease = { - $type: "akash.market.v1beta3.MsgWithdrawLease" as const, - - encode( - message: MsgWithdrawLease, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bidId !== undefined) { - LeaseID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgWithdrawLease { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgWithdrawLease(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bidId = LeaseID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgWithdrawLease { - return { - $type: MsgWithdrawLease.$type, - bidId: isSet(object.bidId) ? LeaseID.fromJSON(object.bidId) : undefined, - }; - }, - - toJSON(message: MsgWithdrawLease): unknown { - const obj: any = {}; - if (message.bidId !== undefined) { - obj.bidId = LeaseID.toJSON(message.bidId); - } - return obj; - }, - - create(base?: DeepPartial): MsgWithdrawLease { - return MsgWithdrawLease.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgWithdrawLease { - const message = createBaseMsgWithdrawLease(); - message.bidId = - object.bidId !== undefined && object.bidId !== null - ? LeaseID.fromPartial(object.bidId) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgWithdrawLease.$type, MsgWithdrawLease); - -function createBaseMsgWithdrawLeaseResponse(): MsgWithdrawLeaseResponse { - return { $type: "akash.market.v1beta3.MsgWithdrawLeaseResponse" }; -} - -export const MsgWithdrawLeaseResponse = { - $type: "akash.market.v1beta3.MsgWithdrawLeaseResponse" as const, - - encode( - _: MsgWithdrawLeaseResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgWithdrawLeaseResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgWithdrawLeaseResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgWithdrawLeaseResponse { - return { $type: MsgWithdrawLeaseResponse.$type }; - }, - - toJSON(_: MsgWithdrawLeaseResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgWithdrawLeaseResponse { - return MsgWithdrawLeaseResponse.fromPartial(base ?? 
{}); - }, - fromPartial( - _: DeepPartial, - ): MsgWithdrawLeaseResponse { - const message = createBaseMsgWithdrawLeaseResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgWithdrawLeaseResponse.$type, - MsgWithdrawLeaseResponse, -); - -function createBaseMsgCloseLease(): MsgCloseLease { - return { $type: "akash.market.v1beta3.MsgCloseLease", leaseId: undefined }; -} - -export const MsgCloseLease = { - $type: "akash.market.v1beta3.MsgCloseLease" as const, - - encode( - message: MsgCloseLease, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.leaseId !== undefined) { - LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseLease { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseLease(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.leaseId = LeaseID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCloseLease { - return { - $type: MsgCloseLease.$type, - leaseId: isSet(object.leaseId) - ? LeaseID.fromJSON(object.leaseId) - : undefined, - }; - }, - - toJSON(message: MsgCloseLease): unknown { - const obj: any = {}; - if (message.leaseId !== undefined) { - obj.leaseId = LeaseID.toJSON(message.leaseId); - } - return obj; - }, - - create(base?: DeepPartial): MsgCloseLease { - return MsgCloseLease.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCloseLease { - const message = createBaseMsgCloseLease(); - message.leaseId = - object.leaseId !== undefined && object.leaseId !== null - ? LeaseID.fromPartial(object.leaseId) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseLease.$type, MsgCloseLease); - -function createBaseMsgCloseLeaseResponse(): MsgCloseLeaseResponse { - return { $type: "akash.market.v1beta3.MsgCloseLeaseResponse" }; -} - -export const MsgCloseLeaseResponse = { - $type: "akash.market.v1beta3.MsgCloseLeaseResponse" as const, - - encode( - _: MsgCloseLeaseResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCloseLeaseResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseLeaseResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCloseLeaseResponse { - return { $type: MsgCloseLeaseResponse.$type }; - }, - - toJSON(_: MsgCloseLeaseResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgCloseLeaseResponse { - return MsgCloseLeaseResponse.fromPartial(base ?? 
{}); - }, - fromPartial(_: DeepPartial): MsgCloseLeaseResponse { - const message = createBaseMsgCloseLeaseResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseLeaseResponse.$type, MsgCloseLeaseResponse); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta3/order.ts b/ts/src/generated/akash/market/v1beta3/order.ts deleted file mode 100644 index 52a629a6..00000000 --- a/ts/src/generated/akash/market/v1beta3/order.ts +++ /dev/null @@ -1,502 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { GroupSpec } from "../../deployment/v1beta3/groupspec"; - -/** OrderID stores owner and all other seq numbers */ -export interface OrderID { - $type: "akash.market.v1beta3.OrderID"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; -} - -/** Order stores orderID, state of order and other details */ -export interface Order { - $type: "akash.market.v1beta3.Order"; - orderId: OrderID | undefined; - state: Order_State; - spec: GroupSpec | undefined; - createdAt: Long; -} - -/** State is an enum which refers to state of order */ -export enum Order_State { - /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ - invalid = 0, - /** open - OrderOpen denotes state for order open */ - open = 1, - /** active - OrderMatched denotes state for order matched */ - active = 2, - /** closed - OrderClosed denotes state for order lost */ - closed = 3, - UNRECOGNIZED = -1, -} - -export function order_StateFromJSON(object: any): Order_State { - switch (object) { - case 0: - case "invalid": - return Order_State.invalid; - case 1: - case "open": - return Order_State.open; - case 2: - case "active": - return Order_State.active; - case 3: - case "closed": - return Order_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Order_State.UNRECOGNIZED; - } -} - -export function order_StateToJSON(object: Order_State): string { - switch (object) { - case Order_State.invalid: - return "invalid"; - case Order_State.open: - return "open"; - case Order_State.active: - return "active"; - case Order_State.closed: - return "closed"; - case Order_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** OrderFilters defines flags for order list filter */ -export interface OrderFilters { - $type: "akash.market.v1beta3.OrderFilters"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - state: string; -} - -function createBaseOrderID(): OrderID { - return { - $type: "akash.market.v1beta3.OrderID", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - }; -} - -export const OrderID = { - $type: "akash.market.v1beta3.OrderID" as const, - - encode( - message: OrderID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): OrderID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseOrderID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): OrderID { - return { - $type: OrderID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? 
globalThis.Number(object.oseq) : 0, - }; - }, - - toJSON(message: OrderID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - return obj; - }, - - create(base?: DeepPartial): OrderID { - return OrderID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): OrderID { - const message = createBaseOrderID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(OrderID.$type, OrderID); - -function createBaseOrder(): Order { - return { - $type: "akash.market.v1beta3.Order", - orderId: undefined, - state: 0, - spec: undefined, - createdAt: Long.ZERO, - }; -} - -export const Order = { - $type: "akash.market.v1beta3.Order" as const, - - encode(message: Order, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.orderId !== undefined) { - OrderID.encode(message.orderId, writer.uint32(10).fork()).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.spec !== undefined) { - GroupSpec.encode(message.spec, writer.uint32(26).fork()).ldelim(); - } - if (!message.createdAt.equals(Long.ZERO)) { - writer.uint32(32).int64(message.createdAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Order { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseOrder(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.orderId = OrderID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.spec = GroupSpec.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.createdAt = reader.int64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Order { - return { - $type: Order.$type, - orderId: isSet(object.orderId) - ? OrderID.fromJSON(object.orderId) - : undefined, - state: isSet(object.state) ? order_StateFromJSON(object.state) : 0, - spec: isSet(object.spec) ? GroupSpec.fromJSON(object.spec) : undefined, - createdAt: isSet(object.createdAt) - ? Long.fromValue(object.createdAt) - : Long.ZERO, - }; - }, - - toJSON(message: Order): unknown { - const obj: any = {}; - if (message.orderId !== undefined) { - obj.orderId = OrderID.toJSON(message.orderId); - } - if (message.state !== 0) { - obj.state = order_StateToJSON(message.state); - } - if (message.spec !== undefined) { - obj.spec = GroupSpec.toJSON(message.spec); - } - if (!message.createdAt.equals(Long.ZERO)) { - obj.createdAt = (message.createdAt || Long.ZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): Order { - return Order.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): Order { - const message = createBaseOrder(); - message.orderId = - object.orderId !== undefined && object.orderId !== null - ? OrderID.fromPartial(object.orderId) - : undefined; - message.state = object.state ?? 0; - message.spec = - object.spec !== undefined && object.spec !== null - ? GroupSpec.fromPartial(object.spec) - : undefined; - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? Long.fromValue(object.createdAt) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Order.$type, Order); - -function createBaseOrderFilters(): OrderFilters { - return { - $type: "akash.market.v1beta3.OrderFilters", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - state: "", - }; -} - -export const OrderFilters = { - $type: "akash.market.v1beta3.OrderFilters" as const, - - encode( - message: OrderFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.state !== "") { - writer.uint32(42).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): OrderFilters { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseOrderFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.state = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): OrderFilters { - return { - $type: OrderFilters.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, - state: isSet(object.state) ? globalThis.String(object.state) : "", - }; - }, - - toJSON(message: OrderFilters): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - if (message.state !== "") { - obj.state = message.state; - } - return obj; - }, - - create(base?: DeepPartial): OrderFilters { - return OrderFilters.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): OrderFilters { - const message = createBaseOrderFilters(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? 
Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.state = object.state ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(OrderFilters.$type, OrderFilters); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta3/params.ts b/ts/src/generated/akash/market/v1beta3/params.ts deleted file mode 100644 index 5a31a319..00000000 --- a/ts/src/generated/akash/market/v1beta3/params.ts +++ /dev/null @@ -1,136 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** Params is the params for the x/market module */ -export interface Params { - $type: "akash.market.v1beta3.Params"; - bidMinDeposit: Coin | undefined; - orderMaxBids: number; -} - -function createBaseParams(): Params { - return { - $type: "akash.market.v1beta3.Params", - bidMinDeposit: undefined, - orderMaxBids: 0, - }; -} - -export const Params = { - $type: "akash.market.v1beta3.Params" as const, - - encode( - message: Params, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bidMinDeposit !== undefined) { - Coin.encode(message.bidMinDeposit, writer.uint32(10).fork()).ldelim(); - } - if (message.orderMaxBids !== 0) { - writer.uint32(16).uint32(message.orderMaxBids); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Params { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseParams(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bidMinDeposit = Coin.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.orderMaxBids = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Params { - return { - $type: Params.$type, - bidMinDeposit: isSet(object.bidMinDeposit) - ? Coin.fromJSON(object.bidMinDeposit) - : undefined, - orderMaxBids: isSet(object.orderMaxBids) - ? globalThis.Number(object.orderMaxBids) - : 0, - }; - }, - - toJSON(message: Params): unknown { - const obj: any = {}; - if (message.bidMinDeposit !== undefined) { - obj.bidMinDeposit = Coin.toJSON(message.bidMinDeposit); - } - if (message.orderMaxBids !== 0) { - obj.orderMaxBids = Math.round(message.orderMaxBids); - } - return obj; - }, - - create(base?: DeepPartial): Params { - return Params.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Params { - const message = createBaseParams(); - message.bidMinDeposit = - object.bidMinDeposit !== undefined && object.bidMinDeposit !== null - ? 
Coin.fromPartial(object.bidMinDeposit) - : undefined; - message.orderMaxBids = object.orderMaxBids ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(Params.$type, Params); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta3/query.ts b/ts/src/generated/akash/market/v1beta3/query.ts deleted file mode 100644 index 98ce21f2..00000000 --- a/ts/src/generated/akash/market/v1beta3/query.ts +++ /dev/null @@ -1,1275 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Account, FractionalPayment } from "../../escrow/v1beta3/types"; -import { Bid, BidFilters, BidID } from "./bid"; -import { Lease, LeaseFilters, LeaseID } from "./lease"; -import { Order, OrderFilters, OrderID } from "./order"; - -/** QueryOrdersRequest is request type for the Query/Orders RPC method */ -export interface QueryOrdersRequest { - $type: "akash.market.v1beta3.QueryOrdersRequest"; - filters: OrderFilters | undefined; - pagination: PageRequest | undefined; -} - -/** QueryOrdersResponse is response type for the Query/Orders RPC method */ -export interface QueryOrdersResponse { - $type: "akash.market.v1beta3.QueryOrdersResponse"; - orders: Order[]; - pagination: PageResponse | undefined; -} - -/** QueryOrderRequest is request type for the Query/Order RPC method */ -export interface QueryOrderRequest { - $type: "akash.market.v1beta3.QueryOrderRequest"; - id: OrderID | undefined; -} - -/** QueryOrderResponse is response type for the Query/Order RPC method */ -export interface QueryOrderResponse { - $type: "akash.market.v1beta3.QueryOrderResponse"; - order: Order | undefined; -} - -/** QueryBidsRequest is request type for the Query/Bids RPC method */ -export interface QueryBidsRequest { - $type: "akash.market.v1beta3.QueryBidsRequest"; - filters: BidFilters | undefined; - pagination: PageRequest | undefined; -} - -/** QueryBidsResponse is response type for the Query/Bids RPC method */ -export interface QueryBidsResponse { - $type: "akash.market.v1beta3.QueryBidsResponse"; - bids: QueryBidResponse[]; - pagination: PageResponse | undefined; -} - -/** QueryBidRequest is request type for the Query/Bid RPC method */ -export interface QueryBidRequest { - $type: "akash.market.v1beta3.QueryBidRequest"; - id: BidID | undefined; -} - -/** QueryBidResponse is response type for the Query/Bid RPC method */ -export interface QueryBidResponse { - $type: "akash.market.v1beta3.QueryBidResponse"; - bid: Bid | undefined; - escrowAccount: Account | undefined; -} - -/** QueryLeasesRequest is request type for the Query/Leases RPC method */ -export interface QueryLeasesRequest { - $type: "akash.market.v1beta3.QueryLeasesRequest"; - filters: LeaseFilters | undefined; - pagination: PageRequest | undefined; -} - -/** QueryLeasesResponse is response type for the Query/Leases RPC method */ -export interface 
QueryLeasesResponse { - $type: "akash.market.v1beta3.QueryLeasesResponse"; - leases: QueryLeaseResponse[]; - pagination: PageResponse | undefined; -} - -/** QueryLeaseRequest is request type for the Query/Lease RPC method */ -export interface QueryLeaseRequest { - $type: "akash.market.v1beta3.QueryLeaseRequest"; - id: LeaseID | undefined; -} - -/** QueryLeaseResponse is response type for the Query/Lease RPC method */ -export interface QueryLeaseResponse { - $type: "akash.market.v1beta3.QueryLeaseResponse"; - lease: Lease | undefined; - escrowPayment: FractionalPayment | undefined; -} - -function createBaseQueryOrdersRequest(): QueryOrdersRequest { - return { - $type: "akash.market.v1beta3.QueryOrdersRequest", - filters: undefined, - pagination: undefined, - }; -} - -export const QueryOrdersRequest = { - $type: "akash.market.v1beta3.QueryOrdersRequest" as const, - - encode( - message: QueryOrdersRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.filters !== undefined) { - OrderFilters.encode(message.filters, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryOrdersRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryOrdersRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.filters = OrderFilters.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryOrdersRequest { - return { - $type: QueryOrdersRequest.$type, - filters: isSet(object.filters) - ? OrderFilters.fromJSON(object.filters) - : undefined, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryOrdersRequest): unknown { - const obj: any = {}; - if (message.filters !== undefined) { - obj.filters = OrderFilters.toJSON(message.filters); - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryOrdersRequest { - return QueryOrdersRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryOrdersRequest { - const message = createBaseQueryOrdersRequest(); - message.filters = - object.filters !== undefined && object.filters !== null - ? OrderFilters.fromPartial(object.filters) - : undefined; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? 
PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryOrdersRequest.$type, QueryOrdersRequest); - -function createBaseQueryOrdersResponse(): QueryOrdersResponse { - return { - $type: "akash.market.v1beta3.QueryOrdersResponse", - orders: [], - pagination: undefined, - }; -} - -export const QueryOrdersResponse = { - $type: "akash.market.v1beta3.QueryOrdersResponse" as const, - - encode( - message: QueryOrdersResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.orders) { - Order.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryOrdersResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryOrdersResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.orders.push(Order.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryOrdersResponse { - return { - $type: QueryOrdersResponse.$type, - orders: globalThis.Array.isArray(object?.orders) - ? object.orders.map((e: any) => Order.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryOrdersResponse): unknown { - const obj: any = {}; - if (message.orders?.length) { - obj.orders = message.orders.map((e) => Order.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryOrdersResponse { - return QueryOrdersResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryOrdersResponse { - const message = createBaseQueryOrdersResponse(); - message.orders = object.orders?.map((e) => Order.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryOrdersResponse.$type, QueryOrdersResponse); - -function createBaseQueryOrderRequest(): QueryOrderRequest { - return { $type: "akash.market.v1beta3.QueryOrderRequest", id: undefined }; -} - -export const QueryOrderRequest = { - $type: "akash.market.v1beta3.QueryOrderRequest" as const, - - encode( - message: QueryOrderRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - OrderID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryOrderRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryOrderRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = OrderID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryOrderRequest { - return { - $type: QueryOrderRequest.$type, - id: isSet(object.id) ? OrderID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryOrderRequest): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = OrderID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): QueryOrderRequest { - return QueryOrderRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryOrderRequest { - const message = createBaseQueryOrderRequest(); - message.id = - object.id !== undefined && object.id !== null - ? OrderID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryOrderRequest.$type, QueryOrderRequest); - -function createBaseQueryOrderResponse(): QueryOrderResponse { - return { $type: "akash.market.v1beta3.QueryOrderResponse", order: undefined }; -} - -export const QueryOrderResponse = { - $type: "akash.market.v1beta3.QueryOrderResponse" as const, - - encode( - message: QueryOrderResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.order !== undefined) { - Order.encode(message.order, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryOrderResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryOrderResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.order = Order.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryOrderResponse { - return { - $type: QueryOrderResponse.$type, - order: isSet(object.order) ? Order.fromJSON(object.order) : undefined, - }; - }, - - toJSON(message: QueryOrderResponse): unknown { - const obj: any = {}; - if (message.order !== undefined) { - obj.order = Order.toJSON(message.order); - } - return obj; - }, - - create(base?: DeepPartial): QueryOrderResponse { - return QueryOrderResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryOrderResponse { - const message = createBaseQueryOrderResponse(); - message.order = - object.order !== undefined && object.order !== null - ? 
Order.fromPartial(object.order) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryOrderResponse.$type, QueryOrderResponse); - -function createBaseQueryBidsRequest(): QueryBidsRequest { - return { - $type: "akash.market.v1beta3.QueryBidsRequest", - filters: undefined, - pagination: undefined, - }; -} - -export const QueryBidsRequest = { - $type: "akash.market.v1beta3.QueryBidsRequest" as const, - - encode( - message: QueryBidsRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.filters !== undefined) { - BidFilters.encode(message.filters, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryBidsRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryBidsRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.filters = BidFilters.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryBidsRequest { - return { - $type: QueryBidsRequest.$type, - filters: isSet(object.filters) - ? BidFilters.fromJSON(object.filters) - : undefined, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryBidsRequest): unknown { - const obj: any = {}; - if (message.filters !== undefined) { - obj.filters = BidFilters.toJSON(message.filters); - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryBidsRequest { - return QueryBidsRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryBidsRequest { - const message = createBaseQueryBidsRequest(); - message.filters = - object.filters !== undefined && object.filters !== null - ? BidFilters.fromPartial(object.filters) - : undefined; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryBidsRequest.$type, QueryBidsRequest); - -function createBaseQueryBidsResponse(): QueryBidsResponse { - return { - $type: "akash.market.v1beta3.QueryBidsResponse", - bids: [], - pagination: undefined, - }; -} - -export const QueryBidsResponse = { - $type: "akash.market.v1beta3.QueryBidsResponse" as const, - - encode( - message: QueryBidsResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.bids) { - QueryBidResponse.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryBidsResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryBidsResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bids.push(QueryBidResponse.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryBidsResponse { - return { - $type: QueryBidsResponse.$type, - bids: globalThis.Array.isArray(object?.bids) - ? object.bids.map((e: any) => QueryBidResponse.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryBidsResponse): unknown { - const obj: any = {}; - if (message.bids?.length) { - obj.bids = message.bids.map((e) => QueryBidResponse.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryBidsResponse { - return QueryBidsResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryBidsResponse { - const message = createBaseQueryBidsResponse(); - message.bids = - object.bids?.map((e) => QueryBidResponse.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryBidsResponse.$type, QueryBidsResponse); - -function createBaseQueryBidRequest(): QueryBidRequest { - return { $type: "akash.market.v1beta3.QueryBidRequest", id: undefined }; -} - -export const QueryBidRequest = { - $type: "akash.market.v1beta3.QueryBidRequest" as const, - - encode( - message: QueryBidRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - BidID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryBidRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryBidRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = BidID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryBidRequest { - return { - $type: QueryBidRequest.$type, - id: isSet(object.id) ? BidID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryBidRequest): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = BidID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): QueryBidRequest { - return QueryBidRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryBidRequest { - const message = createBaseQueryBidRequest(); - message.id = - object.id !== undefined && object.id !== null - ? 
BidID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryBidRequest.$type, QueryBidRequest); - -function createBaseQueryBidResponse(): QueryBidResponse { - return { - $type: "akash.market.v1beta3.QueryBidResponse", - bid: undefined, - escrowAccount: undefined, - }; -} - -export const QueryBidResponse = { - $type: "akash.market.v1beta3.QueryBidResponse" as const, - - encode( - message: QueryBidResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bid !== undefined) { - Bid.encode(message.bid, writer.uint32(10).fork()).ldelim(); - } - if (message.escrowAccount !== undefined) { - Account.encode(message.escrowAccount, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryBidResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryBidResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bid = Bid.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.escrowAccount = Account.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryBidResponse { - return { - $type: QueryBidResponse.$type, - bid: isSet(object.bid) ? Bid.fromJSON(object.bid) : undefined, - escrowAccount: isSet(object.escrowAccount) - ? Account.fromJSON(object.escrowAccount) - : undefined, - }; - }, - - toJSON(message: QueryBidResponse): unknown { - const obj: any = {}; - if (message.bid !== undefined) { - obj.bid = Bid.toJSON(message.bid); - } - if (message.escrowAccount !== undefined) { - obj.escrowAccount = Account.toJSON(message.escrowAccount); - } - return obj; - }, - - create(base?: DeepPartial): QueryBidResponse { - return QueryBidResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryBidResponse { - const message = createBaseQueryBidResponse(); - message.bid = - object.bid !== undefined && object.bid !== null - ? Bid.fromPartial(object.bid) - : undefined; - message.escrowAccount = - object.escrowAccount !== undefined && object.escrowAccount !== null - ? Account.fromPartial(object.escrowAccount) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryBidResponse.$type, QueryBidResponse); - -function createBaseQueryLeasesRequest(): QueryLeasesRequest { - return { - $type: "akash.market.v1beta3.QueryLeasesRequest", - filters: undefined, - pagination: undefined, - }; -} - -export const QueryLeasesRequest = { - $type: "akash.market.v1beta3.QueryLeasesRequest" as const, - - encode( - message: QueryLeasesRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.filters !== undefined) { - LeaseFilters.encode(message.filters, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryLeasesRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryLeasesRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.filters = LeaseFilters.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryLeasesRequest { - return { - $type: QueryLeasesRequest.$type, - filters: isSet(object.filters) - ? LeaseFilters.fromJSON(object.filters) - : undefined, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryLeasesRequest): unknown { - const obj: any = {}; - if (message.filters !== undefined) { - obj.filters = LeaseFilters.toJSON(message.filters); - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryLeasesRequest { - return QueryLeasesRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryLeasesRequest { - const message = createBaseQueryLeasesRequest(); - message.filters = - object.filters !== undefined && object.filters !== null - ? LeaseFilters.fromPartial(object.filters) - : undefined; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryLeasesRequest.$type, QueryLeasesRequest); - -function createBaseQueryLeasesResponse(): QueryLeasesResponse { - return { - $type: "akash.market.v1beta3.QueryLeasesResponse", - leases: [], - pagination: undefined, - }; -} - -export const QueryLeasesResponse = { - $type: "akash.market.v1beta3.QueryLeasesResponse" as const, - - encode( - message: QueryLeasesResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.leases) { - QueryLeaseResponse.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryLeasesResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryLeasesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.leases.push( - QueryLeaseResponse.decode(reader, reader.uint32()), - ); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryLeasesResponse { - return { - $type: QueryLeasesResponse.$type, - leases: globalThis.Array.isArray(object?.leases) - ? object.leases.map((e: any) => QueryLeaseResponse.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? 
PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryLeasesResponse): unknown { - const obj: any = {}; - if (message.leases?.length) { - obj.leases = message.leases.map((e) => QueryLeaseResponse.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryLeasesResponse { - return QueryLeasesResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryLeasesResponse { - const message = createBaseQueryLeasesResponse(); - message.leases = - object.leases?.map((e) => QueryLeaseResponse.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryLeasesResponse.$type, QueryLeasesResponse); - -function createBaseQueryLeaseRequest(): QueryLeaseRequest { - return { $type: "akash.market.v1beta3.QueryLeaseRequest", id: undefined }; -} - -export const QueryLeaseRequest = { - $type: "akash.market.v1beta3.QueryLeaseRequest" as const, - - encode( - message: QueryLeaseRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - LeaseID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryLeaseRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryLeaseRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = LeaseID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryLeaseRequest { - return { - $type: QueryLeaseRequest.$type, - id: isSet(object.id) ? LeaseID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryLeaseRequest): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = LeaseID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): QueryLeaseRequest { - return QueryLeaseRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryLeaseRequest { - const message = createBaseQueryLeaseRequest(); - message.id = - object.id !== undefined && object.id !== null - ? LeaseID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryLeaseRequest.$type, QueryLeaseRequest); - -function createBaseQueryLeaseResponse(): QueryLeaseResponse { - return { - $type: "akash.market.v1beta3.QueryLeaseResponse", - lease: undefined, - escrowPayment: undefined, - }; -} - -export const QueryLeaseResponse = { - $type: "akash.market.v1beta3.QueryLeaseResponse" as const, - - encode( - message: QueryLeaseResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.lease !== undefined) { - Lease.encode(message.lease, writer.uint32(10).fork()).ldelim(); - } - if (message.escrowPayment !== undefined) { - FractionalPayment.encode( - message.escrowPayment, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryLeaseResponse { - const reader = - input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryLeaseResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.lease = Lease.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.escrowPayment = FractionalPayment.decode( - reader, - reader.uint32(), - ); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryLeaseResponse { - return { - $type: QueryLeaseResponse.$type, - lease: isSet(object.lease) ? Lease.fromJSON(object.lease) : undefined, - escrowPayment: isSet(object.escrowPayment) - ? FractionalPayment.fromJSON(object.escrowPayment) - : undefined, - }; - }, - - toJSON(message: QueryLeaseResponse): unknown { - const obj: any = {}; - if (message.lease !== undefined) { - obj.lease = Lease.toJSON(message.lease); - } - if (message.escrowPayment !== undefined) { - obj.escrowPayment = FractionalPayment.toJSON(message.escrowPayment); - } - return obj; - }, - - create(base?: DeepPartial): QueryLeaseResponse { - return QueryLeaseResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryLeaseResponse { - const message = createBaseQueryLeaseResponse(); - message.lease = - object.lease !== undefined && object.lease !== null - ? Lease.fromPartial(object.lease) - : undefined; - message.escrowPayment = - object.escrowPayment !== undefined && object.escrowPayment !== null - ? FractionalPayment.fromPartial(object.escrowPayment) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryLeaseResponse.$type, QueryLeaseResponse); - -/** Query defines the gRPC querier service */ -export interface Query { - /** Orders queries orders with filters */ - Orders(request: QueryOrdersRequest): Promise; - /** Order queries order details */ - Order(request: QueryOrderRequest): Promise; - /** Bids queries bids with filters */ - Bids(request: QueryBidsRequest): Promise; - /** Bid queries bid details */ - Bid(request: QueryBidRequest): Promise; - /** Leases queries leases with filters */ - Leases(request: QueryLeasesRequest): Promise; - /** Lease queries lease details */ - Lease(request: QueryLeaseRequest): Promise; -} - -export const QueryServiceName = "akash.market.v1beta3.Query"; -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || QueryServiceName; - this.rpc = rpc; - this.Orders = this.Orders.bind(this); - this.Order = this.Order.bind(this); - this.Bids = this.Bids.bind(this); - this.Bid = this.Bid.bind(this); - this.Leases = this.Leases.bind(this); - this.Lease = this.Lease.bind(this); - } - Orders(request: QueryOrdersRequest): Promise { - const data = QueryOrdersRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Orders", data); - return promise.then((data) => - QueryOrdersResponse.decode(_m0.Reader.create(data)), - ); - } - - Order(request: QueryOrderRequest): Promise { - const data = QueryOrderRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Order", data); - return promise.then((data) => - QueryOrderResponse.decode(_m0.Reader.create(data)), - ); - } - - Bids(request: QueryBidsRequest): Promise { - const data = 
QueryBidsRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Bids", data); - return promise.then((data) => - QueryBidsResponse.decode(_m0.Reader.create(data)), - ); - } - - Bid(request: QueryBidRequest): Promise { - const data = QueryBidRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Bid", data); - return promise.then((data) => - QueryBidResponse.decode(_m0.Reader.create(data)), - ); - } - - Leases(request: QueryLeasesRequest): Promise { - const data = QueryLeasesRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Leases", data); - return promise.then((data) => - QueryLeasesResponse.decode(_m0.Reader.create(data)), - ); - } - - Lease(request: QueryLeaseRequest): Promise { - const data = QueryLeaseRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Lease", data); - return promise.then((data) => - QueryLeaseResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta3/service.grpc-js.ts b/ts/src/generated/akash/market/v1beta3/service.grpc-js.ts deleted file mode 100644 index 7c698813..00000000 --- a/ts/src/generated/akash/market/v1beta3/service.grpc-js.ts +++ /dev/null @@ -1,252 +0,0 @@ -/* eslint-disable */ -import { - ChannelCredentials, - Client, - makeGenericClientConstructor, - Metadata, -} from "@grpc/grpc-js"; -import type { - CallOptions, - ClientOptions, - ClientUnaryCall, - handleUnaryCall, - ServiceError, - UntypedServiceImplementation, -} from "@grpc/grpc-js"; -import { - MsgCloseBid, - MsgCloseBidResponse, - MsgCreateBid, - MsgCreateBidResponse, -} from "./bid"; -import { - MsgCloseLease, - MsgCloseLeaseResponse, - MsgCreateLease, - MsgCreateLeaseResponse, - MsgWithdrawLease, - MsgWithdrawLeaseResponse, -} from "./lease"; - -export const protobufPackage = "akash.market.v1beta3"; - -/** Msg defines the market Msg service */ -export type MsgService = typeof MsgService; -export const MsgService = { - /** CreateBid defines a method to create a bid given proper inputs. */ - createBid: { - path: "/akash.market.v1beta3.Msg/CreateBid", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCreateBid) => - Buffer.from(MsgCreateBid.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCreateBid.decode(value), - responseSerialize: (value: MsgCreateBidResponse) => - Buffer.from(MsgCreateBidResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => MsgCreateBidResponse.decode(value), - }, - /** CloseBid defines a method to close a bid given proper inputs. 
*/ - closeBid: { - path: "/akash.market.v1beta3.Msg/CloseBid", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCloseBid) => - Buffer.from(MsgCloseBid.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCloseBid.decode(value), - responseSerialize: (value: MsgCloseBidResponse) => - Buffer.from(MsgCloseBidResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => MsgCloseBidResponse.decode(value), - }, - /** WithdrawLease withdraws accrued funds from the lease payment */ - withdrawLease: { - path: "/akash.market.v1beta3.Msg/WithdrawLease", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgWithdrawLease) => - Buffer.from(MsgWithdrawLease.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgWithdrawLease.decode(value), - responseSerialize: (value: MsgWithdrawLeaseResponse) => - Buffer.from(MsgWithdrawLeaseResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => - MsgWithdrawLeaseResponse.decode(value), - }, - /** CreateLease creates a new lease */ - createLease: { - path: "/akash.market.v1beta3.Msg/CreateLease", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCreateLease) => - Buffer.from(MsgCreateLease.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCreateLease.decode(value), - responseSerialize: (value: MsgCreateLeaseResponse) => - Buffer.from(MsgCreateLeaseResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => - MsgCreateLeaseResponse.decode(value), - }, - /** CloseLease defines a method to close an order given proper inputs. */ - closeLease: { - path: "/akash.market.v1beta3.Msg/CloseLease", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCloseLease) => - Buffer.from(MsgCloseLease.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCloseLease.decode(value), - responseSerialize: (value: MsgCloseLeaseResponse) => - Buffer.from(MsgCloseLeaseResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => MsgCloseLeaseResponse.decode(value), - }, -} as const; - -export interface MsgServer extends UntypedServiceImplementation { - /** CreateBid defines a method to create a bid given proper inputs. */ - createBid: handleUnaryCall; - /** CloseBid defines a method to close a bid given proper inputs. */ - closeBid: handleUnaryCall; - /** WithdrawLease withdraws accrued funds from the lease payment */ - withdrawLease: handleUnaryCall; - /** CreateLease creates a new lease */ - createLease: handleUnaryCall; - /** CloseLease defines a method to close an order given proper inputs. */ - closeLease: handleUnaryCall; -} - -export interface MsgClient extends Client { - /** CreateBid defines a method to create a bid given proper inputs. */ - createBid( - request: MsgCreateBid, - callback: ( - error: ServiceError | null, - response: MsgCreateBidResponse, - ) => void, - ): ClientUnaryCall; - createBid( - request: MsgCreateBid, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCreateBidResponse, - ) => void, - ): ClientUnaryCall; - createBid( - request: MsgCreateBid, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCreateBidResponse, - ) => void, - ): ClientUnaryCall; - /** CloseBid defines a method to close a bid given proper inputs. 
*/ - closeBid( - request: MsgCloseBid, - callback: ( - error: ServiceError | null, - response: MsgCloseBidResponse, - ) => void, - ): ClientUnaryCall; - closeBid( - request: MsgCloseBid, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCloseBidResponse, - ) => void, - ): ClientUnaryCall; - closeBid( - request: MsgCloseBid, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCloseBidResponse, - ) => void, - ): ClientUnaryCall; - /** WithdrawLease withdraws accrued funds from the lease payment */ - withdrawLease( - request: MsgWithdrawLease, - callback: ( - error: ServiceError | null, - response: MsgWithdrawLeaseResponse, - ) => void, - ): ClientUnaryCall; - withdrawLease( - request: MsgWithdrawLease, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgWithdrawLeaseResponse, - ) => void, - ): ClientUnaryCall; - withdrawLease( - request: MsgWithdrawLease, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgWithdrawLeaseResponse, - ) => void, - ): ClientUnaryCall; - /** CreateLease creates a new lease */ - createLease( - request: MsgCreateLease, - callback: ( - error: ServiceError | null, - response: MsgCreateLeaseResponse, - ) => void, - ): ClientUnaryCall; - createLease( - request: MsgCreateLease, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCreateLeaseResponse, - ) => void, - ): ClientUnaryCall; - createLease( - request: MsgCreateLease, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCreateLeaseResponse, - ) => void, - ): ClientUnaryCall; - /** CloseLease defines a method to close an order given proper inputs. */ - closeLease( - request: MsgCloseLease, - callback: ( - error: ServiceError | null, - response: MsgCloseLeaseResponse, - ) => void, - ): ClientUnaryCall; - closeLease( - request: MsgCloseLease, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCloseLeaseResponse, - ) => void, - ): ClientUnaryCall; - closeLease( - request: MsgCloseLease, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCloseLeaseResponse, - ) => void, - ): ClientUnaryCall; -} - -export const MsgClient = makeGenericClientConstructor( - MsgService, - "akash.market.v1beta3.Msg", -) as unknown as { - new ( - address: string, - credentials: ChannelCredentials, - options?: Partial, - ): MsgClient; - service: typeof MsgService; - serviceName: string; -}; diff --git a/ts/src/generated/akash/market/v1beta3/service.ts b/ts/src/generated/akash/market/v1beta3/service.ts deleted file mode 100644 index 452b3354..00000000 --- a/ts/src/generated/akash/market/v1beta3/service.ts +++ /dev/null @@ -1,92 +0,0 @@ -/* eslint-disable */ -import _m0 from "protobufjs/minimal"; -import { - MsgCloseBid, - MsgCloseBidResponse, - MsgCreateBid, - MsgCreateBidResponse, -} from "./bid"; -import { - MsgCloseLease, - MsgCloseLeaseResponse, - MsgCreateLease, - MsgCreateLeaseResponse, - MsgWithdrawLease, - MsgWithdrawLeaseResponse, -} from "./lease"; - -/** Msg defines the market Msg service */ -export interface Msg { - /** CreateBid defines a method to create a bid given proper inputs. */ - CreateBid(request: MsgCreateBid): Promise; - /** CloseBid defines a method to close a bid given proper inputs. 
*/ - CloseBid(request: MsgCloseBid): Promise; - /** WithdrawLease withdraws accrued funds from the lease payment */ - WithdrawLease(request: MsgWithdrawLease): Promise; - /** CreateLease creates a new lease */ - CreateLease(request: MsgCreateLease): Promise; - /** CloseLease defines a method to close an order given proper inputs. */ - CloseLease(request: MsgCloseLease): Promise; -} - -export const MsgServiceName = "akash.market.v1beta3.Msg"; -export class MsgClientImpl implements Msg { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || MsgServiceName; - this.rpc = rpc; - this.CreateBid = this.CreateBid.bind(this); - this.CloseBid = this.CloseBid.bind(this); - this.WithdrawLease = this.WithdrawLease.bind(this); - this.CreateLease = this.CreateLease.bind(this); - this.CloseLease = this.CloseLease.bind(this); - } - CreateBid(request: MsgCreateBid): Promise { - const data = MsgCreateBid.encode(request).finish(); - const promise = this.rpc.request(this.service, "CreateBid", data); - return promise.then((data) => - MsgCreateBidResponse.decode(_m0.Reader.create(data)), - ); - } - - CloseBid(request: MsgCloseBid): Promise { - const data = MsgCloseBid.encode(request).finish(); - const promise = this.rpc.request(this.service, "CloseBid", data); - return promise.then((data) => - MsgCloseBidResponse.decode(_m0.Reader.create(data)), - ); - } - - WithdrawLease(request: MsgWithdrawLease): Promise { - const data = MsgWithdrawLease.encode(request).finish(); - const promise = this.rpc.request(this.service, "WithdrawLease", data); - return promise.then((data) => - MsgWithdrawLeaseResponse.decode(_m0.Reader.create(data)), - ); - } - - CreateLease(request: MsgCreateLease): Promise { - const data = MsgCreateLease.encode(request).finish(); - const promise = this.rpc.request(this.service, "CreateLease", data); - return promise.then((data) => - MsgCreateLeaseResponse.decode(_m0.Reader.create(data)), - ); - } - - CloseLease(request: MsgCloseLease): Promise { - const data = MsgCloseLease.encode(request).finish(); - const promise = this.rpc.request(this.service, "CloseLease", data); - return promise.then((data) => - MsgCloseLeaseResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} diff --git a/ts/src/generated/akash/market/v1beta4/bid.ts b/ts/src/generated/akash/market/v1beta4/bid.ts deleted file mode 100644 index 04479900..00000000 --- a/ts/src/generated/akash/market/v1beta4/bid.ts +++ /dev/null @@ -1,1030 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin, DecCoin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Resources } from "../../base/v1beta3/resources"; -import { OrderID } from "./order"; - -/** - * ResourceOffer describes resources that provider is offering - * for deployment - */ -export interface ResourceOffer { - $type: "akash.market.v1beta4.ResourceOffer"; - resources: Resources | undefined; - count: number; -} - -/** MsgCreateBid defines an SDK message for creating Bid */ -export interface MsgCreateBid { - $type: "akash.market.v1beta4.MsgCreateBid"; - order: OrderID | undefined; - provider: string; - price: DecCoin | undefined; - deposit: Coin | undefined; - resourcesOffer: ResourceOffer[]; -} - -/** MsgCreateBidResponse defines the Msg/CreateBid response type. 
*/ -export interface MsgCreateBidResponse { - $type: "akash.market.v1beta4.MsgCreateBidResponse"; -} - -/** MsgCloseBid defines an SDK message for closing bid */ -export interface MsgCloseBid { - $type: "akash.market.v1beta4.MsgCloseBid"; - bidId: BidID | undefined; -} - -/** MsgCloseBidResponse defines the Msg/CloseBid response type. */ -export interface MsgCloseBidResponse { - $type: "akash.market.v1beta4.MsgCloseBidResponse"; -} - -/** - * BidID stores owner and all other seq numbers - * A successful bid becomes a Lease(ID). - */ -export interface BidID { - $type: "akash.market.v1beta4.BidID"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - provider: string; -} - -/** Bid stores BidID, state of bid and price */ -export interface Bid { - $type: "akash.market.v1beta4.Bid"; - bidId: BidID | undefined; - state: Bid_State; - price: DecCoin | undefined; - createdAt: Long; - resourcesOffer: ResourceOffer[]; -} - -/** State is an enum which refers to state of bid */ -export enum Bid_State { - /** invalid - Prefix should start with 0 in enum. So declaring dummy state */ - invalid = 0, - /** open - BidOpen denotes state for bid open */ - open = 1, - /** active - BidMatched denotes state for bid open */ - active = 2, - /** lost - BidLost denotes state for bid lost */ - lost = 3, - /** closed - BidClosed denotes state for bid closed */ - closed = 4, - UNRECOGNIZED = -1, -} - -export function bid_StateFromJSON(object: any): Bid_State { - switch (object) { - case 0: - case "invalid": - return Bid_State.invalid; - case 1: - case "open": - return Bid_State.open; - case 2: - case "active": - return Bid_State.active; - case 3: - case "lost": - return Bid_State.lost; - case 4: - case "closed": - return Bid_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Bid_State.UNRECOGNIZED; - } -} - -export function bid_StateToJSON(object: Bid_State): string { - switch (object) { - case Bid_State.invalid: - return "invalid"; - case Bid_State.open: - return "open"; - case Bid_State.active: - return "active"; - case Bid_State.lost: - return "lost"; - case Bid_State.closed: - return "closed"; - case Bid_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** BidFilters defines flags for bid list filter */ -export interface BidFilters { - $type: "akash.market.v1beta4.BidFilters"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - provider: string; - state: string; -} - -function createBaseResourceOffer(): ResourceOffer { - return { - $type: "akash.market.v1beta4.ResourceOffer", - resources: undefined, - count: 0, - }; -} - -export const ResourceOffer = { - $type: "akash.market.v1beta4.ResourceOffer" as const, - - encode( - message: ResourceOffer, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.resources !== undefined) { - Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); - } - if (message.count !== 0) { - writer.uint32(16).uint32(message.count); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ResourceOffer { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseResourceOffer(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.resources = Resources.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.count = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ResourceOffer { - return { - $type: ResourceOffer.$type, - resources: isSet(object.resources) - ? Resources.fromJSON(object.resources) - : undefined, - count: isSet(object.count) ? globalThis.Number(object.count) : 0, - }; - }, - - toJSON(message: ResourceOffer): unknown { - const obj: any = {}; - if (message.resources !== undefined) { - obj.resources = Resources.toJSON(message.resources); - } - if (message.count !== 0) { - obj.count = Math.round(message.count); - } - return obj; - }, - - create(base?: DeepPartial): ResourceOffer { - return ResourceOffer.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): ResourceOffer { - const message = createBaseResourceOffer(); - message.resources = - object.resources !== undefined && object.resources !== null - ? Resources.fromPartial(object.resources) - : undefined; - message.count = object.count ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(ResourceOffer.$type, ResourceOffer); - -function createBaseMsgCreateBid(): MsgCreateBid { - return { - $type: "akash.market.v1beta4.MsgCreateBid", - order: undefined, - provider: "", - price: undefined, - deposit: undefined, - resourcesOffer: [], - }; -} - -export const MsgCreateBid = { - $type: "akash.market.v1beta4.MsgCreateBid" as const, - - encode( - message: MsgCreateBid, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.order !== undefined) { - OrderID.encode(message.order, writer.uint32(10).fork()).ldelim(); - } - if (message.provider !== "") { - writer.uint32(18).string(message.provider); - } - if (message.price !== undefined) { - DecCoin.encode(message.price, writer.uint32(26).fork()).ldelim(); - } - if (message.deposit !== undefined) { - Coin.encode(message.deposit, writer.uint32(34).fork()).ldelim(); - } - for (const v of message.resourcesOffer) { - ResourceOffer.encode(v!, writer.uint32(42).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateBid { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCreateBid(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.order = OrderID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.provider = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.price = DecCoin.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.deposit = Coin.decode(reader, reader.uint32()); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.resourcesOffer.push( - ResourceOffer.decode(reader, reader.uint32()), - ); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCreateBid { - return { - $type: MsgCreateBid.$type, - order: isSet(object.order) ? OrderID.fromJSON(object.order) : undefined, - provider: isSet(object.provider) - ? globalThis.String(object.provider) - : "", - price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, - deposit: isSet(object.deposit) - ? Coin.fromJSON(object.deposit) - : undefined, - resourcesOffer: globalThis.Array.isArray(object?.resourcesOffer) - ? object.resourcesOffer.map((e: any) => ResourceOffer.fromJSON(e)) - : [], - }; - }, - - toJSON(message: MsgCreateBid): unknown { - const obj: any = {}; - if (message.order !== undefined) { - obj.order = OrderID.toJSON(message.order); - } - if (message.provider !== "") { - obj.provider = message.provider; - } - if (message.price !== undefined) { - obj.price = DecCoin.toJSON(message.price); - } - if (message.deposit !== undefined) { - obj.deposit = Coin.toJSON(message.deposit); - } - if (message.resourcesOffer?.length) { - obj.resourcesOffer = message.resourcesOffer.map((e) => - ResourceOffer.toJSON(e), - ); - } - return obj; - }, - - create(base?: DeepPartial): MsgCreateBid { - return MsgCreateBid.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCreateBid { - const message = createBaseMsgCreateBid(); - message.order = - object.order !== undefined && object.order !== null - ? OrderID.fromPartial(object.order) - : undefined; - message.provider = object.provider ?? ""; - message.price = - object.price !== undefined && object.price !== null - ? DecCoin.fromPartial(object.price) - : undefined; - message.deposit = - object.deposit !== undefined && object.deposit !== null - ? Coin.fromPartial(object.deposit) - : undefined; - message.resourcesOffer = - object.resourcesOffer?.map((e) => ResourceOffer.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateBid.$type, MsgCreateBid); - -function createBaseMsgCreateBidResponse(): MsgCreateBidResponse { - return { $type: "akash.market.v1beta4.MsgCreateBidResponse" }; -} - -export const MsgCreateBidResponse = { - $type: "akash.market.v1beta4.MsgCreateBidResponse" as const, - - encode( - _: MsgCreateBidResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateBidResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCreateBidResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCreateBidResponse { - return { $type: MsgCreateBidResponse.$type }; - }, - - toJSON(_: MsgCreateBidResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgCreateBidResponse { - return MsgCreateBidResponse.fromPartial(base ?? {}); - }, - fromPartial(_: DeepPartial): MsgCreateBidResponse { - const message = createBaseMsgCreateBidResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateBidResponse.$type, MsgCreateBidResponse); - -function createBaseMsgCloseBid(): MsgCloseBid { - return { $type: "akash.market.v1beta4.MsgCloseBid", bidId: undefined }; -} - -export const MsgCloseBid = { - $type: "akash.market.v1beta4.MsgCloseBid" as const, - - encode( - message: MsgCloseBid, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bidId !== undefined) { - BidID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseBid { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseBid(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bidId = BidID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCloseBid { - return { - $type: MsgCloseBid.$type, - bidId: isSet(object.bidId) ? BidID.fromJSON(object.bidId) : undefined, - }; - }, - - toJSON(message: MsgCloseBid): unknown { - const obj: any = {}; - if (message.bidId !== undefined) { - obj.bidId = BidID.toJSON(message.bidId); - } - return obj; - }, - - create(base?: DeepPartial): MsgCloseBid { - return MsgCloseBid.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCloseBid { - const message = createBaseMsgCloseBid(); - message.bidId = - object.bidId !== undefined && object.bidId !== null - ? BidID.fromPartial(object.bidId) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseBid.$type, MsgCloseBid); - -function createBaseMsgCloseBidResponse(): MsgCloseBidResponse { - return { $type: "akash.market.v1beta4.MsgCloseBidResponse" }; -} - -export const MsgCloseBidResponse = { - $type: "akash.market.v1beta4.MsgCloseBidResponse" as const, - - encode( - _: MsgCloseBidResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseBidResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCloseBidResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCloseBidResponse { - return { $type: MsgCloseBidResponse.$type }; - }, - - toJSON(_: MsgCloseBidResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgCloseBidResponse { - return MsgCloseBidResponse.fromPartial(base ?? {}); - }, - fromPartial(_: DeepPartial): MsgCloseBidResponse { - const message = createBaseMsgCloseBidResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseBidResponse.$type, MsgCloseBidResponse); - -function createBaseBidID(): BidID { - return { - $type: "akash.market.v1beta4.BidID", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - provider: "", - }; -} - -export const BidID = { - $type: "akash.market.v1beta4.BidID" as const, - - encode(message: BidID, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.provider !== "") { - writer.uint32(42).string(message.provider); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): BidID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseBidID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.provider = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): BidID { - return { - $type: BidID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, - provider: isSet(object.provider) - ? globalThis.String(object.provider) - : "", - }; - }, - - toJSON(message: BidID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - if (message.provider !== "") { - obj.provider = message.provider; - } - return obj; - }, - - create(base?: DeepPartial): BidID { - return BidID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): BidID { - const message = createBaseBidID(); - message.owner = object.owner ?? 
""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.provider = object.provider ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(BidID.$type, BidID); - -function createBaseBid(): Bid { - return { - $type: "akash.market.v1beta4.Bid", - bidId: undefined, - state: 0, - price: undefined, - createdAt: Long.ZERO, - resourcesOffer: [], - }; -} - -export const Bid = { - $type: "akash.market.v1beta4.Bid" as const, - - encode(message: Bid, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.bidId !== undefined) { - BidID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.price !== undefined) { - DecCoin.encode(message.price, writer.uint32(26).fork()).ldelim(); - } - if (!message.createdAt.equals(Long.ZERO)) { - writer.uint32(32).int64(message.createdAt); - } - for (const v of message.resourcesOffer) { - ResourceOffer.encode(v!, writer.uint32(42).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Bid { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseBid(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bidId = BidID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.price = DecCoin.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.createdAt = reader.int64() as Long; - continue; - case 5: - if (tag !== 42) { - break; - } - - message.resourcesOffer.push( - ResourceOffer.decode(reader, reader.uint32()), - ); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Bid { - return { - $type: Bid.$type, - bidId: isSet(object.bidId) ? BidID.fromJSON(object.bidId) : undefined, - state: isSet(object.state) ? bid_StateFromJSON(object.state) : 0, - price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, - createdAt: isSet(object.createdAt) - ? Long.fromValue(object.createdAt) - : Long.ZERO, - resourcesOffer: globalThis.Array.isArray(object?.resourcesOffer) - ? object.resourcesOffer.map((e: any) => ResourceOffer.fromJSON(e)) - : [], - }; - }, - - toJSON(message: Bid): unknown { - const obj: any = {}; - if (message.bidId !== undefined) { - obj.bidId = BidID.toJSON(message.bidId); - } - if (message.state !== 0) { - obj.state = bid_StateToJSON(message.state); - } - if (message.price !== undefined) { - obj.price = DecCoin.toJSON(message.price); - } - if (!message.createdAt.equals(Long.ZERO)) { - obj.createdAt = (message.createdAt || Long.ZERO).toString(); - } - if (message.resourcesOffer?.length) { - obj.resourcesOffer = message.resourcesOffer.map((e) => - ResourceOffer.toJSON(e), - ); - } - return obj; - }, - - create(base?: DeepPartial): Bid { - return Bid.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Bid { - const message = createBaseBid(); - message.bidId = - object.bidId !== undefined && object.bidId !== null - ? 
BidID.fromPartial(object.bidId) - : undefined; - message.state = object.state ?? 0; - message.price = - object.price !== undefined && object.price !== null - ? DecCoin.fromPartial(object.price) - : undefined; - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? Long.fromValue(object.createdAt) - : Long.ZERO; - message.resourcesOffer = - object.resourcesOffer?.map((e) => ResourceOffer.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(Bid.$type, Bid); - -function createBaseBidFilters(): BidFilters { - return { - $type: "akash.market.v1beta4.BidFilters", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - provider: "", - state: "", - }; -} - -export const BidFilters = { - $type: "akash.market.v1beta4.BidFilters" as const, - - encode( - message: BidFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.provider !== "") { - writer.uint32(42).string(message.provider); - } - if (message.state !== "") { - writer.uint32(50).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): BidFilters { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseBidFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.provider = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.state = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): BidFilters { - return { - $type: BidFilters.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, - provider: isSet(object.provider) - ? globalThis.String(object.provider) - : "", - state: isSet(object.state) ? globalThis.String(object.state) : "", - }; - }, - - toJSON(message: BidFilters): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - if (message.provider !== "") { - obj.provider = message.provider; - } - if (message.state !== "") { - obj.state = message.state; - } - return obj; - }, - - create(base?: DeepPartial): BidFilters { - return BidFilters.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): BidFilters { - const message = createBaseBidFilters(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.provider = object.provider ?? ""; - message.state = object.state ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(BidFilters.$type, BidFilters); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta4/genesis.ts b/ts/src/generated/akash/market/v1beta4/genesis.ts deleted file mode 100644 index b22bca0c..00000000 --- a/ts/src/generated/akash/market/v1beta4/genesis.ts +++ /dev/null @@ -1,175 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Bid } from "./bid"; -import { Lease } from "./lease"; -import { Order } from "./order"; -import { Params } from "./params"; - -/** GenesisState defines the basic genesis state used by market module */ -export interface GenesisState { - $type: "akash.market.v1beta4.GenesisState"; - params: Params | undefined; - orders: Order[]; - leases: Lease[]; - bids: Bid[]; -} - -function createBaseGenesisState(): GenesisState { - return { - $type: "akash.market.v1beta4.GenesisState", - params: undefined, - orders: [], - leases: [], - bids: [], - }; -} - -export const GenesisState = { - $type: "akash.market.v1beta4.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.params !== undefined) { - Params.encode(message.params, writer.uint32(10).fork()).ldelim(); - } - for (const v of message.orders) { - Order.encode(v!, writer.uint32(18).fork()).ldelim(); - } - for (const v of message.leases) { - Lease.encode(v!, writer.uint32(26).fork()).ldelim(); - } - for (const v of message.bids) { - Bid.encode(v!, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.params = Params.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.orders.push(Order.decode(reader, reader.uint32())); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.leases.push(Lease.decode(reader, reader.uint32())); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.bids.push(Bid.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, - orders: globalThis.Array.isArray(object?.orders) - ? object.orders.map((e: any) => Order.fromJSON(e)) - : [], - leases: globalThis.Array.isArray(object?.leases) - ? object.leases.map((e: any) => Lease.fromJSON(e)) - : [], - bids: globalThis.Array.isArray(object?.bids) - ? object.bids.map((e: any) => Bid.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.params !== undefined) { - obj.params = Params.toJSON(message.params); - } - if (message.orders?.length) { - obj.orders = message.orders.map((e) => Order.toJSON(e)); - } - if (message.leases?.length) { - obj.leases = message.leases.map((e) => Lease.toJSON(e)); - } - if (message.bids?.length) { - obj.bids = message.bids.map((e) => Bid.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): GenesisState { - return GenesisState.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisState { - const message = createBaseGenesisState(); - message.params = - object.params !== undefined && object.params !== null - ? Params.fromPartial(object.params) - : undefined; - message.orders = object.orders?.map((e) => Order.fromPartial(e)) || []; - message.leases = object.leases?.map((e) => Lease.fromPartial(e)) || []; - message.bids = object.bids?.map((e) => Bid.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta4/lease.ts b/ts/src/generated/akash/market/v1beta4/lease.ts deleted file mode 100644 index 533890a0..00000000 --- a/ts/src/generated/akash/market/v1beta4/lease.ts +++ /dev/null @@ -1,980 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { DecCoin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { BidID } from "./bid"; - -/** LeaseID stores bid details of lease */ -export interface LeaseID { - $type: "akash.market.v1beta4.LeaseID"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - provider: string; -} - -/** Lease stores LeaseID, state of lease and price */ -export interface Lease { - $type: "akash.market.v1beta4.Lease"; - leaseId: LeaseID | undefined; - state: Lease_State; - price: DecCoin | undefined; - createdAt: Long; - closedOn: Long; -} - -/** State is an enum which refers to state of lease */ -export enum Lease_State { - /** invalid - Prefix should start with 0 in enum. So declaring dummy state */ - invalid = 0, - /** active - LeaseActive denotes state for lease active */ - active = 1, - /** insufficient_funds - LeaseInsufficientFunds denotes state for lease insufficient_funds */ - insufficient_funds = 2, - /** closed - LeaseClosed denotes state for lease closed */ - closed = 3, - UNRECOGNIZED = -1, -} - -export function lease_StateFromJSON(object: any): Lease_State { - switch (object) { - case 0: - case "invalid": - return Lease_State.invalid; - case 1: - case "active": - return Lease_State.active; - case 2: - case "insufficient_funds": - return Lease_State.insufficient_funds; - case 3: - case "closed": - return Lease_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Lease_State.UNRECOGNIZED; - } -} - -export function lease_StateToJSON(object: Lease_State): string { - switch (object) { - case Lease_State.invalid: - return "invalid"; - case Lease_State.active: - return "active"; - case Lease_State.insufficient_funds: - return "insufficient_funds"; - case Lease_State.closed: - return "closed"; - case Lease_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** LeaseFilters defines flags for lease list filter */ -export interface LeaseFilters { - $type: "akash.market.v1beta4.LeaseFilters"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - provider: string; - state: string; -} - -/** MsgCreateLease is sent to create a lease */ -export interface MsgCreateLease { - $type: "akash.market.v1beta4.MsgCreateLease"; - bidId: BidID | undefined; -} - -/** MsgCreateLeaseResponse is the response from creating a lease */ -export interface MsgCreateLeaseResponse { - $type: "akash.market.v1beta4.MsgCreateLeaseResponse"; -} - -/** MsgWithdrawLease defines an SDK message for closing bid */ -export interface MsgWithdrawLease { - $type: "akash.market.v1beta4.MsgWithdrawLease"; - bidId: LeaseID | undefined; -} - -/** MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. 
*/ -export interface MsgWithdrawLeaseResponse { - $type: "akash.market.v1beta4.MsgWithdrawLeaseResponse"; -} - -/** MsgCloseLease defines an SDK message for closing order */ -export interface MsgCloseLease { - $type: "akash.market.v1beta4.MsgCloseLease"; - leaseId: LeaseID | undefined; -} - -/** MsgCloseLeaseResponse defines the Msg/CloseLease response type. */ -export interface MsgCloseLeaseResponse { - $type: "akash.market.v1beta4.MsgCloseLeaseResponse"; -} - -function createBaseLeaseID(): LeaseID { - return { - $type: "akash.market.v1beta4.LeaseID", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - provider: "", - }; -} - -export const LeaseID = { - $type: "akash.market.v1beta4.LeaseID" as const, - - encode( - message: LeaseID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.provider !== "") { - writer.uint32(42).string(message.provider); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): LeaseID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseLeaseID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.provider = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): LeaseID { - return { - $type: LeaseID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, - provider: isSet(object.provider) - ? globalThis.String(object.provider) - : "", - }; - }, - - toJSON(message: LeaseID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - if (message.provider !== "") { - obj.provider = message.provider; - } - return obj; - }, - - create(base?: DeepPartial): LeaseID { - return LeaseID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): LeaseID { - const message = createBaseLeaseID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.provider = object.provider ?? 
""; - return message; - }, -}; - -messageTypeRegistry.set(LeaseID.$type, LeaseID); - -function createBaseLease(): Lease { - return { - $type: "akash.market.v1beta4.Lease", - leaseId: undefined, - state: 0, - price: undefined, - createdAt: Long.ZERO, - closedOn: Long.ZERO, - }; -} - -export const Lease = { - $type: "akash.market.v1beta4.Lease" as const, - - encode(message: Lease, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.leaseId !== undefined) { - LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.price !== undefined) { - DecCoin.encode(message.price, writer.uint32(26).fork()).ldelim(); - } - if (!message.createdAt.equals(Long.ZERO)) { - writer.uint32(32).int64(message.createdAt); - } - if (!message.closedOn.equals(Long.ZERO)) { - writer.uint32(40).int64(message.closedOn); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Lease { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseLease(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.leaseId = LeaseID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.price = DecCoin.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.createdAt = reader.int64() as Long; - continue; - case 5: - if (tag !== 40) { - break; - } - - message.closedOn = reader.int64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Lease { - return { - $type: Lease.$type, - leaseId: isSet(object.leaseId) - ? LeaseID.fromJSON(object.leaseId) - : undefined, - state: isSet(object.state) ? lease_StateFromJSON(object.state) : 0, - price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, - createdAt: isSet(object.createdAt) - ? Long.fromValue(object.createdAt) - : Long.ZERO, - closedOn: isSet(object.closedOn) - ? Long.fromValue(object.closedOn) - : Long.ZERO, - }; - }, - - toJSON(message: Lease): unknown { - const obj: any = {}; - if (message.leaseId !== undefined) { - obj.leaseId = LeaseID.toJSON(message.leaseId); - } - if (message.state !== 0) { - obj.state = lease_StateToJSON(message.state); - } - if (message.price !== undefined) { - obj.price = DecCoin.toJSON(message.price); - } - if (!message.createdAt.equals(Long.ZERO)) { - obj.createdAt = (message.createdAt || Long.ZERO).toString(); - } - if (!message.closedOn.equals(Long.ZERO)) { - obj.closedOn = (message.closedOn || Long.ZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): Lease { - return Lease.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Lease { - const message = createBaseLease(); - message.leaseId = - object.leaseId !== undefined && object.leaseId !== null - ? LeaseID.fromPartial(object.leaseId) - : undefined; - message.state = object.state ?? 0; - message.price = - object.price !== undefined && object.price !== null - ? DecCoin.fromPartial(object.price) - : undefined; - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? 
Long.fromValue(object.createdAt) - : Long.ZERO; - message.closedOn = - object.closedOn !== undefined && object.closedOn !== null - ? Long.fromValue(object.closedOn) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Lease.$type, Lease); - -function createBaseLeaseFilters(): LeaseFilters { - return { - $type: "akash.market.v1beta4.LeaseFilters", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - provider: "", - state: "", - }; -} - -export const LeaseFilters = { - $type: "akash.market.v1beta4.LeaseFilters" as const, - - encode( - message: LeaseFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.provider !== "") { - writer.uint32(42).string(message.provider); - } - if (message.state !== "") { - writer.uint32(50).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): LeaseFilters { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseLeaseFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.provider = reader.string(); - continue; - case 6: - if (tag !== 50) { - break; - } - - message.state = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): LeaseFilters { - return { - $type: LeaseFilters.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, - provider: isSet(object.provider) - ? globalThis.String(object.provider) - : "", - state: isSet(object.state) ? globalThis.String(object.state) : "", - }; - }, - - toJSON(message: LeaseFilters): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - if (message.provider !== "") { - obj.provider = message.provider; - } - if (message.state !== "") { - obj.state = message.state; - } - return obj; - }, - - create(base?: DeepPartial): LeaseFilters { - return LeaseFilters.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): LeaseFilters { - const message = createBaseLeaseFilters(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? 
Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.provider = object.provider ?? ""; - message.state = object.state ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(LeaseFilters.$type, LeaseFilters); - -function createBaseMsgCreateLease(): MsgCreateLease { - return { $type: "akash.market.v1beta4.MsgCreateLease", bidId: undefined }; -} - -export const MsgCreateLease = { - $type: "akash.market.v1beta4.MsgCreateLease" as const, - - encode( - message: MsgCreateLease, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bidId !== undefined) { - BidID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateLease { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateLease(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bidId = BidID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCreateLease { - return { - $type: MsgCreateLease.$type, - bidId: isSet(object.bidId) ? BidID.fromJSON(object.bidId) : undefined, - }; - }, - - toJSON(message: MsgCreateLease): unknown { - const obj: any = {}; - if (message.bidId !== undefined) { - obj.bidId = BidID.toJSON(message.bidId); - } - return obj; - }, - - create(base?: DeepPartial): MsgCreateLease { - return MsgCreateLease.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCreateLease { - const message = createBaseMsgCreateLease(); - message.bidId = - object.bidId !== undefined && object.bidId !== null - ? BidID.fromPartial(object.bidId) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateLease.$type, MsgCreateLease); - -function createBaseMsgCreateLeaseResponse(): MsgCreateLeaseResponse { - return { $type: "akash.market.v1beta4.MsgCreateLeaseResponse" }; -} - -export const MsgCreateLeaseResponse = { - $type: "akash.market.v1beta4.MsgCreateLeaseResponse" as const, - - encode( - _: MsgCreateLeaseResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateLeaseResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateLeaseResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCreateLeaseResponse { - return { $type: MsgCreateLeaseResponse.$type }; - }, - - toJSON(_: MsgCreateLeaseResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgCreateLeaseResponse { - return MsgCreateLeaseResponse.fromPartial(base ?? 
{}); - }, - fromPartial(_: DeepPartial): MsgCreateLeaseResponse { - const message = createBaseMsgCreateLeaseResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateLeaseResponse.$type, MsgCreateLeaseResponse); - -function createBaseMsgWithdrawLease(): MsgWithdrawLease { - return { $type: "akash.market.v1beta4.MsgWithdrawLease", bidId: undefined }; -} - -export const MsgWithdrawLease = { - $type: "akash.market.v1beta4.MsgWithdrawLease" as const, - - encode( - message: MsgWithdrawLease, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bidId !== undefined) { - LeaseID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgWithdrawLease { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgWithdrawLease(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bidId = LeaseID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgWithdrawLease { - return { - $type: MsgWithdrawLease.$type, - bidId: isSet(object.bidId) ? LeaseID.fromJSON(object.bidId) : undefined, - }; - }, - - toJSON(message: MsgWithdrawLease): unknown { - const obj: any = {}; - if (message.bidId !== undefined) { - obj.bidId = LeaseID.toJSON(message.bidId); - } - return obj; - }, - - create(base?: DeepPartial): MsgWithdrawLease { - return MsgWithdrawLease.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgWithdrawLease { - const message = createBaseMsgWithdrawLease(); - message.bidId = - object.bidId !== undefined && object.bidId !== null - ? LeaseID.fromPartial(object.bidId) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgWithdrawLease.$type, MsgWithdrawLease); - -function createBaseMsgWithdrawLeaseResponse(): MsgWithdrawLeaseResponse { - return { $type: "akash.market.v1beta4.MsgWithdrawLeaseResponse" }; -} - -export const MsgWithdrawLeaseResponse = { - $type: "akash.market.v1beta4.MsgWithdrawLeaseResponse" as const, - - encode( - _: MsgWithdrawLeaseResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgWithdrawLeaseResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgWithdrawLeaseResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgWithdrawLeaseResponse { - return { $type: MsgWithdrawLeaseResponse.$type }; - }, - - toJSON(_: MsgWithdrawLeaseResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgWithdrawLeaseResponse { - return MsgWithdrawLeaseResponse.fromPartial(base ?? 
{}); - }, - fromPartial( - _: DeepPartial, - ): MsgWithdrawLeaseResponse { - const message = createBaseMsgWithdrawLeaseResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgWithdrawLeaseResponse.$type, - MsgWithdrawLeaseResponse, -); - -function createBaseMsgCloseLease(): MsgCloseLease { - return { $type: "akash.market.v1beta4.MsgCloseLease", leaseId: undefined }; -} - -export const MsgCloseLease = { - $type: "akash.market.v1beta4.MsgCloseLease" as const, - - encode( - message: MsgCloseLease, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.leaseId !== undefined) { - LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseLease { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseLease(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.leaseId = LeaseID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCloseLease { - return { - $type: MsgCloseLease.$type, - leaseId: isSet(object.leaseId) - ? LeaseID.fromJSON(object.leaseId) - : undefined, - }; - }, - - toJSON(message: MsgCloseLease): unknown { - const obj: any = {}; - if (message.leaseId !== undefined) { - obj.leaseId = LeaseID.toJSON(message.leaseId); - } - return obj; - }, - - create(base?: DeepPartial): MsgCloseLease { - return MsgCloseLease.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCloseLease { - const message = createBaseMsgCloseLease(); - message.leaseId = - object.leaseId !== undefined && object.leaseId !== null - ? LeaseID.fromPartial(object.leaseId) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseLease.$type, MsgCloseLease); - -function createBaseMsgCloseLeaseResponse(): MsgCloseLeaseResponse { - return { $type: "akash.market.v1beta4.MsgCloseLeaseResponse" }; -} - -export const MsgCloseLeaseResponse = { - $type: "akash.market.v1beta4.MsgCloseLeaseResponse" as const, - - encode( - _: MsgCloseLeaseResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCloseLeaseResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCloseLeaseResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCloseLeaseResponse { - return { $type: MsgCloseLeaseResponse.$type }; - }, - - toJSON(_: MsgCloseLeaseResponse): unknown { - const obj: any = {}; - return obj; - }, - - create(base?: DeepPartial): MsgCloseLeaseResponse { - return MsgCloseLeaseResponse.fromPartial(base ?? 
{}); - }, - fromPartial(_: DeepPartial): MsgCloseLeaseResponse { - const message = createBaseMsgCloseLeaseResponse(); - return message; - }, -}; - -messageTypeRegistry.set(MsgCloseLeaseResponse.$type, MsgCloseLeaseResponse); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta4/order.ts b/ts/src/generated/akash/market/v1beta4/order.ts deleted file mode 100644 index 14e1b696..00000000 --- a/ts/src/generated/akash/market/v1beta4/order.ts +++ /dev/null @@ -1,502 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { GroupSpec } from "../../deployment/v1beta3/groupspec"; - -/** OrderID stores owner and all other seq numbers */ -export interface OrderID { - $type: "akash.market.v1beta4.OrderID"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; -} - -/** Order stores orderID, state of order and other details */ -export interface Order { - $type: "akash.market.v1beta4.Order"; - orderId: OrderID | undefined; - state: Order_State; - spec: GroupSpec | undefined; - createdAt: Long; -} - -/** State is an enum which refers to state of order */ -export enum Order_State { - /** invalid - Prefix should start with 0 in enum. 
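/*
 * Illustrative usage sketch for the generated lease.ts module removed above.
 * Each ts-proto message object exposes encode/decode, fromJSON/toJSON and
 * fromPartial helpers, with 64-bit integer fields surfaced as Long. This is a
 * minimal sketch only: the "./lease" import path, the bech32 addresses and the
 * numeric values are assumptions for illustration, not part of the change.
 */
import Long from "long";
import { Lease, Lease_State } from "./lease";

// Build a Lease via fromPartial; DeepPartial lets Long-typed fields accept
// plain numbers or strings in addition to Long instances.
const lease = Lease.fromPartial({
  leaseId: {
    owner: "akash1owner...",       // hypothetical owner address
    dseq: 12345,
    gseq: 1,
    oseq: 1,
    provider: "akash1provider...", // hypothetical provider address
  },
  state: Lease_State.active,
  createdAt: Long.fromNumber(100),
});

// Wire round-trip: encode to protobuf bytes, then decode back.
const bytes = Lease.encode(lease).finish();
const decoded = Lease.decode(bytes);

// JSON round-trip uses the enum's string names and stringified Long values.
const restored = Lease.fromJSON(Lease.toJSON(decoded));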
So declaring dummy state */ - invalid = 0, - /** open - OrderOpen denotes state for order open */ - open = 1, - /** active - OrderMatched denotes state for order matched */ - active = 2, - /** closed - OrderClosed denotes state for order lost */ - closed = 3, - UNRECOGNIZED = -1, -} - -export function order_StateFromJSON(object: any): Order_State { - switch (object) { - case 0: - case "invalid": - return Order_State.invalid; - case 1: - case "open": - return Order_State.open; - case 2: - case "active": - return Order_State.active; - case 3: - case "closed": - return Order_State.closed; - case -1: - case "UNRECOGNIZED": - default: - return Order_State.UNRECOGNIZED; - } -} - -export function order_StateToJSON(object: Order_State): string { - switch (object) { - case Order_State.invalid: - return "invalid"; - case Order_State.open: - return "open"; - case Order_State.active: - return "active"; - case Order_State.closed: - return "closed"; - case Order_State.UNRECOGNIZED: - default: - return "UNRECOGNIZED"; - } -} - -/** OrderFilters defines flags for order list filter */ -export interface OrderFilters { - $type: "akash.market.v1beta4.OrderFilters"; - owner: string; - dseq: Long; - gseq: number; - oseq: number; - state: string; -} - -function createBaseOrderID(): OrderID { - return { - $type: "akash.market.v1beta4.OrderID", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - }; -} - -export const OrderID = { - $type: "akash.market.v1beta4.OrderID" as const, - - encode( - message: OrderID, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): OrderID { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseOrderID(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): OrderID { - return { - $type: OrderID.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? 
globalThis.Number(object.oseq) : 0, - }; - }, - - toJSON(message: OrderID): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - return obj; - }, - - create(base?: DeepPartial): OrderID { - return OrderID.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): OrderID { - const message = createBaseOrderID(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(OrderID.$type, OrderID); - -function createBaseOrder(): Order { - return { - $type: "akash.market.v1beta4.Order", - orderId: undefined, - state: 0, - spec: undefined, - createdAt: Long.ZERO, - }; -} - -export const Order = { - $type: "akash.market.v1beta4.Order" as const, - - encode(message: Order, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { - if (message.orderId !== undefined) { - OrderID.encode(message.orderId, writer.uint32(10).fork()).ldelim(); - } - if (message.state !== 0) { - writer.uint32(16).int32(message.state); - } - if (message.spec !== undefined) { - GroupSpec.encode(message.spec, writer.uint32(26).fork()).ldelim(); - } - if (!message.createdAt.equals(Long.ZERO)) { - writer.uint32(32).int64(message.createdAt); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Order { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseOrder(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.orderId = OrderID.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.state = reader.int32() as any; - continue; - case 3: - if (tag !== 26) { - break; - } - - message.spec = GroupSpec.decode(reader, reader.uint32()); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.createdAt = reader.int64() as Long; - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Order { - return { - $type: Order.$type, - orderId: isSet(object.orderId) - ? OrderID.fromJSON(object.orderId) - : undefined, - state: isSet(object.state) ? order_StateFromJSON(object.state) : 0, - spec: isSet(object.spec) ? GroupSpec.fromJSON(object.spec) : undefined, - createdAt: isSet(object.createdAt) - ? Long.fromValue(object.createdAt) - : Long.ZERO, - }; - }, - - toJSON(message: Order): unknown { - const obj: any = {}; - if (message.orderId !== undefined) { - obj.orderId = OrderID.toJSON(message.orderId); - } - if (message.state !== 0) { - obj.state = order_StateToJSON(message.state); - } - if (message.spec !== undefined) { - obj.spec = GroupSpec.toJSON(message.spec); - } - if (!message.createdAt.equals(Long.ZERO)) { - obj.createdAt = (message.createdAt || Long.ZERO).toString(); - } - return obj; - }, - - create(base?: DeepPartial): Order { - return Order.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): Order { - const message = createBaseOrder(); - message.orderId = - object.orderId !== undefined && object.orderId !== null - ? OrderID.fromPartial(object.orderId) - : undefined; - message.state = object.state ?? 0; - message.spec = - object.spec !== undefined && object.spec !== null - ? GroupSpec.fromPartial(object.spec) - : undefined; - message.createdAt = - object.createdAt !== undefined && object.createdAt !== null - ? Long.fromValue(object.createdAt) - : Long.ZERO; - return message; - }, -}; - -messageTypeRegistry.set(Order.$type, Order); - -function createBaseOrderFilters(): OrderFilters { - return { - $type: "akash.market.v1beta4.OrderFilters", - owner: "", - dseq: Long.UZERO, - gseq: 0, - oseq: 0, - state: "", - }; -} - -export const OrderFilters = { - $type: "akash.market.v1beta4.OrderFilters" as const, - - encode( - message: OrderFilters, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (!message.dseq.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.dseq); - } - if (message.gseq !== 0) { - writer.uint32(24).uint32(message.gseq); - } - if (message.oseq !== 0) { - writer.uint32(32).uint32(message.oseq); - } - if (message.state !== "") { - writer.uint32(42).string(message.state); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): OrderFilters { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseOrderFilters(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.dseq = reader.uint64() as Long; - continue; - case 3: - if (tag !== 24) { - break; - } - - message.gseq = reader.uint32(); - continue; - case 4: - if (tag !== 32) { - break; - } - - message.oseq = reader.uint32(); - continue; - case 5: - if (tag !== 42) { - break; - } - - message.state = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): OrderFilters { - return { - $type: OrderFilters.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, - gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, - oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, - state: isSet(object.state) ? globalThis.String(object.state) : "", - }; - }, - - toJSON(message: OrderFilters): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (!message.dseq.equals(Long.UZERO)) { - obj.dseq = (message.dseq || Long.UZERO).toString(); - } - if (message.gseq !== 0) { - obj.gseq = Math.round(message.gseq); - } - if (message.oseq !== 0) { - obj.oseq = Math.round(message.oseq); - } - if (message.state !== "") { - obj.state = message.state; - } - return obj; - }, - - create(base?: DeepPartial): OrderFilters { - return OrderFilters.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): OrderFilters { - const message = createBaseOrderFilters(); - message.owner = object.owner ?? ""; - message.dseq = - object.dseq !== undefined && object.dseq !== null - ? 
Long.fromValue(object.dseq) - : Long.UZERO; - message.gseq = object.gseq ?? 0; - message.oseq = object.oseq ?? 0; - message.state = object.state ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(OrderFilters.$type, OrderFilters); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta4/params.ts b/ts/src/generated/akash/market/v1beta4/params.ts deleted file mode 100644 index 48934411..00000000 --- a/ts/src/generated/akash/market/v1beta4/params.ts +++ /dev/null @@ -1,136 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Coin } from "../../../cosmos/base/v1beta1/coin"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** Params is the params for the x/market module */ -export interface Params { - $type: "akash.market.v1beta4.Params"; - bidMinDeposit: Coin | undefined; - orderMaxBids: number; -} - -function createBaseParams(): Params { - return { - $type: "akash.market.v1beta4.Params", - bidMinDeposit: undefined, - orderMaxBids: 0, - }; -} - -export const Params = { - $type: "akash.market.v1beta4.Params" as const, - - encode( - message: Params, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bidMinDeposit !== undefined) { - Coin.encode(message.bidMinDeposit, writer.uint32(10).fork()).ldelim(); - } - if (message.orderMaxBids !== 0) { - writer.uint32(16).uint32(message.orderMaxBids); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Params { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseParams(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bidMinDeposit = Coin.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.orderMaxBids = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Params { - return { - $type: Params.$type, - bidMinDeposit: isSet(object.bidMinDeposit) - ? Coin.fromJSON(object.bidMinDeposit) - : undefined, - orderMaxBids: isSet(object.orderMaxBids) - ? globalThis.Number(object.orderMaxBids) - : 0, - }; - }, - - toJSON(message: Params): unknown { - const obj: any = {}; - if (message.bidMinDeposit !== undefined) { - obj.bidMinDeposit = Coin.toJSON(message.bidMinDeposit); - } - if (message.orderMaxBids !== 0) { - obj.orderMaxBids = Math.round(message.orderMaxBids); - } - return obj; - }, - - create(base?: DeepPartial): Params { - return Params.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Params { - const message = createBaseParams(); - message.bidMinDeposit = - object.bidMinDeposit !== undefined && object.bidMinDeposit !== null - ? 
Coin.fromPartial(object.bidMinDeposit) - : undefined; - message.orderMaxBids = object.orderMaxBids ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(Params.$type, Params); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta4/query.ts b/ts/src/generated/akash/market/v1beta4/query.ts deleted file mode 100644 index 386a009e..00000000 --- a/ts/src/generated/akash/market/v1beta4/query.ts +++ /dev/null @@ -1,1275 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Account, FractionalPayment } from "../../escrow/v1beta3/types"; -import { Bid, BidFilters, BidID } from "./bid"; -import { Lease, LeaseFilters, LeaseID } from "./lease"; -import { Order, OrderFilters, OrderID } from "./order"; - -/** QueryOrdersRequest is request type for the Query/Orders RPC method */ -export interface QueryOrdersRequest { - $type: "akash.market.v1beta4.QueryOrdersRequest"; - filters: OrderFilters | undefined; - pagination: PageRequest | undefined; -} - -/** QueryOrdersResponse is response type for the Query/Orders RPC method */ -export interface QueryOrdersResponse { - $type: "akash.market.v1beta4.QueryOrdersResponse"; - orders: Order[]; - pagination: PageResponse | undefined; -} - -/** QueryOrderRequest is request type for the Query/Order RPC method */ -export interface QueryOrderRequest { - $type: "akash.market.v1beta4.QueryOrderRequest"; - id: OrderID | undefined; -} - -/** QueryOrderResponse is response type for the Query/Order RPC method */ -export interface QueryOrderResponse { - $type: "akash.market.v1beta4.QueryOrderResponse"; - order: Order | undefined; -} - -/** QueryBidsRequest is request type for the Query/Bids RPC method */ -export interface QueryBidsRequest { - $type: "akash.market.v1beta4.QueryBidsRequest"; - filters: BidFilters | undefined; - pagination: PageRequest | undefined; -} - -/** QueryBidsResponse is response type for the Query/Bids RPC method */ -export interface QueryBidsResponse { - $type: "akash.market.v1beta4.QueryBidsResponse"; - bids: QueryBidResponse[]; - pagination: PageResponse | undefined; -} - -/** QueryBidRequest is request type for the Query/Bid RPC method */ -export interface QueryBidRequest { - $type: "akash.market.v1beta4.QueryBidRequest"; - id: BidID | undefined; -} - -/** QueryBidResponse is response type for the Query/Bid RPC method */ -export interface QueryBidResponse { - $type: "akash.market.v1beta4.QueryBidResponse"; - bid: Bid | undefined; - escrowAccount: Account | undefined; -} - -/** QueryLeasesRequest is request type for the Query/Leases RPC method */ -export interface QueryLeasesRequest { - $type: "akash.market.v1beta4.QueryLeasesRequest"; - filters: LeaseFilters | undefined; - pagination: PageRequest | undefined; -} - -/** QueryLeasesResponse is response type for the Query/Leases RPC method */ -export interface 
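/*
 * Illustrative usage sketch for the generated params.ts module removed above,
 * the smallest of the deleted files: a single Params message carrying the
 * module's bidMinDeposit Coin and orderMaxBids count. The "./params" import
 * path and the denom/amount values are assumptions for illustration only.
 */
import { Params } from "./params";

// Construct module params from a partial object; Coin fields take nested
// partials with string denom/amount.
const params = Params.fromPartial({
  bidMinDeposit: { denom: "uakt", amount: "500000" }, // assumed denom/amount
  orderMaxBids: 20,
});

// Encode/decode round-trip over the protobuf wire format.
const decodedParams = Params.decode(Params.encode(params).finish());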
QueryLeasesResponse { - $type: "akash.market.v1beta4.QueryLeasesResponse"; - leases: QueryLeaseResponse[]; - pagination: PageResponse | undefined; -} - -/** QueryLeaseRequest is request type for the Query/Lease RPC method */ -export interface QueryLeaseRequest { - $type: "akash.market.v1beta4.QueryLeaseRequest"; - id: LeaseID | undefined; -} - -/** QueryLeaseResponse is response type for the Query/Lease RPC method */ -export interface QueryLeaseResponse { - $type: "akash.market.v1beta4.QueryLeaseResponse"; - lease: Lease | undefined; - escrowPayment: FractionalPayment | undefined; -} - -function createBaseQueryOrdersRequest(): QueryOrdersRequest { - return { - $type: "akash.market.v1beta4.QueryOrdersRequest", - filters: undefined, - pagination: undefined, - }; -} - -export const QueryOrdersRequest = { - $type: "akash.market.v1beta4.QueryOrdersRequest" as const, - - encode( - message: QueryOrdersRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.filters !== undefined) { - OrderFilters.encode(message.filters, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryOrdersRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryOrdersRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.filters = OrderFilters.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryOrdersRequest { - return { - $type: QueryOrdersRequest.$type, - filters: isSet(object.filters) - ? OrderFilters.fromJSON(object.filters) - : undefined, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryOrdersRequest): unknown { - const obj: any = {}; - if (message.filters !== undefined) { - obj.filters = OrderFilters.toJSON(message.filters); - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryOrdersRequest { - return QueryOrdersRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryOrdersRequest { - const message = createBaseQueryOrdersRequest(); - message.filters = - object.filters !== undefined && object.filters !== null - ? OrderFilters.fromPartial(object.filters) - : undefined; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? 
PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryOrdersRequest.$type, QueryOrdersRequest); - -function createBaseQueryOrdersResponse(): QueryOrdersResponse { - return { - $type: "akash.market.v1beta4.QueryOrdersResponse", - orders: [], - pagination: undefined, - }; -} - -export const QueryOrdersResponse = { - $type: "akash.market.v1beta4.QueryOrdersResponse" as const, - - encode( - message: QueryOrdersResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.orders) { - Order.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryOrdersResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryOrdersResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.orders.push(Order.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryOrdersResponse { - return { - $type: QueryOrdersResponse.$type, - orders: globalThis.Array.isArray(object?.orders) - ? object.orders.map((e: any) => Order.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryOrdersResponse): unknown { - const obj: any = {}; - if (message.orders?.length) { - obj.orders = message.orders.map((e) => Order.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryOrdersResponse { - return QueryOrdersResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryOrdersResponse { - const message = createBaseQueryOrdersResponse(); - message.orders = object.orders?.map((e) => Order.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryOrdersResponse.$type, QueryOrdersResponse); - -function createBaseQueryOrderRequest(): QueryOrderRequest { - return { $type: "akash.market.v1beta4.QueryOrderRequest", id: undefined }; -} - -export const QueryOrderRequest = { - $type: "akash.market.v1beta4.QueryOrderRequest" as const, - - encode( - message: QueryOrderRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - OrderID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryOrderRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryOrderRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = OrderID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryOrderRequest { - return { - $type: QueryOrderRequest.$type, - id: isSet(object.id) ? OrderID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryOrderRequest): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = OrderID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): QueryOrderRequest { - return QueryOrderRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryOrderRequest { - const message = createBaseQueryOrderRequest(); - message.id = - object.id !== undefined && object.id !== null - ? OrderID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryOrderRequest.$type, QueryOrderRequest); - -function createBaseQueryOrderResponse(): QueryOrderResponse { - return { $type: "akash.market.v1beta4.QueryOrderResponse", order: undefined }; -} - -export const QueryOrderResponse = { - $type: "akash.market.v1beta4.QueryOrderResponse" as const, - - encode( - message: QueryOrderResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.order !== undefined) { - Order.encode(message.order, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryOrderResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryOrderResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.order = Order.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryOrderResponse { - return { - $type: QueryOrderResponse.$type, - order: isSet(object.order) ? Order.fromJSON(object.order) : undefined, - }; - }, - - toJSON(message: QueryOrderResponse): unknown { - const obj: any = {}; - if (message.order !== undefined) { - obj.order = Order.toJSON(message.order); - } - return obj; - }, - - create(base?: DeepPartial): QueryOrderResponse { - return QueryOrderResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryOrderResponse { - const message = createBaseQueryOrderResponse(); - message.order = - object.order !== undefined && object.order !== null - ? 
Order.fromPartial(object.order) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryOrderResponse.$type, QueryOrderResponse); - -function createBaseQueryBidsRequest(): QueryBidsRequest { - return { - $type: "akash.market.v1beta4.QueryBidsRequest", - filters: undefined, - pagination: undefined, - }; -} - -export const QueryBidsRequest = { - $type: "akash.market.v1beta4.QueryBidsRequest" as const, - - encode( - message: QueryBidsRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.filters !== undefined) { - BidFilters.encode(message.filters, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryBidsRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryBidsRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.filters = BidFilters.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryBidsRequest { - return { - $type: QueryBidsRequest.$type, - filters: isSet(object.filters) - ? BidFilters.fromJSON(object.filters) - : undefined, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryBidsRequest): unknown { - const obj: any = {}; - if (message.filters !== undefined) { - obj.filters = BidFilters.toJSON(message.filters); - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryBidsRequest { - return QueryBidsRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryBidsRequest { - const message = createBaseQueryBidsRequest(); - message.filters = - object.filters !== undefined && object.filters !== null - ? BidFilters.fromPartial(object.filters) - : undefined; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryBidsRequest.$type, QueryBidsRequest); - -function createBaseQueryBidsResponse(): QueryBidsResponse { - return { - $type: "akash.market.v1beta4.QueryBidsResponse", - bids: [], - pagination: undefined, - }; -} - -export const QueryBidsResponse = { - $type: "akash.market.v1beta4.QueryBidsResponse" as const, - - encode( - message: QueryBidsResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.bids) { - QueryBidResponse.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryBidsResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryBidsResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bids.push(QueryBidResponse.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryBidsResponse { - return { - $type: QueryBidsResponse.$type, - bids: globalThis.Array.isArray(object?.bids) - ? object.bids.map((e: any) => QueryBidResponse.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryBidsResponse): unknown { - const obj: any = {}; - if (message.bids?.length) { - obj.bids = message.bids.map((e) => QueryBidResponse.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryBidsResponse { - return QueryBidsResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryBidsResponse { - const message = createBaseQueryBidsResponse(); - message.bids = - object.bids?.map((e) => QueryBidResponse.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryBidsResponse.$type, QueryBidsResponse); - -function createBaseQueryBidRequest(): QueryBidRequest { - return { $type: "akash.market.v1beta4.QueryBidRequest", id: undefined }; -} - -export const QueryBidRequest = { - $type: "akash.market.v1beta4.QueryBidRequest" as const, - - encode( - message: QueryBidRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - BidID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryBidRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryBidRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = BidID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryBidRequest { - return { - $type: QueryBidRequest.$type, - id: isSet(object.id) ? BidID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryBidRequest): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = BidID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): QueryBidRequest { - return QueryBidRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryBidRequest { - const message = createBaseQueryBidRequest(); - message.id = - object.id !== undefined && object.id !== null - ? 
BidID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryBidRequest.$type, QueryBidRequest); - -function createBaseQueryBidResponse(): QueryBidResponse { - return { - $type: "akash.market.v1beta4.QueryBidResponse", - bid: undefined, - escrowAccount: undefined, - }; -} - -export const QueryBidResponse = { - $type: "akash.market.v1beta4.QueryBidResponse" as const, - - encode( - message: QueryBidResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.bid !== undefined) { - Bid.encode(message.bid, writer.uint32(10).fork()).ldelim(); - } - if (message.escrowAccount !== undefined) { - Account.encode(message.escrowAccount, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryBidResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryBidResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.bid = Bid.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.escrowAccount = Account.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryBidResponse { - return { - $type: QueryBidResponse.$type, - bid: isSet(object.bid) ? Bid.fromJSON(object.bid) : undefined, - escrowAccount: isSet(object.escrowAccount) - ? Account.fromJSON(object.escrowAccount) - : undefined, - }; - }, - - toJSON(message: QueryBidResponse): unknown { - const obj: any = {}; - if (message.bid !== undefined) { - obj.bid = Bid.toJSON(message.bid); - } - if (message.escrowAccount !== undefined) { - obj.escrowAccount = Account.toJSON(message.escrowAccount); - } - return obj; - }, - - create(base?: DeepPartial): QueryBidResponse { - return QueryBidResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryBidResponse { - const message = createBaseQueryBidResponse(); - message.bid = - object.bid !== undefined && object.bid !== null - ? Bid.fromPartial(object.bid) - : undefined; - message.escrowAccount = - object.escrowAccount !== undefined && object.escrowAccount !== null - ? Account.fromPartial(object.escrowAccount) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryBidResponse.$type, QueryBidResponse); - -function createBaseQueryLeasesRequest(): QueryLeasesRequest { - return { - $type: "akash.market.v1beta4.QueryLeasesRequest", - filters: undefined, - pagination: undefined, - }; -} - -export const QueryLeasesRequest = { - $type: "akash.market.v1beta4.QueryLeasesRequest" as const, - - encode( - message: QueryLeasesRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.filters !== undefined) { - LeaseFilters.encode(message.filters, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryLeasesRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryLeasesRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.filters = LeaseFilters.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryLeasesRequest { - return { - $type: QueryLeasesRequest.$type, - filters: isSet(object.filters) - ? LeaseFilters.fromJSON(object.filters) - : undefined, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryLeasesRequest): unknown { - const obj: any = {}; - if (message.filters !== undefined) { - obj.filters = LeaseFilters.toJSON(message.filters); - } - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryLeasesRequest { - return QueryLeasesRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryLeasesRequest { - const message = createBaseQueryLeasesRequest(); - message.filters = - object.filters !== undefined && object.filters !== null - ? LeaseFilters.fromPartial(object.filters) - : undefined; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryLeasesRequest.$type, QueryLeasesRequest); - -function createBaseQueryLeasesResponse(): QueryLeasesResponse { - return { - $type: "akash.market.v1beta4.QueryLeasesResponse", - leases: [], - pagination: undefined, - }; -} - -export const QueryLeasesResponse = { - $type: "akash.market.v1beta4.QueryLeasesResponse" as const, - - encode( - message: QueryLeasesResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.leases) { - QueryLeaseResponse.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryLeasesResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryLeasesResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.leases.push( - QueryLeaseResponse.decode(reader, reader.uint32()), - ); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryLeasesResponse { - return { - $type: QueryLeasesResponse.$type, - leases: globalThis.Array.isArray(object?.leases) - ? object.leases.map((e: any) => QueryLeaseResponse.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? 
PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryLeasesResponse): unknown { - const obj: any = {}; - if (message.leases?.length) { - obj.leases = message.leases.map((e) => QueryLeaseResponse.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryLeasesResponse { - return QueryLeasesResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryLeasesResponse { - const message = createBaseQueryLeasesResponse(); - message.leases = - object.leases?.map((e) => QueryLeaseResponse.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryLeasesResponse.$type, QueryLeasesResponse); - -function createBaseQueryLeaseRequest(): QueryLeaseRequest { - return { $type: "akash.market.v1beta4.QueryLeaseRequest", id: undefined }; -} - -export const QueryLeaseRequest = { - $type: "akash.market.v1beta4.QueryLeaseRequest" as const, - - encode( - message: QueryLeaseRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.id !== undefined) { - LeaseID.encode(message.id, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryLeaseRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryLeaseRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.id = LeaseID.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryLeaseRequest { - return { - $type: QueryLeaseRequest.$type, - id: isSet(object.id) ? LeaseID.fromJSON(object.id) : undefined, - }; - }, - - toJSON(message: QueryLeaseRequest): unknown { - const obj: any = {}; - if (message.id !== undefined) { - obj.id = LeaseID.toJSON(message.id); - } - return obj; - }, - - create(base?: DeepPartial): QueryLeaseRequest { - return QueryLeaseRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryLeaseRequest { - const message = createBaseQueryLeaseRequest(); - message.id = - object.id !== undefined && object.id !== null - ? LeaseID.fromPartial(object.id) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryLeaseRequest.$type, QueryLeaseRequest); - -function createBaseQueryLeaseResponse(): QueryLeaseResponse { - return { - $type: "akash.market.v1beta4.QueryLeaseResponse", - lease: undefined, - escrowPayment: undefined, - }; -} - -export const QueryLeaseResponse = { - $type: "akash.market.v1beta4.QueryLeaseResponse" as const, - - encode( - message: QueryLeaseResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.lease !== undefined) { - Lease.encode(message.lease, writer.uint32(10).fork()).ldelim(); - } - if (message.escrowPayment !== undefined) { - FractionalPayment.encode( - message.escrowPayment, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): QueryLeaseResponse { - const reader = - input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryLeaseResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.lease = Lease.decode(reader, reader.uint32()); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.escrowPayment = FractionalPayment.decode( - reader, - reader.uint32(), - ); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryLeaseResponse { - return { - $type: QueryLeaseResponse.$type, - lease: isSet(object.lease) ? Lease.fromJSON(object.lease) : undefined, - escrowPayment: isSet(object.escrowPayment) - ? FractionalPayment.fromJSON(object.escrowPayment) - : undefined, - }; - }, - - toJSON(message: QueryLeaseResponse): unknown { - const obj: any = {}; - if (message.lease !== undefined) { - obj.lease = Lease.toJSON(message.lease); - } - if (message.escrowPayment !== undefined) { - obj.escrowPayment = FractionalPayment.toJSON(message.escrowPayment); - } - return obj; - }, - - create(base?: DeepPartial): QueryLeaseResponse { - return QueryLeaseResponse.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryLeaseResponse { - const message = createBaseQueryLeaseResponse(); - message.lease = - object.lease !== undefined && object.lease !== null - ? Lease.fromPartial(object.lease) - : undefined; - message.escrowPayment = - object.escrowPayment !== undefined && object.escrowPayment !== null - ? FractionalPayment.fromPartial(object.escrowPayment) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryLeaseResponse.$type, QueryLeaseResponse); - -/** Query defines the gRPC querier service */ -export interface Query { - /** Orders queries orders with filters */ - Orders(request: QueryOrdersRequest): Promise; - /** Order queries order details */ - Order(request: QueryOrderRequest): Promise; - /** Bids queries bids with filters */ - Bids(request: QueryBidsRequest): Promise; - /** Bid queries bid details */ - Bid(request: QueryBidRequest): Promise; - /** Leases queries leases with filters */ - Leases(request: QueryLeasesRequest): Promise; - /** Lease queries lease details */ - Lease(request: QueryLeaseRequest): Promise; -} - -export const QueryServiceName = "akash.market.v1beta4.Query"; -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || QueryServiceName; - this.rpc = rpc; - this.Orders = this.Orders.bind(this); - this.Order = this.Order.bind(this); - this.Bids = this.Bids.bind(this); - this.Bid = this.Bid.bind(this); - this.Leases = this.Leases.bind(this); - this.Lease = this.Lease.bind(this); - } - Orders(request: QueryOrdersRequest): Promise { - const data = QueryOrdersRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Orders", data); - return promise.then((data) => - QueryOrdersResponse.decode(_m0.Reader.create(data)), - ); - } - - Order(request: QueryOrderRequest): Promise { - const data = QueryOrderRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Order", data); - return promise.then((data) => - QueryOrderResponse.decode(_m0.Reader.create(data)), - ); - } - - Bids(request: QueryBidsRequest): Promise { - const data = 
QueryBidsRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Bids", data); - return promise.then((data) => - QueryBidsResponse.decode(_m0.Reader.create(data)), - ); - } - - Bid(request: QueryBidRequest): Promise { - const data = QueryBidRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Bid", data); - return promise.then((data) => - QueryBidResponse.decode(_m0.Reader.create(data)), - ); - } - - Leases(request: QueryLeasesRequest): Promise { - const data = QueryLeasesRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Leases", data); - return promise.then((data) => - QueryLeasesResponse.decode(_m0.Reader.create(data)), - ); - } - - Lease(request: QueryLeaseRequest): Promise { - const data = QueryLeaseRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Lease", data); - return promise.then((data) => - QueryLeaseResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/market/v1beta4/service.grpc-js.ts b/ts/src/generated/akash/market/v1beta4/service.grpc-js.ts deleted file mode 100644 index 616caba2..00000000 --- a/ts/src/generated/akash/market/v1beta4/service.grpc-js.ts +++ /dev/null @@ -1,252 +0,0 @@ -/* eslint-disable */ -import { - ChannelCredentials, - Client, - makeGenericClientConstructor, - Metadata, -} from "@grpc/grpc-js"; -import type { - CallOptions, - ClientOptions, - ClientUnaryCall, - handleUnaryCall, - ServiceError, - UntypedServiceImplementation, -} from "@grpc/grpc-js"; -import { - MsgCloseBid, - MsgCloseBidResponse, - MsgCreateBid, - MsgCreateBidResponse, -} from "./bid"; -import { - MsgCloseLease, - MsgCloseLeaseResponse, - MsgCreateLease, - MsgCreateLeaseResponse, - MsgWithdrawLease, - MsgWithdrawLeaseResponse, -} from "./lease"; - -export const protobufPackage = "akash.market.v1beta4"; - -/** Msg defines the market Msg service */ -export type MsgService = typeof MsgService; -export const MsgService = { - /** CreateBid defines a method to create a bid given proper inputs. */ - createBid: { - path: "/akash.market.v1beta4.Msg/CreateBid", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCreateBid) => - Buffer.from(MsgCreateBid.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCreateBid.decode(value), - responseSerialize: (value: MsgCreateBidResponse) => - Buffer.from(MsgCreateBidResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => MsgCreateBidResponse.decode(value), - }, - /** CloseBid defines a method to close a bid given proper inputs. 
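// ---------------------------------------------------------------------------
// Usage sketch (not part of the generated sources): the promise-based
// QueryClientImpl in the v1beta4 query module removed above only needs an
// object implementing the small Rpc interface
// (request(service, method, data) -> Promise of bytes), and the ts-proto
// clients that replace it follow the same wiring. A minimal sketch, assuming
// @cosmjs/stargate's createProtobufRpcClient and @cosmjs/tendermint-rpc's
// Tendermint34Client supply that transport; the endpoint and owner address
// are placeholders.
import { createProtobufRpcClient, QueryClient } from "@cosmjs/stargate";
import { Tendermint34Client } from "@cosmjs/tendermint-rpc";
import { QueryBidsRequest, QueryClientImpl } from "./query";

async function listOpenBids(owner: string) {
  const tm = await Tendermint34Client.connect("http://localhost:26657");
  const rpc = createProtobufRpcClient(new QueryClient(tm));
  const market = new QueryClientImpl(rpc);
  // fromPartial fills $type and default fields, so only the filters we care
  // about need to be spelled out.
  const request = QueryBidsRequest.fromPartial({
    filters: { owner, state: "open" },
    pagination: undefined,
  });
  const response = await market.Bids(request);
  return response.bids;
}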
*/ - closeBid: { - path: "/akash.market.v1beta4.Msg/CloseBid", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCloseBid) => - Buffer.from(MsgCloseBid.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCloseBid.decode(value), - responseSerialize: (value: MsgCloseBidResponse) => - Buffer.from(MsgCloseBidResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => MsgCloseBidResponse.decode(value), - }, - /** WithdrawLease withdraws accrued funds from the lease payment */ - withdrawLease: { - path: "/akash.market.v1beta4.Msg/WithdrawLease", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgWithdrawLease) => - Buffer.from(MsgWithdrawLease.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgWithdrawLease.decode(value), - responseSerialize: (value: MsgWithdrawLeaseResponse) => - Buffer.from(MsgWithdrawLeaseResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => - MsgWithdrawLeaseResponse.decode(value), - }, - /** CreateLease creates a new lease */ - createLease: { - path: "/akash.market.v1beta4.Msg/CreateLease", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCreateLease) => - Buffer.from(MsgCreateLease.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCreateLease.decode(value), - responseSerialize: (value: MsgCreateLeaseResponse) => - Buffer.from(MsgCreateLeaseResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => - MsgCreateLeaseResponse.decode(value), - }, - /** CloseLease defines a method to close an order given proper inputs. */ - closeLease: { - path: "/akash.market.v1beta4.Msg/CloseLease", - requestStream: false, - responseStream: false, - requestSerialize: (value: MsgCloseLease) => - Buffer.from(MsgCloseLease.encode(value).finish()), - requestDeserialize: (value: Buffer) => MsgCloseLease.decode(value), - responseSerialize: (value: MsgCloseLeaseResponse) => - Buffer.from(MsgCloseLeaseResponse.encode(value).finish()), - responseDeserialize: (value: Buffer) => MsgCloseLeaseResponse.decode(value), - }, -} as const; - -export interface MsgServer extends UntypedServiceImplementation { - /** CreateBid defines a method to create a bid given proper inputs. */ - createBid: handleUnaryCall; - /** CloseBid defines a method to close a bid given proper inputs. */ - closeBid: handleUnaryCall; - /** WithdrawLease withdraws accrued funds from the lease payment */ - withdrawLease: handleUnaryCall; - /** CreateLease creates a new lease */ - createLease: handleUnaryCall; - /** CloseLease defines a method to close an order given proper inputs. */ - closeLease: handleUnaryCall; -} - -export interface MsgClient extends Client { - /** CreateBid defines a method to create a bid given proper inputs. */ - createBid( - request: MsgCreateBid, - callback: ( - error: ServiceError | null, - response: MsgCreateBidResponse, - ) => void, - ): ClientUnaryCall; - createBid( - request: MsgCreateBid, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCreateBidResponse, - ) => void, - ): ClientUnaryCall; - createBid( - request: MsgCreateBid, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCreateBidResponse, - ) => void, - ): ClientUnaryCall; - /** CloseBid defines a method to close a bid given proper inputs. 
*/ - closeBid( - request: MsgCloseBid, - callback: ( - error: ServiceError | null, - response: MsgCloseBidResponse, - ) => void, - ): ClientUnaryCall; - closeBid( - request: MsgCloseBid, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCloseBidResponse, - ) => void, - ): ClientUnaryCall; - closeBid( - request: MsgCloseBid, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCloseBidResponse, - ) => void, - ): ClientUnaryCall; - /** WithdrawLease withdraws accrued funds from the lease payment */ - withdrawLease( - request: MsgWithdrawLease, - callback: ( - error: ServiceError | null, - response: MsgWithdrawLeaseResponse, - ) => void, - ): ClientUnaryCall; - withdrawLease( - request: MsgWithdrawLease, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgWithdrawLeaseResponse, - ) => void, - ): ClientUnaryCall; - withdrawLease( - request: MsgWithdrawLease, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgWithdrawLeaseResponse, - ) => void, - ): ClientUnaryCall; - /** CreateLease creates a new lease */ - createLease( - request: MsgCreateLease, - callback: ( - error: ServiceError | null, - response: MsgCreateLeaseResponse, - ) => void, - ): ClientUnaryCall; - createLease( - request: MsgCreateLease, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCreateLeaseResponse, - ) => void, - ): ClientUnaryCall; - createLease( - request: MsgCreateLease, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCreateLeaseResponse, - ) => void, - ): ClientUnaryCall; - /** CloseLease defines a method to close an order given proper inputs. */ - closeLease( - request: MsgCloseLease, - callback: ( - error: ServiceError | null, - response: MsgCloseLeaseResponse, - ) => void, - ): ClientUnaryCall; - closeLease( - request: MsgCloseLease, - metadata: Metadata, - callback: ( - error: ServiceError | null, - response: MsgCloseLeaseResponse, - ) => void, - ): ClientUnaryCall; - closeLease( - request: MsgCloseLease, - metadata: Metadata, - options: Partial, - callback: ( - error: ServiceError | null, - response: MsgCloseLeaseResponse, - ) => void, - ): ClientUnaryCall; -} - -export const MsgClient = makeGenericClientConstructor( - MsgService, - "akash.market.v1beta4.Msg", -) as unknown as { - new ( - address: string, - credentials: ChannelCredentials, - options?: Partial, - ): MsgClient; - service: typeof MsgService; - serviceName: string; -}; diff --git a/ts/src/generated/akash/market/v1beta4/service.ts b/ts/src/generated/akash/market/v1beta4/service.ts deleted file mode 100644 index 22a98833..00000000 --- a/ts/src/generated/akash/market/v1beta4/service.ts +++ /dev/null @@ -1,92 +0,0 @@ -/* eslint-disable */ -import _m0 from "protobufjs/minimal"; -import { - MsgCloseBid, - MsgCloseBidResponse, - MsgCreateBid, - MsgCreateBidResponse, -} from "./bid"; -import { - MsgCloseLease, - MsgCloseLeaseResponse, - MsgCreateLease, - MsgCreateLeaseResponse, - MsgWithdrawLease, - MsgWithdrawLeaseResponse, -} from "./lease"; - -/** Msg defines the market Msg service */ -export interface Msg { - /** CreateBid defines a method to create a bid given proper inputs. */ - CreateBid(request: MsgCreateBid): Promise; - /** CloseBid defines a method to close a bid given proper inputs. 
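// ---------------------------------------------------------------------------
// Usage sketch (not part of the generated sources): each method entry in the
// grpc-js MsgService definition deleted above bundles its own request and
// response (de)serializers, so they can be exercised without opening a
// channel; a minimal sketch, assuming the v1beta4 modules are still
// importable from their original relative paths.
import { MsgService } from "./service.grpc-js";
import { MsgCloseBid } from "./bid";

const original = MsgCloseBid.fromPartial({});
const wire: Buffer = MsgService.closeBid.requestSerialize(original);
const roundTripped = MsgService.closeBid.requestDeserialize(wire);
// roundTripped is a MsgCloseBid structurally equal to the original
// (no BidID set).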
*/ - CloseBid(request: MsgCloseBid): Promise; - /** WithdrawLease withdraws accrued funds from the lease payment */ - WithdrawLease(request: MsgWithdrawLease): Promise; - /** CreateLease creates a new lease */ - CreateLease(request: MsgCreateLease): Promise; - /** CloseLease defines a method to close an order given proper inputs. */ - CloseLease(request: MsgCloseLease): Promise; -} - -export const MsgServiceName = "akash.market.v1beta4.Msg"; -export class MsgClientImpl implements Msg { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || MsgServiceName; - this.rpc = rpc; - this.CreateBid = this.CreateBid.bind(this); - this.CloseBid = this.CloseBid.bind(this); - this.WithdrawLease = this.WithdrawLease.bind(this); - this.CreateLease = this.CreateLease.bind(this); - this.CloseLease = this.CloseLease.bind(this); - } - CreateBid(request: MsgCreateBid): Promise { - const data = MsgCreateBid.encode(request).finish(); - const promise = this.rpc.request(this.service, "CreateBid", data); - return promise.then((data) => - MsgCreateBidResponse.decode(_m0.Reader.create(data)), - ); - } - - CloseBid(request: MsgCloseBid): Promise { - const data = MsgCloseBid.encode(request).finish(); - const promise = this.rpc.request(this.service, "CloseBid", data); - return promise.then((data) => - MsgCloseBidResponse.decode(_m0.Reader.create(data)), - ); - } - - WithdrawLease(request: MsgWithdrawLease): Promise { - const data = MsgWithdrawLease.encode(request).finish(); - const promise = this.rpc.request(this.service, "WithdrawLease", data); - return promise.then((data) => - MsgWithdrawLeaseResponse.decode(_m0.Reader.create(data)), - ); - } - - CreateLease(request: MsgCreateLease): Promise { - const data = MsgCreateLease.encode(request).finish(); - const promise = this.rpc.request(this.service, "CreateLease", data); - return promise.then((data) => - MsgCreateLeaseResponse.decode(_m0.Reader.create(data)), - ); - } - - CloseLease(request: MsgCloseLease): Promise { - const data = MsgCloseLease.encode(request).finish(); - const promise = this.rpc.request(this.service, "CloseLease", data); - return promise.then((data) => - MsgCloseLeaseResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} diff --git a/ts/src/generated/akash/market/v1beta5/bid.ts b/ts/src/generated/akash/market/v1beta5/bid.ts new file mode 100644 index 00000000..f9422401 --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/bid.ts @@ -0,0 +1,268 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/market/v1beta5/bid.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { DecCoin } from "../../../cosmos/base/v1beta1/coin"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { BidID } from "../v1/bid"; +import { ResourceOffer } from "./resourcesoffer"; + +/** Bid stores BidID, state of bid and price */ +export interface Bid { + $type: "akash.market.v1beta5.Bid"; + id: BidID | undefined; + state: Bid_State; + price: DecCoin | undefined; + createdAt: Long; + resourcesOffer: ResourceOffer[]; +} + +/** BidState is an enum which refers to state of bid */ +export enum Bid_State { + /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ + invalid = 0, + /** open - BidOpen denotes state for bid open */ + open = 1, + /** active - BidMatched denotes state for bid open */ + active = 2, + /** lost - BidLost denotes state for bid lost */ + lost = 3, + /** closed - BidClosed denotes state for bid closed */ + closed = 4, + UNRECOGNIZED = -1, +} + +export function bid_StateFromJSON(object: any): Bid_State { + switch (object) { + case 0: + case "invalid": + return Bid_State.invalid; + case 1: + case "open": + return Bid_State.open; + case 2: + case "active": + return Bid_State.active; + case 3: + case "lost": + return Bid_State.lost; + case 4: + case "closed": + return Bid_State.closed; + case -1: + case "UNRECOGNIZED": + default: + return Bid_State.UNRECOGNIZED; + } +} + +export function bid_StateToJSON(object: Bid_State): string { + switch (object) { + case Bid_State.invalid: + return "invalid"; + case Bid_State.open: + return "open"; + case Bid_State.active: + return "active"; + case Bid_State.lost: + return "lost"; + case Bid_State.closed: + return "closed"; + case Bid_State.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +function createBaseBid(): Bid { + return { + $type: "akash.market.v1beta5.Bid", + id: undefined, + state: 0, + price: undefined, + createdAt: Long.ZERO, + resourcesOffer: [], + }; +} + +export const Bid: MessageFns = { + $type: "akash.market.v1beta5.Bid" as const, + + encode( + message: Bid, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + BidID.encode(message.id, writer.uint32(10).fork()).join(); + } + if (message.state !== 0) { + writer.uint32(16).int32(message.state); + } + if (message.price !== undefined) { + DecCoin.encode(message.price, writer.uint32(26).fork()).join(); + } + if (!message.createdAt.equals(Long.ZERO)) { + writer.uint32(32).int64(message.createdAt.toString()); + } + for (const v of message.resourcesOffer) { + ResourceOffer.encode(v!, writer.uint32(42).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Bid { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseBid(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = BidID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.state = reader.int32() as any; + continue; + case 3: + if (tag !== 26) { + break; + } + + message.price = DecCoin.decode(reader, reader.uint32()); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.createdAt = Long.fromString(reader.int64().toString()); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.resourcesOffer.push( + ResourceOffer.decode(reader, reader.uint32()), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Bid { + return { + $type: Bid.$type, + id: isSet(object.id) ? BidID.fromJSON(object.id) : undefined, + state: isSet(object.state) ? bid_StateFromJSON(object.state) : 0, + price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, + createdAt: isSet(object.createdAt) + ? Long.fromValue(object.createdAt) + : Long.ZERO, + resourcesOffer: globalThis.Array.isArray(object?.resourcesOffer) + ? 
object.resourcesOffer.map((e: any) => ResourceOffer.fromJSON(e)) + : [], + }; + }, + + toJSON(message: Bid): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = BidID.toJSON(message.id); + } + if (message.state !== 0) { + obj.state = bid_StateToJSON(message.state); + } + if (message.price !== undefined) { + obj.price = DecCoin.toJSON(message.price); + } + if (!message.createdAt.equals(Long.ZERO)) { + obj.createdAt = (message.createdAt || Long.ZERO).toString(); + } + if (message.resourcesOffer?.length) { + obj.resourcesOffer = message.resourcesOffer.map((e) => + ResourceOffer.toJSON(e), + ); + } + return obj; + }, + + create(base?: DeepPartial): Bid { + return Bid.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Bid { + const message = createBaseBid(); + message.id = + object.id !== undefined && object.id !== null + ? BidID.fromPartial(object.id) + : undefined; + message.state = object.state ?? 0; + message.price = + object.price !== undefined && object.price !== null + ? DecCoin.fromPartial(object.price) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? Long.fromValue(object.createdAt) + : Long.ZERO; + message.resourcesOffer = + object.resourcesOffer?.map((e) => ResourceOffer.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Bid.$type, Bid); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/market/v1beta5/bidmsg.ts b/ts/src/generated/akash/market/v1beta5/bidmsg.ts new file mode 100644 index 00000000..1a2097b8 --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/bidmsg.ts @@ -0,0 +1,423 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/market/v1beta5/bidmsg.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { Coin, DecCoin } from "../../../cosmos/base/v1beta1/coin"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { BidID } from "../v1/bid"; +import { OrderID } from "../v1/order"; +import { ResourceOffer } from "./resourcesoffer"; + +/** MsgCreateBid defines an SDK message for creating Bid */ +export interface MsgCreateBid { + $type: "akash.market.v1beta5.MsgCreateBid"; + orderId: OrderID | undefined; + provider: string; + price: DecCoin | undefined; + deposit: Coin | undefined; + resourcesOffer: ResourceOffer[]; +} + +/** MsgCreateBidResponse defines the Msg/CreateBid response type. 
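// ---------------------------------------------------------------------------
// Usage sketch (not part of the generated sources): round-tripping the new
// v1beta5 Bid message through its binary and JSON codecs. Field values are
// illustrative, and the v1 BidID is assumed to carry the usual
// owner/dseq/gseq/oseq/provider fields.
import { Bid, Bid_State } from "./bid";

const bid = Bid.fromPartial({
  id: {
    owner: "akash1owner...",
    dseq: 1,
    gseq: 1,
    oseq: 1,
    provider: "akash1provider...",
  },
  state: Bid_State.open,
  price: { denom: "uakt", amount: "1000" },
  createdAt: 1,
  resourcesOffer: [],
});

const bytes = Bid.encode(bid).finish(); // Uint8Array wire encoding
const decoded = Bid.decode(bytes); // structurally equal to `bid`
console.log(Bid.toJSON(decoded)); // Long fields serialize as strings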
*/ +export interface MsgCreateBidResponse { + $type: "akash.market.v1beta5.MsgCreateBidResponse"; +} + +/** MsgCloseBid defines an SDK message for closing bid */ +export interface MsgCloseBid { + $type: "akash.market.v1beta5.MsgCloseBid"; + id: BidID | undefined; +} + +/** MsgCloseBidResponse defines the Msg/CloseBid response type. */ +export interface MsgCloseBidResponse { + $type: "akash.market.v1beta5.MsgCloseBidResponse"; +} + +function createBaseMsgCreateBid(): MsgCreateBid { + return { + $type: "akash.market.v1beta5.MsgCreateBid", + orderId: undefined, + provider: "", + price: undefined, + deposit: undefined, + resourcesOffer: [], + }; +} + +export const MsgCreateBid: MessageFns< + MsgCreateBid, + "akash.market.v1beta5.MsgCreateBid" +> = { + $type: "akash.market.v1beta5.MsgCreateBid" as const, + + encode( + message: MsgCreateBid, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.orderId !== undefined) { + OrderID.encode(message.orderId, writer.uint32(10).fork()).join(); + } + if (message.provider !== "") { + writer.uint32(18).string(message.provider); + } + if (message.price !== undefined) { + DecCoin.encode(message.price, writer.uint32(26).fork()).join(); + } + if (message.deposit !== undefined) { + Coin.encode(message.deposit, writer.uint32(34).fork()).join(); + } + for (const v of message.resourcesOffer) { + ResourceOffer.encode(v!, writer.uint32(42).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): MsgCreateBid { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCreateBid(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.orderId = OrderID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.provider = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.price = DecCoin.decode(reader, reader.uint32()); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.deposit = Coin.decode(reader, reader.uint32()); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.resourcesOffer.push( + ResourceOffer.decode(reader, reader.uint32()), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCreateBid { + return { + $type: MsgCreateBid.$type, + orderId: isSet(object.orderId) + ? OrderID.fromJSON(object.orderId) + : undefined, + provider: isSet(object.provider) + ? globalThis.String(object.provider) + : "", + price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, + deposit: isSet(object.deposit) + ? Coin.fromJSON(object.deposit) + : undefined, + resourcesOffer: globalThis.Array.isArray(object?.resourcesOffer) + ? 
object.resourcesOffer.map((e: any) => ResourceOffer.fromJSON(e)) + : [], + }; + }, + + toJSON(message: MsgCreateBid): unknown { + const obj: any = {}; + if (message.orderId !== undefined) { + obj.orderId = OrderID.toJSON(message.orderId); + } + if (message.provider !== "") { + obj.provider = message.provider; + } + if (message.price !== undefined) { + obj.price = DecCoin.toJSON(message.price); + } + if (message.deposit !== undefined) { + obj.deposit = Coin.toJSON(message.deposit); + } + if (message.resourcesOffer?.length) { + obj.resourcesOffer = message.resourcesOffer.map((e) => + ResourceOffer.toJSON(e), + ); + } + return obj; + }, + + create(base?: DeepPartial): MsgCreateBid { + return MsgCreateBid.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgCreateBid { + const message = createBaseMsgCreateBid(); + message.orderId = + object.orderId !== undefined && object.orderId !== null + ? OrderID.fromPartial(object.orderId) + : undefined; + message.provider = object.provider ?? ""; + message.price = + object.price !== undefined && object.price !== null + ? DecCoin.fromPartial(object.price) + : undefined; + message.deposit = + object.deposit !== undefined && object.deposit !== null + ? Coin.fromPartial(object.deposit) + : undefined; + message.resourcesOffer = + object.resourcesOffer?.map((e) => ResourceOffer.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(MsgCreateBid.$type, MsgCreateBid); + +function createBaseMsgCreateBidResponse(): MsgCreateBidResponse { + return { $type: "akash.market.v1beta5.MsgCreateBidResponse" }; +} + +export const MsgCreateBidResponse: MessageFns< + MsgCreateBidResponse, + "akash.market.v1beta5.MsgCreateBidResponse" +> = { + $type: "akash.market.v1beta5.MsgCreateBidResponse" as const, + + encode( + _: MsgCreateBidResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgCreateBidResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCreateBidResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCreateBidResponse { + return { $type: MsgCreateBidResponse.$type }; + }, + + toJSON(_: MsgCreateBidResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgCreateBidResponse { + return MsgCreateBidResponse.fromPartial(base ?? {}); + }, + fromPartial(_: DeepPartial): MsgCreateBidResponse { + const message = createBaseMsgCreateBidResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgCreateBidResponse.$type, MsgCreateBidResponse); + +function createBaseMsgCloseBid(): MsgCloseBid { + return { $type: "akash.market.v1beta5.MsgCloseBid", id: undefined }; +} + +export const MsgCloseBid: MessageFns< + MsgCloseBid, + "akash.market.v1beta5.MsgCloseBid" +> = { + $type: "akash.market.v1beta5.MsgCloseBid" as const, + + encode( + message: MsgCloseBid, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + BidID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): MsgCloseBid { + const reader = + input instanceof BinaryReader ? 
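// ---------------------------------------------------------------------------
// Usage sketch (not part of the generated sources): when MsgCreateBid is
// submitted in a transaction it travels as protobuf bytes wrapped with its
// type URL. The plain { typeUrl, value } shape below is what cosmjs-style
// signing clients accept; that stack is an assumption of the sketch, not
// something this change defines, and the field values are illustrative.
import { MsgCreateBid } from "./bidmsg";

const msg = MsgCreateBid.fromPartial({
  provider: "akash1provider...",
  price: { denom: "uakt", amount: "1000" },
  deposit: { denom: "uakt", amount: "500000" },
  resourcesOffer: [],
});

const anyMsg = {
  typeUrl: "/akash.market.v1beta5.MsgCreateBid", // derived from the $type above
  value: MsgCreateBid.encode(msg).finish(),
};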
input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCloseBid(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = BidID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCloseBid { + return { + $type: MsgCloseBid.$type, + id: isSet(object.id) ? BidID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: MsgCloseBid): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = BidID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): MsgCloseBid { + return MsgCloseBid.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgCloseBid { + const message = createBaseMsgCloseBid(); + message.id = + object.id !== undefined && object.id !== null + ? BidID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgCloseBid.$type, MsgCloseBid); + +function createBaseMsgCloseBidResponse(): MsgCloseBidResponse { + return { $type: "akash.market.v1beta5.MsgCloseBidResponse" }; +} + +export const MsgCloseBidResponse: MessageFns< + MsgCloseBidResponse, + "akash.market.v1beta5.MsgCloseBidResponse" +> = { + $type: "akash.market.v1beta5.MsgCloseBidResponse" as const, + + encode( + _: MsgCloseBidResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgCloseBidResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCloseBidResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCloseBidResponse { + return { $type: MsgCloseBidResponse.$type }; + }, + + toJSON(_: MsgCloseBidResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgCloseBidResponse { + return MsgCloseBidResponse.fromPartial(base ?? {}); + }, + fromPartial(_: DeepPartial): MsgCloseBidResponse { + const message = createBaseMsgCloseBidResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgCloseBidResponse.$type, MsgCloseBidResponse); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/market/v1beta5/filters.ts b/ts/src/generated/akash/market/v1beta5/filters.ts new file mode 100644 index 00000000..c03c9f39 --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/filters.ts @@ -0,0 +1,366 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/market/v1beta5/filters.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; + +/** BidFilters defines flags for bid list filter */ +export interface BidFilters { + $type: "akash.market.v1beta5.BidFilters"; + owner: string; + dseq: Long; + gseq: number; + oseq: number; + provider: string; + state: string; +} + +/** OrderFilters defines flags for order list filter */ +export interface OrderFilters { + $type: "akash.market.v1beta5.OrderFilters"; + owner: string; + dseq: Long; + gseq: number; + oseq: number; + state: string; +} + +function createBaseBidFilters(): BidFilters { + return { + $type: "akash.market.v1beta5.BidFilters", + owner: "", + dseq: Long.UZERO, + gseq: 0, + oseq: 0, + provider: "", + state: "", + }; +} + +export const BidFilters: MessageFns< + BidFilters, + "akash.market.v1beta5.BidFilters" +> = { + $type: "akash.market.v1beta5.BidFilters" as const, + + encode( + message: BidFilters, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq.toString()); + } + if (message.gseq !== 0) { + writer.uint32(24).uint32(message.gseq); + } + if (message.oseq !== 0) { + writer.uint32(32).uint32(message.oseq); + } + if (message.provider !== "") { + writer.uint32(42).string(message.provider); + } + if (message.state !== "") { + writer.uint32(50).string(message.state); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): BidFilters { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseBidFilters(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = Long.fromString(reader.uint64().toString(), true); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.gseq = reader.uint32(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.oseq = reader.uint32(); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.provider = reader.string(); + continue; + case 6: + if (tag !== 50) { + break; + } + + message.state = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): BidFilters { + return { + $type: BidFilters.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, + gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, + oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, + provider: isSet(object.provider) + ? globalThis.String(object.provider) + : "", + state: isSet(object.state) ? globalThis.String(object.state) : "", + }; + }, + + toJSON(message: BidFilters): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (!message.dseq.equals(Long.UZERO)) { + obj.dseq = (message.dseq || Long.UZERO).toString(); + } + if (message.gseq !== 0) { + obj.gseq = Math.round(message.gseq); + } + if (message.oseq !== 0) { + obj.oseq = Math.round(message.oseq); + } + if (message.provider !== "") { + obj.provider = message.provider; + } + if (message.state !== "") { + obj.state = message.state; + } + return obj; + }, + + create(base?: DeepPartial): BidFilters { + return BidFilters.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): BidFilters { + const message = createBaseBidFilters(); + message.owner = object.owner ?? ""; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? Long.fromValue(object.dseq) + : Long.UZERO; + message.gseq = object.gseq ?? 0; + message.oseq = object.oseq ?? 0; + message.provider = object.provider ?? ""; + message.state = object.state ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(BidFilters.$type, BidFilters); + +function createBaseOrderFilters(): OrderFilters { + return { + $type: "akash.market.v1beta5.OrderFilters", + owner: "", + dseq: Long.UZERO, + gseq: 0, + oseq: 0, + state: "", + }; +} + +export const OrderFilters: MessageFns< + OrderFilters, + "akash.market.v1beta5.OrderFilters" +> = { + $type: "akash.market.v1beta5.OrderFilters" as const, + + encode( + message: OrderFilters, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq.toString()); + } + if (message.gseq !== 0) { + writer.uint32(24).uint32(message.gseq); + } + if (message.oseq !== 0) { + writer.uint32(32).uint32(message.oseq); + } + if (message.state !== "") { + writer.uint32(42).string(message.state); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): OrderFilters { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseOrderFilters(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = Long.fromString(reader.uint64().toString(), true); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.gseq = reader.uint32(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.oseq = reader.uint32(); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.state = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): OrderFilters { + return { + $type: OrderFilters.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, + gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, + oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, + state: isSet(object.state) ? globalThis.String(object.state) : "", + }; + }, + + toJSON(message: OrderFilters): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (!message.dseq.equals(Long.UZERO)) { + obj.dseq = (message.dseq || Long.UZERO).toString(); + } + if (message.gseq !== 0) { + obj.gseq = Math.round(message.gseq); + } + if (message.oseq !== 0) { + obj.oseq = Math.round(message.oseq); + } + if (message.state !== "") { + obj.state = message.state; + } + return obj; + }, + + create(base?: DeepPartial): OrderFilters { + return OrderFilters.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): OrderFilters { + const message = createBaseOrderFilters(); + message.owner = object.owner ?? ""; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? Long.fromValue(object.dseq) + : Long.UZERO; + message.gseq = object.gseq ?? 0; + message.oseq = object.oseq ?? 0; + message.state = object.state ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(OrderFilters.$type, OrderFilters); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/market/v1beta5/genesis.ts b/ts/src/generated/akash/market/v1beta5/genesis.ts new file mode 100644 index 00000000..e6558133 --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/genesis.ts @@ -0,0 +1,189 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
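// ---------------------------------------------------------------------------
// Usage sketch (not part of the generated sources): dseq is a uint64, so the
// generated filters carry it as an unsigned Long and emit it as a decimal
// string in JSON, which keeps values beyond Number.MAX_SAFE_INTEGER intact.
// Values are illustrative.
import Long from "long";
import { BidFilters } from "./filters";

const filters = BidFilters.fromPartial({
  owner: "akash1owner...",
  dseq: Long.fromString("9007199254740993", true), // > Number.MAX_SAFE_INTEGER
  state: "open",
});

// Unset gseq/oseq/provider stay at their defaults and are omitted from JSON.
console.log(BidFilters.toJSON(filters));
// -> { owner: "akash1owner...", dseq: "9007199254740993", state: "open" }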
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/market/v1beta5/genesis.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Lease } from "../v1/lease"; +import { Bid } from "./bid"; +import { Order } from "./order"; +import { Params } from "./params"; + +/** GenesisState defines the basic genesis state used by market module */ +export interface GenesisState { + $type: "akash.market.v1beta5.GenesisState"; + params: Params | undefined; + orders: Order[]; + leases: Lease[]; + bids: Bid[]; +} + +function createBaseGenesisState(): GenesisState { + return { + $type: "akash.market.v1beta5.GenesisState", + params: undefined, + orders: [], + leases: [], + bids: [], + }; +} + +export const GenesisState: MessageFns< + GenesisState, + "akash.market.v1beta5.GenesisState" +> = { + $type: "akash.market.v1beta5.GenesisState" as const, + + encode( + message: GenesisState, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.params !== undefined) { + Params.encode(message.params, writer.uint32(10).fork()).join(); + } + for (const v of message.orders) { + Order.encode(v!, writer.uint32(18).fork()).join(); + } + for (const v of message.leases) { + Lease.encode(v!, writer.uint32(26).fork()).join(); + } + for (const v of message.bids) { + Bid.encode(v!, writer.uint32(34).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): GenesisState { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGenesisState(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.params = Params.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.orders.push(Order.decode(reader, reader.uint32())); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.leases.push(Lease.decode(reader, reader.uint32())); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.bids.push(Bid.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): GenesisState { + return { + $type: GenesisState.$type, + params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, + orders: globalThis.Array.isArray(object?.orders) + ? object.orders.map((e: any) => Order.fromJSON(e)) + : [], + leases: globalThis.Array.isArray(object?.leases) + ? object.leases.map((e: any) => Lease.fromJSON(e)) + : [], + bids: globalThis.Array.isArray(object?.bids) + ? object.bids.map((e: any) => Bid.fromJSON(e)) + : [], + }; + }, + + toJSON(message: GenesisState): unknown { + const obj: any = {}; + if (message.params !== undefined) { + obj.params = Params.toJSON(message.params); + } + if (message.orders?.length) { + obj.orders = message.orders.map((e) => Order.toJSON(e)); + } + if (message.leases?.length) { + obj.leases = message.leases.map((e) => Lease.toJSON(e)); + } + if (message.bids?.length) { + obj.bids = message.bids.map((e) => Bid.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): GenesisState { + return GenesisState.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): GenesisState { + const message = createBaseGenesisState(); + message.params = + object.params !== undefined && object.params !== null + ? Params.fromPartial(object.params) + : undefined; + message.orders = object.orders?.map((e) => Order.fromPartial(e)) || []; + message.leases = object.leases?.map((e) => Lease.fromPartial(e)) || []; + message.bids = object.bids?.map((e) => Bid.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(GenesisState.$type, GenesisState); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/market/v1beta5/leasemsg.ts b/ts/src/generated/akash/market/v1beta5/leasemsg.ts new file mode 100644 index 00000000..06ac92ba --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/leasemsg.ts @@ -0,0 +1,482 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/market/v1beta5/leasemsg.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { BidID } from "../v1/bid"; +import { LeaseID } from "../v1/lease"; + +/** MsgCreateLease is sent to create a lease */ +export interface MsgCreateLease { + $type: "akash.market.v1beta5.MsgCreateLease"; + bidId: BidID | undefined; +} + +/** MsgCreateLeaseResponse is the response from creating a lease */ +export interface MsgCreateLeaseResponse { + $type: "akash.market.v1beta5.MsgCreateLeaseResponse"; +} + +/** MsgWithdrawLease defines an SDK message for withdrawing lease funds */ +export interface MsgWithdrawLease { + $type: "akash.market.v1beta5.MsgWithdrawLease"; + bidId: LeaseID | undefined; +} + +/** MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. */ +export interface MsgWithdrawLeaseResponse { + $type: "akash.market.v1beta5.MsgWithdrawLeaseResponse"; +} + +/** MsgCloseLease defines an SDK message for closing order */ +export interface MsgCloseLease { + $type: "akash.market.v1beta5.MsgCloseLease"; + leaseId: LeaseID | undefined; +} + +/** MsgCloseLeaseResponse defines the Msg/CloseLease response type. 
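// ---------------------------------------------------------------------------
// Usage sketch (not part of the generated sources): GenesisState aggregates
// the module params plus every order, lease and bid, so a market genesis
// fragment can be assembled and inspected with fromPartial/toJSON. The
// Params, Order and Lease shapes come from their own generated modules and
// are left at defaults here.
import { GenesisState } from "./genesis";

const genesis = GenesisState.fromPartial({
  params: {}, // module defaults
  orders: [],
  leases: [],
  bids: [],
});

// Empty repeated fields are omitted from the JSON output.
console.log(GenesisState.toJSON(genesis));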
*/ +export interface MsgCloseLeaseResponse { + $type: "akash.market.v1beta5.MsgCloseLeaseResponse"; +} + +function createBaseMsgCreateLease(): MsgCreateLease { + return { $type: "akash.market.v1beta5.MsgCreateLease", bidId: undefined }; +} + +export const MsgCreateLease: MessageFns< + MsgCreateLease, + "akash.market.v1beta5.MsgCreateLease" +> = { + $type: "akash.market.v1beta5.MsgCreateLease" as const, + + encode( + message: MsgCreateLease, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.bidId !== undefined) { + BidID.encode(message.bidId, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): MsgCreateLease { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCreateLease(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.bidId = BidID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCreateLease { + return { + $type: MsgCreateLease.$type, + bidId: isSet(object.bidId) ? BidID.fromJSON(object.bidId) : undefined, + }; + }, + + toJSON(message: MsgCreateLease): unknown { + const obj: any = {}; + if (message.bidId !== undefined) { + obj.bidId = BidID.toJSON(message.bidId); + } + return obj; + }, + + create(base?: DeepPartial): MsgCreateLease { + return MsgCreateLease.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgCreateLease { + const message = createBaseMsgCreateLease(); + message.bidId = + object.bidId !== undefined && object.bidId !== null + ? BidID.fromPartial(object.bidId) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgCreateLease.$type, MsgCreateLease); + +function createBaseMsgCreateLeaseResponse(): MsgCreateLeaseResponse { + return { $type: "akash.market.v1beta5.MsgCreateLeaseResponse" }; +} + +export const MsgCreateLeaseResponse: MessageFns< + MsgCreateLeaseResponse, + "akash.market.v1beta5.MsgCreateLeaseResponse" +> = { + $type: "akash.market.v1beta5.MsgCreateLeaseResponse" as const, + + encode( + _: MsgCreateLeaseResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgCreateLeaseResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCreateLeaseResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCreateLeaseResponse { + return { $type: MsgCreateLeaseResponse.$type }; + }, + + toJSON(_: MsgCreateLeaseResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgCreateLeaseResponse { + return MsgCreateLeaseResponse.fromPartial(base ?? 
{}); + }, + fromPartial(_: DeepPartial): MsgCreateLeaseResponse { + const message = createBaseMsgCreateLeaseResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgCreateLeaseResponse.$type, MsgCreateLeaseResponse); + +function createBaseMsgWithdrawLease(): MsgWithdrawLease { + return { $type: "akash.market.v1beta5.MsgWithdrawLease", bidId: undefined }; +} + +export const MsgWithdrawLease: MessageFns< + MsgWithdrawLease, + "akash.market.v1beta5.MsgWithdrawLease" +> = { + $type: "akash.market.v1beta5.MsgWithdrawLease" as const, + + encode( + message: MsgWithdrawLease, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.bidId !== undefined) { + LeaseID.encode(message.bidId, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): MsgWithdrawLease { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgWithdrawLease(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.bidId = LeaseID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgWithdrawLease { + return { + $type: MsgWithdrawLease.$type, + bidId: isSet(object.bidId) ? LeaseID.fromJSON(object.bidId) : undefined, + }; + }, + + toJSON(message: MsgWithdrawLease): unknown { + const obj: any = {}; + if (message.bidId !== undefined) { + obj.bidId = LeaseID.toJSON(message.bidId); + } + return obj; + }, + + create(base?: DeepPartial): MsgWithdrawLease { + return MsgWithdrawLease.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgWithdrawLease { + const message = createBaseMsgWithdrawLease(); + message.bidId = + object.bidId !== undefined && object.bidId !== null + ? LeaseID.fromPartial(object.bidId) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgWithdrawLease.$type, MsgWithdrawLease); + +function createBaseMsgWithdrawLeaseResponse(): MsgWithdrawLeaseResponse { + return { $type: "akash.market.v1beta5.MsgWithdrawLeaseResponse" }; +} + +export const MsgWithdrawLeaseResponse: MessageFns< + MsgWithdrawLeaseResponse, + "akash.market.v1beta5.MsgWithdrawLeaseResponse" +> = { + $type: "akash.market.v1beta5.MsgWithdrawLeaseResponse" as const, + + encode( + _: MsgWithdrawLeaseResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgWithdrawLeaseResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgWithdrawLeaseResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgWithdrawLeaseResponse { + return { $type: MsgWithdrawLeaseResponse.$type }; + }, + + toJSON(_: MsgWithdrawLeaseResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgWithdrawLeaseResponse { + return MsgWithdrawLeaseResponse.fromPartial(base ?? 
{}); + }, + fromPartial( + _: DeepPartial, + ): MsgWithdrawLeaseResponse { + const message = createBaseMsgWithdrawLeaseResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgWithdrawLeaseResponse.$type, + MsgWithdrawLeaseResponse, +); + +function createBaseMsgCloseLease(): MsgCloseLease { + return { $type: "akash.market.v1beta5.MsgCloseLease", leaseId: undefined }; +} + +export const MsgCloseLease: MessageFns< + MsgCloseLease, + "akash.market.v1beta5.MsgCloseLease" +> = { + $type: "akash.market.v1beta5.MsgCloseLease" as const, + + encode( + message: MsgCloseLease, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.leaseId !== undefined) { + LeaseID.encode(message.leaseId, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): MsgCloseLease { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCloseLease(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.leaseId = LeaseID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCloseLease { + return { + $type: MsgCloseLease.$type, + leaseId: isSet(object.leaseId) + ? LeaseID.fromJSON(object.leaseId) + : undefined, + }; + }, + + toJSON(message: MsgCloseLease): unknown { + const obj: any = {}; + if (message.leaseId !== undefined) { + obj.leaseId = LeaseID.toJSON(message.leaseId); + } + return obj; + }, + + create(base?: DeepPartial): MsgCloseLease { + return MsgCloseLease.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgCloseLease { + const message = createBaseMsgCloseLease(); + message.leaseId = + object.leaseId !== undefined && object.leaseId !== null + ? LeaseID.fromPartial(object.leaseId) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgCloseLease.$type, MsgCloseLease); + +function createBaseMsgCloseLeaseResponse(): MsgCloseLeaseResponse { + return { $type: "akash.market.v1beta5.MsgCloseLeaseResponse" }; +} + +export const MsgCloseLeaseResponse: MessageFns< + MsgCloseLeaseResponse, + "akash.market.v1beta5.MsgCloseLeaseResponse" +> = { + $type: "akash.market.v1beta5.MsgCloseLeaseResponse" as const, + + encode( + _: MsgCloseLeaseResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgCloseLeaseResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCloseLeaseResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCloseLeaseResponse { + return { $type: MsgCloseLeaseResponse.$type }; + }, + + toJSON(_: MsgCloseLeaseResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgCloseLeaseResponse { + return MsgCloseLeaseResponse.fromPartial(base ?? 
{}); + }, + fromPartial(_: DeepPartial): MsgCloseLeaseResponse { + const message = createBaseMsgCloseLeaseResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgCloseLeaseResponse.$type, MsgCloseLeaseResponse); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/market/v1beta5/order.ts b/ts/src/generated/akash/market/v1beta5/order.ts new file mode 100644 index 00000000..056afee4 --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/order.ts @@ -0,0 +1,236 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/market/v1beta5/order.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { GroupSpec } from "../../deployment/v1beta4/groupspec"; +import { OrderID } from "../v1/order"; + +/** Order stores orderID, state of order and other details */ +export interface Order { + $type: "akash.market.v1beta5.Order"; + id: OrderID | undefined; + state: Order_State; + spec: GroupSpec | undefined; + createdAt: Long; +} + +/** State is an enum which refers to state of order */ +export enum Order_State { + /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ + invalid = 0, + /** open - OrderOpen denotes state for order open */ + open = 1, + /** active - OrderMatched denotes state for order matched */ + active = 2, + /** closed - OrderClosed denotes state for order lost */ + closed = 3, + UNRECOGNIZED = -1, +} + +export function order_StateFromJSON(object: any): Order_State { + switch (object) { + case 0: + case "invalid": + return Order_State.invalid; + case 1: + case "open": + return Order_State.open; + case 2: + case "active": + return Order_State.active; + case 3: + case "closed": + return Order_State.closed; + case -1: + case "UNRECOGNIZED": + default: + return Order_State.UNRECOGNIZED; + } +} + +export function order_StateToJSON(object: Order_State): string { + switch (object) { + case Order_State.invalid: + return "invalid"; + case Order_State.open: + return "open"; + case Order_State.active: + return "active"; + case Order_State.closed: + return "closed"; + case Order_State.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +function createBaseOrder(): Order { + return { + $type: "akash.market.v1beta5.Order", + id: undefined, + state: 0, + spec: undefined, + createdAt: Long.ZERO, + }; +} + +export const Order: MessageFns = { + $type: "akash.market.v1beta5.Order" as const, + + encode( + message: Order, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + OrderID.encode(message.id, writer.uint32(10).fork()).join(); + } + if (message.state !== 0) { + writer.uint32(16).int32(message.state); + } + if (message.spec !== undefined) { + GroupSpec.encode(message.spec, writer.uint32(26).fork()).join(); + } + if (!message.createdAt.equals(Long.ZERO)) { + writer.uint32(32).int64(message.createdAt.toString()); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Order { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseOrder(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = OrderID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.state = reader.int32() as any; + continue; + case 3: + if (tag !== 26) { + break; + } + + message.spec = GroupSpec.decode(reader, reader.uint32()); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.createdAt = Long.fromString(reader.int64().toString()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Order { + return { + $type: Order.$type, + id: isSet(object.id) ? OrderID.fromJSON(object.id) : undefined, + state: isSet(object.state) ? order_StateFromJSON(object.state) : 0, + spec: isSet(object.spec) ? GroupSpec.fromJSON(object.spec) : undefined, + createdAt: isSet(object.createdAt) + ? 
Long.fromValue(object.createdAt) + : Long.ZERO, + }; + }, + + toJSON(message: Order): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = OrderID.toJSON(message.id); + } + if (message.state !== 0) { + obj.state = order_StateToJSON(message.state); + } + if (message.spec !== undefined) { + obj.spec = GroupSpec.toJSON(message.spec); + } + if (!message.createdAt.equals(Long.ZERO)) { + obj.createdAt = (message.createdAt || Long.ZERO).toString(); + } + return obj; + }, + + create(base?: DeepPartial): Order { + return Order.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Order { + const message = createBaseOrder(); + message.id = + object.id !== undefined && object.id !== null + ? OrderID.fromPartial(object.id) + : undefined; + message.state = object.state ?? 0; + message.spec = + object.spec !== undefined && object.spec !== null + ? GroupSpec.fromPartial(object.spec) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? Long.fromValue(object.createdAt) + : Long.ZERO; + return message; + }, +}; + +messageTypeRegistry.set(Order.$type, Order); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/market/v1beta5/params.ts b/ts/src/generated/akash/market/v1beta5/params.ts new file mode 100644 index 00000000..6d1ba09d --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/params.ts @@ -0,0 +1,147 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/market/v1beta5/params.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { Coin } from "../../../cosmos/base/v1beta1/coin"; +import { messageTypeRegistry } from "../../../typeRegistry"; + +/** Params is the params for the x/market module */ +export interface Params { + $type: "akash.market.v1beta5.Params"; + bidMinDeposit: Coin | undefined; + orderMaxBids: number; +} + +function createBaseParams(): Params { + return { + $type: "akash.market.v1beta5.Params", + bidMinDeposit: undefined, + orderMaxBids: 0, + }; +} + +export const Params: MessageFns = { + $type: "akash.market.v1beta5.Params" as const, + + encode( + message: Params, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.bidMinDeposit !== undefined) { + Coin.encode(message.bidMinDeposit, writer.uint32(10).fork()).join(); + } + if (message.orderMaxBids !== 0) { + writer.uint32(16).uint32(message.orderMaxBids); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Params { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.bidMinDeposit = Coin.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.orderMaxBids = reader.uint32(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Params { + return { + $type: Params.$type, + bidMinDeposit: isSet(object.bidMinDeposit) + ? Coin.fromJSON(object.bidMinDeposit) + : undefined, + orderMaxBids: isSet(object.orderMaxBids) + ? globalThis.Number(object.orderMaxBids) + : 0, + }; + }, + + toJSON(message: Params): unknown { + const obj: any = {}; + if (message.bidMinDeposit !== undefined) { + obj.bidMinDeposit = Coin.toJSON(message.bidMinDeposit); + } + if (message.orderMaxBids !== 0) { + obj.orderMaxBids = Math.round(message.orderMaxBids); + } + return obj; + }, + + create(base?: DeepPartial): Params { + return Params.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Params { + const message = createBaseParams(); + message.bidMinDeposit = + object.bidMinDeposit !== undefined && object.bidMinDeposit !== null + ? Coin.fromPartial(object.bidMinDeposit) + : undefined; + message.orderMaxBids = object.orderMaxBids ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Params.$type, Params); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/market/v1beta5/paramsmsg.ts b/ts/src/generated/akash/market/v1beta5/paramsmsg.ts new file mode 100644 index 00000000..9b0db3ac --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/paramsmsg.ts @@ -0,0 +1,227 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/market/v1beta5/paramsmsg.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Params } from "./params"; + +/** + * MsgUpdateParams is the Msg/UpdateParams request type. + * + * Since: akash v1.0.0 + */ +export interface MsgUpdateParams { + $type: "akash.market.v1beta5.MsgUpdateParams"; + /** authority is the address of the governance account. */ + authority: string; + /** + * params defines the x/deployment parameters to update. + * + * NOTE: All parameters must be supplied. + */ + params: Params | undefined; +} + +/** + * MsgUpdateParamsResponse defines the response structure for executing a + * MsgUpdateParams message. 
+ * + * Since: akash v1.0.0 + */ +export interface MsgUpdateParamsResponse { + $type: "akash.market.v1beta5.MsgUpdateParamsResponse"; +} + +function createBaseMsgUpdateParams(): MsgUpdateParams { + return { + $type: "akash.market.v1beta5.MsgUpdateParams", + authority: "", + params: undefined, + }; +} + +export const MsgUpdateParams: MessageFns< + MsgUpdateParams, + "akash.market.v1beta5.MsgUpdateParams" +> = { + $type: "akash.market.v1beta5.MsgUpdateParams" as const, + + encode( + message: MsgUpdateParams, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.authority !== "") { + writer.uint32(10).string(message.authority); + } + if (message.params !== undefined) { + Params.encode(message.params, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): MsgUpdateParams { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgUpdateParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.authority = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.params = Params.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgUpdateParams { + return { + $type: MsgUpdateParams.$type, + authority: isSet(object.authority) + ? globalThis.String(object.authority) + : "", + params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, + }; + }, + + toJSON(message: MsgUpdateParams): unknown { + const obj: any = {}; + if (message.authority !== "") { + obj.authority = message.authority; + } + if (message.params !== undefined) { + obj.params = Params.toJSON(message.params); + } + return obj; + }, + + create(base?: DeepPartial): MsgUpdateParams { + return MsgUpdateParams.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgUpdateParams { + const message = createBaseMsgUpdateParams(); + message.authority = object.authority ?? ""; + message.params = + object.params !== undefined && object.params !== null + ? Params.fromPartial(object.params) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgUpdateParams.$type, MsgUpdateParams); + +function createBaseMsgUpdateParamsResponse(): MsgUpdateParamsResponse { + return { $type: "akash.market.v1beta5.MsgUpdateParamsResponse" }; +} + +export const MsgUpdateParamsResponse: MessageFns< + MsgUpdateParamsResponse, + "akash.market.v1beta5.MsgUpdateParamsResponse" +> = { + $type: "akash.market.v1beta5.MsgUpdateParamsResponse" as const, + + encode( + _: MsgUpdateParamsResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgUpdateParamsResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length;
+    const message = createBaseMsgUpdateParamsResponse();
+    while (reader.pos < end) {
+      const tag = reader.uint32();
+      switch (tag >>> 3) {
+      }
+      if ((tag & 7) === 4 || tag === 0) {
+        break;
+      }
+      reader.skip(tag & 7);
+    }
+    return message;
+  },
+
+  fromJSON(_: any): MsgUpdateParamsResponse {
+    return { $type: MsgUpdateParamsResponse.$type };
+  },
+
+  toJSON(_: MsgUpdateParamsResponse): unknown {
+    const obj: any = {};
+    return obj;
+  },
+
+  create(base?: DeepPartial<MsgUpdateParamsResponse>): MsgUpdateParamsResponse {
+    return MsgUpdateParamsResponse.fromPartial(base ?? {});
+  },
+  fromPartial(
+    _: DeepPartial<MsgUpdateParamsResponse>,
+  ): MsgUpdateParamsResponse {
+    const message = createBaseMsgUpdateParamsResponse();
+    return message;
+  },
+};
+
+messageTypeRegistry.set(MsgUpdateParamsResponse.$type, MsgUpdateParamsResponse);
+
+type Builtin =
+  | Date
+  | Function
+  | Uint8Array
+  | string
+  | number
+  | boolean
+  | undefined;
+
+type DeepPartial<T> = T extends Builtin
+  ? T
+  : T extends Long
+    ? string | number | Long
+    : T extends globalThis.Array<infer U>
+      ? globalThis.Array<DeepPartial<U>>
+      : T extends ReadonlyArray<infer U>
+        ? ReadonlyArray<DeepPartial<U>>
+        : T extends {}
+          ? { [K in Exclude<keyof T, "$type">]?: DeepPartial<T[K]> }
+          : Partial<T>;
+
+function isSet(value: any): boolean {
+  return value !== null && value !== undefined;
+}
+
+interface MessageFns<T, V extends string> {
+  readonly $type: V;
+  encode(message: T, writer?: BinaryWriter): BinaryWriter;
+  decode(input: BinaryReader | Uint8Array, length?: number): T;
+  fromJSON(object: any): T;
+  toJSON(message: T): unknown;
+  create(base?: DeepPartial<T>): T;
+  fromPartial(object: DeepPartial<T>): T;
+}
diff --git a/ts/src/generated/akash/market/v1beta5/query.ts b/ts/src/generated/akash/market/v1beta5/query.ts
new file mode 100644
index 00000000..ab127582
--- /dev/null
+++ b/ts/src/generated/akash/market/v1beta5/query.ts
@@ -0,0 +1,1497 @@
+// Code generated by protoc-gen-ts_proto. DO NOT EDIT.
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/market/v1beta5/query.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { + PageRequest, + PageResponse, +} from "../../../cosmos/base/query/v1beta1/pagination"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Account } from "../../escrow/v1/account"; +import { FractionalPayment } from "../../escrow/v1/fractional_payment"; +import { BidID } from "../v1/bid"; +import { LeaseFilters } from "../v1/filters"; +import { Lease, LeaseID } from "../v1/lease"; +import { OrderID } from "../v1/order"; +import { Bid } from "./bid"; +import { BidFilters, OrderFilters } from "./filters"; +import { Order } from "./order"; +import { Params } from "./params"; + +/** QueryOrdersRequest is request type for the Query/Orders RPC method */ +export interface QueryOrdersRequest { + $type: "akash.market.v1beta5.QueryOrdersRequest"; + filters: OrderFilters | undefined; + pagination: PageRequest | undefined; +} + +/** QueryOrdersResponse is response type for the Query/Orders RPC method */ +export interface QueryOrdersResponse { + $type: "akash.market.v1beta5.QueryOrdersResponse"; + orders: Order[]; + pagination: PageResponse | undefined; +} + +/** QueryOrderRequest is request type for the Query/Order RPC method */ +export interface QueryOrderRequest { + $type: "akash.market.v1beta5.QueryOrderRequest"; + id: OrderID | undefined; +} + +/** QueryOrderResponse is response type for the Query/Order RPC method */ +export interface QueryOrderResponse { + $type: "akash.market.v1beta5.QueryOrderResponse"; + order: Order | undefined; +} + +/** QueryBidsRequest is request type for the Query/Bids RPC method */ +export interface QueryBidsRequest { + $type: "akash.market.v1beta5.QueryBidsRequest"; + filters: BidFilters | undefined; + pagination: PageRequest | undefined; +} + +/** QueryBidsResponse is response type for the Query/Bids RPC method */ +export interface QueryBidsResponse { + $type: "akash.market.v1beta5.QueryBidsResponse"; + bids: QueryBidResponse[]; + pagination: PageResponse | undefined; +} + +/** QueryBidRequest is request type for the Query/Bid RPC method */ +export interface QueryBidRequest { + $type: "akash.market.v1beta5.QueryBidRequest"; + id: BidID | undefined; +} + +/** QueryBidResponse is response type for the Query/Bid RPC method */ +export interface QueryBidResponse { + $type: "akash.market.v1beta5.QueryBidResponse"; + bid: Bid | undefined; + escrowAccount: Account | undefined; +} + +/** QueryLeasesRequest is request type for the Query/Leases RPC method */ +export interface QueryLeasesRequest { + $type: "akash.market.v1beta5.QueryLeasesRequest"; + filters: LeaseFilters | undefined; + pagination: PageRequest | undefined; +} + +/** QueryLeasesResponse is response type for the Query/Leases RPC method */ +export interface QueryLeasesResponse { + $type: "akash.market.v1beta5.QueryLeasesResponse"; + leases: QueryLeaseResponse[]; + pagination: PageResponse | undefined; +} + +/** QueryLeaseRequest is request type for the Query/Lease RPC method */ +export interface QueryLeaseRequest { + $type: "akash.market.v1beta5.QueryLeaseRequest"; + id: LeaseID | undefined; +} + +/** QueryLeaseResponse is response type for the Query/Lease RPC method */ +export interface QueryLeaseResponse { + $type: "akash.market.v1beta5.QueryLeaseResponse"; + lease: Lease | undefined; + escrowPayment: FractionalPayment | undefined; +} + +/** 
QueryParamsRequest is the request type for the Query/Params RPC method. */ +export interface QueryParamsRequest { + $type: "akash.market.v1beta5.QueryParamsRequest"; +} + +/** QueryParamsResponse is the response type for the Query/Params RPC method. */ +export interface QueryParamsResponse { + $type: "akash.market.v1beta5.QueryParamsResponse"; + /** params defines the parameters of the module. */ + params: Params | undefined; +} + +function createBaseQueryOrdersRequest(): QueryOrdersRequest { + return { + $type: "akash.market.v1beta5.QueryOrdersRequest", + filters: undefined, + pagination: undefined, + }; +} + +export const QueryOrdersRequest: MessageFns< + QueryOrdersRequest, + "akash.market.v1beta5.QueryOrdersRequest" +> = { + $type: "akash.market.v1beta5.QueryOrdersRequest" as const, + + encode( + message: QueryOrdersRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.filters !== undefined) { + OrderFilters.encode(message.filters, writer.uint32(10).fork()).join(); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryOrdersRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryOrdersRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.filters = OrderFilters.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryOrdersRequest { + return { + $type: QueryOrdersRequest.$type, + filters: isSet(object.filters) + ? OrderFilters.fromJSON(object.filters) + : undefined, + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryOrdersRequest): unknown { + const obj: any = {}; + if (message.filters !== undefined) { + obj.filters = OrderFilters.toJSON(message.filters); + } + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryOrdersRequest { + return QueryOrdersRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryOrdersRequest { + const message = createBaseQueryOrdersRequest(); + message.filters = + object.filters !== undefined && object.filters !== null + ? OrderFilters.fromPartial(object.filters) + : undefined; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? 
PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryOrdersRequest.$type, QueryOrdersRequest); + +function createBaseQueryOrdersResponse(): QueryOrdersResponse { + return { + $type: "akash.market.v1beta5.QueryOrdersResponse", + orders: [], + pagination: undefined, + }; +} + +export const QueryOrdersResponse: MessageFns< + QueryOrdersResponse, + "akash.market.v1beta5.QueryOrdersResponse" +> = { + $type: "akash.market.v1beta5.QueryOrdersResponse" as const, + + encode( + message: QueryOrdersResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.orders) { + Order.encode(v!, writer.uint32(10).fork()).join(); + } + if (message.pagination !== undefined) { + PageResponse.encode(message.pagination, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryOrdersResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryOrdersResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.orders.push(Order.decode(reader, reader.uint32())); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageResponse.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryOrdersResponse { + return { + $type: QueryOrdersResponse.$type, + orders: globalThis.Array.isArray(object?.orders) + ? object.orders.map((e: any) => Order.fromJSON(e)) + : [], + pagination: isSet(object.pagination) + ? PageResponse.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryOrdersResponse): unknown { + const obj: any = {}; + if (message.orders?.length) { + obj.orders = message.orders.map((e) => Order.toJSON(e)); + } + if (message.pagination !== undefined) { + obj.pagination = PageResponse.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryOrdersResponse { + return QueryOrdersResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryOrdersResponse { + const message = createBaseQueryOrdersResponse(); + message.orders = object.orders?.map((e) => Order.fromPartial(e)) || []; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageResponse.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryOrdersResponse.$type, QueryOrdersResponse); + +function createBaseQueryOrderRequest(): QueryOrderRequest { + return { $type: "akash.market.v1beta5.QueryOrderRequest", id: undefined }; +} + +export const QueryOrderRequest: MessageFns< + QueryOrderRequest, + "akash.market.v1beta5.QueryOrderRequest" +> = { + $type: "akash.market.v1beta5.QueryOrderRequest" as const, + + encode( + message: QueryOrderRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + OrderID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): QueryOrderRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryOrderRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = OrderID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryOrderRequest { + return { + $type: QueryOrderRequest.$type, + id: isSet(object.id) ? OrderID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: QueryOrderRequest): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = OrderID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): QueryOrderRequest { + return QueryOrderRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryOrderRequest { + const message = createBaseQueryOrderRequest(); + message.id = + object.id !== undefined && object.id !== null + ? OrderID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryOrderRequest.$type, QueryOrderRequest); + +function createBaseQueryOrderResponse(): QueryOrderResponse { + return { $type: "akash.market.v1beta5.QueryOrderResponse", order: undefined }; +} + +export const QueryOrderResponse: MessageFns< + QueryOrderResponse, + "akash.market.v1beta5.QueryOrderResponse" +> = { + $type: "akash.market.v1beta5.QueryOrderResponse" as const, + + encode( + message: QueryOrderResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.order !== undefined) { + Order.encode(message.order, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryOrderResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryOrderResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.order = Order.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryOrderResponse { + return { + $type: QueryOrderResponse.$type, + order: isSet(object.order) ? Order.fromJSON(object.order) : undefined, + }; + }, + + toJSON(message: QueryOrderResponse): unknown { + const obj: any = {}; + if (message.order !== undefined) { + obj.order = Order.toJSON(message.order); + } + return obj; + }, + + create(base?: DeepPartial): QueryOrderResponse { + return QueryOrderResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryOrderResponse { + const message = createBaseQueryOrderResponse(); + message.order = + object.order !== undefined && object.order !== null + ? 
Order.fromPartial(object.order) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryOrderResponse.$type, QueryOrderResponse); + +function createBaseQueryBidsRequest(): QueryBidsRequest { + return { + $type: "akash.market.v1beta5.QueryBidsRequest", + filters: undefined, + pagination: undefined, + }; +} + +export const QueryBidsRequest: MessageFns< + QueryBidsRequest, + "akash.market.v1beta5.QueryBidsRequest" +> = { + $type: "akash.market.v1beta5.QueryBidsRequest" as const, + + encode( + message: QueryBidsRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.filters !== undefined) { + BidFilters.encode(message.filters, writer.uint32(10).fork()).join(); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): QueryBidsRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryBidsRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.filters = BidFilters.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryBidsRequest { + return { + $type: QueryBidsRequest.$type, + filters: isSet(object.filters) + ? BidFilters.fromJSON(object.filters) + : undefined, + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryBidsRequest): unknown { + const obj: any = {}; + if (message.filters !== undefined) { + obj.filters = BidFilters.toJSON(message.filters); + } + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryBidsRequest { + return QueryBidsRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryBidsRequest { + const message = createBaseQueryBidsRequest(); + message.filters = + object.filters !== undefined && object.filters !== null + ? BidFilters.fromPartial(object.filters) + : undefined; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? 
PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryBidsRequest.$type, QueryBidsRequest); + +function createBaseQueryBidsResponse(): QueryBidsResponse { + return { + $type: "akash.market.v1beta5.QueryBidsResponse", + bids: [], + pagination: undefined, + }; +} + +export const QueryBidsResponse: MessageFns< + QueryBidsResponse, + "akash.market.v1beta5.QueryBidsResponse" +> = { + $type: "akash.market.v1beta5.QueryBidsResponse" as const, + + encode( + message: QueryBidsResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.bids) { + QueryBidResponse.encode(v!, writer.uint32(10).fork()).join(); + } + if (message.pagination !== undefined) { + PageResponse.encode(message.pagination, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): QueryBidsResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryBidsResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.bids.push(QueryBidResponse.decode(reader, reader.uint32())); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageResponse.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryBidsResponse { + return { + $type: QueryBidsResponse.$type, + bids: globalThis.Array.isArray(object?.bids) + ? object.bids.map((e: any) => QueryBidResponse.fromJSON(e)) + : [], + pagination: isSet(object.pagination) + ? PageResponse.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryBidsResponse): unknown { + const obj: any = {}; + if (message.bids?.length) { + obj.bids = message.bids.map((e) => QueryBidResponse.toJSON(e)); + } + if (message.pagination !== undefined) { + obj.pagination = PageResponse.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryBidsResponse { + return QueryBidsResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryBidsResponse { + const message = createBaseQueryBidsResponse(); + message.bids = + object.bids?.map((e) => QueryBidResponse.fromPartial(e)) || []; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageResponse.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryBidsResponse.$type, QueryBidsResponse); + +function createBaseQueryBidRequest(): QueryBidRequest { + return { $type: "akash.market.v1beta5.QueryBidRequest", id: undefined }; +} + +export const QueryBidRequest: MessageFns< + QueryBidRequest, + "akash.market.v1beta5.QueryBidRequest" +> = { + $type: "akash.market.v1beta5.QueryBidRequest" as const, + + encode( + message: QueryBidRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + BidID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): QueryBidRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryBidRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = BidID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryBidRequest { + return { + $type: QueryBidRequest.$type, + id: isSet(object.id) ? BidID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: QueryBidRequest): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = BidID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): QueryBidRequest { + return QueryBidRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryBidRequest { + const message = createBaseQueryBidRequest(); + message.id = + object.id !== undefined && object.id !== null + ? BidID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryBidRequest.$type, QueryBidRequest); + +function createBaseQueryBidResponse(): QueryBidResponse { + return { + $type: "akash.market.v1beta5.QueryBidResponse", + bid: undefined, + escrowAccount: undefined, + }; +} + +export const QueryBidResponse: MessageFns< + QueryBidResponse, + "akash.market.v1beta5.QueryBidResponse" +> = { + $type: "akash.market.v1beta5.QueryBidResponse" as const, + + encode( + message: QueryBidResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.bid !== undefined) { + Bid.encode(message.bid, writer.uint32(10).fork()).join(); + } + if (message.escrowAccount !== undefined) { + Account.encode(message.escrowAccount, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): QueryBidResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryBidResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.bid = Bid.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.escrowAccount = Account.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryBidResponse { + return { + $type: QueryBidResponse.$type, + bid: isSet(object.bid) ? Bid.fromJSON(object.bid) : undefined, + escrowAccount: isSet(object.escrowAccount) + ? Account.fromJSON(object.escrowAccount) + : undefined, + }; + }, + + toJSON(message: QueryBidResponse): unknown { + const obj: any = {}; + if (message.bid !== undefined) { + obj.bid = Bid.toJSON(message.bid); + } + if (message.escrowAccount !== undefined) { + obj.escrowAccount = Account.toJSON(message.escrowAccount); + } + return obj; + }, + + create(base?: DeepPartial): QueryBidResponse { + return QueryBidResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryBidResponse { + const message = createBaseQueryBidResponse(); + message.bid = + object.bid !== undefined && object.bid !== null + ? Bid.fromPartial(object.bid) + : undefined; + message.escrowAccount = + object.escrowAccount !== undefined && object.escrowAccount !== null + ? 
Account.fromPartial(object.escrowAccount) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryBidResponse.$type, QueryBidResponse); + +function createBaseQueryLeasesRequest(): QueryLeasesRequest { + return { + $type: "akash.market.v1beta5.QueryLeasesRequest", + filters: undefined, + pagination: undefined, + }; +} + +export const QueryLeasesRequest: MessageFns< + QueryLeasesRequest, + "akash.market.v1beta5.QueryLeasesRequest" +> = { + $type: "akash.market.v1beta5.QueryLeasesRequest" as const, + + encode( + message: QueryLeasesRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.filters !== undefined) { + LeaseFilters.encode(message.filters, writer.uint32(10).fork()).join(); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryLeasesRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryLeasesRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.filters = LeaseFilters.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryLeasesRequest { + return { + $type: QueryLeasesRequest.$type, + filters: isSet(object.filters) + ? LeaseFilters.fromJSON(object.filters) + : undefined, + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryLeasesRequest): unknown { + const obj: any = {}; + if (message.filters !== undefined) { + obj.filters = LeaseFilters.toJSON(message.filters); + } + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryLeasesRequest { + return QueryLeasesRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryLeasesRequest { + const message = createBaseQueryLeasesRequest(); + message.filters = + object.filters !== undefined && object.filters !== null + ? LeaseFilters.fromPartial(object.filters) + : undefined; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? 
PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryLeasesRequest.$type, QueryLeasesRequest); + +function createBaseQueryLeasesResponse(): QueryLeasesResponse { + return { + $type: "akash.market.v1beta5.QueryLeasesResponse", + leases: [], + pagination: undefined, + }; +} + +export const QueryLeasesResponse: MessageFns< + QueryLeasesResponse, + "akash.market.v1beta5.QueryLeasesResponse" +> = { + $type: "akash.market.v1beta5.QueryLeasesResponse" as const, + + encode( + message: QueryLeasesResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.leases) { + QueryLeaseResponse.encode(v!, writer.uint32(10).fork()).join(); + } + if (message.pagination !== undefined) { + PageResponse.encode(message.pagination, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryLeasesResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryLeasesResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.leases.push( + QueryLeaseResponse.decode(reader, reader.uint32()), + ); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageResponse.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryLeasesResponse { + return { + $type: QueryLeasesResponse.$type, + leases: globalThis.Array.isArray(object?.leases) + ? object.leases.map((e: any) => QueryLeaseResponse.fromJSON(e)) + : [], + pagination: isSet(object.pagination) + ? PageResponse.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryLeasesResponse): unknown { + const obj: any = {}; + if (message.leases?.length) { + obj.leases = message.leases.map((e) => QueryLeaseResponse.toJSON(e)); + } + if (message.pagination !== undefined) { + obj.pagination = PageResponse.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryLeasesResponse { + return QueryLeasesResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryLeasesResponse { + const message = createBaseQueryLeasesResponse(); + message.leases = + object.leases?.map((e) => QueryLeaseResponse.fromPartial(e)) || []; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageResponse.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryLeasesResponse.$type, QueryLeasesResponse); + +function createBaseQueryLeaseRequest(): QueryLeaseRequest { + return { $type: "akash.market.v1beta5.QueryLeaseRequest", id: undefined }; +} + +export const QueryLeaseRequest: MessageFns< + QueryLeaseRequest, + "akash.market.v1beta5.QueryLeaseRequest" +> = { + $type: "akash.market.v1beta5.QueryLeaseRequest" as const, + + encode( + message: QueryLeaseRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.id !== undefined) { + LeaseID.encode(message.id, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): QueryLeaseRequest { + const reader = + input instanceof BinaryReader ? 
input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryLeaseRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = LeaseID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryLeaseRequest { + return { + $type: QueryLeaseRequest.$type, + id: isSet(object.id) ? LeaseID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: QueryLeaseRequest): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = LeaseID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): QueryLeaseRequest { + return QueryLeaseRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryLeaseRequest { + const message = createBaseQueryLeaseRequest(); + message.id = + object.id !== undefined && object.id !== null + ? LeaseID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryLeaseRequest.$type, QueryLeaseRequest); + +function createBaseQueryLeaseResponse(): QueryLeaseResponse { + return { + $type: "akash.market.v1beta5.QueryLeaseResponse", + lease: undefined, + escrowPayment: undefined, + }; +} + +export const QueryLeaseResponse: MessageFns< + QueryLeaseResponse, + "akash.market.v1beta5.QueryLeaseResponse" +> = { + $type: "akash.market.v1beta5.QueryLeaseResponse" as const, + + encode( + message: QueryLeaseResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.lease !== undefined) { + Lease.encode(message.lease, writer.uint32(10).fork()).join(); + } + if (message.escrowPayment !== undefined) { + FractionalPayment.encode( + message.escrowPayment, + writer.uint32(18).fork(), + ).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryLeaseResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryLeaseResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.lease = Lease.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.escrowPayment = FractionalPayment.decode( + reader, + reader.uint32(), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryLeaseResponse { + return { + $type: QueryLeaseResponse.$type, + lease: isSet(object.lease) ? Lease.fromJSON(object.lease) : undefined, + escrowPayment: isSet(object.escrowPayment) + ? FractionalPayment.fromJSON(object.escrowPayment) + : undefined, + }; + }, + + toJSON(message: QueryLeaseResponse): unknown { + const obj: any = {}; + if (message.lease !== undefined) { + obj.lease = Lease.toJSON(message.lease); + } + if (message.escrowPayment !== undefined) { + obj.escrowPayment = FractionalPayment.toJSON(message.escrowPayment); + } + return obj; + }, + + create(base?: DeepPartial): QueryLeaseResponse { + return QueryLeaseResponse.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): QueryLeaseResponse { + const message = createBaseQueryLeaseResponse(); + message.lease = + object.lease !== undefined && object.lease !== null + ? Lease.fromPartial(object.lease) + : undefined; + message.escrowPayment = + object.escrowPayment !== undefined && object.escrowPayment !== null + ? FractionalPayment.fromPartial(object.escrowPayment) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryLeaseResponse.$type, QueryLeaseResponse); + +function createBaseQueryParamsRequest(): QueryParamsRequest { + return { $type: "akash.market.v1beta5.QueryParamsRequest" }; +} + +export const QueryParamsRequest: MessageFns< + QueryParamsRequest, + "akash.market.v1beta5.QueryParamsRequest" +> = { + $type: "akash.market.v1beta5.QueryParamsRequest" as const, + + encode( + _: QueryParamsRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryParamsRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryParamsRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): QueryParamsRequest { + return { $type: QueryParamsRequest.$type }; + }, + + toJSON(_: QueryParamsRequest): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): QueryParamsRequest { + return QueryParamsRequest.fromPartial(base ?? {}); + }, + fromPartial(_: DeepPartial): QueryParamsRequest { + const message = createBaseQueryParamsRequest(); + return message; + }, +}; + +messageTypeRegistry.set(QueryParamsRequest.$type, QueryParamsRequest); + +function createBaseQueryParamsResponse(): QueryParamsResponse { + return { + $type: "akash.market.v1beta5.QueryParamsResponse", + params: undefined, + }; +} + +export const QueryParamsResponse: MessageFns< + QueryParamsResponse, + "akash.market.v1beta5.QueryParamsResponse" +> = { + $type: "akash.market.v1beta5.QueryParamsResponse" as const, + + encode( + message: QueryParamsResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.params !== undefined) { + Params.encode(message.params, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryParamsResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryParamsResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.params = Params.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryParamsResponse { + return { + $type: QueryParamsResponse.$type, + params: isSet(object.params) ? 
Params.fromJSON(object.params) : undefined, + }; + }, + + toJSON(message: QueryParamsResponse): unknown { + const obj: any = {}; + if (message.params !== undefined) { + obj.params = Params.toJSON(message.params); + } + return obj; + }, + + create(base?: DeepPartial<QueryParamsResponse>): QueryParamsResponse { + return QueryParamsResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial<QueryParamsResponse>): QueryParamsResponse { + const message = createBaseQueryParamsResponse(); + message.params = + object.params !== undefined && object.params !== null + ? Params.fromPartial(object.params) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryParamsResponse.$type, QueryParamsResponse); + +/** Query defines the gRPC querier service */ +export interface Query { + /** Orders queries orders with filters */ + Orders(request: QueryOrdersRequest): Promise<QueryOrdersResponse>; + /** Order queries order details */ + Order(request: QueryOrderRequest): Promise<QueryOrderResponse>; + /** Bids queries bids with filters */ + Bids(request: QueryBidsRequest): Promise<QueryBidsResponse>; + /** Bid queries bid details */ + Bid(request: QueryBidRequest): Promise<QueryBidResponse>; + /** Leases queries leases with filters */ + Leases(request: QueryLeasesRequest): Promise<QueryLeasesResponse>; + /** Lease queries lease details */ + Lease(request: QueryLeaseRequest): Promise<QueryLeaseResponse>; + /** Params returns the total set of minting parameters. */ + Params(request: QueryParamsRequest): Promise<QueryParamsResponse>; +} + +export const QueryServiceName = "akash.market.v1beta5.Query"; +export class QueryClientImpl implements Query { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || QueryServiceName; + this.rpc = rpc; + this.Orders = this.Orders.bind(this); + this.Order = this.Order.bind(this); + this.Bids = this.Bids.bind(this); + this.Bid = this.Bid.bind(this); + this.Leases = this.Leases.bind(this); + this.Lease = this.Lease.bind(this); + this.Params = this.Params.bind(this); + } + Orders(request: QueryOrdersRequest): Promise<QueryOrdersResponse> { + const data = QueryOrdersRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Orders", data); + return promise.then((data) => + QueryOrdersResponse.decode(new BinaryReader(data)), + ); + } + + Order(request: QueryOrderRequest): Promise<QueryOrderResponse> { + const data = QueryOrderRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Order", data); + return promise.then((data) => + QueryOrderResponse.decode(new BinaryReader(data)), + ); + } + + Bids(request: QueryBidsRequest): Promise<QueryBidsResponse> { + const data = QueryBidsRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Bids", data); + return promise.then((data) => + QueryBidsResponse.decode(new BinaryReader(data)), + ); + } + + Bid(request: QueryBidRequest): Promise<QueryBidResponse> { + const data = QueryBidRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Bid", data); + return promise.then((data) => + QueryBidResponse.decode(new BinaryReader(data)), + ); + } + + Leases(request: QueryLeasesRequest): Promise<QueryLeasesResponse> { + const data = QueryLeasesRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Leases", data); + return promise.then((data) => + QueryLeasesResponse.decode(new BinaryReader(data)), + ); + } + + Lease(request: QueryLeaseRequest): Promise<QueryLeaseResponse> { + const data = QueryLeaseRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Lease", data); + return promise.then((data) => + QueryLeaseResponse.decode(new BinaryReader(data)), + ); + }
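/*
 * Usage sketch: QueryClientImpl only depends on the small Rpc interface
 * declared at the bottom of this file, so any transport can back it. The
 * sendAbciQuery helper, the endpoint path and the LeaseID field values
 * below are illustrative assumptions, not part of the generated API.
 *
 *   const rpc: Rpc = {
 *     request(service, method, data) {
 *       // forward the encoded request bytes to a node; sendAbciQuery is a
 *       // hypothetical caller-supplied transport returning Promise<Uint8Array>
 *       return sendAbciQuery(`/${service}/${method}`, data);
 *     },
 *   };
 *
 *   const client = new QueryClientImpl(rpc);
 *   // inside an async function:
 *   const res = await client.Lease(
 *     QueryLeaseRequest.fromPartial({
 *       id: { owner: "akash1...", dseq: 1, gseq: 1, oseq: 1, provider: "akash1..." },
 *     }),
 *   );
 *   console.log(res.lease?.state);
 */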
+ + Params(request: QueryParamsRequest): Promise<QueryParamsResponse> { + const data = QueryParamsRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Params", data); + return promise.then((data) => + QueryParamsResponse.decode(new BinaryReader(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise<Uint8Array>; +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial<T> = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array<infer U> + ? globalThis.Array<DeepPartial<U>> + : T extends ReadonlyArray<infer U> + ? ReadonlyArray<DeepPartial<U>> + : T extends {} + ? { [K in Exclude<keyof T, "$type">]?: DeepPartial<T[K]> } + : Partial<T>; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns<T, V extends string> { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial<T>): T; + fromPartial(object: DeepPartial<T>): T; +} diff --git a/ts/src/generated/akash/market/v1beta5/resourcesoffer.ts b/ts/src/generated/akash/market/v1beta5/resourcesoffer.ts new file mode 100644 index 00000000..69741135 --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/resourcesoffer.ts @@ -0,0 +1,151 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/market/v1beta5/resourcesoffer.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Resources } from "../../base/resources/v1beta4/resources"; + +/** + * ResourceOffer describes resources that provider is offering + * for deployment + */ +export interface ResourceOffer { + $type: "akash.market.v1beta5.ResourceOffer"; + resources: Resources | undefined; + count: number; +} + +function createBaseResourceOffer(): ResourceOffer { + return { + $type: "akash.market.v1beta5.ResourceOffer", + resources: undefined, + count: 0, + }; +} + +export const ResourceOffer: MessageFns< + ResourceOffer, + "akash.market.v1beta5.ResourceOffer" +> = { + $type: "akash.market.v1beta5.ResourceOffer" as const, + + encode( + message: ResourceOffer, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(10).fork()).join(); + } + if (message.count !== 0) { + writer.uint32(16).uint32(message.count); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): ResourceOffer { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseResourceOffer(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.resources = Resources.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.count = reader.uint32(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): ResourceOffer { + return { + $type: ResourceOffer.$type, + resources: isSet(object.resources) + ?
Resources.fromJSON(object.resources) + : undefined, + count: isSet(object.count) ? globalThis.Number(object.count) : 0, + }; + }, + + toJSON(message: ResourceOffer): unknown { + const obj: any = {}; + if (message.resources !== undefined) { + obj.resources = Resources.toJSON(message.resources); + } + if (message.count !== 0) { + obj.count = Math.round(message.count); + } + return obj; + }, + + create(base?: DeepPartial): ResourceOffer { + return ResourceOffer.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): ResourceOffer { + const message = createBaseResourceOffer(); + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + message.count = object.count ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(ResourceOffer.$type, ResourceOffer); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/market/v1beta5/service.grpc-js.ts b/ts/src/generated/akash/market/v1beta5/service.grpc-js.ts new file mode 100644 index 00000000..352c2497 --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/service.grpc-js.ts @@ -0,0 +1,314 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/market/v1beta5/service.proto + +/* eslint-disable */ +import { + ChannelCredentials, + Client, + makeGenericClientConstructor, + Metadata, +} from "@grpc/grpc-js"; +import type { + CallOptions, + ClientOptions, + ClientUnaryCall, + handleUnaryCall, + ServiceError, + UntypedServiceImplementation, +} from "@grpc/grpc-js"; +import { + MsgCloseBid, + MsgCloseBidResponse, + MsgCreateBid, + MsgCreateBidResponse, +} from "./bidmsg"; +import { + MsgCloseLease, + MsgCloseLeaseResponse, + MsgCreateLease, + MsgCreateLeaseResponse, + MsgWithdrawLease, + MsgWithdrawLeaseResponse, +} from "./leasemsg"; +import { MsgUpdateParams, MsgUpdateParamsResponse } from "./paramsmsg"; + +export const protobufPackage = "akash.market.v1beta5"; + +/** Msg defines the market Msg service */ +export type MsgService = typeof MsgService; +export const MsgService = { + /** CreateBid defines a method to create a bid given proper inputs. */ + createBid: { + path: "/akash.market.v1beta5.Msg/CreateBid", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgCreateBid) => + Buffer.from(MsgCreateBid.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgCreateBid.decode(value), + responseSerialize: (value: MsgCreateBidResponse) => + Buffer.from(MsgCreateBidResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => MsgCreateBidResponse.decode(value), + }, + /** CloseBid defines a method to close a bid given proper inputs. 
*/ + closeBid: { + path: "/akash.market.v1beta5.Msg/CloseBid", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgCloseBid) => + Buffer.from(MsgCloseBid.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgCloseBid.decode(value), + responseSerialize: (value: MsgCloseBidResponse) => + Buffer.from(MsgCloseBidResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => MsgCloseBidResponse.decode(value), + }, + /** WithdrawLease withdraws accrued funds from the lease payment */ + withdrawLease: { + path: "/akash.market.v1beta5.Msg/WithdrawLease", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgWithdrawLease) => + Buffer.from(MsgWithdrawLease.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgWithdrawLease.decode(value), + responseSerialize: (value: MsgWithdrawLeaseResponse) => + Buffer.from(MsgWithdrawLeaseResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgWithdrawLeaseResponse.decode(value), + }, + /** CreateLease creates a new lease */ + createLease: { + path: "/akash.market.v1beta5.Msg/CreateLease", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgCreateLease) => + Buffer.from(MsgCreateLease.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgCreateLease.decode(value), + responseSerialize: (value: MsgCreateLeaseResponse) => + Buffer.from(MsgCreateLeaseResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgCreateLeaseResponse.decode(value), + }, + /** CloseLease defines a method to close an order given proper inputs. */ + closeLease: { + path: "/akash.market.v1beta5.Msg/CloseLease", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgCloseLease) => + Buffer.from(MsgCloseLease.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgCloseLease.decode(value), + responseSerialize: (value: MsgCloseLeaseResponse) => + Buffer.from(MsgCloseLeaseResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => MsgCloseLeaseResponse.decode(value), + }, + /** + * UpdateParams defines a governance operation for updating the x/market module + * parameters. The authority is hard-coded to the x/gov module account. + * + * Since: akash v1.0.0 + */ + updateParams: { + path: "/akash.market.v1beta5.Msg/UpdateParams", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgUpdateParams) => + Buffer.from(MsgUpdateParams.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgUpdateParams.decode(value), + responseSerialize: (value: MsgUpdateParamsResponse) => + Buffer.from(MsgUpdateParamsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgUpdateParamsResponse.decode(value), + }, +} as const; + +export interface MsgServer extends UntypedServiceImplementation { + /** CreateBid defines a method to create a bid given proper inputs. */ + createBid: handleUnaryCall; + /** CloseBid defines a method to close a bid given proper inputs. */ + closeBid: handleUnaryCall; + /** WithdrawLease withdraws accrued funds from the lease payment */ + withdrawLease: handleUnaryCall; + /** CreateLease creates a new lease */ + createLease: handleUnaryCall; + /** CloseLease defines a method to close an order given proper inputs. */ + closeLease: handleUnaryCall; + /** + * UpdateParams defines a governance operation for updating the x/market module + * parameters. 
The authority is hard-coded to the x/gov module account. + * + * Since: akash v1.0.0 + */ + updateParams: handleUnaryCall; +} + +export interface MsgClient extends Client { + /** CreateBid defines a method to create a bid given proper inputs. */ + createBid( + request: MsgCreateBid, + callback: ( + error: ServiceError | null, + response: MsgCreateBidResponse, + ) => void, + ): ClientUnaryCall; + createBid( + request: MsgCreateBid, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgCreateBidResponse, + ) => void, + ): ClientUnaryCall; + createBid( + request: MsgCreateBid, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgCreateBidResponse, + ) => void, + ): ClientUnaryCall; + /** CloseBid defines a method to close a bid given proper inputs. */ + closeBid( + request: MsgCloseBid, + callback: ( + error: ServiceError | null, + response: MsgCloseBidResponse, + ) => void, + ): ClientUnaryCall; + closeBid( + request: MsgCloseBid, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgCloseBidResponse, + ) => void, + ): ClientUnaryCall; + closeBid( + request: MsgCloseBid, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgCloseBidResponse, + ) => void, + ): ClientUnaryCall; + /** WithdrawLease withdraws accrued funds from the lease payment */ + withdrawLease( + request: MsgWithdrawLease, + callback: ( + error: ServiceError | null, + response: MsgWithdrawLeaseResponse, + ) => void, + ): ClientUnaryCall; + withdrawLease( + request: MsgWithdrawLease, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgWithdrawLeaseResponse, + ) => void, + ): ClientUnaryCall; + withdrawLease( + request: MsgWithdrawLease, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgWithdrawLeaseResponse, + ) => void, + ): ClientUnaryCall; + /** CreateLease creates a new lease */ + createLease( + request: MsgCreateLease, + callback: ( + error: ServiceError | null, + response: MsgCreateLeaseResponse, + ) => void, + ): ClientUnaryCall; + createLease( + request: MsgCreateLease, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgCreateLeaseResponse, + ) => void, + ): ClientUnaryCall; + createLease( + request: MsgCreateLease, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgCreateLeaseResponse, + ) => void, + ): ClientUnaryCall; + /** CloseLease defines a method to close an order given proper inputs. */ + closeLease( + request: MsgCloseLease, + callback: ( + error: ServiceError | null, + response: MsgCloseLeaseResponse, + ) => void, + ): ClientUnaryCall; + closeLease( + request: MsgCloseLease, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgCloseLeaseResponse, + ) => void, + ): ClientUnaryCall; + closeLease( + request: MsgCloseLease, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgCloseLeaseResponse, + ) => void, + ): ClientUnaryCall; + /** + * UpdateParams defines a governance operation for updating the x/market module + * parameters. The authority is hard-coded to the x/gov module account. 
+ * + * Since: akash v1.0.0 + */ + updateParams( + request: MsgUpdateParams, + callback: ( + error: ServiceError | null, + response: MsgUpdateParamsResponse, + ) => void, + ): ClientUnaryCall; + updateParams( + request: MsgUpdateParams, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgUpdateParamsResponse, + ) => void, + ): ClientUnaryCall; + updateParams( + request: MsgUpdateParams, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgUpdateParamsResponse, + ) => void, + ): ClientUnaryCall; +} + +export const MsgClient = makeGenericClientConstructor( + MsgService, + "akash.market.v1beta5.Msg", +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial, + ): MsgClient; + service: typeof MsgService; + serviceName: string; +}; diff --git a/ts/src/generated/akash/market/v1beta5/service.ts b/ts/src/generated/akash/market/v1beta5/service.ts new file mode 100644 index 00000000..7fe49df2 --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/service.ts @@ -0,0 +1,115 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/market/v1beta5/service.proto + +/* eslint-disable */ +import { BinaryReader } from "@bufbuild/protobuf/wire"; +import { + MsgCloseBid, + MsgCloseBidResponse, + MsgCreateBid, + MsgCreateBidResponse, +} from "./bidmsg"; +import { + MsgCloseLease, + MsgCloseLeaseResponse, + MsgCreateLease, + MsgCreateLeaseResponse, + MsgWithdrawLease, + MsgWithdrawLeaseResponse, +} from "./leasemsg"; +import { MsgUpdateParams, MsgUpdateParamsResponse } from "./paramsmsg"; + +/** Msg defines the market Msg service */ +export interface Msg { + /** CreateBid defines a method to create a bid given proper inputs. */ + CreateBid(request: MsgCreateBid): Promise; + /** CloseBid defines a method to close a bid given proper inputs. */ + CloseBid(request: MsgCloseBid): Promise; + /** WithdrawLease withdraws accrued funds from the lease payment */ + WithdrawLease(request: MsgWithdrawLease): Promise; + /** CreateLease creates a new lease */ + CreateLease(request: MsgCreateLease): Promise; + /** CloseLease defines a method to close an order given proper inputs. */ + CloseLease(request: MsgCloseLease): Promise; + /** + * UpdateParams defines a governance operation for updating the x/market module + * parameters. The authority is hard-coded to the x/gov module account. 
+ * + * Since: akash v1.0.0 + */ + UpdateParams(request: MsgUpdateParams): Promise<MsgUpdateParamsResponse>; +} + +export const MsgServiceName = "akash.market.v1beta5.Msg"; +export class MsgClientImpl implements Msg { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || MsgServiceName; + this.rpc = rpc; + this.CreateBid = this.CreateBid.bind(this); + this.CloseBid = this.CloseBid.bind(this); + this.WithdrawLease = this.WithdrawLease.bind(this); + this.CreateLease = this.CreateLease.bind(this); + this.CloseLease = this.CloseLease.bind(this); + this.UpdateParams = this.UpdateParams.bind(this); + } + CreateBid(request: MsgCreateBid): Promise<MsgCreateBidResponse> { + const data = MsgCreateBid.encode(request).finish(); + const promise = this.rpc.request(this.service, "CreateBid", data); + return promise.then((data) => + MsgCreateBidResponse.decode(new BinaryReader(data)), + ); + } + + CloseBid(request: MsgCloseBid): Promise<MsgCloseBidResponse> { + const data = MsgCloseBid.encode(request).finish(); + const promise = this.rpc.request(this.service, "CloseBid", data); + return promise.then((data) => + MsgCloseBidResponse.decode(new BinaryReader(data)), + ); + } + + WithdrawLease(request: MsgWithdrawLease): Promise<MsgWithdrawLeaseResponse> { + const data = MsgWithdrawLease.encode(request).finish(); + const promise = this.rpc.request(this.service, "WithdrawLease", data); + return promise.then((data) => + MsgWithdrawLeaseResponse.decode(new BinaryReader(data)), + ); + } + + CreateLease(request: MsgCreateLease): Promise<MsgCreateLeaseResponse> { + const data = MsgCreateLease.encode(request).finish(); + const promise = this.rpc.request(this.service, "CreateLease", data); + return promise.then((data) => + MsgCreateLeaseResponse.decode(new BinaryReader(data)), + ); + } + + CloseLease(request: MsgCloseLease): Promise<MsgCloseLeaseResponse> { + const data = MsgCloseLease.encode(request).finish(); + const promise = this.rpc.request(this.service, "CloseLease", data); + return promise.then((data) => + MsgCloseLeaseResponse.decode(new BinaryReader(data)), + ); + } + + UpdateParams(request: MsgUpdateParams): Promise<MsgUpdateParamsResponse> { + const data = MsgUpdateParams.encode(request).finish(); + const promise = this.rpc.request(this.service, "UpdateParams", data); + return promise.then((data) => + MsgUpdateParamsResponse.decode(new BinaryReader(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise<Uint8Array>; +} diff --git a/ts/src/generated/akash/provider/lease/v1/service.grpc-js.ts b/ts/src/generated/akash/provider/lease/v1/service.grpc-js.ts index a6bff111..ff30aab3 100644 --- a/ts/src/generated/akash/provider/lease/v1/service.grpc-js.ts +++ b/ts/src/generated/akash/provider/lease/v1/service.grpc-js.ts @@ -1,4 +1,11 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT.
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/provider/lease/v1/service.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import { ChannelCredentials, Client, @@ -16,10 +23,9 @@ import type { UntypedServiceImplementation, } from "@grpc/grpc-js"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../../typeRegistry"; -import { Group } from "../../../manifest/v2beta2/group"; -import { LeaseID } from "../../../market/v1beta4/lease"; +import { Group } from "../../../manifest/v2beta3/group"; +import { LeaseID } from "../../../market/v1/lease"; export const protobufPackage = "akash.provider.lease.v1"; @@ -129,13 +135,16 @@ function createBaseLeaseServiceStatus(): LeaseServiceStatus { }; } -export const LeaseServiceStatus = { +export const LeaseServiceStatus: MessageFns< + LeaseServiceStatus, + "akash.provider.lease.v1.LeaseServiceStatus" +> = { $type: "akash.provider.lease.v1.LeaseServiceStatus" as const, encode( message: LeaseServiceStatus, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.available !== 0) { writer.uint32(8).int32(message.available); } @@ -146,7 +155,7 @@ export const LeaseServiceStatus = { writer.uint32(26).string(v!); } if (!message.observedGeneration.equals(Long.ZERO)) { - writer.uint32(32).int64(message.observedGeneration); + writer.uint32(32).int64(message.observedGeneration.toString()); } if (message.replicas !== 0) { writer.uint32(40).int32(message.replicas); @@ -163,9 +172,12 @@ export const LeaseServiceStatus = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): LeaseServiceStatus { + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): LeaseServiceStatus { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseLeaseServiceStatus(); while (reader.pos < end) { @@ -197,7 +209,9 @@ export const LeaseServiceStatus = { break; } - message.observedGeneration = reader.int64() as Long; + message.observedGeneration = Long.fromString( + reader.int64().toString(), + ); continue; case 5: if (tag !== 40) { @@ -231,7 +245,7 @@ export const LeaseServiceStatus = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -326,13 +340,16 @@ function createBaseLeaseIPStatus(): LeaseIPStatus { }; } -export const LeaseIPStatus = { +export const LeaseIPStatus: MessageFns< + LeaseIPStatus, + "akash.provider.lease.v1.LeaseIPStatus" +> = { $type: "akash.provider.lease.v1.LeaseIPStatus" as const, encode( message: LeaseIPStatus, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.port !== 0) { writer.uint32(8).uint32(message.port); } @@ -348,9 +365,9 @@ export const LeaseIPStatus = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): LeaseIPStatus { + decode(input: BinaryReader | Uint8Array, length?: number): LeaseIPStatus { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseLeaseIPStatus(); while (reader.pos < end) { @@ -388,7 +405,7 @@ export const LeaseIPStatus = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -450,13 +467,16 @@ function createBaseForwarderPortStatus(): ForwarderPortStatus { }; } -export const ForwarderPortStatus = { +export const ForwarderPortStatus: MessageFns< + ForwarderPortStatus, + "akash.provider.lease.v1.ForwarderPortStatus" +> = { $type: "akash.provider.lease.v1.ForwarderPortStatus" as const, encode( message: ForwarderPortStatus, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.host !== "") { writer.uint32(10).string(message.host); } @@ -475,9 +495,12 @@ export const ForwarderPortStatus = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ForwarderPortStatus { + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): ForwarderPortStatus { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseForwarderPortStatus(); while (reader.pos < end) { @@ -522,7 +545,7 @@ export const ForwarderPortStatus = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -586,13 +609,16 @@ function createBaseServiceStatus(): ServiceStatus { }; } -export const ServiceStatus = { +export const ServiceStatus: MessageFns< + ServiceStatus, + "akash.provider.lease.v1.ServiceStatus" +> = { $type: "akash.provider.lease.v1.ServiceStatus" as const, encode( message: ServiceStatus, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.name !== "") { writer.uint32(10).string(message.name); } @@ -600,20 +626,20 @@ export const ServiceStatus = { LeaseServiceStatus.encode( message.status, writer.uint32(18).fork(), - ).ldelim(); + ).join(); } for (const v of message.ports) { - ForwarderPortStatus.encode(v!, writer.uint32(26).fork()).ldelim(); + ForwarderPortStatus.encode(v!, writer.uint32(26).fork()).join(); } for (const v of message.ips) { - LeaseIPStatus.encode(v!, writer.uint32(34).fork()).ldelim(); + LeaseIPStatus.encode(v!, writer.uint32(34).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceStatus { + decode(input: BinaryReader | Uint8Array, length?: number): ServiceStatus { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseServiceStatus(); while (reader.pos < end) { @@ -653,7 +679,7 @@ export const ServiceStatus = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -718,25 +744,31 @@ function createBaseSendManifestRequest(): SendManifestRequest { }; } -export const SendManifestRequest = { +export const SendManifestRequest: MessageFns< + SendManifestRequest, + "akash.provider.lease.v1.SendManifestRequest" +> = { $type: "akash.provider.lease.v1.SendManifestRequest" as const, encode( message: SendManifestRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.leaseId !== undefined) { - LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); + LeaseID.encode(message.leaseId, writer.uint32(10).fork()).join(); } for (const v of message.manifest) { - Group.encode(v!, writer.uint32(18).fork()).ldelim(); + Group.encode(v!, writer.uint32(18).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): SendManifestRequest { + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): SendManifestRequest { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseSendManifestRequest(); while (reader.pos < end) { @@ -760,7 +792,7 @@ export const SendManifestRequest = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -808,22 +840,25 @@ function createBaseSendManifestResponse(): SendManifestResponse { return { $type: "akash.provider.lease.v1.SendManifestResponse" }; } -export const SendManifestResponse = { +export const SendManifestResponse: MessageFns< + SendManifestResponse, + "akash.provider.lease.v1.SendManifestResponse" +> = { $type: "akash.provider.lease.v1.SendManifestResponse" as const, encode( _: SendManifestResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { return writer; }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): SendManifestResponse { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseSendManifestResponse(); while (reader.pos < end) { @@ -833,7 +868,7 @@ export const SendManifestResponse = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -866,15 +901,18 @@ function createBaseServiceLogsRequest(): ServiceLogsRequest { }; } -export const ServiceLogsRequest = { +export const ServiceLogsRequest: MessageFns< + ServiceLogsRequest, + "akash.provider.lease.v1.ServiceLogsRequest" +> = { $type: "akash.provider.lease.v1.ServiceLogsRequest" as const, encode( message: ServiceLogsRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.leaseId !== undefined) { - LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); + LeaseID.encode(message.leaseId, writer.uint32(10).fork()).join(); } for (const v of message.services) { writer.uint32(18).string(v!); @@ -882,9 +920,12 @@ export const ServiceLogsRequest = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceLogsRequest { + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): ServiceLogsRequest { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseServiceLogsRequest(); while (reader.pos < end) { @@ -908,7 +949,7 @@ export const ServiceLogsRequest = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -960,13 +1001,16 @@ function createBaseServiceLogs(): ServiceLogs { }; } -export const ServiceLogs = { +export const ServiceLogs: MessageFns< + ServiceLogs, + "akash.provider.lease.v1.ServiceLogs" +> = { $type: "akash.provider.lease.v1.ServiceLogs" as const, encode( message: ServiceLogs, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.name !== "") { writer.uint32(10).string(message.name); } @@ -976,9 +1020,9 @@ export const ServiceLogs = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceLogs { + decode(input: BinaryReader | Uint8Array, length?: number): ServiceLogs { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseServiceLogs(); while (reader.pos < end) { @@ -1002,7 +1046,7 @@ export const ServiceLogs = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -1045,22 +1089,28 @@ function createBaseServiceLogsResponse(): ServiceLogsResponse { return { $type: "akash.provider.lease.v1.ServiceLogsResponse", services: [] }; } -export const ServiceLogsResponse = { +export const ServiceLogsResponse: MessageFns< + ServiceLogsResponse, + "akash.provider.lease.v1.ServiceLogsResponse" +> = { $type: "akash.provider.lease.v1.ServiceLogsResponse" as const, encode( message: ServiceLogsResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { for (const v of message.services) { - ServiceLogs.encode(v!, writer.uint32(10).fork()).ldelim(); + ServiceLogs.encode(v!, writer.uint32(10).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceLogsResponse { + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): ServiceLogsResponse { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseServiceLogsResponse(); while (reader.pos < end) { @@ -1077,7 +1127,7 @@ export const ServiceLogsResponse = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -1116,22 +1166,25 @@ function createBaseShellRequest(): ShellRequest { return { $type: "akash.provider.lease.v1.ShellRequest", leaseId: undefined }; } -export const ShellRequest = { +export const ShellRequest: MessageFns< + ShellRequest, + "akash.provider.lease.v1.ShellRequest" +> = { $type: "akash.provider.lease.v1.ShellRequest" as const, encode( message: ShellRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.leaseId !== undefined) { - LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); + LeaseID.encode(message.leaseId, writer.uint32(10).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ShellRequest { + decode(input: BinaryReader | Uint8Array, length?: number): ShellRequest { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseShellRequest(); while (reader.pos < end) { @@ -1148,7 +1201,7 @@ export const ShellRequest = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -1193,15 +1246,18 @@ function createBaseServiceStatusRequest(): ServiceStatusRequest { }; } -export const ServiceStatusRequest = { +export const ServiceStatusRequest: MessageFns< + ServiceStatusRequest, + "akash.provider.lease.v1.ServiceStatusRequest" +> = { $type: "akash.provider.lease.v1.ServiceStatusRequest" as const, encode( message: ServiceStatusRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.leaseId !== undefined) { - LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); + LeaseID.encode(message.leaseId, writer.uint32(10).fork()).join(); } for (const v of message.services) { writer.uint32(18).string(v!); @@ -1210,11 +1266,11 @@ export const ServiceStatusRequest = { }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): ServiceStatusRequest { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseServiceStatusRequest(); while (reader.pos < end) { @@ -1238,7 +1294,7 @@ export const ServiceStatusRequest = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -1289,25 +1345,28 @@ function createBaseServiceStatusResponse(): ServiceStatusResponse { }; } -export const ServiceStatusResponse = { +export const ServiceStatusResponse: MessageFns< + ServiceStatusResponse, + "akash.provider.lease.v1.ServiceStatusResponse" +> = { $type: "akash.provider.lease.v1.ServiceStatusResponse" as const, encode( message: ServiceStatusResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { for (const v of message.services) { - ServiceStatus.encode(v!, writer.uint32(10).fork()).ldelim(); + ServiceStatus.encode(v!, writer.uint32(10).fork()).join(); } return writer; }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): ServiceStatusResponse { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseServiceStatusResponse(); while (reader.pos < end) { @@ -1324,7 +1383,7 @@ export const ServiceStatusResponse = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -1650,11 +1709,16 @@ export type DeepPartial = T extends Builtin ? 
{ [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +export interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/provider/lease/v1/service.ts b/ts/src/generated/akash/provider/lease/v1/service.ts index 5ac9f7e1..690a8f4b 100644 --- a/ts/src/generated/akash/provider/lease/v1/service.ts +++ b/ts/src/generated/akash/provider/lease/v1/service.ts @@ -1,11 +1,17 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/provider/lease/v1/service.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { Observable } from "rxjs"; import { map } from "rxjs/operators"; import { messageTypeRegistry } from "../../../../typeRegistry"; -import { Group } from "../../../manifest/v2beta2/group"; -import { LeaseID } from "../../../market/v1beta4/lease"; +import { Group } from "../../../manifest/v2beta3/group"; +import { LeaseID } from "../../../market/v1/lease"; /** LeaseServiceStatus */ export interface LeaseServiceStatus { @@ -113,13 +119,16 @@ function createBaseLeaseServiceStatus(): LeaseServiceStatus { }; } -export const LeaseServiceStatus = { +export const LeaseServiceStatus: MessageFns< + LeaseServiceStatus, + "akash.provider.lease.v1.LeaseServiceStatus" +> = { $type: "akash.provider.lease.v1.LeaseServiceStatus" as const, encode( message: LeaseServiceStatus, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.available !== 0) { writer.uint32(8).int32(message.available); } @@ -130,7 +139,7 @@ export const LeaseServiceStatus = { writer.uint32(26).string(v!); } if (!message.observedGeneration.equals(Long.ZERO)) { - writer.uint32(32).int64(message.observedGeneration); + writer.uint32(32).int64(message.observedGeneration.toString()); } if (message.replicas !== 0) { writer.uint32(40).int32(message.replicas); @@ -147,9 +156,12 @@ export const LeaseServiceStatus = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): LeaseServiceStatus { + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): LeaseServiceStatus { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseLeaseServiceStatus(); while (reader.pos < end) { @@ -181,7 +193,9 @@ export const LeaseServiceStatus = { break; } - message.observedGeneration = reader.int64() as Long; + message.observedGeneration = Long.fromString( + reader.int64().toString(), + ); continue; case 5: if (tag !== 40) { @@ -215,7 +229,7 @@ export const LeaseServiceStatus = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -310,13 +324,16 @@ function createBaseLeaseIPStatus(): LeaseIPStatus { }; } -export const LeaseIPStatus = { +export const LeaseIPStatus: MessageFns< + LeaseIPStatus, + "akash.provider.lease.v1.LeaseIPStatus" +> = { $type: "akash.provider.lease.v1.LeaseIPStatus" as const, encode( message: LeaseIPStatus, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.port !== 0) { writer.uint32(8).uint32(message.port); } @@ -332,9 +349,9 @@ export const LeaseIPStatus = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): LeaseIPStatus { + decode(input: BinaryReader | Uint8Array, length?: number): LeaseIPStatus { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseLeaseIPStatus(); while (reader.pos < end) { @@ -372,7 +389,7 @@ export const LeaseIPStatus = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -434,13 +451,16 @@ function createBaseForwarderPortStatus(): ForwarderPortStatus { }; } -export const ForwarderPortStatus = { +export const ForwarderPortStatus: MessageFns< + ForwarderPortStatus, + "akash.provider.lease.v1.ForwarderPortStatus" +> = { $type: "akash.provider.lease.v1.ForwarderPortStatus" as const, encode( message: ForwarderPortStatus, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.host !== "") { writer.uint32(10).string(message.host); } @@ -459,9 +479,12 @@ export const ForwarderPortStatus = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ForwarderPortStatus { + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): ForwarderPortStatus { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseForwarderPortStatus(); while (reader.pos < end) { @@ -506,7 +529,7 @@ export const ForwarderPortStatus = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -570,13 +593,16 @@ function createBaseServiceStatus(): ServiceStatus { }; } -export const ServiceStatus = { +export const ServiceStatus: MessageFns< + ServiceStatus, + "akash.provider.lease.v1.ServiceStatus" +> = { $type: "akash.provider.lease.v1.ServiceStatus" as const, encode( message: ServiceStatus, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.name !== "") { writer.uint32(10).string(message.name); } @@ -584,20 +610,20 @@ export const ServiceStatus = { LeaseServiceStatus.encode( message.status, writer.uint32(18).fork(), - ).ldelim(); + ).join(); } for (const v of message.ports) { - ForwarderPortStatus.encode(v!, writer.uint32(26).fork()).ldelim(); + ForwarderPortStatus.encode(v!, writer.uint32(26).fork()).join(); } for (const v of message.ips) { - LeaseIPStatus.encode(v!, writer.uint32(34).fork()).ldelim(); + LeaseIPStatus.encode(v!, writer.uint32(34).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceStatus { + decode(input: BinaryReader | Uint8Array, length?: number): ServiceStatus { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseServiceStatus(); while (reader.pos < end) { @@ -637,7 +663,7 @@ export const ServiceStatus = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -702,25 +728,31 @@ function createBaseSendManifestRequest(): SendManifestRequest { }; } -export const SendManifestRequest = { +export const SendManifestRequest: MessageFns< + SendManifestRequest, + "akash.provider.lease.v1.SendManifestRequest" +> = { $type: "akash.provider.lease.v1.SendManifestRequest" as const, encode( message: SendManifestRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.leaseId !== undefined) { - LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); + LeaseID.encode(message.leaseId, writer.uint32(10).fork()).join(); } for (const v of message.manifest) { - Group.encode(v!, writer.uint32(18).fork()).ldelim(); + Group.encode(v!, writer.uint32(18).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): SendManifestRequest { + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): SendManifestRequest { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseSendManifestRequest(); while (reader.pos < end) { @@ -744,7 +776,7 @@ export const SendManifestRequest = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -792,22 +824,25 @@ function createBaseSendManifestResponse(): SendManifestResponse { return { $type: "akash.provider.lease.v1.SendManifestResponse" }; } -export const SendManifestResponse = { +export const SendManifestResponse: MessageFns< + SendManifestResponse, + "akash.provider.lease.v1.SendManifestResponse" +> = { $type: "akash.provider.lease.v1.SendManifestResponse" as const, encode( _: SendManifestResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { return writer; }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): SendManifestResponse { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseSendManifestResponse(); while (reader.pos < end) { @@ -817,7 +852,7 @@ export const SendManifestResponse = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -850,15 +885,18 @@ function createBaseServiceLogsRequest(): ServiceLogsRequest { }; } -export const ServiceLogsRequest = { +export const ServiceLogsRequest: MessageFns< + ServiceLogsRequest, + "akash.provider.lease.v1.ServiceLogsRequest" +> = { $type: "akash.provider.lease.v1.ServiceLogsRequest" as const, encode( message: ServiceLogsRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.leaseId !== undefined) { - LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); + LeaseID.encode(message.leaseId, writer.uint32(10).fork()).join(); } for (const v of message.services) { writer.uint32(18).string(v!); @@ -866,9 +904,12 @@ export const ServiceLogsRequest = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceLogsRequest { + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): ServiceLogsRequest { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseServiceLogsRequest(); while (reader.pos < end) { @@ -892,7 +933,7 @@ export const ServiceLogsRequest = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -944,13 +985,16 @@ function createBaseServiceLogs(): ServiceLogs { }; } -export const ServiceLogs = { +export const ServiceLogs: MessageFns< + ServiceLogs, + "akash.provider.lease.v1.ServiceLogs" +> = { $type: "akash.provider.lease.v1.ServiceLogs" as const, encode( message: ServiceLogs, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.name !== "") { writer.uint32(10).string(message.name); } @@ -960,9 +1004,9 @@ export const ServiceLogs = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceLogs { + decode(input: BinaryReader | Uint8Array, length?: number): ServiceLogs { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseServiceLogs(); while (reader.pos < end) { @@ -986,7 +1030,7 @@ export const ServiceLogs = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -1029,22 +1073,28 @@ function createBaseServiceLogsResponse(): ServiceLogsResponse { return { $type: "akash.provider.lease.v1.ServiceLogsResponse", services: [] }; } -export const ServiceLogsResponse = { +export const ServiceLogsResponse: MessageFns< + ServiceLogsResponse, + "akash.provider.lease.v1.ServiceLogsResponse" +> = { $type: "akash.provider.lease.v1.ServiceLogsResponse" as const, encode( message: ServiceLogsResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { for (const v of message.services) { - ServiceLogs.encode(v!, writer.uint32(10).fork()).ldelim(); + ServiceLogs.encode(v!, writer.uint32(10).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceLogsResponse { + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): ServiceLogsResponse { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseServiceLogsResponse(); while (reader.pos < end) { @@ -1061,7 +1111,7 @@ export const ServiceLogsResponse = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -1100,22 +1150,25 @@ function createBaseShellRequest(): ShellRequest { return { $type: "akash.provider.lease.v1.ShellRequest", leaseId: undefined }; } -export const ShellRequest = { +export const ShellRequest: MessageFns< + ShellRequest, + "akash.provider.lease.v1.ShellRequest" +> = { $type: "akash.provider.lease.v1.ShellRequest" as const, encode( message: ShellRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.leaseId !== undefined) { - LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); + LeaseID.encode(message.leaseId, writer.uint32(10).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ShellRequest { + decode(input: BinaryReader | Uint8Array, length?: number): ShellRequest { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseShellRequest(); while (reader.pos < end) { @@ -1132,7 +1185,7 @@ export const ShellRequest = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -1177,15 +1230,18 @@ function createBaseServiceStatusRequest(): ServiceStatusRequest { }; } -export const ServiceStatusRequest = { +export const ServiceStatusRequest: MessageFns< + ServiceStatusRequest, + "akash.provider.lease.v1.ServiceStatusRequest" +> = { $type: "akash.provider.lease.v1.ServiceStatusRequest" as const, encode( message: ServiceStatusRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.leaseId !== undefined) { - LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); + LeaseID.encode(message.leaseId, writer.uint32(10).fork()).join(); } for (const v of message.services) { writer.uint32(18).string(v!); @@ -1194,11 +1250,11 @@ export const ServiceStatusRequest = { }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): ServiceStatusRequest { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseServiceStatusRequest(); while (reader.pos < end) { @@ -1222,7 +1278,7 @@ export const ServiceStatusRequest = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -1273,25 +1329,28 @@ function createBaseServiceStatusResponse(): ServiceStatusResponse { }; } -export const ServiceStatusResponse = { +export const ServiceStatusResponse: MessageFns< + ServiceStatusResponse, + "akash.provider.lease.v1.ServiceStatusResponse" +> = { $type: "akash.provider.lease.v1.ServiceStatusResponse" as const, encode( message: ServiceStatusResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { for (const v of message.services) { - ServiceStatus.encode(v!, writer.uint32(10).fork()).ldelim(); + ServiceStatus.encode(v!, writer.uint32(10).fork()).join(); } return writer; }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): ServiceStatusResponse { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseServiceStatusResponse(); while (reader.pos < end) { @@ -1308,7 +1367,7 @@ export const ServiceStatusResponse = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -1396,7 +1455,7 @@ export class LeaseRPCClientImpl implements LeaseRPC { const data = SendManifestRequest.encode(request).finish(); const promise = this.rpc.request(this.service, "SendManifest", data); return promise.then((data) => - SendManifestResponse.decode(_m0.Reader.create(data)), + SendManifestResponse.decode(new BinaryReader(data)), ); } @@ -1404,7 +1463,7 @@ export class LeaseRPCClientImpl implements LeaseRPC { const data = ServiceStatusRequest.encode(request).finish(); const promise = this.rpc.request(this.service, "ServiceStatus", data); return promise.then((data) => - ServiceStatusResponse.decode(_m0.Reader.create(data)), + ServiceStatusResponse.decode(new BinaryReader(data)), ); } @@ -1418,7 +1477,7 @@ export class LeaseRPCClientImpl implements LeaseRPC { data, ); return result.pipe( - map((data) => ServiceStatusResponse.decode(_m0.Reader.create(data))), + map((data) => ServiceStatusResponse.decode(new BinaryReader(data))), ); } @@ -1426,7 +1485,7 @@ export class LeaseRPCClientImpl implements LeaseRPC { const data = ServiceLogsRequest.encode(request).finish(); const promise = this.rpc.request(this.service, "ServiceLogs", data); return promise.then((data) => - ServiceLogsResponse.decode(_m0.Reader.create(data)), + ServiceLogsResponse.decode(new BinaryReader(data)), ); } @@ -1440,7 +1499,7 @@ export class LeaseRPCClientImpl implements LeaseRPC { data, ); return result.pipe( - map((data) => ServiceLogsResponse.decode(_m0.Reader.create(data))), + map((data) => ServiceLogsResponse.decode(new BinaryReader(data))), ); } } @@ -1514,11 +1573,16 @@ type DeepPartial = T extends Builtin ? 
{ [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/provider/v1/service.grpc-js.ts b/ts/src/generated/akash/provider/v1/service.grpc-js.ts index 675a62e9..d63c70db 100644 --- a/ts/src/generated/akash/provider/v1/service.grpc-js.ts +++ b/ts/src/generated/akash/provider/v1/service.grpc-js.ts @@ -1,3 +1,9 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/provider/v1/service.proto + /* eslint-disable */ import { ChannelCredentials, diff --git a/ts/src/generated/akash/provider/v1/service.ts b/ts/src/generated/akash/provider/v1/service.ts index bdef34da..e5018153 100644 --- a/ts/src/generated/akash/provider/v1/service.ts +++ b/ts/src/generated/akash/provider/v1/service.ts @@ -1,5 +1,11 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/provider/v1/service.proto + /* eslint-disable */ -import _m0 from "protobufjs/minimal"; +import { BinaryReader } from "@bufbuild/protobuf/wire"; import { Observable } from "rxjs"; import { map } from "rxjs/operators"; import { Empty } from "../../../google/protobuf/empty"; @@ -34,7 +40,7 @@ export class ProviderRPCClientImpl implements ProviderRPC { GetStatus(request: Empty): Promise { const data = Empty.encode(request).finish(); const promise = this.rpc.request(this.service, "GetStatus", data); - return promise.then((data) => Status.decode(_m0.Reader.create(data))); + return promise.then((data) => Status.decode(new BinaryReader(data))); } StreamStatus(request: Empty): Observable { @@ -44,7 +50,7 @@ export class ProviderRPCClientImpl implements ProviderRPC { "StreamStatus", data, ); - return result.pipe(map((data) => Status.decode(_m0.Reader.create(data)))); + return result.pipe(map((data) => Status.decode(new BinaryReader(data)))); } } diff --git a/ts/src/generated/akash/provider/v1/status.ts b/ts/src/generated/akash/provider/v1/status.ts index 308f5729..a11a7894 100644 --- a/ts/src/generated/akash/provider/v1/status.ts +++ b/ts/src/generated/akash/provider/v1/status.ts @@ -1,8 +1,13 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/provider/v1/status.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { Timestamp } from "../../../google/protobuf/timestamp"; -import { Quantity } from "../../../k8s.io/apimachinery/pkg/api/resource/generated"; +import { Quantity } from "../../../k8s/io/apimachinery/pkg/api/resource/generated"; import { messageTypeRegistry } from "../../../typeRegistry"; import { Cluster } from "../../inventory/v1/cluster"; @@ -76,7 +81,6 @@ export interface Status { bidEngine: BidEngineStatus | undefined; manifest: ManifestStatus | undefined; publicHostnames: string[]; - timestamp: Date | undefined; } function createBaseResourcesMetric(): ResourcesMetric { @@ -90,27 +94,30 @@ function createBaseResourcesMetric(): ResourcesMetric { }; } -export const ResourcesMetric = { +export const ResourcesMetric: MessageFns< + ResourcesMetric, + "akash.provider.v1.ResourcesMetric" +> = { $type: "akash.provider.v1.ResourcesMetric" as const, encode( message: ResourcesMetric, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.cpu !== undefined) { - Quantity.encode(message.cpu, writer.uint32(10).fork()).ldelim(); + Quantity.encode(message.cpu, writer.uint32(10).fork()).join(); } if (message.memory !== undefined) { - Quantity.encode(message.memory, writer.uint32(18).fork()).ldelim(); + Quantity.encode(message.memory, writer.uint32(18).fork()).join(); } if (message.gpu !== undefined) { - Quantity.encode(message.gpu, writer.uint32(26).fork()).ldelim(); + Quantity.encode(message.gpu, writer.uint32(26).fork()).join(); } if (message.ephemeralStorage !== undefined) { Quantity.encode( message.ephemeralStorage, writer.uint32(34).fork(), - ).ldelim(); + ).join(); } Object.entries(message.storage).forEach(([key, value]) => { ResourcesMetric_StorageEntry.encode( @@ -120,14 +127,14 @@ export const ResourcesMetric = { value, }, writer.uint32(42).fork(), - ).ldelim(); + ).join(); }); return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ResourcesMetric { + decode(input: BinaryReader | Uint8Array, length?: number): ResourcesMetric { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseResourcesMetric(); while (reader.pos < end) { @@ -178,7 +185,7 @@ export const ResourcesMetric = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -275,28 +282,31 @@ function createBaseResourcesMetric_StorageEntry(): ResourcesMetric_StorageEntry }; } -export const ResourcesMetric_StorageEntry = { +export const ResourcesMetric_StorageEntry: MessageFns< + ResourcesMetric_StorageEntry, + "akash.provider.v1.ResourcesMetric.StorageEntry" +> = { $type: "akash.provider.v1.ResourcesMetric.StorageEntry" as const, encode( message: ResourcesMetric_StorageEntry, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.key !== "") { writer.uint32(10).string(message.key); } if (message.value !== undefined) { - Quantity.encode(message.value, writer.uint32(18).fork()).ldelim(); + Quantity.encode(message.value, writer.uint32(18).fork()).join(); } return writer; }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): ResourcesMetric_StorageEntry { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseResourcesMetric_StorageEntry(); while (reader.pos < end) { @@ -320,7 +330,7 @@ export const ResourcesMetric_StorageEntry = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -371,22 +381,22 @@ function createBaseLeases(): Leases { return { $type: "akash.provider.v1.Leases", active: 0 }; } -export const Leases = { +export const Leases: MessageFns = { $type: "akash.provider.v1.Leases" as const, encode( message: Leases, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.active !== 0) { writer.uint32(8).uint32(message.active); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Leases { + decode(input: BinaryReader | Uint8Array, length?: number): Leases { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseLeases(); while (reader.pos < end) { @@ -403,7 +413,7 @@ export const Leases = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -443,13 +453,16 @@ function createBaseReservationsMetric(): ReservationsMetric { }; } -export const ReservationsMetric = { +export const ReservationsMetric: MessageFns< + ReservationsMetric, + "akash.provider.v1.ReservationsMetric" +> = { $type: "akash.provider.v1.ReservationsMetric" as const, encode( message: ReservationsMetric, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.count !== 0) { writer.uint32(8).uint32(message.count); } @@ -457,14 +470,17 @@ export const ReservationsMetric = { ResourcesMetric.encode( message.resources, writer.uint32(18).fork(), - ).ldelim(); + ).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ReservationsMetric { + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): ReservationsMetric { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseReservationsMetric(); while (reader.pos < end) { @@ -488,7 +504,7 @@ export const ReservationsMetric = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -538,31 +554,34 @@ function createBaseReservations(): Reservations { }; } -export const Reservations = { +export const Reservations: MessageFns< + Reservations, + "akash.provider.v1.Reservations" +> = { $type: "akash.provider.v1.Reservations" as const, encode( message: Reservations, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.pending !== undefined) { ReservationsMetric.encode( message.pending, writer.uint32(10).fork(), - ).ldelim(); + ).join(); } if (message.active !== undefined) { ReservationsMetric.encode( message.active, writer.uint32(18).fork(), - ).ldelim(); + ).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Reservations { + decode(input: BinaryReader | Uint8Array, length?: number): Reservations { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseReservations(); while (reader.pos < end) { @@ -586,7 +605,7 @@ export const Reservations = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -641,28 +660,28 @@ function createBaseInventory(): Inventory { }; } -export const Inventory = { +export const Inventory: MessageFns = { $type: "akash.provider.v1.Inventory" as const, encode( message: Inventory, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.cluster !== undefined) { - Cluster.encode(message.cluster, writer.uint32(10).fork()).ldelim(); + Cluster.encode(message.cluster, writer.uint32(10).fork()).join(); } if (message.reservations !== undefined) { Reservations.encode( message.reservations, writer.uint32(18).fork(), - ).ldelim(); + ).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Inventory { + decode(input: BinaryReader | Uint8Array, length?: number): Inventory { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseInventory(); while (reader.pos < end) { @@ -686,7 +705,7 @@ export const Inventory = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -741,25 +760,28 @@ function createBaseClusterStatus(): ClusterStatus { }; } -export const ClusterStatus = { +export const ClusterStatus: MessageFns< + ClusterStatus, + "akash.provider.v1.ClusterStatus" +> = { $type: "akash.provider.v1.ClusterStatus" as const, encode( message: ClusterStatus, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.leases !== undefined) { - Leases.encode(message.leases, writer.uint32(10).fork()).ldelim(); + Leases.encode(message.leases, writer.uint32(10).fork()).join(); } if (message.inventory !== undefined) { - Inventory.encode(message.inventory, writer.uint32(18).fork()).ldelim(); + Inventory.encode(message.inventory, writer.uint32(18).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ClusterStatus { + decode(input: BinaryReader | Uint8Array, length?: number): ClusterStatus { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseClusterStatus(); while (reader.pos < end) { @@ -783,7 +805,7 @@ export const ClusterStatus = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -832,22 +854,25 @@ function createBaseBidEngineStatus(): BidEngineStatus { return { $type: "akash.provider.v1.BidEngineStatus", orders: 0 }; } -export const BidEngineStatus = { +export const BidEngineStatus: MessageFns< + BidEngineStatus, + "akash.provider.v1.BidEngineStatus" +> = { $type: "akash.provider.v1.BidEngineStatus" as const, encode( message: BidEngineStatus, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.orders !== 0) { writer.uint32(8).uint32(message.orders); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): BidEngineStatus { + decode(input: BinaryReader | Uint8Array, length?: number): BidEngineStatus { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseBidEngineStatus(); while (reader.pos < end) { @@ -864,7 +889,7 @@ export const BidEngineStatus = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -900,22 +925,25 @@ function createBaseManifestStatus(): ManifestStatus { return { $type: "akash.provider.v1.ManifestStatus", deployments: 0 }; } -export const ManifestStatus = { +export const ManifestStatus: MessageFns< + ManifestStatus, + "akash.provider.v1.ManifestStatus" +> = { $type: "akash.provider.v1.ManifestStatus" as const, encode( message: ManifestStatus, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.deployments !== 0) { writer.uint32(8).uint32(message.deployments); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ManifestStatus { + decode(input: BinaryReader | Uint8Array, length?: number): ManifestStatus { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseManifestStatus(); while (reader.pos < end) { @@ -932,7 +960,7 @@ export const ManifestStatus = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -974,50 +1002,40 @@ function createBaseStatus(): Status { bidEngine: undefined, manifest: undefined, publicHostnames: [], - timestamp: undefined, }; } -export const Status = { +export const Status: MessageFns = { $type: "akash.provider.v1.Status" as const, encode( message: Status, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { for (const v of message.errors) { writer.uint32(10).string(v!); } if (message.cluster !== undefined) { - ClusterStatus.encode(message.cluster, writer.uint32(18).fork()).ldelim(); + ClusterStatus.encode(message.cluster, writer.uint32(18).fork()).join(); } if (message.bidEngine !== undefined) { BidEngineStatus.encode( message.bidEngine, writer.uint32(26).fork(), - ).ldelim(); + ).join(); } if (message.manifest !== undefined) { - ManifestStatus.encode( - message.manifest, - writer.uint32(34).fork(), - ).ldelim(); + ManifestStatus.encode(message.manifest, writer.uint32(34).fork()).join(); } for (const v of message.publicHostnames) { writer.uint32(42).string(v!); } - if (message.timestamp !== undefined) { - Timestamp.encode( - toTimestamp(message.timestamp), - writer.uint32(50).fork(), - ).ldelim(); - } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Status { + decode(input: BinaryReader | Uint8Array, length?: number): Status { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseStatus(); while (reader.pos < end) { @@ -1058,20 +1076,11 @@ export const Status = { message.publicHostnames.push(reader.string()); continue; - case 6: - if (tag !== 50) { - break; - } - - message.timestamp = fromTimestamp( - Timestamp.decode(reader, reader.uint32()), - ); - continue; } if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -1094,9 +1103,6 @@ export const Status = { publicHostnames: globalThis.Array.isArray(object?.publicHostnames) ? object.publicHostnames.map((e: any) => globalThis.String(e)) : [], - timestamp: isSet(object.timestamp) - ? fromJsonTimestamp(object.timestamp) - : undefined, }; }, @@ -1117,9 +1123,6 @@ export const Status = { if (message.publicHostnames?.length) { obj.publicHostnames = message.publicHostnames; } - if (message.timestamp !== undefined) { - obj.timestamp = message.timestamp.toISOString(); - } return obj; }, @@ -1142,7 +1145,6 @@ export const Status = { ? ManifestStatus.fromPartial(object.manifest) : undefined; message.publicHostnames = object.publicHostnames?.map((e) => e) || []; - message.timestamp = object.timestamp ?? undefined; return message; }, }; @@ -1170,37 +1172,6 @@ type DeepPartial = T extends Builtin ? 
{ [K in Exclude]?: DeepPartial } : Partial; -function toTimestamp(date: Date): Timestamp { - const seconds = numberToLong(Math.trunc(date.getTime() / 1_000)); - const nanos = (date.getTime() % 1_000) * 1_000_000; - return { $type: "google.protobuf.Timestamp", seconds, nanos }; -} - -function fromTimestamp(t: Timestamp): Date { - let millis = (t.seconds.toNumber() || 0) * 1_000; - millis += (t.nanos || 0) / 1_000_000; - return new globalThis.Date(millis); -} - -function fromJsonTimestamp(o: any): Date { - if (o instanceof globalThis.Date) { - return o; - } else if (typeof o === "string") { - return new globalThis.Date(o); - } else { - return fromTimestamp(Timestamp.fromJSON(o)); - } -} - -function numberToLong(number: number) { - return Long.fromNumber(number); -} - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isObject(value: any): boolean { return typeof value === "object" && value !== null; } @@ -1208,3 +1179,13 @@ function isObject(value: any): boolean { function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/provider/v1beta1/provider.ts b/ts/src/generated/akash/provider/v1beta1/provider.ts deleted file mode 100644 index f1d31622..00000000 --- a/ts/src/generated/akash/provider/v1beta1/provider.ts +++ /dev/null @@ -1,869 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Attribute } from "../../base/v1beta1/attribute"; - -/** ProviderInfo */ -export interface ProviderInfo { - $type: "akash.provider.v1beta1.ProviderInfo"; - email: string; - website: string; -} - -/** MsgCreateProvider defines an SDK message for creating a provider */ -export interface MsgCreateProvider { - $type: "akash.provider.v1beta1.MsgCreateProvider"; - owner: string; - hostUri: string; - attributes: Attribute[]; - info: ProviderInfo | undefined; -} - -/** MsgCreateProviderResponse defines the Msg/CreateProvider response type. */ -export interface MsgCreateProviderResponse { - $type: "akash.provider.v1beta1.MsgCreateProviderResponse"; -} - -/** MsgUpdateProvider defines an SDK message for updating a provider */ -export interface MsgUpdateProvider { - $type: "akash.provider.v1beta1.MsgUpdateProvider"; - owner: string; - hostUri: string; - attributes: Attribute[]; - info: ProviderInfo | undefined; -} - -/** MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. */ -export interface MsgUpdateProviderResponse { - $type: "akash.provider.v1beta1.MsgUpdateProviderResponse"; -} - -/** MsgDeleteProvider defines an SDK message for deleting a provider */ -export interface MsgDeleteProvider { - $type: "akash.provider.v1beta1.MsgDeleteProvider"; - owner: string; -} - -/** MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. 
*/ -export interface MsgDeleteProviderResponse { - $type: "akash.provider.v1beta1.MsgDeleteProviderResponse"; -} - -/** Provider stores owner and host details */ -export interface Provider { - $type: "akash.provider.v1beta1.Provider"; - owner: string; - hostUri: string; - attributes: Attribute[]; - info: ProviderInfo | undefined; -} - -function createBaseProviderInfo(): ProviderInfo { - return { - $type: "akash.provider.v1beta1.ProviderInfo", - email: "", - website: "", - }; -} - -export const ProviderInfo = { - $type: "akash.provider.v1beta1.ProviderInfo" as const, - - encode( - message: ProviderInfo, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.email !== "") { - writer.uint32(10).string(message.email); - } - if (message.website !== "") { - writer.uint32(18).string(message.website); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ProviderInfo { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseProviderInfo(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.email = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.website = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ProviderInfo { - return { - $type: ProviderInfo.$type, - email: isSet(object.email) ? globalThis.String(object.email) : "", - website: isSet(object.website) ? globalThis.String(object.website) : "", - }; - }, - - toJSON(message: ProviderInfo): unknown { - const obj: any = {}; - if (message.email !== "") { - obj.email = message.email; - } - if (message.website !== "") { - obj.website = message.website; - } - return obj; - }, - - create(base?: DeepPartial): ProviderInfo { - return ProviderInfo.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): ProviderInfo { - const message = createBaseProviderInfo(); - message.email = object.email ?? ""; - message.website = object.website ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(ProviderInfo.$type, ProviderInfo); - -function createBaseMsgCreateProvider(): MsgCreateProvider { - return { - $type: "akash.provider.v1beta1.MsgCreateProvider", - owner: "", - hostUri: "", - attributes: [], - info: undefined, - }; -} - -export const MsgCreateProvider = { - $type: "akash.provider.v1beta1.MsgCreateProvider" as const, - - encode( - message: MsgCreateProvider, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.hostUri !== "") { - writer.uint32(18).string(message.hostUri); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - if (message.info !== undefined) { - ProviderInfo.encode(message.info, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateProvider { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCreateProvider(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.hostUri = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.info = ProviderInfo.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCreateProvider { - return { - $type: MsgCreateProvider.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - hostUri: isSet(object.hostUri) ? globalThis.String(object.hostUri) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - info: isSet(object.info) ? ProviderInfo.fromJSON(object.info) : undefined, - }; - }, - - toJSON(message: MsgCreateProvider): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.hostUri !== "") { - obj.hostUri = message.hostUri; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - if (message.info !== undefined) { - obj.info = ProviderInfo.toJSON(message.info); - } - return obj; - }, - - create(base?: DeepPartial): MsgCreateProvider { - return MsgCreateProvider.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCreateProvider { - const message = createBaseMsgCreateProvider(); - message.owner = object.owner ?? ""; - message.hostUri = object.hostUri ?? ""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - message.info = - object.info !== undefined && object.info !== null - ? ProviderInfo.fromPartial(object.info) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateProvider.$type, MsgCreateProvider); - -function createBaseMsgCreateProviderResponse(): MsgCreateProviderResponse { - return { $type: "akash.provider.v1beta1.MsgCreateProviderResponse" }; -} - -export const MsgCreateProviderResponse = { - $type: "akash.provider.v1beta1.MsgCreateProviderResponse" as const, - - encode( - _: MsgCreateProviderResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateProviderResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateProviderResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCreateProviderResponse { - return { $type: MsgCreateProviderResponse.$type }; - }, - - toJSON(_: MsgCreateProviderResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgCreateProviderResponse { - return MsgCreateProviderResponse.fromPartial(base ?? 
{}); - }, - fromPartial( - _: DeepPartial, - ): MsgCreateProviderResponse { - const message = createBaseMsgCreateProviderResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgCreateProviderResponse.$type, - MsgCreateProviderResponse, -); - -function createBaseMsgUpdateProvider(): MsgUpdateProvider { - return { - $type: "akash.provider.v1beta1.MsgUpdateProvider", - owner: "", - hostUri: "", - attributes: [], - info: undefined, - }; -} - -export const MsgUpdateProvider = { - $type: "akash.provider.v1beta1.MsgUpdateProvider" as const, - - encode( - message: MsgUpdateProvider, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.hostUri !== "") { - writer.uint32(18).string(message.hostUri); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - if (message.info !== undefined) { - ProviderInfo.encode(message.info, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgUpdateProvider { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgUpdateProvider(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.hostUri = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.info = ProviderInfo.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgUpdateProvider { - return { - $type: MsgUpdateProvider.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - hostUri: isSet(object.hostUri) ? globalThis.String(object.hostUri) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - info: isSet(object.info) ? ProviderInfo.fromJSON(object.info) : undefined, - }; - }, - - toJSON(message: MsgUpdateProvider): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.hostUri !== "") { - obj.hostUri = message.hostUri; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - if (message.info !== undefined) { - obj.info = ProviderInfo.toJSON(message.info); - } - return obj; - }, - - create(base?: DeepPartial): MsgUpdateProvider { - return MsgUpdateProvider.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgUpdateProvider { - const message = createBaseMsgUpdateProvider(); - message.owner = object.owner ?? ""; - message.hostUri = object.hostUri ?? ""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - message.info = - object.info !== undefined && object.info !== null - ? 
ProviderInfo.fromPartial(object.info) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgUpdateProvider.$type, MsgUpdateProvider); - -function createBaseMsgUpdateProviderResponse(): MsgUpdateProviderResponse { - return { $type: "akash.provider.v1beta1.MsgUpdateProviderResponse" }; -} - -export const MsgUpdateProviderResponse = { - $type: "akash.provider.v1beta1.MsgUpdateProviderResponse" as const, - - encode( - _: MsgUpdateProviderResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgUpdateProviderResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgUpdateProviderResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgUpdateProviderResponse { - return { $type: MsgUpdateProviderResponse.$type }; - }, - - toJSON(_: MsgUpdateProviderResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgUpdateProviderResponse { - return MsgUpdateProviderResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgUpdateProviderResponse { - const message = createBaseMsgUpdateProviderResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgUpdateProviderResponse.$type, - MsgUpdateProviderResponse, -); - -function createBaseMsgDeleteProvider(): MsgDeleteProvider { - return { $type: "akash.provider.v1beta1.MsgDeleteProvider", owner: "" }; -} - -export const MsgDeleteProvider = { - $type: "akash.provider.v1beta1.MsgDeleteProvider" as const, - - encode( - message: MsgDeleteProvider, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgDeleteProvider { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDeleteProvider(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgDeleteProvider { - return { - $type: MsgDeleteProvider.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - }; - }, - - toJSON(message: MsgDeleteProvider): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - return obj; - }, - - create(base?: DeepPartial): MsgDeleteProvider { - return MsgDeleteProvider.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgDeleteProvider { - const message = createBaseMsgDeleteProvider(); - message.owner = object.owner ?? 
""; - return message; - }, -}; - -messageTypeRegistry.set(MsgDeleteProvider.$type, MsgDeleteProvider); - -function createBaseMsgDeleteProviderResponse(): MsgDeleteProviderResponse { - return { $type: "akash.provider.v1beta1.MsgDeleteProviderResponse" }; -} - -export const MsgDeleteProviderResponse = { - $type: "akash.provider.v1beta1.MsgDeleteProviderResponse" as const, - - encode( - _: MsgDeleteProviderResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDeleteProviderResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDeleteProviderResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgDeleteProviderResponse { - return { $type: MsgDeleteProviderResponse.$type }; - }, - - toJSON(_: MsgDeleteProviderResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgDeleteProviderResponse { - return MsgDeleteProviderResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgDeleteProviderResponse { - const message = createBaseMsgDeleteProviderResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgDeleteProviderResponse.$type, - MsgDeleteProviderResponse, -); - -function createBaseProvider(): Provider { - return { - $type: "akash.provider.v1beta1.Provider", - owner: "", - hostUri: "", - attributes: [], - info: undefined, - }; -} - -export const Provider = { - $type: "akash.provider.v1beta1.Provider" as const, - - encode( - message: Provider, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.hostUri !== "") { - writer.uint32(18).string(message.hostUri); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - if (message.info !== undefined) { - ProviderInfo.encode(message.info, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Provider { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseProvider(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.hostUri = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.info = ProviderInfo.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Provider { - return { - $type: Provider.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - hostUri: isSet(object.hostUri) ? globalThis.String(object.hostUri) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? 
object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - info: isSet(object.info) ? ProviderInfo.fromJSON(object.info) : undefined, - }; - }, - - toJSON(message: Provider): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.hostUri !== "") { - obj.hostUri = message.hostUri; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - if (message.info !== undefined) { - obj.info = ProviderInfo.toJSON(message.info); - } - return obj; - }, - - create(base?: DeepPartial): Provider { - return Provider.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Provider { - const message = createBaseProvider(); - message.owner = object.owner ?? ""; - message.hostUri = object.hostUri ?? ""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - message.info = - object.info !== undefined && object.info !== null - ? ProviderInfo.fromPartial(object.info) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(Provider.$type, Provider); - -/** Msg defines the provider Msg service */ -export interface Msg { - /** CreateProvider defines a method that creates a provider given the proper inputs */ - CreateProvider( - request: MsgCreateProvider, - ): Promise; - /** UpdateProvider defines a method that updates a provider given the proper inputs */ - UpdateProvider( - request: MsgUpdateProvider, - ): Promise; - /** DeleteProvider defines a method that deletes a provider given the proper inputs */ - DeleteProvider( - request: MsgDeleteProvider, - ): Promise; -} - -export const MsgServiceName = "akash.provider.v1beta1.Msg"; -export class MsgClientImpl implements Msg { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || MsgServiceName; - this.rpc = rpc; - this.CreateProvider = this.CreateProvider.bind(this); - this.UpdateProvider = this.UpdateProvider.bind(this); - this.DeleteProvider = this.DeleteProvider.bind(this); - } - CreateProvider( - request: MsgCreateProvider, - ): Promise { - const data = MsgCreateProvider.encode(request).finish(); - const promise = this.rpc.request(this.service, "CreateProvider", data); - return promise.then((data) => - MsgCreateProviderResponse.decode(_m0.Reader.create(data)), - ); - } - - UpdateProvider( - request: MsgUpdateProvider, - ): Promise { - const data = MsgUpdateProvider.encode(request).finish(); - const promise = this.rpc.request(this.service, "UpdateProvider", data); - return promise.then((data) => - MsgUpdateProviderResponse.decode(_m0.Reader.create(data)), - ); - } - - DeleteProvider( - request: MsgDeleteProvider, - ): Promise { - const data = MsgDeleteProvider.encode(request).finish(); - const promise = this.rpc.request(this.service, "DeleteProvider", data); - return promise.then((data) => - MsgDeleteProviderResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/provider/v1beta2/genesis.ts b/ts/src/generated/akash/provider/v1beta2/genesis.ts deleted file mode 100644 index b1656b18..00000000 --- a/ts/src/generated/akash/provider/v1beta2/genesis.ts +++ /dev/null @@ -1,108 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Provider } from "./provider"; - -/** GenesisState defines the basic genesis state used by provider module */ -export interface GenesisState { - $type: "akash.provider.v1beta2.GenesisState"; - providers: Provider[]; -} - -function createBaseGenesisState(): GenesisState { - return { $type: "akash.provider.v1beta2.GenesisState", providers: [] }; -} - -export const GenesisState = { - $type: "akash.provider.v1beta2.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.providers) { - Provider.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.providers.push(Provider.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - providers: globalThis.Array.isArray(object?.providers) - ? object.providers.map((e: any) => Provider.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.providers?.length) { - obj.providers = message.providers.map((e) => Provider.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): GenesisState { - return GenesisState.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisState { - const message = createBaseGenesisState(); - message.providers = - object.providers?.map((e) => Provider.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} diff --git a/ts/src/generated/akash/provider/v1beta2/provider.ts b/ts/src/generated/akash/provider/v1beta2/provider.ts deleted file mode 100644 index c8379f6d..00000000 --- a/ts/src/generated/akash/provider/v1beta2/provider.ts +++ /dev/null @@ -1,869 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Attribute } from "../../base/v1beta2/attribute"; - -/** ProviderInfo */ -export interface ProviderInfo { - $type: "akash.provider.v1beta2.ProviderInfo"; - email: string; - website: string; -} - -/** MsgCreateProvider defines an SDK message for creating a provider */ -export interface MsgCreateProvider { - $type: "akash.provider.v1beta2.MsgCreateProvider"; - owner: string; - hostUri: string; - attributes: Attribute[]; - info: ProviderInfo | undefined; -} - -/** MsgCreateProviderResponse defines the Msg/CreateProvider response type. */ -export interface MsgCreateProviderResponse { - $type: "akash.provider.v1beta2.MsgCreateProviderResponse"; -} - -/** MsgUpdateProvider defines an SDK message for updating a provider */ -export interface MsgUpdateProvider { - $type: "akash.provider.v1beta2.MsgUpdateProvider"; - owner: string; - hostUri: string; - attributes: Attribute[]; - info: ProviderInfo | undefined; -} - -/** MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. */ -export interface MsgUpdateProviderResponse { - $type: "akash.provider.v1beta2.MsgUpdateProviderResponse"; -} - -/** MsgDeleteProvider defines an SDK message for deleting a provider */ -export interface MsgDeleteProvider { - $type: "akash.provider.v1beta2.MsgDeleteProvider"; - owner: string; -} - -/** MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. */ -export interface MsgDeleteProviderResponse { - $type: "akash.provider.v1beta2.MsgDeleteProviderResponse"; -} - -/** Provider stores owner and host details */ -export interface Provider { - $type: "akash.provider.v1beta2.Provider"; - owner: string; - hostUri: string; - attributes: Attribute[]; - info: ProviderInfo | undefined; -} - -function createBaseProviderInfo(): ProviderInfo { - return { - $type: "akash.provider.v1beta2.ProviderInfo", - email: "", - website: "", - }; -} - -export const ProviderInfo = { - $type: "akash.provider.v1beta2.ProviderInfo" as const, - - encode( - message: ProviderInfo, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.email !== "") { - writer.uint32(10).string(message.email); - } - if (message.website !== "") { - writer.uint32(18).string(message.website); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ProviderInfo { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseProviderInfo(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.email = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.website = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ProviderInfo { - return { - $type: ProviderInfo.$type, - email: isSet(object.email) ? 
globalThis.String(object.email) : "", - website: isSet(object.website) ? globalThis.String(object.website) : "", - }; - }, - - toJSON(message: ProviderInfo): unknown { - const obj: any = {}; - if (message.email !== "") { - obj.email = message.email; - } - if (message.website !== "") { - obj.website = message.website; - } - return obj; - }, - - create(base?: DeepPartial): ProviderInfo { - return ProviderInfo.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): ProviderInfo { - const message = createBaseProviderInfo(); - message.email = object.email ?? ""; - message.website = object.website ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(ProviderInfo.$type, ProviderInfo); - -function createBaseMsgCreateProvider(): MsgCreateProvider { - return { - $type: "akash.provider.v1beta2.MsgCreateProvider", - owner: "", - hostUri: "", - attributes: [], - info: undefined, - }; -} - -export const MsgCreateProvider = { - $type: "akash.provider.v1beta2.MsgCreateProvider" as const, - - encode( - message: MsgCreateProvider, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.hostUri !== "") { - writer.uint32(18).string(message.hostUri); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - if (message.info !== undefined) { - ProviderInfo.encode(message.info, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateProvider { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateProvider(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.hostUri = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.info = ProviderInfo.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCreateProvider { - return { - $type: MsgCreateProvider.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - hostUri: isSet(object.hostUri) ? globalThis.String(object.hostUri) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - info: isSet(object.info) ? ProviderInfo.fromJSON(object.info) : undefined, - }; - }, - - toJSON(message: MsgCreateProvider): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.hostUri !== "") { - obj.hostUri = message.hostUri; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - if (message.info !== undefined) { - obj.info = ProviderInfo.toJSON(message.info); - } - return obj; - }, - - create(base?: DeepPartial): MsgCreateProvider { - return MsgCreateProvider.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCreateProvider { - const message = createBaseMsgCreateProvider(); - message.owner = object.owner ?? 
""; - message.hostUri = object.hostUri ?? ""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - message.info = - object.info !== undefined && object.info !== null - ? ProviderInfo.fromPartial(object.info) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateProvider.$type, MsgCreateProvider); - -function createBaseMsgCreateProviderResponse(): MsgCreateProviderResponse { - return { $type: "akash.provider.v1beta2.MsgCreateProviderResponse" }; -} - -export const MsgCreateProviderResponse = { - $type: "akash.provider.v1beta2.MsgCreateProviderResponse" as const, - - encode( - _: MsgCreateProviderResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateProviderResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateProviderResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCreateProviderResponse { - return { $type: MsgCreateProviderResponse.$type }; - }, - - toJSON(_: MsgCreateProviderResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgCreateProviderResponse { - return MsgCreateProviderResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgCreateProviderResponse { - const message = createBaseMsgCreateProviderResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgCreateProviderResponse.$type, - MsgCreateProviderResponse, -); - -function createBaseMsgUpdateProvider(): MsgUpdateProvider { - return { - $type: "akash.provider.v1beta2.MsgUpdateProvider", - owner: "", - hostUri: "", - attributes: [], - info: undefined, - }; -} - -export const MsgUpdateProvider = { - $type: "akash.provider.v1beta2.MsgUpdateProvider" as const, - - encode( - message: MsgUpdateProvider, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.hostUri !== "") { - writer.uint32(18).string(message.hostUri); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - if (message.info !== undefined) { - ProviderInfo.encode(message.info, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgUpdateProvider { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgUpdateProvider(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.hostUri = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.info = ProviderInfo.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgUpdateProvider { - return { - $type: MsgUpdateProvider.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - hostUri: isSet(object.hostUri) ? globalThis.String(object.hostUri) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - info: isSet(object.info) ? ProviderInfo.fromJSON(object.info) : undefined, - }; - }, - - toJSON(message: MsgUpdateProvider): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.hostUri !== "") { - obj.hostUri = message.hostUri; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - if (message.info !== undefined) { - obj.info = ProviderInfo.toJSON(message.info); - } - return obj; - }, - - create(base?: DeepPartial): MsgUpdateProvider { - return MsgUpdateProvider.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgUpdateProvider { - const message = createBaseMsgUpdateProvider(); - message.owner = object.owner ?? ""; - message.hostUri = object.hostUri ?? ""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - message.info = - object.info !== undefined && object.info !== null - ? ProviderInfo.fromPartial(object.info) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgUpdateProvider.$type, MsgUpdateProvider); - -function createBaseMsgUpdateProviderResponse(): MsgUpdateProviderResponse { - return { $type: "akash.provider.v1beta2.MsgUpdateProviderResponse" }; -} - -export const MsgUpdateProviderResponse = { - $type: "akash.provider.v1beta2.MsgUpdateProviderResponse" as const, - - encode( - _: MsgUpdateProviderResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgUpdateProviderResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgUpdateProviderResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgUpdateProviderResponse { - return { $type: MsgUpdateProviderResponse.$type }; - }, - - toJSON(_: MsgUpdateProviderResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgUpdateProviderResponse { - return MsgUpdateProviderResponse.fromPartial(base ?? 
{}); - }, - fromPartial( - _: DeepPartial, - ): MsgUpdateProviderResponse { - const message = createBaseMsgUpdateProviderResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgUpdateProviderResponse.$type, - MsgUpdateProviderResponse, -); - -function createBaseMsgDeleteProvider(): MsgDeleteProvider { - return { $type: "akash.provider.v1beta2.MsgDeleteProvider", owner: "" }; -} - -export const MsgDeleteProvider = { - $type: "akash.provider.v1beta2.MsgDeleteProvider" as const, - - encode( - message: MsgDeleteProvider, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgDeleteProvider { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDeleteProvider(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgDeleteProvider { - return { - $type: MsgDeleteProvider.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - }; - }, - - toJSON(message: MsgDeleteProvider): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - return obj; - }, - - create(base?: DeepPartial): MsgDeleteProvider { - return MsgDeleteProvider.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgDeleteProvider { - const message = createBaseMsgDeleteProvider(); - message.owner = object.owner ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(MsgDeleteProvider.$type, MsgDeleteProvider); - -function createBaseMsgDeleteProviderResponse(): MsgDeleteProviderResponse { - return { $type: "akash.provider.v1beta2.MsgDeleteProviderResponse" }; -} - -export const MsgDeleteProviderResponse = { - $type: "akash.provider.v1beta2.MsgDeleteProviderResponse" as const, - - encode( - _: MsgDeleteProviderResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDeleteProviderResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDeleteProviderResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgDeleteProviderResponse { - return { $type: MsgDeleteProviderResponse.$type }; - }, - - toJSON(_: MsgDeleteProviderResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgDeleteProviderResponse { - return MsgDeleteProviderResponse.fromPartial(base ?? 
{}); - }, - fromPartial( - _: DeepPartial, - ): MsgDeleteProviderResponse { - const message = createBaseMsgDeleteProviderResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgDeleteProviderResponse.$type, - MsgDeleteProviderResponse, -); - -function createBaseProvider(): Provider { - return { - $type: "akash.provider.v1beta2.Provider", - owner: "", - hostUri: "", - attributes: [], - info: undefined, - }; -} - -export const Provider = { - $type: "akash.provider.v1beta2.Provider" as const, - - encode( - message: Provider, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.hostUri !== "") { - writer.uint32(18).string(message.hostUri); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - if (message.info !== undefined) { - ProviderInfo.encode(message.info, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Provider { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseProvider(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.hostUri = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.info = ProviderInfo.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Provider { - return { - $type: Provider.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - hostUri: isSet(object.hostUri) ? globalThis.String(object.hostUri) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - info: isSet(object.info) ? ProviderInfo.fromJSON(object.info) : undefined, - }; - }, - - toJSON(message: Provider): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.hostUri !== "") { - obj.hostUri = message.hostUri; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - if (message.info !== undefined) { - obj.info = ProviderInfo.toJSON(message.info); - } - return obj; - }, - - create(base?: DeepPartial): Provider { - return Provider.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Provider { - const message = createBaseProvider(); - message.owner = object.owner ?? ""; - message.hostUri = object.hostUri ?? ""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - message.info = - object.info !== undefined && object.info !== null - ? 
ProviderInfo.fromPartial(object.info) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(Provider.$type, Provider); - -/** Msg defines the provider Msg service */ -export interface Msg { - /** CreateProvider defines a method that creates a provider given the proper inputs */ - CreateProvider( - request: MsgCreateProvider, - ): Promise; - /** UpdateProvider defines a method that updates a provider given the proper inputs */ - UpdateProvider( - request: MsgUpdateProvider, - ): Promise; - /** DeleteProvider defines a method that deletes a provider given the proper inputs */ - DeleteProvider( - request: MsgDeleteProvider, - ): Promise; -} - -export const MsgServiceName = "akash.provider.v1beta2.Msg"; -export class MsgClientImpl implements Msg { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || MsgServiceName; - this.rpc = rpc; - this.CreateProvider = this.CreateProvider.bind(this); - this.UpdateProvider = this.UpdateProvider.bind(this); - this.DeleteProvider = this.DeleteProvider.bind(this); - } - CreateProvider( - request: MsgCreateProvider, - ): Promise { - const data = MsgCreateProvider.encode(request).finish(); - const promise = this.rpc.request(this.service, "CreateProvider", data); - return promise.then((data) => - MsgCreateProviderResponse.decode(_m0.Reader.create(data)), - ); - } - - UpdateProvider( - request: MsgUpdateProvider, - ): Promise { - const data = MsgUpdateProvider.encode(request).finish(); - const promise = this.rpc.request(this.service, "UpdateProvider", data); - return promise.then((data) => - MsgUpdateProviderResponse.decode(_m0.Reader.create(data)), - ); - } - - DeleteProvider( - request: MsgDeleteProvider, - ): Promise { - const data = MsgDeleteProvider.encode(request).finish(); - const promise = this.rpc.request(this.service, "DeleteProvider", data); - return promise.then((data) => - MsgDeleteProviderResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/provider/v1beta2/query.ts b/ts/src/generated/akash/provider/v1beta2/query.ts deleted file mode 100644 index de36ec6d..00000000 --- a/ts/src/generated/akash/provider/v1beta2/query.ts +++ /dev/null @@ -1,443 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Provider } from "./provider"; - -/** QueryProvidersRequest is request type for the Query/Providers RPC method */ -export interface QueryProvidersRequest { - $type: "akash.provider.v1beta2.QueryProvidersRequest"; - pagination: PageRequest | undefined; -} - -/** QueryProvidersResponse is response type for the Query/Providers RPC method */ -export interface QueryProvidersResponse { - $type: "akash.provider.v1beta2.QueryProvidersResponse"; - providers: Provider[]; - pagination: PageResponse | undefined; -} - -/** QueryProviderRequest is request type for the Query/Provider RPC method */ -export interface QueryProviderRequest { - $type: "akash.provider.v1beta2.QueryProviderRequest"; - owner: string; -} - -/** QueryProviderResponse is response type for the Query/Provider RPC method */ -export interface QueryProviderResponse { - $type: "akash.provider.v1beta2.QueryProviderResponse"; - provider: Provider | undefined; -} - -function createBaseQueryProvidersRequest(): QueryProvidersRequest { - return { - $type: "akash.provider.v1beta2.QueryProvidersRequest", - pagination: undefined, - }; -} - -export const QueryProvidersRequest = { - $type: "akash.provider.v1beta2.QueryProvidersRequest" as const, - - encode( - message: QueryProvidersRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryProvidersRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryProvidersRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryProvidersRequest { - return { - $type: QueryProvidersRequest.$type, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryProvidersRequest): unknown { - const obj: any = {}; - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryProvidersRequest { - return QueryProvidersRequest.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryProvidersRequest { - const message = createBaseQueryProvidersRequest(); - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? 
PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryProvidersRequest.$type, QueryProvidersRequest); - -function createBaseQueryProvidersResponse(): QueryProvidersResponse { - return { - $type: "akash.provider.v1beta2.QueryProvidersResponse", - providers: [], - pagination: undefined, - }; -} - -export const QueryProvidersResponse = { - $type: "akash.provider.v1beta2.QueryProvidersResponse" as const, - - encode( - message: QueryProvidersResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.providers) { - Provider.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryProvidersResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryProvidersResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.providers.push(Provider.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryProvidersResponse { - return { - $type: QueryProvidersResponse.$type, - providers: globalThis.Array.isArray(object?.providers) - ? object.providers.map((e: any) => Provider.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryProvidersResponse): unknown { - const obj: any = {}; - if (message.providers?.length) { - obj.providers = message.providers.map((e) => Provider.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryProvidersResponse { - return QueryProvidersResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryProvidersResponse { - const message = createBaseQueryProvidersResponse(); - message.providers = - object.providers?.map((e) => Provider.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryProvidersResponse.$type, QueryProvidersResponse); - -function createBaseQueryProviderRequest(): QueryProviderRequest { - return { $type: "akash.provider.v1beta2.QueryProviderRequest", owner: "" }; -} - -export const QueryProviderRequest = { - $type: "akash.provider.v1beta2.QueryProviderRequest" as const, - - encode( - message: QueryProviderRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryProviderRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryProviderRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryProviderRequest { - return { - $type: QueryProviderRequest.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - }; - }, - - toJSON(message: QueryProviderRequest): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - return obj; - }, - - create(base?: DeepPartial): QueryProviderRequest { - return QueryProviderRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryProviderRequest { - const message = createBaseQueryProviderRequest(); - message.owner = object.owner ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(QueryProviderRequest.$type, QueryProviderRequest); - -function createBaseQueryProviderResponse(): QueryProviderResponse { - return { - $type: "akash.provider.v1beta2.QueryProviderResponse", - provider: undefined, - }; -} - -export const QueryProviderResponse = { - $type: "akash.provider.v1beta2.QueryProviderResponse" as const, - - encode( - message: QueryProviderResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.provider !== undefined) { - Provider.encode(message.provider, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryProviderResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryProviderResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.provider = Provider.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryProviderResponse { - return { - $type: QueryProviderResponse.$type, - provider: isSet(object.provider) - ? Provider.fromJSON(object.provider) - : undefined, - }; - }, - - toJSON(message: QueryProviderResponse): unknown { - const obj: any = {}; - if (message.provider !== undefined) { - obj.provider = Provider.toJSON(message.provider); - } - return obj; - }, - - create(base?: DeepPartial): QueryProviderResponse { - return QueryProviderResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryProviderResponse { - const message = createBaseQueryProviderResponse(); - message.provider = - object.provider !== undefined && object.provider !== null - ? 
Provider.fromPartial(object.provider) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryProviderResponse.$type, QueryProviderResponse); - -/** Query defines the gRPC querier service */ -export interface Query { - /** Providers queries providers */ - Providers(request: QueryProvidersRequest): Promise; - /** Provider queries provider details */ - Provider(request: QueryProviderRequest): Promise; -} - -export const QueryServiceName = "akash.provider.v1beta2.Query"; -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || QueryServiceName; - this.rpc = rpc; - this.Providers = this.Providers.bind(this); - this.Provider = this.Provider.bind(this); - } - Providers(request: QueryProvidersRequest): Promise { - const data = QueryProvidersRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Providers", data); - return promise.then((data) => - QueryProvidersResponse.decode(_m0.Reader.create(data)), - ); - } - - Provider(request: QueryProviderRequest): Promise { - const data = QueryProviderRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Provider", data); - return promise.then((data) => - QueryProviderResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/provider/v1beta3/genesis.ts b/ts/src/generated/akash/provider/v1beta3/genesis.ts deleted file mode 100644 index 7b4eb280..00000000 --- a/ts/src/generated/akash/provider/v1beta3/genesis.ts +++ /dev/null @@ -1,108 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Provider } from "./provider"; - -/** GenesisState defines the basic genesis state used by provider module */ -export interface GenesisState { - $type: "akash.provider.v1beta3.GenesisState"; - providers: Provider[]; -} - -function createBaseGenesisState(): GenesisState { - return { $type: "akash.provider.v1beta3.GenesisState", providers: [] }; -} - -export const GenesisState = { - $type: "akash.provider.v1beta3.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.providers) { - Provider.encode(v!, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.providers.push(Provider.decode(reader, reader.uint32())); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - providers: globalThis.Array.isArray(object?.providers) - ? object.providers.map((e: any) => Provider.fromJSON(e)) - : [], - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.providers?.length) { - obj.providers = message.providers.map((e) => Provider.toJSON(e)); - } - return obj; - }, - - create(base?: DeepPartial): GenesisState { - return GenesisState.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisState { - const message = createBaseGenesisState(); - message.providers = - object.providers?.map((e) => Provider.fromPartial(e)) || []; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} diff --git a/ts/src/generated/akash/provider/v1beta3/provider.ts b/ts/src/generated/akash/provider/v1beta3/provider.ts deleted file mode 100644 index ad5d4da1..00000000 --- a/ts/src/generated/akash/provider/v1beta3/provider.ts +++ /dev/null @@ -1,869 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Attribute } from "../../base/v1beta3/attribute"; - -/** ProviderInfo */ -export interface ProviderInfo { - $type: "akash.provider.v1beta3.ProviderInfo"; - email: string; - website: string; -} - -/** MsgCreateProvider defines an SDK message for creating a provider */ -export interface MsgCreateProvider { - $type: "akash.provider.v1beta3.MsgCreateProvider"; - owner: string; - hostUri: string; - attributes: Attribute[]; - info: ProviderInfo | undefined; -} - -/** MsgCreateProviderResponse defines the Msg/CreateProvider response type. */ -export interface MsgCreateProviderResponse { - $type: "akash.provider.v1beta3.MsgCreateProviderResponse"; -} - -/** MsgUpdateProvider defines an SDK message for updating a provider */ -export interface MsgUpdateProvider { - $type: "akash.provider.v1beta3.MsgUpdateProvider"; - owner: string; - hostUri: string; - attributes: Attribute[]; - info: ProviderInfo | undefined; -} - -/** MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. */ -export interface MsgUpdateProviderResponse { - $type: "akash.provider.v1beta3.MsgUpdateProviderResponse"; -} - -/** MsgDeleteProvider defines an SDK message for deleting a provider */ -export interface MsgDeleteProvider { - $type: "akash.provider.v1beta3.MsgDeleteProvider"; - owner: string; -} - -/** MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. 
*/ -export interface MsgDeleteProviderResponse { - $type: "akash.provider.v1beta3.MsgDeleteProviderResponse"; -} - -/** Provider stores owner and host details */ -export interface Provider { - $type: "akash.provider.v1beta3.Provider"; - owner: string; - hostUri: string; - attributes: Attribute[]; - info: ProviderInfo | undefined; -} - -function createBaseProviderInfo(): ProviderInfo { - return { - $type: "akash.provider.v1beta3.ProviderInfo", - email: "", - website: "", - }; -} - -export const ProviderInfo = { - $type: "akash.provider.v1beta3.ProviderInfo" as const, - - encode( - message: ProviderInfo, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.email !== "") { - writer.uint32(10).string(message.email); - } - if (message.website !== "") { - writer.uint32(18).string(message.website); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): ProviderInfo { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseProviderInfo(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.email = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.website = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): ProviderInfo { - return { - $type: ProviderInfo.$type, - email: isSet(object.email) ? globalThis.String(object.email) : "", - website: isSet(object.website) ? globalThis.String(object.website) : "", - }; - }, - - toJSON(message: ProviderInfo): unknown { - const obj: any = {}; - if (message.email !== "") { - obj.email = message.email; - } - if (message.website !== "") { - obj.website = message.website; - } - return obj; - }, - - create(base?: DeepPartial): ProviderInfo { - return ProviderInfo.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): ProviderInfo { - const message = createBaseProviderInfo(); - message.email = object.email ?? ""; - message.website = object.website ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(ProviderInfo.$type, ProviderInfo); - -function createBaseMsgCreateProvider(): MsgCreateProvider { - return { - $type: "akash.provider.v1beta3.MsgCreateProvider", - owner: "", - hostUri: "", - attributes: [], - info: undefined, - }; -} - -export const MsgCreateProvider = { - $type: "akash.provider.v1beta3.MsgCreateProvider" as const, - - encode( - message: MsgCreateProvider, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.hostUri !== "") { - writer.uint32(18).string(message.hostUri); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - if (message.info !== undefined) { - ProviderInfo.encode(message.info, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateProvider { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseMsgCreateProvider(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.hostUri = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.info = ProviderInfo.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgCreateProvider { - return { - $type: MsgCreateProvider.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - hostUri: isSet(object.hostUri) ? globalThis.String(object.hostUri) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - info: isSet(object.info) ? ProviderInfo.fromJSON(object.info) : undefined, - }; - }, - - toJSON(message: MsgCreateProvider): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.hostUri !== "") { - obj.hostUri = message.hostUri; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - if (message.info !== undefined) { - obj.info = ProviderInfo.toJSON(message.info); - } - return obj; - }, - - create(base?: DeepPartial): MsgCreateProvider { - return MsgCreateProvider.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgCreateProvider { - const message = createBaseMsgCreateProvider(); - message.owner = object.owner ?? ""; - message.hostUri = object.hostUri ?? ""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - message.info = - object.info !== undefined && object.info !== null - ? ProviderInfo.fromPartial(object.info) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgCreateProvider.$type, MsgCreateProvider); - -function createBaseMsgCreateProviderResponse(): MsgCreateProviderResponse { - return { $type: "akash.provider.v1beta3.MsgCreateProviderResponse" }; -} - -export const MsgCreateProviderResponse = { - $type: "akash.provider.v1beta3.MsgCreateProviderResponse" as const, - - encode( - _: MsgCreateProviderResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgCreateProviderResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgCreateProviderResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgCreateProviderResponse { - return { $type: MsgCreateProviderResponse.$type }; - }, - - toJSON(_: MsgCreateProviderResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgCreateProviderResponse { - return MsgCreateProviderResponse.fromPartial(base ?? 
{}); - }, - fromPartial( - _: DeepPartial, - ): MsgCreateProviderResponse { - const message = createBaseMsgCreateProviderResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgCreateProviderResponse.$type, - MsgCreateProviderResponse, -); - -function createBaseMsgUpdateProvider(): MsgUpdateProvider { - return { - $type: "akash.provider.v1beta3.MsgUpdateProvider", - owner: "", - hostUri: "", - attributes: [], - info: undefined, - }; -} - -export const MsgUpdateProvider = { - $type: "akash.provider.v1beta3.MsgUpdateProvider" as const, - - encode( - message: MsgUpdateProvider, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.hostUri !== "") { - writer.uint32(18).string(message.hostUri); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - if (message.info !== undefined) { - ProviderInfo.encode(message.info, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgUpdateProvider { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgUpdateProvider(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.hostUri = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.info = ProviderInfo.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgUpdateProvider { - return { - $type: MsgUpdateProvider.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - hostUri: isSet(object.hostUri) ? globalThis.String(object.hostUri) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - info: isSet(object.info) ? ProviderInfo.fromJSON(object.info) : undefined, - }; - }, - - toJSON(message: MsgUpdateProvider): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.hostUri !== "") { - obj.hostUri = message.hostUri; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - if (message.info !== undefined) { - obj.info = ProviderInfo.toJSON(message.info); - } - return obj; - }, - - create(base?: DeepPartial): MsgUpdateProvider { - return MsgUpdateProvider.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgUpdateProvider { - const message = createBaseMsgUpdateProvider(); - message.owner = object.owner ?? ""; - message.hostUri = object.hostUri ?? ""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - message.info = - object.info !== undefined && object.info !== null - ? 
ProviderInfo.fromPartial(object.info) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(MsgUpdateProvider.$type, MsgUpdateProvider); - -function createBaseMsgUpdateProviderResponse(): MsgUpdateProviderResponse { - return { $type: "akash.provider.v1beta3.MsgUpdateProviderResponse" }; -} - -export const MsgUpdateProviderResponse = { - $type: "akash.provider.v1beta3.MsgUpdateProviderResponse" as const, - - encode( - _: MsgUpdateProviderResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgUpdateProviderResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgUpdateProviderResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgUpdateProviderResponse { - return { $type: MsgUpdateProviderResponse.$type }; - }, - - toJSON(_: MsgUpdateProviderResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgUpdateProviderResponse { - return MsgUpdateProviderResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgUpdateProviderResponse { - const message = createBaseMsgUpdateProviderResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgUpdateProviderResponse.$type, - MsgUpdateProviderResponse, -); - -function createBaseMsgDeleteProvider(): MsgDeleteProvider { - return { $type: "akash.provider.v1beta3.MsgDeleteProvider", owner: "" }; -} - -export const MsgDeleteProvider = { - $type: "akash.provider.v1beta3.MsgDeleteProvider" as const, - - encode( - message: MsgDeleteProvider, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): MsgDeleteProvider { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDeleteProvider(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): MsgDeleteProvider { - return { - $type: MsgDeleteProvider.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - }; - }, - - toJSON(message: MsgDeleteProvider): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - return obj; - }, - - create(base?: DeepPartial): MsgDeleteProvider { - return MsgDeleteProvider.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): MsgDeleteProvider { - const message = createBaseMsgDeleteProvider(); - message.owner = object.owner ?? 
""; - return message; - }, -}; - -messageTypeRegistry.set(MsgDeleteProvider.$type, MsgDeleteProvider); - -function createBaseMsgDeleteProviderResponse(): MsgDeleteProviderResponse { - return { $type: "akash.provider.v1beta3.MsgDeleteProviderResponse" }; -} - -export const MsgDeleteProviderResponse = { - $type: "akash.provider.v1beta3.MsgDeleteProviderResponse" as const, - - encode( - _: MsgDeleteProviderResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): MsgDeleteProviderResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseMsgDeleteProviderResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(_: any): MsgDeleteProviderResponse { - return { $type: MsgDeleteProviderResponse.$type }; - }, - - toJSON(_: MsgDeleteProviderResponse): unknown { - const obj: any = {}; - return obj; - }, - - create( - base?: DeepPartial, - ): MsgDeleteProviderResponse { - return MsgDeleteProviderResponse.fromPartial(base ?? {}); - }, - fromPartial( - _: DeepPartial, - ): MsgDeleteProviderResponse { - const message = createBaseMsgDeleteProviderResponse(); - return message; - }, -}; - -messageTypeRegistry.set( - MsgDeleteProviderResponse.$type, - MsgDeleteProviderResponse, -); - -function createBaseProvider(): Provider { - return { - $type: "akash.provider.v1beta3.Provider", - owner: "", - hostUri: "", - attributes: [], - info: undefined, - }; -} - -export const Provider = { - $type: "akash.provider.v1beta3.Provider" as const, - - encode( - message: Provider, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - if (message.hostUri !== "") { - writer.uint32(18).string(message.hostUri); - } - for (const v of message.attributes) { - Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); - } - if (message.info !== undefined) { - ProviderInfo.encode(message.info, writer.uint32(34).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Provider { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseProvider(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.hostUri = reader.string(); - continue; - case 3: - if (tag !== 26) { - break; - } - - message.attributes.push(Attribute.decode(reader, reader.uint32())); - continue; - case 4: - if (tag !== 34) { - break; - } - - message.info = ProviderInfo.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Provider { - return { - $type: Provider.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - hostUri: isSet(object.hostUri) ? globalThis.String(object.hostUri) : "", - attributes: globalThis.Array.isArray(object?.attributes) - ? 
object.attributes.map((e: any) => Attribute.fromJSON(e)) - : [], - info: isSet(object.info) ? ProviderInfo.fromJSON(object.info) : undefined, - }; - }, - - toJSON(message: Provider): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - if (message.hostUri !== "") { - obj.hostUri = message.hostUri; - } - if (message.attributes?.length) { - obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); - } - if (message.info !== undefined) { - obj.info = ProviderInfo.toJSON(message.info); - } - return obj; - }, - - create(base?: DeepPartial): Provider { - return Provider.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Provider { - const message = createBaseProvider(); - message.owner = object.owner ?? ""; - message.hostUri = object.hostUri ?? ""; - message.attributes = - object.attributes?.map((e) => Attribute.fromPartial(e)) || []; - message.info = - object.info !== undefined && object.info !== null - ? ProviderInfo.fromPartial(object.info) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(Provider.$type, Provider); - -/** Msg defines the provider Msg service */ -export interface Msg { - /** CreateProvider defines a method that creates a provider given the proper inputs */ - CreateProvider( - request: MsgCreateProvider, - ): Promise; - /** UpdateProvider defines a method that updates a provider given the proper inputs */ - UpdateProvider( - request: MsgUpdateProvider, - ): Promise; - /** DeleteProvider defines a method that deletes a provider given the proper inputs */ - DeleteProvider( - request: MsgDeleteProvider, - ): Promise; -} - -export const MsgServiceName = "akash.provider.v1beta3.Msg"; -export class MsgClientImpl implements Msg { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || MsgServiceName; - this.rpc = rpc; - this.CreateProvider = this.CreateProvider.bind(this); - this.UpdateProvider = this.UpdateProvider.bind(this); - this.DeleteProvider = this.DeleteProvider.bind(this); - } - CreateProvider( - request: MsgCreateProvider, - ): Promise { - const data = MsgCreateProvider.encode(request).finish(); - const promise = this.rpc.request(this.service, "CreateProvider", data); - return promise.then((data) => - MsgCreateProviderResponse.decode(_m0.Reader.create(data)), - ); - } - - UpdateProvider( - request: MsgUpdateProvider, - ): Promise { - const data = MsgUpdateProvider.encode(request).finish(); - const promise = this.rpc.request(this.service, "UpdateProvider", data); - return promise.then((data) => - MsgUpdateProviderResponse.decode(_m0.Reader.create(data)), - ); - } - - DeleteProvider( - request: MsgDeleteProvider, - ): Promise { - const data = MsgDeleteProvider.encode(request).finish(); - const promise = this.rpc.request(this.service, "DeleteProvider", data); - return promise.then((data) => - MsgDeleteProviderResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/provider/v1beta3/query.ts b/ts/src/generated/akash/provider/v1beta3/query.ts deleted file mode 100644 index 2e0d8b31..00000000 --- a/ts/src/generated/akash/provider/v1beta3/query.ts +++ /dev/null @@ -1,443 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { - PageRequest, - PageResponse, -} from "../../../cosmos/base/query/v1beta1/pagination"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Provider } from "./provider"; - -/** QueryProvidersRequest is request type for the Query/Providers RPC method */ -export interface QueryProvidersRequest { - $type: "akash.provider.v1beta3.QueryProvidersRequest"; - pagination: PageRequest | undefined; -} - -/** QueryProvidersResponse is response type for the Query/Providers RPC method */ -export interface QueryProvidersResponse { - $type: "akash.provider.v1beta3.QueryProvidersResponse"; - providers: Provider[]; - pagination: PageResponse | undefined; -} - -/** QueryProviderRequest is request type for the Query/Provider RPC method */ -export interface QueryProviderRequest { - $type: "akash.provider.v1beta3.QueryProviderRequest"; - owner: string; -} - -/** QueryProviderResponse is response type for the Query/Provider RPC method */ -export interface QueryProviderResponse { - $type: "akash.provider.v1beta3.QueryProviderResponse"; - provider: Provider | undefined; -} - -function createBaseQueryProvidersRequest(): QueryProvidersRequest { - return { - $type: "akash.provider.v1beta3.QueryProvidersRequest", - pagination: undefined, - }; -} - -export const QueryProvidersRequest = { - $type: "akash.provider.v1beta3.QueryProvidersRequest" as const, - - encode( - message: QueryProvidersRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.pagination !== undefined) { - PageRequest.encode(message.pagination, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryProvidersRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryProvidersRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.pagination = PageRequest.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryProvidersRequest { - return { - $type: QueryProvidersRequest.$type, - pagination: isSet(object.pagination) - ? PageRequest.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryProvidersRequest): unknown { - const obj: any = {}; - if (message.pagination !== undefined) { - obj.pagination = PageRequest.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryProvidersRequest { - return QueryProvidersRequest.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryProvidersRequest { - const message = createBaseQueryProvidersRequest(); - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? 
PageRequest.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryProvidersRequest.$type, QueryProvidersRequest); - -function createBaseQueryProvidersResponse(): QueryProvidersResponse { - return { - $type: "akash.provider.v1beta3.QueryProvidersResponse", - providers: [], - pagination: undefined, - }; -} - -export const QueryProvidersResponse = { - $type: "akash.provider.v1beta3.QueryProvidersResponse" as const, - - encode( - message: QueryProvidersResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.providers) { - Provider.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.pagination !== undefined) { - PageResponse.encode( - message.pagination, - writer.uint32(18).fork(), - ).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryProvidersResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryProvidersResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.providers.push(Provider.decode(reader, reader.uint32())); - continue; - case 2: - if (tag !== 18) { - break; - } - - message.pagination = PageResponse.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryProvidersResponse { - return { - $type: QueryProvidersResponse.$type, - providers: globalThis.Array.isArray(object?.providers) - ? object.providers.map((e: any) => Provider.fromJSON(e)) - : [], - pagination: isSet(object.pagination) - ? PageResponse.fromJSON(object.pagination) - : undefined, - }; - }, - - toJSON(message: QueryProvidersResponse): unknown { - const obj: any = {}; - if (message.providers?.length) { - obj.providers = message.providers.map((e) => Provider.toJSON(e)); - } - if (message.pagination !== undefined) { - obj.pagination = PageResponse.toJSON(message.pagination); - } - return obj; - }, - - create(base?: DeepPartial): QueryProvidersResponse { - return QueryProvidersResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryProvidersResponse { - const message = createBaseQueryProvidersResponse(); - message.providers = - object.providers?.map((e) => Provider.fromPartial(e)) || []; - message.pagination = - object.pagination !== undefined && object.pagination !== null - ? PageResponse.fromPartial(object.pagination) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryProvidersResponse.$type, QueryProvidersResponse); - -function createBaseQueryProviderRequest(): QueryProviderRequest { - return { $type: "akash.provider.v1beta3.QueryProviderRequest", owner: "" }; -} - -export const QueryProviderRequest = { - $type: "akash.provider.v1beta3.QueryProviderRequest" as const, - - encode( - message: QueryProviderRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.owner !== "") { - writer.uint32(10).string(message.owner); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryProviderRequest { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseQueryProviderRequest(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.owner = reader.string(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryProviderRequest { - return { - $type: QueryProviderRequest.$type, - owner: isSet(object.owner) ? globalThis.String(object.owner) : "", - }; - }, - - toJSON(message: QueryProviderRequest): unknown { - const obj: any = {}; - if (message.owner !== "") { - obj.owner = message.owner; - } - return obj; - }, - - create(base?: DeepPartial): QueryProviderRequest { - return QueryProviderRequest.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): QueryProviderRequest { - const message = createBaseQueryProviderRequest(); - message.owner = object.owner ?? ""; - return message; - }, -}; - -messageTypeRegistry.set(QueryProviderRequest.$type, QueryProviderRequest); - -function createBaseQueryProviderResponse(): QueryProviderResponse { - return { - $type: "akash.provider.v1beta3.QueryProviderResponse", - provider: undefined, - }; -} - -export const QueryProviderResponse = { - $type: "akash.provider.v1beta3.QueryProviderResponse" as const, - - encode( - message: QueryProviderResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.provider !== undefined) { - Provider.encode(message.provider, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode( - input: _m0.Reader | Uint8Array, - length?: number, - ): QueryProviderResponse { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseQueryProviderResponse(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.provider = Provider.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): QueryProviderResponse { - return { - $type: QueryProviderResponse.$type, - provider: isSet(object.provider) - ? Provider.fromJSON(object.provider) - : undefined, - }; - }, - - toJSON(message: QueryProviderResponse): unknown { - const obj: any = {}; - if (message.provider !== undefined) { - obj.provider = Provider.toJSON(message.provider); - } - return obj; - }, - - create(base?: DeepPartial): QueryProviderResponse { - return QueryProviderResponse.fromPartial(base ?? {}); - }, - fromPartial( - object: DeepPartial, - ): QueryProviderResponse { - const message = createBaseQueryProviderResponse(); - message.provider = - object.provider !== undefined && object.provider !== null - ? 
Provider.fromPartial(object.provider) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(QueryProviderResponse.$type, QueryProviderResponse); - -/** Query defines the gRPC querier service */ -export interface Query { - /** Providers queries providers */ - Providers(request: QueryProvidersRequest): Promise; - /** Provider queries provider details */ - Provider(request: QueryProviderRequest): Promise; -} - -export const QueryServiceName = "akash.provider.v1beta3.Query"; -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || QueryServiceName; - this.rpc = rpc; - this.Providers = this.Providers.bind(this); - this.Provider = this.Provider.bind(this); - } - Providers(request: QueryProvidersRequest): Promise { - const data = QueryProvidersRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Providers", data); - return promise.then((data) => - QueryProvidersResponse.decode(_m0.Reader.create(data)), - ); - } - - Provider(request: QueryProviderRequest): Promise { - const data = QueryProviderRequest.encode(request).finish(); - const promise = this.rpc.request(this.service, "Provider", data); - return promise.then((data) => - QueryProviderResponse.decode(_m0.Reader.create(data)), - ); - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/provider/v1beta4/event.ts b/ts/src/generated/akash/provider/v1beta4/event.ts new file mode 100644 index 00000000..7b0ffb0e --- /dev/null +++ b/ts/src/generated/akash/provider/v1beta4/event.ts @@ -0,0 +1,285 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/provider/v1beta4/event.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; + +/** EventProviderCreated defines an SDK message for provider created event */ +export interface EventProviderCreated { + $type: "akash.provider.v1beta4.EventProviderCreated"; + owner: string; +} + +/** EventProviderUpdated defines an SDK message for provider updated event */ +export interface EventProviderUpdated { + $type: "akash.provider.v1beta4.EventProviderUpdated"; + owner: string; +} + +/** EventProviderDeleted defines an SDK message for provider deleted event */ +export interface EventProviderDeleted { + $type: "akash.provider.v1beta4.EventProviderDeleted"; + owner: string; +} + +function createBaseEventProviderCreated(): EventProviderCreated { + return { $type: "akash.provider.v1beta4.EventProviderCreated", owner: "" }; +} + +export const EventProviderCreated: MessageFns< + EventProviderCreated, + "akash.provider.v1beta4.EventProviderCreated" +> = { + $type: "akash.provider.v1beta4.EventProviderCreated" as const, + + encode( + message: EventProviderCreated, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): EventProviderCreated { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseEventProviderCreated(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventProviderCreated { + return { + $type: EventProviderCreated.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + }; + }, + + toJSON(message: EventProviderCreated): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + return obj; + }, + + create(base?: DeepPartial): EventProviderCreated { + return EventProviderCreated.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): EventProviderCreated { + const message = createBaseEventProviderCreated(); + message.owner = object.owner ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(EventProviderCreated.$type, EventProviderCreated); + +function createBaseEventProviderUpdated(): EventProviderUpdated { + return { $type: "akash.provider.v1beta4.EventProviderUpdated", owner: "" }; +} + +export const EventProviderUpdated: MessageFns< + EventProviderUpdated, + "akash.provider.v1beta4.EventProviderUpdated" +> = { + $type: "akash.provider.v1beta4.EventProviderUpdated" as const, + + encode( + message: EventProviderUpdated, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): EventProviderUpdated { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseEventProviderUpdated(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventProviderUpdated { + return { + $type: EventProviderUpdated.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + }; + }, + + toJSON(message: EventProviderUpdated): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + return obj; + }, + + create(base?: DeepPartial): EventProviderUpdated { + return EventProviderUpdated.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): EventProviderUpdated { + const message = createBaseEventProviderUpdated(); + message.owner = object.owner ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(EventProviderUpdated.$type, EventProviderUpdated); + +function createBaseEventProviderDeleted(): EventProviderDeleted { + return { $type: "akash.provider.v1beta4.EventProviderDeleted", owner: "" }; +} + +export const EventProviderDeleted: MessageFns< + EventProviderDeleted, + "akash.provider.v1beta4.EventProviderDeleted" +> = { + $type: "akash.provider.v1beta4.EventProviderDeleted" as const, + + encode( + message: EventProviderDeleted, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): EventProviderDeleted { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseEventProviderDeleted(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): EventProviderDeleted { + return { + $type: EventProviderDeleted.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + }; + }, + + toJSON(message: EventProviderDeleted): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + return obj; + }, + + create(base?: DeepPartial): EventProviderDeleted { + return EventProviderDeleted.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): EventProviderDeleted { + const message = createBaseEventProviderDeleted(); + message.owner = object.owner ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(EventProviderDeleted.$type, EventProviderDeleted); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/provider/v1beta4/genesis.ts b/ts/src/generated/akash/provider/v1beta4/genesis.ts new file mode 100644 index 00000000..baef3db3 --- /dev/null +++ b/ts/src/generated/akash/provider/v1beta4/genesis.ts @@ -0,0 +1,122 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/provider/v1beta4/genesis.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Provider } from "./provider"; + +/** GenesisState defines the basic genesis state used by provider module */ +export interface GenesisState { + $type: "akash.provider.v1beta4.GenesisState"; + providers: Provider[]; +} + +function createBaseGenesisState(): GenesisState { + return { $type: "akash.provider.v1beta4.GenesisState", providers: [] }; +} + +export const GenesisState: MessageFns< + GenesisState, + "akash.provider.v1beta4.GenesisState" +> = { + $type: "akash.provider.v1beta4.GenesisState" as const, + + encode( + message: GenesisState, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.providers) { + Provider.encode(v!, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): GenesisState { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGenesisState(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.providers.push(Provider.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): GenesisState { + return { + $type: GenesisState.$type, + providers: globalThis.Array.isArray(object?.providers) + ? object.providers.map((e: any) => Provider.fromJSON(e)) + : [], + }; + }, + + toJSON(message: GenesisState): unknown { + const obj: any = {}; + if (message.providers?.length) { + obj.providers = message.providers.map((e) => Provider.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): GenesisState { + return GenesisState.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GenesisState { + const message = createBaseGenesisState(); + message.providers = + object.providers?.map((e) => Provider.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(GenesisState.$type, GenesisState); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/provider/v1beta4/msg.ts b/ts/src/generated/akash/provider/v1beta4/msg.ts new file mode 100644 index 00000000..1d461477 --- /dev/null +++ b/ts/src/generated/akash/provider/v1beta4/msg.ts @@ -0,0 +1,605 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/provider/v1beta4/msg.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Attribute } from "../../base/attributes/v1/attribute"; +import { Info } from "./provider"; + +/** MsgCreateProvider defines an SDK message for creating a provider */ +export interface MsgCreateProvider { + $type: "akash.provider.v1beta4.MsgCreateProvider"; + owner: string; + hostUri: string; + attributes: Attribute[]; + info: Info | undefined; +} + +/** MsgCreateProviderResponse defines the Msg/CreateProvider response type. */ +export interface MsgCreateProviderResponse { + $type: "akash.provider.v1beta4.MsgCreateProviderResponse"; +} + +/** MsgUpdateProvider defines an SDK message for updating a provider */ +export interface MsgUpdateProvider { + $type: "akash.provider.v1beta4.MsgUpdateProvider"; + owner: string; + hostUri: string; + attributes: Attribute[]; + info: Info | undefined; +} + +/** MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. */ +export interface MsgUpdateProviderResponse { + $type: "akash.provider.v1beta4.MsgUpdateProviderResponse"; +} + +/** MsgDeleteProvider defines an SDK message for deleting a provider */ +export interface MsgDeleteProvider { + $type: "akash.provider.v1beta4.MsgDeleteProvider"; + owner: string; +} + +/** MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. */ +export interface MsgDeleteProviderResponse { + $type: "akash.provider.v1beta4.MsgDeleteProviderResponse"; +} + +function createBaseMsgCreateProvider(): MsgCreateProvider { + return { + $type: "akash.provider.v1beta4.MsgCreateProvider", + owner: "", + hostUri: "", + attributes: [], + info: undefined, + }; +} + +export const MsgCreateProvider: MessageFns< + MsgCreateProvider, + "akash.provider.v1beta4.MsgCreateProvider" +> = { + $type: "akash.provider.v1beta4.MsgCreateProvider" as const, + + encode( + message: MsgCreateProvider, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (message.hostUri !== "") { + writer.uint32(18).string(message.hostUri); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(26).fork()).join(); + } + if (message.info !== undefined) { + Info.encode(message.info, writer.uint32(34).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): MsgCreateProvider { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseMsgCreateProvider(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.hostUri = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.info = Info.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCreateProvider { + return { + $type: MsgCreateProvider.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + hostUri: isSet(object.hostUri) ? globalThis.String(object.hostUri) : "", + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + info: isSet(object.info) ? Info.fromJSON(object.info) : undefined, + }; + }, + + toJSON(message: MsgCreateProvider): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.hostUri !== "") { + obj.hostUri = message.hostUri; + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + if (message.info !== undefined) { + obj.info = Info.toJSON(message.info); + } + return obj; + }, + + create(base?: DeepPartial): MsgCreateProvider { + return MsgCreateProvider.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgCreateProvider { + const message = createBaseMsgCreateProvider(); + message.owner = object.owner ?? ""; + message.hostUri = object.hostUri ?? ""; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + message.info = + object.info !== undefined && object.info !== null + ? Info.fromPartial(object.info) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgCreateProvider.$type, MsgCreateProvider); + +function createBaseMsgCreateProviderResponse(): MsgCreateProviderResponse { + return { $type: "akash.provider.v1beta4.MsgCreateProviderResponse" }; +} + +export const MsgCreateProviderResponse: MessageFns< + MsgCreateProviderResponse, + "akash.provider.v1beta4.MsgCreateProviderResponse" +> = { + $type: "akash.provider.v1beta4.MsgCreateProviderResponse" as const, + + encode( + _: MsgCreateProviderResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgCreateProviderResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCreateProviderResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCreateProviderResponse { + return { $type: MsgCreateProviderResponse.$type }; + }, + + toJSON(_: MsgCreateProviderResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgCreateProviderResponse { + return MsgCreateProviderResponse.fromPartial(base ?? 
{}); + }, + fromPartial( + _: DeepPartial, + ): MsgCreateProviderResponse { + const message = createBaseMsgCreateProviderResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgCreateProviderResponse.$type, + MsgCreateProviderResponse, +); + +function createBaseMsgUpdateProvider(): MsgUpdateProvider { + return { + $type: "akash.provider.v1beta4.MsgUpdateProvider", + owner: "", + hostUri: "", + attributes: [], + info: undefined, + }; +} + +export const MsgUpdateProvider: MessageFns< + MsgUpdateProvider, + "akash.provider.v1beta4.MsgUpdateProvider" +> = { + $type: "akash.provider.v1beta4.MsgUpdateProvider" as const, + + encode( + message: MsgUpdateProvider, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (message.hostUri !== "") { + writer.uint32(18).string(message.hostUri); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(26).fork()).join(); + } + if (message.info !== undefined) { + Info.encode(message.info, writer.uint32(34).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): MsgUpdateProvider { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgUpdateProvider(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.hostUri = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.info = Info.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgUpdateProvider { + return { + $type: MsgUpdateProvider.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + hostUri: isSet(object.hostUri) ? globalThis.String(object.hostUri) : "", + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + info: isSet(object.info) ? Info.fromJSON(object.info) : undefined, + }; + }, + + toJSON(message: MsgUpdateProvider): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.hostUri !== "") { + obj.hostUri = message.hostUri; + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + if (message.info !== undefined) { + obj.info = Info.toJSON(message.info); + } + return obj; + }, + + create(base?: DeepPartial): MsgUpdateProvider { + return MsgUpdateProvider.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgUpdateProvider { + const message = createBaseMsgUpdateProvider(); + message.owner = object.owner ?? ""; + message.hostUri = object.hostUri ?? ""; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + message.info = + object.info !== undefined && object.info !== null + ? 
Info.fromPartial(object.info) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgUpdateProvider.$type, MsgUpdateProvider); + +function createBaseMsgUpdateProviderResponse(): MsgUpdateProviderResponse { + return { $type: "akash.provider.v1beta4.MsgUpdateProviderResponse" }; +} + +export const MsgUpdateProviderResponse: MessageFns< + MsgUpdateProviderResponse, + "akash.provider.v1beta4.MsgUpdateProviderResponse" +> = { + $type: "akash.provider.v1beta4.MsgUpdateProviderResponse" as const, + + encode( + _: MsgUpdateProviderResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgUpdateProviderResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgUpdateProviderResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgUpdateProviderResponse { + return { $type: MsgUpdateProviderResponse.$type }; + }, + + toJSON(_: MsgUpdateProviderResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgUpdateProviderResponse { + return MsgUpdateProviderResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgUpdateProviderResponse { + const message = createBaseMsgUpdateProviderResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgUpdateProviderResponse.$type, + MsgUpdateProviderResponse, +); + +function createBaseMsgDeleteProvider(): MsgDeleteProvider { + return { $type: "akash.provider.v1beta4.MsgDeleteProvider", owner: "" }; +} + +export const MsgDeleteProvider: MessageFns< + MsgDeleteProvider, + "akash.provider.v1beta4.MsgDeleteProvider" +> = { + $type: "akash.provider.v1beta4.MsgDeleteProvider" as const, + + encode( + message: MsgDeleteProvider, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): MsgDeleteProvider { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgDeleteProvider(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgDeleteProvider { + return { + $type: MsgDeleteProvider.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + }; + }, + + toJSON(message: MsgDeleteProvider): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + return obj; + }, + + create(base?: DeepPartial): MsgDeleteProvider { + return MsgDeleteProvider.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgDeleteProvider { + const message = createBaseMsgDeleteProvider(); + message.owner = object.owner ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set(MsgDeleteProvider.$type, MsgDeleteProvider); + +function createBaseMsgDeleteProviderResponse(): MsgDeleteProviderResponse { + return { $type: "akash.provider.v1beta4.MsgDeleteProviderResponse" }; +} + +export const MsgDeleteProviderResponse: MessageFns< + MsgDeleteProviderResponse, + "akash.provider.v1beta4.MsgDeleteProviderResponse" +> = { + $type: "akash.provider.v1beta4.MsgDeleteProviderResponse" as const, + + encode( + _: MsgDeleteProviderResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgDeleteProviderResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgDeleteProviderResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgDeleteProviderResponse { + return { $type: MsgDeleteProviderResponse.$type }; + }, + + toJSON(_: MsgDeleteProviderResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgDeleteProviderResponse { + return MsgDeleteProviderResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgDeleteProviderResponse { + const message = createBaseMsgDeleteProviderResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgDeleteProviderResponse.$type, + MsgDeleteProviderResponse, +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/provider/v1beta4/provider.ts b/ts/src/generated/akash/provider/v1beta4/provider.ts new file mode 100644 index 00000000..b3f7e8f2 --- /dev/null +++ b/ts/src/generated/akash/provider/v1beta4/provider.ts @@ -0,0 +1,271 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/provider/v1beta4/provider.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Attribute } from "../../base/attributes/v1/attribute"; + +/** Info */ +export interface Info { + $type: "akash.provider.v1beta4.Info"; + email: string; + website: string; +} + +/** Provider stores owner and host details */ +export interface Provider { + $type: "akash.provider.v1beta4.Provider"; + owner: string; + hostUri: string; + attributes: Attribute[]; + info: Info | undefined; +} + +function createBaseInfo(): Info { + return { $type: "akash.provider.v1beta4.Info", email: "", website: "" }; +} + +export const Info: MessageFns = { + $type: "akash.provider.v1beta4.Info" as const, + + encode( + message: Info, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.email !== "") { + writer.uint32(10).string(message.email); + } + if (message.website !== "") { + writer.uint32(18).string(message.website); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Info { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseInfo(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.email = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.website = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Info { + return { + $type: Info.$type, + email: isSet(object.email) ? globalThis.String(object.email) : "", + website: isSet(object.website) ? globalThis.String(object.website) : "", + }; + }, + + toJSON(message: Info): unknown { + const obj: any = {}; + if (message.email !== "") { + obj.email = message.email; + } + if (message.website !== "") { + obj.website = message.website; + } + return obj; + }, + + create(base?: DeepPartial): Info { + return Info.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Info { + const message = createBaseInfo(); + message.email = object.email ?? ""; + message.website = object.website ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(Info.$type, Info); + +function createBaseProvider(): Provider { + return { + $type: "akash.provider.v1beta4.Provider", + owner: "", + hostUri: "", + attributes: [], + info: undefined, + }; +} + +export const Provider: MessageFns = + { + $type: "akash.provider.v1beta4.Provider" as const, + + encode( + message: Provider, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + if (message.hostUri !== "") { + writer.uint32(18).string(message.hostUri); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(26).fork()).join(); + } + if (message.info !== undefined) { + Info.encode(message.info, writer.uint32(34).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Provider { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseProvider(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.hostUri = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.info = Info.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Provider { + return { + $type: Provider.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + hostUri: isSet(object.hostUri) ? globalThis.String(object.hostUri) : "", + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + info: isSet(object.info) ? Info.fromJSON(object.info) : undefined, + }; + }, + + toJSON(message: Provider): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + if (message.hostUri !== "") { + obj.hostUri = message.hostUri; + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + if (message.info !== undefined) { + obj.info = Info.toJSON(message.info); + } + return obj; + }, + + create(base?: DeepPartial): Provider { + return Provider.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Provider { + const message = createBaseProvider(); + message.owner = object.owner ?? ""; + message.hostUri = object.hostUri ?? ""; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + message.info = + object.info !== undefined && object.info !== null + ? Info.fromPartial(object.info) + : undefined; + return message; + }, + }; + +messageTypeRegistry.set(Provider.$type, Provider); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/provider/v1beta4/query.ts b/ts/src/generated/akash/provider/v1beta4/query.ts new file mode 100644 index 00000000..fdcb2ffe --- /dev/null +++ b/ts/src/generated/akash/provider/v1beta4/query.ts @@ -0,0 +1,463 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/provider/v1beta4/query.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { + PageRequest, + PageResponse, +} from "../../../cosmos/base/query/v1beta1/pagination"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Provider } from "./provider"; + +/** QueryProvidersRequest is request type for the Query/Providers RPC method */ +export interface QueryProvidersRequest { + $type: "akash.provider.v1beta4.QueryProvidersRequest"; + pagination: PageRequest | undefined; +} + +/** QueryProvidersResponse is response type for the Query/Providers RPC method */ +export interface QueryProvidersResponse { + $type: "akash.provider.v1beta4.QueryProvidersResponse"; + providers: Provider[]; + pagination: PageResponse | undefined; +} + +/** QueryProviderRequest is request type for the Query/Provider RPC method */ +export interface QueryProviderRequest { + $type: "akash.provider.v1beta4.QueryProviderRequest"; + owner: string; +} + +/** QueryProviderResponse is response type for the Query/Provider RPC method */ +export interface QueryProviderResponse { + $type: "akash.provider.v1beta4.QueryProviderResponse"; + provider: Provider | undefined; +} + +function createBaseQueryProvidersRequest(): QueryProvidersRequest { + return { + $type: "akash.provider.v1beta4.QueryProvidersRequest", + pagination: undefined, + }; +} + +export const QueryProvidersRequest: MessageFns< + QueryProvidersRequest, + "akash.provider.v1beta4.QueryProvidersRequest" +> = { + $type: "akash.provider.v1beta4.QueryProvidersRequest" as const, + + encode( + message: QueryProvidersRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryProvidersRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryProvidersRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryProvidersRequest { + return { + $type: QueryProvidersRequest.$type, + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryProvidersRequest): unknown { + const obj: any = {}; + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryProvidersRequest { + return QueryProvidersRequest.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryProvidersRequest { + const message = createBaseQueryProvidersRequest(); + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? 
PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryProvidersRequest.$type, QueryProvidersRequest); + +function createBaseQueryProvidersResponse(): QueryProvidersResponse { + return { + $type: "akash.provider.v1beta4.QueryProvidersResponse", + providers: [], + pagination: undefined, + }; +} + +export const QueryProvidersResponse: MessageFns< + QueryProvidersResponse, + "akash.provider.v1beta4.QueryProvidersResponse" +> = { + $type: "akash.provider.v1beta4.QueryProvidersResponse" as const, + + encode( + message: QueryProvidersResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.providers) { + Provider.encode(v!, writer.uint32(10).fork()).join(); + } + if (message.pagination !== undefined) { + PageResponse.encode(message.pagination, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryProvidersResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryProvidersResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.providers.push(Provider.decode(reader, reader.uint32())); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageResponse.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryProvidersResponse { + return { + $type: QueryProvidersResponse.$type, + providers: globalThis.Array.isArray(object?.providers) + ? object.providers.map((e: any) => Provider.fromJSON(e)) + : [], + pagination: isSet(object.pagination) + ? PageResponse.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryProvidersResponse): unknown { + const obj: any = {}; + if (message.providers?.length) { + obj.providers = message.providers.map((e) => Provider.toJSON(e)); + } + if (message.pagination !== undefined) { + obj.pagination = PageResponse.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryProvidersResponse { + return QueryProvidersResponse.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryProvidersResponse { + const message = createBaseQueryProvidersResponse(); + message.providers = + object.providers?.map((e) => Provider.fromPartial(e)) || []; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? 
PageResponse.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryProvidersResponse.$type, QueryProvidersResponse); + +function createBaseQueryProviderRequest(): QueryProviderRequest { + return { $type: "akash.provider.v1beta4.QueryProviderRequest", owner: "" }; +} + +export const QueryProviderRequest: MessageFns< + QueryProviderRequest, + "akash.provider.v1beta4.QueryProviderRequest" +> = { + $type: "akash.provider.v1beta4.QueryProviderRequest" as const, + + encode( + message: QueryProviderRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.owner !== "") { + writer.uint32(10).string(message.owner); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryProviderRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryProviderRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryProviderRequest { + return { + $type: QueryProviderRequest.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : "", + }; + }, + + toJSON(message: QueryProviderRequest): unknown { + const obj: any = {}; + if (message.owner !== "") { + obj.owner = message.owner; + } + return obj; + }, + + create(base?: DeepPartial): QueryProviderRequest { + return QueryProviderRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryProviderRequest { + const message = createBaseQueryProviderRequest(); + message.owner = object.owner ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(QueryProviderRequest.$type, QueryProviderRequest); + +function createBaseQueryProviderResponse(): QueryProviderResponse { + return { + $type: "akash.provider.v1beta4.QueryProviderResponse", + provider: undefined, + }; +} + +export const QueryProviderResponse: MessageFns< + QueryProviderResponse, + "akash.provider.v1beta4.QueryProviderResponse" +> = { + $type: "akash.provider.v1beta4.QueryProviderResponse" as const, + + encode( + message: QueryProviderResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.provider !== undefined) { + Provider.encode(message.provider, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryProviderResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryProviderResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.provider = Provider.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryProviderResponse { + return { + $type: QueryProviderResponse.$type, + provider: isSet(object.provider) + ? 
Provider.fromJSON(object.provider) + : undefined, + }; + }, + + toJSON(message: QueryProviderResponse): unknown { + const obj: any = {}; + if (message.provider !== undefined) { + obj.provider = Provider.toJSON(message.provider); + } + return obj; + }, + + create(base?: DeepPartial): QueryProviderResponse { + return QueryProviderResponse.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryProviderResponse { + const message = createBaseQueryProviderResponse(); + message.provider = + object.provider !== undefined && object.provider !== null + ? Provider.fromPartial(object.provider) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryProviderResponse.$type, QueryProviderResponse); + +/** Query defines the gRPC querier service */ +export interface Query { + /** Providers queries providers */ + Providers(request: QueryProvidersRequest): Promise; + /** Provider queries provider details */ + Provider(request: QueryProviderRequest): Promise; +} + +export const QueryServiceName = "akash.provider.v1beta4.Query"; +export class QueryClientImpl implements Query { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || QueryServiceName; + this.rpc = rpc; + this.Providers = this.Providers.bind(this); + this.Provider = this.Provider.bind(this); + } + Providers(request: QueryProvidersRequest): Promise { + const data = QueryProvidersRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Providers", data); + return promise.then((data) => + QueryProvidersResponse.decode(new BinaryReader(data)), + ); + } + + Provider(request: QueryProviderRequest): Promise { + const data = QueryProviderRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Provider", data); + return promise.then((data) => + QueryProviderResponse.decode(new BinaryReader(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/provider/v1beta4/service.grpc-js.ts b/ts/src/generated/akash/provider/v1beta4/service.grpc-js.ts new file mode 100644 index 00000000..b6dd0c0d --- /dev/null +++ b/ts/src/generated/akash/provider/v1beta4/service.grpc-js.ts @@ -0,0 +1,175 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/provider/v1beta4/service.proto + +/* eslint-disable */ +import { + ChannelCredentials, + Client, + makeGenericClientConstructor, + Metadata, +} from "@grpc/grpc-js"; +import type { + CallOptions, + ClientOptions, + ClientUnaryCall, + handleUnaryCall, + ServiceError, + UntypedServiceImplementation, +} from "@grpc/grpc-js"; +import { + MsgCreateProvider, + MsgCreateProviderResponse, + MsgDeleteProvider, + MsgDeleteProviderResponse, + MsgUpdateProvider, + MsgUpdateProviderResponse, +} from "./msg"; + +export const protobufPackage = "akash.provider.v1beta4"; + +/** Msg defines the provider Msg service */ +export type MsgService = typeof MsgService; +export const MsgService = { + /** CreateProvider defines a method that creates a provider given the proper inputs */ + createProvider: { + path: "/akash.provider.v1beta4.Msg/CreateProvider", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgCreateProvider) => + Buffer.from(MsgCreateProvider.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgCreateProvider.decode(value), + responseSerialize: (value: MsgCreateProviderResponse) => + Buffer.from(MsgCreateProviderResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgCreateProviderResponse.decode(value), + }, + /** UpdateProvider defines a method that updates a provider given the proper inputs */ + updateProvider: { + path: "/akash.provider.v1beta4.Msg/UpdateProvider", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgUpdateProvider) => + Buffer.from(MsgUpdateProvider.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgUpdateProvider.decode(value), + responseSerialize: (value: MsgUpdateProviderResponse) => + Buffer.from(MsgUpdateProviderResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgUpdateProviderResponse.decode(value), + }, + /** DeleteProvider defines a method that deletes a provider given the proper inputs */ + deleteProvider: { + path: "/akash.provider.v1beta4.Msg/DeleteProvider", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgDeleteProvider) => + Buffer.from(MsgDeleteProvider.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgDeleteProvider.decode(value), + responseSerialize: (value: MsgDeleteProviderResponse) => + Buffer.from(MsgDeleteProviderResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgDeleteProviderResponse.decode(value), + }, +} as const; + +export interface MsgServer extends UntypedServiceImplementation { + /** CreateProvider defines a method that creates a provider given the proper inputs */ + createProvider: handleUnaryCall; + /** UpdateProvider defines a method that updates a provider given the proper inputs */ + updateProvider: handleUnaryCall; + /** DeleteProvider defines a method that deletes a provider given the proper inputs */ + deleteProvider: handleUnaryCall; +} + +export interface MsgClient extends Client { + /** CreateProvider defines a method that creates a provider given the proper inputs */ + createProvider( + request: MsgCreateProvider, + callback: ( + error: ServiceError | null, + response: MsgCreateProviderResponse, + ) => void, + ): ClientUnaryCall; + createProvider( + request: MsgCreateProvider, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgCreateProviderResponse, + ) => void, + ): ClientUnaryCall; + createProvider( + 
request: MsgCreateProvider, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgCreateProviderResponse, + ) => void, + ): ClientUnaryCall; + /** UpdateProvider defines a method that updates a provider given the proper inputs */ + updateProvider( + request: MsgUpdateProvider, + callback: ( + error: ServiceError | null, + response: MsgUpdateProviderResponse, + ) => void, + ): ClientUnaryCall; + updateProvider( + request: MsgUpdateProvider, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgUpdateProviderResponse, + ) => void, + ): ClientUnaryCall; + updateProvider( + request: MsgUpdateProvider, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgUpdateProviderResponse, + ) => void, + ): ClientUnaryCall; + /** DeleteProvider defines a method that deletes a provider given the proper inputs */ + deleteProvider( + request: MsgDeleteProvider, + callback: ( + error: ServiceError | null, + response: MsgDeleteProviderResponse, + ) => void, + ): ClientUnaryCall; + deleteProvider( + request: MsgDeleteProvider, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgDeleteProviderResponse, + ) => void, + ): ClientUnaryCall; + deleteProvider( + request: MsgDeleteProvider, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgDeleteProviderResponse, + ) => void, + ): ClientUnaryCall; +} + +export const MsgClient = makeGenericClientConstructor( + MsgService, + "akash.provider.v1beta4.Msg", +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial, + ): MsgClient; + service: typeof MsgService; + serviceName: string; +}; diff --git a/ts/src/generated/akash/provider/v1beta4/service.ts b/ts/src/generated/akash/provider/v1beta4/service.ts new file mode 100644 index 00000000..a3ea5578 --- /dev/null +++ b/ts/src/generated/akash/provider/v1beta4/service.ts @@ -0,0 +1,82 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/provider/v1beta4/service.proto + +/* eslint-disable */ +import { BinaryReader } from "@bufbuild/protobuf/wire"; +import { + MsgCreateProvider, + MsgCreateProviderResponse, + MsgDeleteProvider, + MsgDeleteProviderResponse, + MsgUpdateProvider, + MsgUpdateProviderResponse, +} from "./msg"; + +/** Msg defines the provider Msg service */ +export interface Msg { + /** CreateProvider defines a method that creates a provider given the proper inputs */ + CreateProvider( + request: MsgCreateProvider, + ): Promise; + /** UpdateProvider defines a method that updates a provider given the proper inputs */ + UpdateProvider( + request: MsgUpdateProvider, + ): Promise; + /** DeleteProvider defines a method that deletes a provider given the proper inputs */ + DeleteProvider( + request: MsgDeleteProvider, + ): Promise; +} + +export const MsgServiceName = "akash.provider.v1beta4.Msg"; +export class MsgClientImpl implements Msg { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || MsgServiceName; + this.rpc = rpc; + this.CreateProvider = this.CreateProvider.bind(this); + this.UpdateProvider = this.UpdateProvider.bind(this); + this.DeleteProvider = this.DeleteProvider.bind(this); + } + CreateProvider( + request: MsgCreateProvider, + ): Promise { + const data = MsgCreateProvider.encode(request).finish(); + const promise = this.rpc.request(this.service, "CreateProvider", data); + return promise.then((data) => + MsgCreateProviderResponse.decode(new BinaryReader(data)), + ); + } + + UpdateProvider( + request: MsgUpdateProvider, + ): Promise { + const data = MsgUpdateProvider.encode(request).finish(); + const promise = this.rpc.request(this.service, "UpdateProvider", data); + return promise.then((data) => + MsgUpdateProviderResponse.decode(new BinaryReader(data)), + ); + } + + DeleteProvider( + request: MsgDeleteProvider, + ): Promise { + const data = MsgDeleteProvider.encode(request).finish(); + const promise = this.rpc.request(this.service, "DeleteProvider", data); + return promise.then((data) => + MsgDeleteProviderResponse.decode(new BinaryReader(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} diff --git a/ts/src/generated/akash/staking/v1beta3/genesis.ts b/ts/src/generated/akash/staking/v1beta3/genesis.ts index aee7902a..78707451 100644 --- a/ts/src/generated/akash/staking/v1beta3/genesis.ts +++ b/ts/src/generated/akash/staking/v1beta3/genesis.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/staking/v1beta3/genesis.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; import { Params } from "./params"; @@ -14,22 +20,25 @@ function createBaseGenesisState(): GenesisState { return { $type: "akash.staking.v1beta3.GenesisState", params: undefined }; } -export const GenesisState = { +export const GenesisState: MessageFns< + GenesisState, + "akash.staking.v1beta3.GenesisState" +> = { $type: "akash.staking.v1beta3.GenesisState" as const, encode( message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.params !== undefined) { - Params.encode(message.params, writer.uint32(10).fork()).ldelim(); + Params.encode(message.params, writer.uint32(10).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { + decode(input: BinaryReader | Uint8Array, length?: number): GenesisState { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseGenesisState(); while (reader.pos < end) { @@ -46,7 +55,7 @@ export const GenesisState = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -102,11 +111,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/staking/v1beta3/params.ts b/ts/src/generated/akash/staking/v1beta3/params.ts index fd8db94a..f2e8601e 100644 --- a/ts/src/generated/akash/staking/v1beta3/params.ts +++ b/ts/src/generated/akash/staking/v1beta3/params.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/staking/v1beta3/params.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; /** Params extends the parameters for the x/staking module */ @@ -14,22 +20,22 @@ function createBaseParams(): Params { return { $type: "akash.staking.v1beta3.Params", minCommissionRate: "" }; } -export const Params = { +export const Params: MessageFns = { $type: "akash.staking.v1beta3.Params" as const, encode( message: Params, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.minCommissionRate !== "") { writer.uint32(10).string(message.minCommissionRate); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Params { + decode(input: BinaryReader | Uint8Array, length?: number): Params { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseParams(); while (reader.pos < end) { @@ -46,7 +52,7 @@ export const Params = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -101,11 +107,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/staking/v1beta3/paramsmsg.ts b/ts/src/generated/akash/staking/v1beta3/paramsmsg.ts new file mode 100644 index 00000000..5924b3b1 --- /dev/null +++ b/ts/src/generated/akash/staking/v1beta3/paramsmsg.ts @@ -0,0 +1,227 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/staking/v1beta3/paramsmsg.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Params } from "./params"; + +/** + * MsgUpdateParams is the Msg/UpdateParams request type. + * + * Since: akash v1.0.0 + */ +export interface MsgUpdateParams { + $type: "akash.staking.v1beta3.MsgUpdateParams"; + /** authority is the address of the governance account. */ + authority: string; + /** + * params defines the x/deployment parameters to update. + * + * NOTE: All parameters must be supplied. + */ + params: Params | undefined; +} + +/** + * MsgUpdateParamsResponse defines the response structure for executing a + * MsgUpdateParams message. 
+ * + * Since: akash v1.0.0 + */ +export interface MsgUpdateParamsResponse { + $type: "akash.staking.v1beta3.MsgUpdateParamsResponse"; +} + +function createBaseMsgUpdateParams(): MsgUpdateParams { + return { + $type: "akash.staking.v1beta3.MsgUpdateParams", + authority: "", + params: undefined, + }; +} + +export const MsgUpdateParams: MessageFns< + MsgUpdateParams, + "akash.staking.v1beta3.MsgUpdateParams" +> = { + $type: "akash.staking.v1beta3.MsgUpdateParams" as const, + + encode( + message: MsgUpdateParams, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.authority !== "") { + writer.uint32(10).string(message.authority); + } + if (message.params !== undefined) { + Params.encode(message.params, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): MsgUpdateParams { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgUpdateParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.authority = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.params = Params.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgUpdateParams { + return { + $type: MsgUpdateParams.$type, + authority: isSet(object.authority) + ? globalThis.String(object.authority) + : "", + params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, + }; + }, + + toJSON(message: MsgUpdateParams): unknown { + const obj: any = {}; + if (message.authority !== "") { + obj.authority = message.authority; + } + if (message.params !== undefined) { + obj.params = Params.toJSON(message.params); + } + return obj; + }, + + create(base?: DeepPartial): MsgUpdateParams { + return MsgUpdateParams.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgUpdateParams { + const message = createBaseMsgUpdateParams(); + message.authority = object.authority ?? ""; + message.params = + object.params !== undefined && object.params !== null + ? Params.fromPartial(object.params) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgUpdateParams.$type, MsgUpdateParams); + +function createBaseMsgUpdateParamsResponse(): MsgUpdateParamsResponse { + return { $type: "akash.staking.v1beta3.MsgUpdateParamsResponse" }; +} + +export const MsgUpdateParamsResponse: MessageFns< + MsgUpdateParamsResponse, + "akash.staking.v1beta3.MsgUpdateParamsResponse" +> = { + $type: "akash.staking.v1beta3.MsgUpdateParamsResponse" as const, + + encode( + _: MsgUpdateParamsResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgUpdateParamsResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseMsgUpdateParamsResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgUpdateParamsResponse { + return { $type: MsgUpdateParamsResponse.$type }; + }, + + toJSON(_: MsgUpdateParamsResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgUpdateParamsResponse { + return MsgUpdateParamsResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgUpdateParamsResponse { + const message = createBaseMsgUpdateParamsResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgUpdateParamsResponse.$type, MsgUpdateParamsResponse); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/staking/v1beta3/query.ts b/ts/src/generated/akash/staking/v1beta3/query.ts new file mode 100644 index 00000000..026f39b1 --- /dev/null +++ b/ts/src/generated/akash/staking/v1beta3/query.ts @@ -0,0 +1,227 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/staking/v1beta3/query.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Params } from "./params"; + +/** QueryParamsRequest is the request type for the Query/Params RPC method. */ +export interface QueryParamsRequest { + $type: "akash.staking.v1beta3.QueryParamsRequest"; +} + +/** QueryParamsResponse is the response type for the Query/Params RPC method. */ +export interface QueryParamsResponse { + $type: "akash.staking.v1beta3.QueryParamsResponse"; + /** params defines the parameters of the module. */ + params: Params | undefined; +} + +function createBaseQueryParamsRequest(): QueryParamsRequest { + return { $type: "akash.staking.v1beta3.QueryParamsRequest" }; +} + +export const QueryParamsRequest: MessageFns< + QueryParamsRequest, + "akash.staking.v1beta3.QueryParamsRequest" +> = { + $type: "akash.staking.v1beta3.QueryParamsRequest" as const, + + encode( + _: QueryParamsRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryParamsRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryParamsRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): QueryParamsRequest { + return { $type: QueryParamsRequest.$type }; + }, + + toJSON(_: QueryParamsRequest): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): QueryParamsRequest { + return QueryParamsRequest.fromPartial(base ?? {}); + }, + fromPartial(_: DeepPartial): QueryParamsRequest { + const message = createBaseQueryParamsRequest(); + return message; + }, +}; + +messageTypeRegistry.set(QueryParamsRequest.$type, QueryParamsRequest); + +function createBaseQueryParamsResponse(): QueryParamsResponse { + return { + $type: "akash.staking.v1beta3.QueryParamsResponse", + params: undefined, + }; +} + +export const QueryParamsResponse: MessageFns< + QueryParamsResponse, + "akash.staking.v1beta3.QueryParamsResponse" +> = { + $type: "akash.staking.v1beta3.QueryParamsResponse" as const, + + encode( + message: QueryParamsResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.params !== undefined) { + Params.encode(message.params, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryParamsResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryParamsResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.params = Params.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryParamsResponse { + return { + $type: QueryParamsResponse.$type, + params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, + }; + }, + + toJSON(message: QueryParamsResponse): unknown { + const obj: any = {}; + if (message.params !== undefined) { + obj.params = Params.toJSON(message.params); + } + return obj; + }, + + create(base?: DeepPartial): QueryParamsResponse { + return QueryParamsResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryParamsResponse { + const message = createBaseQueryParamsResponse(); + message.params = + object.params !== undefined && object.params !== null + ? Params.fromPartial(object.params) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryParamsResponse.$type, QueryParamsResponse); + +/** Query defines the gRPC querier service */ +export interface Query { + /** Params returns the total set of minting parameters. 
*/ + Params(request: QueryParamsRequest): Promise; +} + +export const QueryServiceName = "akash.staking.v1beta3.Query"; +export class QueryClientImpl implements Query { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || QueryServiceName; + this.rpc = rpc; + this.Params = this.Params.bind(this); + } + Params(request: QueryParamsRequest): Promise { + const data = QueryParamsRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Params", data); + return promise.then((data) => + QueryParamsResponse.decode(new BinaryReader(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/staking/v1beta3/service.grpc-js.ts b/ts/src/generated/akash/staking/v1beta3/service.grpc-js.ts new file mode 100644 index 00000000..aa657a32 --- /dev/null +++ b/ts/src/generated/akash/staking/v1beta3/service.grpc-js.ts @@ -0,0 +1,103 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/staking/v1beta3/service.proto + +/* eslint-disable */ +import { + ChannelCredentials, + Client, + makeGenericClientConstructor, + Metadata, +} from "@grpc/grpc-js"; +import type { + CallOptions, + ClientOptions, + ClientUnaryCall, + handleUnaryCall, + ServiceError, + UntypedServiceImplementation, +} from "@grpc/grpc-js"; +import { MsgUpdateParams, MsgUpdateParamsResponse } from "./paramsmsg"; + +export const protobufPackage = "akash.staking.v1beta3"; + +/** Msg defines the market Msg service */ +export type MsgService = typeof MsgService; +export const MsgService = { + /** + * UpdateParams defines a governance operation for updating the x/market module + * parameters. The authority is hard-coded to the x/gov module account. + * + * Since: akash v1.0.0 + */ + updateParams: { + path: "/akash.staking.v1beta3.Msg/UpdateParams", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgUpdateParams) => + Buffer.from(MsgUpdateParams.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgUpdateParams.decode(value), + responseSerialize: (value: MsgUpdateParamsResponse) => + Buffer.from(MsgUpdateParamsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgUpdateParamsResponse.decode(value), + }, +} as const; + +export interface MsgServer extends UntypedServiceImplementation { + /** + * UpdateParams defines a governance operation for updating the x/market module + * parameters. The authority is hard-coded to the x/gov module account. 
+ * + * Since: akash v1.0.0 + */ + updateParams: handleUnaryCall; +} + +export interface MsgClient extends Client { + /** + * UpdateParams defines a governance operation for updating the x/market module + * parameters. The authority is hard-coded to the x/gov module account. + * + * Since: akash v1.0.0 + */ + updateParams( + request: MsgUpdateParams, + callback: ( + error: ServiceError | null, + response: MsgUpdateParamsResponse, + ) => void, + ): ClientUnaryCall; + updateParams( + request: MsgUpdateParams, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgUpdateParamsResponse, + ) => void, + ): ClientUnaryCall; + updateParams( + request: MsgUpdateParams, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgUpdateParamsResponse, + ) => void, + ): ClientUnaryCall; +} + +export const MsgClient = makeGenericClientConstructor( + MsgService, + "akash.staking.v1beta3.Msg", +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial, + ): MsgClient; + service: typeof MsgService; + serviceName: string; +}; diff --git a/ts/src/generated/akash/staking/v1beta3/service.ts b/ts/src/generated/akash/staking/v1beta3/service.ts new file mode 100644 index 00000000..bbdb53c1 --- /dev/null +++ b/ts/src/generated/akash/staking/v1beta3/service.ts @@ -0,0 +1,46 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/staking/v1beta3/service.proto + +/* eslint-disable */ +import { BinaryReader } from "@bufbuild/protobuf/wire"; +import { MsgUpdateParams, MsgUpdateParamsResponse } from "./paramsmsg"; + +/** Msg defines the market Msg service */ +export interface Msg { + /** + * UpdateParams defines a governance operation for updating the x/market module + * parameters. The authority is hard-coded to the x/gov module account. + * + * Since: akash v1.0.0 + */ + UpdateParams(request: MsgUpdateParams): Promise; +} + +export const MsgServiceName = "akash.staking.v1beta3.Msg"; +export class MsgClientImpl implements Msg { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || MsgServiceName; + this.rpc = rpc; + this.UpdateParams = this.UpdateParams.bind(this); + } + UpdateParams(request: MsgUpdateParams): Promise { + const data = MsgUpdateParams.encode(request).finish(); + const promise = this.rpc.request(this.service, "UpdateParams", data); + return promise.then((data) => + MsgUpdateParamsResponse.decode(new BinaryReader(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} diff --git a/ts/src/generated/akash/take/v1/genesis.ts b/ts/src/generated/akash/take/v1/genesis.ts new file mode 100644 index 00000000..81ff0691 --- /dev/null +++ b/ts/src/generated/akash/take/v1/genesis.ts @@ -0,0 +1,126 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/take/v1/genesis.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Params } from "./params"; + +/** GenesisState stores slice of genesis deployment instance */ +export interface GenesisState { + $type: "akash.take.v1.GenesisState"; + params: Params | undefined; +} + +function createBaseGenesisState(): GenesisState { + return { $type: "akash.take.v1.GenesisState", params: undefined }; +} + +export const GenesisState: MessageFns< + GenesisState, + "akash.take.v1.GenesisState" +> = { + $type: "akash.take.v1.GenesisState" as const, + + encode( + message: GenesisState, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.params !== undefined) { + Params.encode(message.params, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): GenesisState { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGenesisState(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.params = Params.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): GenesisState { + return { + $type: GenesisState.$type, + params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, + }; + }, + + toJSON(message: GenesisState): unknown { + const obj: any = {}; + if (message.params !== undefined) { + obj.params = Params.toJSON(message.params); + } + return obj; + }, + + create(base?: DeepPartial): GenesisState { + return GenesisState.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GenesisState { + const message = createBaseGenesisState(); + message.params = + object.params !== undefined && object.params !== null + ? Params.fromPartial(object.params) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(GenesisState.$type, GenesisState); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/take/v1/params.ts b/ts/src/generated/akash/take/v1/params.ts new file mode 100644 index 00000000..c2911d1d --- /dev/null +++ b/ts/src/generated/akash/take/v1/params.ts @@ -0,0 +1,242 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/take/v1/params.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; + +/** DenomTakeRate describes take rate for specified denom */ +export interface DenomTakeRate { + $type: "akash.take.v1.DenomTakeRate"; + denom: string; + rate: number; +} + +/** Params defines the parameters for the x/take package */ +export interface Params { + $type: "akash.take.v1.Params"; + /** denom -> % take rate */ + denomTakeRates: DenomTakeRate[]; + defaultTakeRate: number; +} + +function createBaseDenomTakeRate(): DenomTakeRate { + return { $type: "akash.take.v1.DenomTakeRate", denom: "", rate: 0 }; +} + +export const DenomTakeRate: MessageFns< + DenomTakeRate, + "akash.take.v1.DenomTakeRate" +> = { + $type: "akash.take.v1.DenomTakeRate" as const, + + encode( + message: DenomTakeRate, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.denom !== "") { + writer.uint32(10).string(message.denom); + } + if (message.rate !== 0) { + writer.uint32(16).uint32(message.rate); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): DenomTakeRate { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDenomTakeRate(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.denom = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.rate = reader.uint32(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): DenomTakeRate { + return { + $type: DenomTakeRate.$type, + denom: isSet(object.denom) ? globalThis.String(object.denom) : "", + rate: isSet(object.rate) ? globalThis.Number(object.rate) : 0, + }; + }, + + toJSON(message: DenomTakeRate): unknown { + const obj: any = {}; + if (message.denom !== "") { + obj.denom = message.denom; + } + if (message.rate !== 0) { + obj.rate = Math.round(message.rate); + } + return obj; + }, + + create(base?: DeepPartial): DenomTakeRate { + return DenomTakeRate.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DenomTakeRate { + const message = createBaseDenomTakeRate(); + message.denom = object.denom ?? ""; + message.rate = object.rate ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(DenomTakeRate.$type, DenomTakeRate); + +function createBaseParams(): Params { + return { + $type: "akash.take.v1.Params", + denomTakeRates: [], + defaultTakeRate: 0, + }; +} + +export const Params: MessageFns = { + $type: "akash.take.v1.Params" as const, + + encode( + message: Params, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.denomTakeRates) { + DenomTakeRate.encode(v!, writer.uint32(10).fork()).join(); + } + if (message.defaultTakeRate !== 0) { + writer.uint32(16).uint32(message.defaultTakeRate); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): Params { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.denomTakeRates.push( + DenomTakeRate.decode(reader, reader.uint32()), + ); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.defaultTakeRate = reader.uint32(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): Params { + return { + $type: Params.$type, + denomTakeRates: globalThis.Array.isArray(object?.denomTakeRates) + ? object.denomTakeRates.map((e: any) => DenomTakeRate.fromJSON(e)) + : [], + defaultTakeRate: isSet(object.defaultTakeRate) + ? globalThis.Number(object.defaultTakeRate) + : 0, + }; + }, + + toJSON(message: Params): unknown { + const obj: any = {}; + if (message.denomTakeRates?.length) { + obj.denomTakeRates = message.denomTakeRates.map((e) => + DenomTakeRate.toJSON(e), + ); + } + if (message.defaultTakeRate !== 0) { + obj.defaultTakeRate = Math.round(message.defaultTakeRate); + } + return obj; + }, + + create(base?: DeepPartial): Params { + return Params.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Params { + const message = createBaseParams(); + message.denomTakeRates = + object.denomTakeRates?.map((e) => DenomTakeRate.fromPartial(e)) || []; + message.defaultTakeRate = object.defaultTakeRate ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Params.$type, Params); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/take/v1/paramsmsg.ts b/ts/src/generated/akash/take/v1/paramsmsg.ts new file mode 100644 index 00000000..b77bb1fd --- /dev/null +++ b/ts/src/generated/akash/take/v1/paramsmsg.ts @@ -0,0 +1,227 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/take/v1/paramsmsg.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Params } from "./params"; + +/** + * MsgUpdateParams is the Msg/UpdateParams request type. + * + * Since: akash v1.0.0 + */ +export interface MsgUpdateParams { + $type: "akash.take.v1.MsgUpdateParams"; + /** authority is the address of the governance account. */ + authority: string; + /** + * params defines the x/deployment parameters to update. + * + * NOTE: All parameters must be supplied. + */ + params: Params | undefined; +} + +/** + * MsgUpdateParamsResponse defines the response structure for executing a + * MsgUpdateParams message. 
+ * + * Since: akash v1.0.0 + */ +export interface MsgUpdateParamsResponse { + $type: "akash.take.v1.MsgUpdateParamsResponse"; +} + +function createBaseMsgUpdateParams(): MsgUpdateParams { + return { + $type: "akash.take.v1.MsgUpdateParams", + authority: "", + params: undefined, + }; +} + +export const MsgUpdateParams: MessageFns< + MsgUpdateParams, + "akash.take.v1.MsgUpdateParams" +> = { + $type: "akash.take.v1.MsgUpdateParams" as const, + + encode( + message: MsgUpdateParams, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.authority !== "") { + writer.uint32(10).string(message.authority); + } + if (message.params !== undefined) { + Params.encode(message.params, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): MsgUpdateParams { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgUpdateParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.authority = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.params = Params.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgUpdateParams { + return { + $type: MsgUpdateParams.$type, + authority: isSet(object.authority) + ? globalThis.String(object.authority) + : "", + params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, + }; + }, + + toJSON(message: MsgUpdateParams): unknown { + const obj: any = {}; + if (message.authority !== "") { + obj.authority = message.authority; + } + if (message.params !== undefined) { + obj.params = Params.toJSON(message.params); + } + return obj; + }, + + create(base?: DeepPartial): MsgUpdateParams { + return MsgUpdateParams.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgUpdateParams { + const message = createBaseMsgUpdateParams(); + message.authority = object.authority ?? ""; + message.params = + object.params !== undefined && object.params !== null + ? Params.fromPartial(object.params) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgUpdateParams.$type, MsgUpdateParams); + +function createBaseMsgUpdateParamsResponse(): MsgUpdateParamsResponse { + return { $type: "akash.take.v1.MsgUpdateParamsResponse" }; +} + +export const MsgUpdateParamsResponse: MessageFns< + MsgUpdateParamsResponse, + "akash.take.v1.MsgUpdateParamsResponse" +> = { + $type: "akash.take.v1.MsgUpdateParamsResponse" as const, + + encode( + _: MsgUpdateParamsResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): MsgUpdateParamsResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseMsgUpdateParamsResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgUpdateParamsResponse { + return { $type: MsgUpdateParamsResponse.$type }; + }, + + toJSON(_: MsgUpdateParamsResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgUpdateParamsResponse { + return MsgUpdateParamsResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgUpdateParamsResponse { + const message = createBaseMsgUpdateParamsResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgUpdateParamsResponse.$type, MsgUpdateParamsResponse); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/take/v1/query.ts b/ts/src/generated/akash/take/v1/query.ts new file mode 100644 index 00000000..9bd35e42 --- /dev/null +++ b/ts/src/generated/akash/take/v1/query.ts @@ -0,0 +1,224 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/take/v1/query.proto + +/* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../../../typeRegistry"; +import { Params } from "./params"; + +/** QueryParamsRequest is the request type for the Query/Params RPC method. */ +export interface QueryParamsRequest { + $type: "akash.take.v1.QueryParamsRequest"; +} + +/** QueryParamsResponse is the response type for the Query/Params RPC method. */ +export interface QueryParamsResponse { + $type: "akash.take.v1.QueryParamsResponse"; + /** params defines the parameters of the module. */ + params: Params | undefined; +} + +function createBaseQueryParamsRequest(): QueryParamsRequest { + return { $type: "akash.take.v1.QueryParamsRequest" }; +} + +export const QueryParamsRequest: MessageFns< + QueryParamsRequest, + "akash.take.v1.QueryParamsRequest" +> = { + $type: "akash.take.v1.QueryParamsRequest" as const, + + encode( + _: QueryParamsRequest, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryParamsRequest { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryParamsRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(_: any): QueryParamsRequest { + return { $type: QueryParamsRequest.$type }; + }, + + toJSON(_: QueryParamsRequest): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): QueryParamsRequest { + return QueryParamsRequest.fromPartial(base ?? {}); + }, + fromPartial(_: DeepPartial): QueryParamsRequest { + const message = createBaseQueryParamsRequest(); + return message; + }, +}; + +messageTypeRegistry.set(QueryParamsRequest.$type, QueryParamsRequest); + +function createBaseQueryParamsResponse(): QueryParamsResponse { + return { $type: "akash.take.v1.QueryParamsResponse", params: undefined }; +} + +export const QueryParamsResponse: MessageFns< + QueryParamsResponse, + "akash.take.v1.QueryParamsResponse" +> = { + $type: "akash.take.v1.QueryParamsResponse" as const, + + encode( + message: QueryParamsResponse, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.params !== undefined) { + Params.encode(message.params, writer.uint32(10).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): QueryParamsResponse { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryParamsResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.params = Params.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryParamsResponse { + return { + $type: QueryParamsResponse.$type, + params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, + }; + }, + + toJSON(message: QueryParamsResponse): unknown { + const obj: any = {}; + if (message.params !== undefined) { + obj.params = Params.toJSON(message.params); + } + return obj; + }, + + create(base?: DeepPartial): QueryParamsResponse { + return QueryParamsResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryParamsResponse { + const message = createBaseQueryParamsResponse(); + message.params = + object.params !== undefined && object.params !== null + ? Params.fromPartial(object.params) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryParamsResponse.$type, QueryParamsResponse); + +/** Query defines the gRPC querier service */ +export interface Query { + /** Params returns the total set of minting parameters. 
*/ + Params(request: QueryParamsRequest): Promise; +} + +export const QueryServiceName = "akash.take.v1.Query"; +export class QueryClientImpl implements Query { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || QueryServiceName; + this.rpc = rpc; + this.Params = this.Params.bind(this); + } + Params(request: QueryParamsRequest): Promise { + const data = QueryParamsRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, "Params", data); + return promise.then((data) => + QueryParamsResponse.decode(new BinaryReader(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/akash/take/v1/service.grpc-js.ts b/ts/src/generated/akash/take/v1/service.grpc-js.ts new file mode 100644 index 00000000..3f99b5be --- /dev/null +++ b/ts/src/generated/akash/take/v1/service.grpc-js.ts @@ -0,0 +1,103 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/take/v1/service.proto + +/* eslint-disable */ +import { + ChannelCredentials, + Client, + makeGenericClientConstructor, + Metadata, +} from "@grpc/grpc-js"; +import type { + CallOptions, + ClientOptions, + ClientUnaryCall, + handleUnaryCall, + ServiceError, + UntypedServiceImplementation, +} from "@grpc/grpc-js"; +import { MsgUpdateParams, MsgUpdateParamsResponse } from "./paramsmsg"; + +export const protobufPackage = "akash.take.v1"; + +/** Msg defines the market Msg service */ +export type MsgService = typeof MsgService; +export const MsgService = { + /** + * UpdateParams defines a governance operation for updating the x/market module + * parameters. The authority is hard-coded to the x/gov module account. + * + * Since: akash v1.0.0 + */ + updateParams: { + path: "/akash.take.v1.Msg/UpdateParams", + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgUpdateParams) => + Buffer.from(MsgUpdateParams.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgUpdateParams.decode(value), + responseSerialize: (value: MsgUpdateParamsResponse) => + Buffer.from(MsgUpdateParamsResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgUpdateParamsResponse.decode(value), + }, +} as const; + +export interface MsgServer extends UntypedServiceImplementation { + /** + * UpdateParams defines a governance operation for updating the x/market module + * parameters. The authority is hard-coded to the x/gov module account. 
+ * + * Since: akash v1.0.0 + */ + updateParams: handleUnaryCall; +} + +export interface MsgClient extends Client { + /** + * UpdateParams defines a governance operation for updating the x/market module + * parameters. The authority is hard-coded to the x/gov module account. + * + * Since: akash v1.0.0 + */ + updateParams( + request: MsgUpdateParams, + callback: ( + error: ServiceError | null, + response: MsgUpdateParamsResponse, + ) => void, + ): ClientUnaryCall; + updateParams( + request: MsgUpdateParams, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgUpdateParamsResponse, + ) => void, + ): ClientUnaryCall; + updateParams( + request: MsgUpdateParams, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgUpdateParamsResponse, + ) => void, + ): ClientUnaryCall; +} + +export const MsgClient = makeGenericClientConstructor( + MsgService, + "akash.take.v1.Msg", +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial, + ): MsgClient; + service: typeof MsgService; + serviceName: string; +}; diff --git a/ts/src/generated/akash/take/v1/service.ts b/ts/src/generated/akash/take/v1/service.ts new file mode 100644 index 00000000..7f14fbcb --- /dev/null +++ b/ts/src/generated/akash/take/v1/service.ts @@ -0,0 +1,46 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: akash/take/v1/service.proto + +/* eslint-disable */ +import { BinaryReader } from "@bufbuild/protobuf/wire"; +import { MsgUpdateParams, MsgUpdateParamsResponse } from "./paramsmsg"; + +/** Msg defines the market Msg service */ +export interface Msg { + /** + * UpdateParams defines a governance operation for updating the x/market module + * parameters. The authority is hard-coded to the x/gov module account. 
+ * + * Since: akash v1.0.0 + */ + UpdateParams(request: MsgUpdateParams): Promise; +} + +export const MsgServiceName = "akash.take.v1.Msg"; +export class MsgClientImpl implements Msg { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || MsgServiceName; + this.rpc = rpc; + this.UpdateParams = this.UpdateParams.bind(this); + } + UpdateParams(request: MsgUpdateParams): Promise { + const data = MsgUpdateParams.encode(request).finish(); + const promise = this.rpc.request(this.service, "UpdateParams", data); + return promise.then((data) => + MsgUpdateParamsResponse.decode(new BinaryReader(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} diff --git a/ts/src/generated/akash/take/v1beta3/genesis.ts b/ts/src/generated/akash/take/v1beta3/genesis.ts deleted file mode 100644 index 433e36aa..00000000 --- a/ts/src/generated/akash/take/v1beta3/genesis.ts +++ /dev/null @@ -1,112 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; -import { Params } from "./params"; - -/** GenesisState stores slice of genesis deployment instance */ -export interface GenesisState { - $type: "akash.take.v1beta3.GenesisState"; - params: Params | undefined; -} - -function createBaseGenesisState(): GenesisState { - return { $type: "akash.take.v1beta3.GenesisState", params: undefined }; -} - -export const GenesisState = { - $type: "akash.take.v1beta3.GenesisState" as const, - - encode( - message: GenesisState, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.params !== undefined) { - Params.encode(message.params, writer.uint32(10).fork()).ldelim(); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseGenesisState(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.params = Params.decode(reader, reader.uint32()); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): GenesisState { - return { - $type: GenesisState.$type, - params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, - }; - }, - - toJSON(message: GenesisState): unknown { - const obj: any = {}; - if (message.params !== undefined) { - obj.params = Params.toJSON(message.params); - } - return obj; - }, - - create(base?: DeepPartial): GenesisState { - return GenesisState.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): GenesisState { - const message = createBaseGenesisState(); - message.params = - object.params !== undefined && object.params !== null - ? Params.fromPartial(object.params) - : undefined; - return message; - }, -}; - -messageTypeRegistry.set(GenesisState.$type, GenesisState); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/take/v1beta3/params.ts b/ts/src/generated/akash/take/v1beta3/params.ts deleted file mode 100644 index bb277ea4..00000000 --- a/ts/src/generated/akash/take/v1beta3/params.ts +++ /dev/null @@ -1,228 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../typeRegistry"; - -/** DenomTakeRate describes take rate for specified denom */ -export interface DenomTakeRate { - $type: "akash.take.v1beta3.DenomTakeRate"; - denom: string; - rate: number; -} - -/** Params defines the parameters for the x/take package */ -export interface Params { - $type: "akash.take.v1beta3.Params"; - /** denom -> % take rate */ - denomTakeRates: DenomTakeRate[]; - defaultTakeRate: number; -} - -function createBaseDenomTakeRate(): DenomTakeRate { - return { $type: "akash.take.v1beta3.DenomTakeRate", denom: "", rate: 0 }; -} - -export const DenomTakeRate = { - $type: "akash.take.v1beta3.DenomTakeRate" as const, - - encode( - message: DenomTakeRate, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (message.denom !== "") { - writer.uint32(10).string(message.denom); - } - if (message.rate !== 0) { - writer.uint32(16).uint32(message.rate); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): DenomTakeRate { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseDenomTakeRate(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.denom = reader.string(); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.rate = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): DenomTakeRate { - return { - $type: DenomTakeRate.$type, - denom: isSet(object.denom) ? globalThis.String(object.denom) : "", - rate: isSet(object.rate) ? globalThis.Number(object.rate) : 0, - }; - }, - - toJSON(message: DenomTakeRate): unknown { - const obj: any = {}; - if (message.denom !== "") { - obj.denom = message.denom; - } - if (message.rate !== 0) { - obj.rate = Math.round(message.rate); - } - return obj; - }, - - create(base?: DeepPartial): DenomTakeRate { - return DenomTakeRate.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): DenomTakeRate { - const message = createBaseDenomTakeRate(); - message.denom = object.denom ?? ""; - message.rate = object.rate ?? 
0; - return message; - }, -}; - -messageTypeRegistry.set(DenomTakeRate.$type, DenomTakeRate); - -function createBaseParams(): Params { - return { - $type: "akash.take.v1beta3.Params", - denomTakeRates: [], - defaultTakeRate: 0, - }; -} - -export const Params = { - $type: "akash.take.v1beta3.Params" as const, - - encode( - message: Params, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - for (const v of message.denomTakeRates) { - DenomTakeRate.encode(v!, writer.uint32(10).fork()).ldelim(); - } - if (message.defaultTakeRate !== 0) { - writer.uint32(16).uint32(message.defaultTakeRate); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Params { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseParams(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 10) { - break; - } - - message.denomTakeRates.push( - DenomTakeRate.decode(reader, reader.uint32()), - ); - continue; - case 2: - if (tag !== 16) { - break; - } - - message.defaultTakeRate = reader.uint32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Params { - return { - $type: Params.$type, - denomTakeRates: globalThis.Array.isArray(object?.denomTakeRates) - ? object.denomTakeRates.map((e: any) => DenomTakeRate.fromJSON(e)) - : [], - defaultTakeRate: isSet(object.defaultTakeRate) - ? globalThis.Number(object.defaultTakeRate) - : 0, - }; - }, - - toJSON(message: Params): unknown { - const obj: any = {}; - if (message.denomTakeRates?.length) { - obj.denomTakeRates = message.denomTakeRates.map((e) => - DenomTakeRate.toJSON(e), - ); - } - if (message.defaultTakeRate !== 0) { - obj.defaultTakeRate = Math.round(message.defaultTakeRate); - } - return obj; - }, - - create(base?: DeepPartial): Params { - return Params.fromPartial(base ?? {}); - }, - fromPartial(object: DeepPartial): Params { - const message = createBaseParams(); - message.denomTakeRates = - object.denomTakeRates?.map((e) => DenomTakeRate.fromPartial(e)) || []; - message.defaultTakeRate = object.defaultTakeRate ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(Params.$type, Params); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? 
{ [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/akash/take/v1beta3/query.ts b/ts/src/generated/akash/take/v1beta3/query.ts deleted file mode 100644 index 6950f5af..00000000 --- a/ts/src/generated/akash/take/v1beta3/query.ts +++ /dev/null @@ -1,22 +0,0 @@ -/* eslint-disable */ - -/** Query defines the gRPC querier service */ -export interface Query {} - -export const QueryServiceName = "akash.take.v1beta3.Query"; -export class QueryClientImpl implements Query { - private readonly rpc: Rpc; - private readonly service: string; - constructor(rpc: Rpc, opts?: { service?: string }) { - this.service = opts?.service || QueryServiceName; - this.rpc = rpc; - } -} - -interface Rpc { - request( - service: string, - method: string, - data: Uint8Array, - ): Promise; -} diff --git a/ts/src/generated/amino/amino.ts b/ts/src/generated/amino/amino.ts new file mode 100644 index 00000000..5e18b1b5 --- /dev/null +++ b/ts/src/generated/amino/amino.ts @@ -0,0 +1,9 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: amino/amino.proto + +/* eslint-disable */ + +export {}; diff --git a/ts/src/generated/cosmos/base/query/v1beta1/pagination.ts b/ts/src/generated/cosmos/base/query/v1beta1/pagination.ts index d89436f1..68b86c92 100644 --- a/ts/src/generated/cosmos/base/query/v1beta1/pagination.ts +++ b/ts/src/generated/cosmos/base/query/v1beta1/pagination.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: cosmos/base/query/v1beta1/pagination.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../../typeRegistry"; /** @@ -59,7 +65,8 @@ export interface PageResponse { $type: "cosmos.base.query.v1beta1.PageResponse"; /** * next_key is the key to be passed to PageRequest.key to - * query the next page most efficiently + * query the next page most efficiently. It will be empty if + * there are no more results. */ nextKey: Uint8Array; /** @@ -80,21 +87,24 @@ function createBasePageRequest(): PageRequest { }; } -export const PageRequest = { +export const PageRequest: MessageFns< + PageRequest, + "cosmos.base.query.v1beta1.PageRequest" +> = { $type: "cosmos.base.query.v1beta1.PageRequest" as const, encode( message: PageRequest, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.key.length !== 0) { writer.uint32(10).bytes(message.key); } if (!message.offset.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.offset); + writer.uint32(16).uint64(message.offset.toString()); } if (!message.limit.equals(Long.UZERO)) { - writer.uint32(24).uint64(message.limit); + writer.uint32(24).uint64(message.limit.toString()); } if (message.countTotal !== false) { writer.uint32(32).bool(message.countTotal); @@ -105,9 +115,9 @@ export const PageRequest = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): PageRequest { + decode(input: BinaryReader | Uint8Array, length?: number): PageRequest { const reader = - input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBasePageRequest(); while (reader.pos < end) { @@ -125,14 +135,14 @@ export const PageRequest = { break; } - message.offset = reader.uint64() as Long; + message.offset = Long.fromString(reader.uint64().toString(), true); continue; case 3: if (tag !== 24) { break; } - message.limit = reader.uint64() as Long; + message.limit = Long.fromString(reader.uint64().toString(), true); continue; case 4: if (tag !== 32) { @@ -152,7 +162,7 @@ export const PageRequest = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -222,25 +232,28 @@ function createBasePageResponse(): PageResponse { }; } -export const PageResponse = { +export const PageResponse: MessageFns< + PageResponse, + "cosmos.base.query.v1beta1.PageResponse" +> = { $type: "cosmos.base.query.v1beta1.PageResponse" as const, encode( message: PageResponse, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.nextKey.length !== 0) { writer.uint32(10).bytes(message.nextKey); } if (!message.total.equals(Long.UZERO)) { - writer.uint32(16).uint64(message.total); + writer.uint32(16).uint64(message.total.toString()); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): PageResponse { + decode(input: BinaryReader | Uint8Array, length?: number): PageResponse { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBasePageResponse(); while (reader.pos < end) { @@ -258,13 +271,13 @@ export const PageResponse = { break; } - message.total = reader.uint64() as Long; + message.total = Long.fromString(reader.uint64().toString(), true); continue; } if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -352,11 +365,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/cosmos/base/v1beta1/coin.original.ts b/ts/src/generated/cosmos/base/v1beta1/coin.original.ts index 845af104..e022cb50 100644 --- a/ts/src/generated/cosmos/base/v1beta1/coin.original.ts +++ b/ts/src/generated/cosmos/base/v1beta1/coin.original.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: cosmos/base/v1beta1/coin.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../../typeRegistry"; /** @@ -43,10 +49,13 @@ function createBaseCoin(): Coin { return { $type: "cosmos.base.v1beta1.Coin", denom: "", amount: "" }; } -export const Coin = { +export const Coin: MessageFns = { $type: "cosmos.base.v1beta1.Coin" as const, - encode(message: Coin, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + encode( + message: Coin, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.denom !== "") { writer.uint32(10).string(message.denom); } @@ -56,9 +65,9 @@ export const Coin = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Coin { + decode(input: BinaryReader | Uint8Array, length?: number): Coin { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseCoin(); while (reader.pos < end) { @@ -82,7 +91,7 @@ export const Coin = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -123,13 +132,13 @@ function createBaseDecCoin(): DecCoin { return { $type: "cosmos.base.v1beta1.DecCoin", denom: "", amount: "" }; } -export const DecCoin = { +export const DecCoin: MessageFns = { $type: "cosmos.base.v1beta1.DecCoin" as const, encode( message: DecCoin, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.denom !== "") { writer.uint32(10).string(message.denom); } @@ -139,9 +148,9 @@ export const DecCoin = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): DecCoin { + decode(input: BinaryReader | Uint8Array, length?: number): DecCoin { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseDecCoin(); while (reader.pos < end) { @@ -165,7 +174,7 @@ export const DecCoin = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -206,22 +215,22 @@ function createBaseIntProto(): IntProto { return { $type: "cosmos.base.v1beta1.IntProto", int: "" }; } -export const IntProto = { +export const IntProto: MessageFns = { $type: "cosmos.base.v1beta1.IntProto" as const, encode( message: IntProto, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.int !== "") { writer.uint32(10).string(message.int); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): IntProto { + decode(input: BinaryReader | Uint8Array, length?: number): IntProto { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseIntProto(); while (reader.pos < end) { @@ -238,7 +247,7 @@ export const IntProto = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -274,22 +283,22 @@ function createBaseDecProto(): DecProto { return { $type: "cosmos.base.v1beta1.DecProto", dec: "" }; } -export const DecProto = { +export const DecProto: MessageFns = { $type: "cosmos.base.v1beta1.DecProto" as const, encode( message: DecProto, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.dec !== "") { writer.uint32(10).string(message.dec); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): DecProto { + decode(input: BinaryReader | Uint8Array, length?: number): DecProto { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseDecProto(); while (reader.pos < end) { @@ -306,7 +315,7 @@ export const DecProto = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -359,11 +368,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/cosmos/base/v1beta1/coin.ts b/ts/src/generated/cosmos/base/v1beta1/coin.ts index ca0fdc37..9dd5c450 100644 --- a/ts/src/generated/cosmos/base/v1beta1/coin.ts +++ b/ts/src/generated/cosmos/base/v1beta1/coin.ts @@ -1 +1 @@ -export * from "../../../../patch/cosmos/base/v1beta1/coin"; +export * from "./coin.original"; diff --git a/ts/src/generated/cosmos/msg/v1/msg.ts b/ts/src/generated/cosmos/msg/v1/msg.ts new file mode 100644 index 00000000..ce964fc6 --- /dev/null +++ b/ts/src/generated/cosmos/msg/v1/msg.ts @@ -0,0 +1,9 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: cosmos/msg/v1/msg.proto + +/* eslint-disable */ + +export {}; diff --git a/ts/src/generated/cosmos_proto/cosmos.ts b/ts/src/generated/cosmos_proto/cosmos.ts index c71cf731..adb656d4 100644 --- a/ts/src/generated/cosmos_proto/cosmos.ts +++ b/ts/src/generated/cosmos_proto/cosmos.ts @@ -1,3 +1,354 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
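// --- editorial usage sketch (not part of the generated output) ---------------
// The coin.ts change above replaces the patched implementation with a plain
// re-export of coin.original.ts, so callers keep the same import path while
// the codec now runs on BinaryWriter/BinaryReader. Import path is assumed.
import { Coin } from "./coin";

const coin = Coin.fromPartial({ denom: "uakt", amount: "1000000" });

// Round trip through the regenerated codec.
const wire = Coin.encode(coin).finish();
const decoded = Coin.decode(wire);

console.log(Coin.toJSON(decoded)); // -> { denom: "uakt", amount: "1000000" }
console.log(decoded.$type);        // "cosmos.base.v1beta1.Coin"
// -----------------------------------------------------------------------------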
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: cosmos_proto/cosmos.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; +import Long from "long"; +import { messageTypeRegistry } from "../typeRegistry"; + +export enum ScalarType { + SCALAR_TYPE_UNSPECIFIED = 0, + SCALAR_TYPE_STRING = 1, + SCALAR_TYPE_BYTES = 2, + UNRECOGNIZED = -1, +} + +export function scalarTypeFromJSON(object: any): ScalarType { + switch (object) { + case 0: + case "SCALAR_TYPE_UNSPECIFIED": + return ScalarType.SCALAR_TYPE_UNSPECIFIED; + case 1: + case "SCALAR_TYPE_STRING": + return ScalarType.SCALAR_TYPE_STRING; + case 2: + case "SCALAR_TYPE_BYTES": + return ScalarType.SCALAR_TYPE_BYTES; + case -1: + case "UNRECOGNIZED": + default: + return ScalarType.UNRECOGNIZED; + } +} + +export function scalarTypeToJSON(object: ScalarType): string { + switch (object) { + case ScalarType.SCALAR_TYPE_UNSPECIFIED: + return "SCALAR_TYPE_UNSPECIFIED"; + case ScalarType.SCALAR_TYPE_STRING: + return "SCALAR_TYPE_STRING"; + case ScalarType.SCALAR_TYPE_BYTES: + return "SCALAR_TYPE_BYTES"; + case ScalarType.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +/** + * InterfaceDescriptor describes an interface type to be used with + * accepts_interface and implements_interface and declared by declare_interface. + */ +export interface InterfaceDescriptor { + $type: "cosmos_proto.InterfaceDescriptor"; + /** + * name is the name of the interface. It should be a short-name (without + * a period) such that the fully qualified name of the interface will be + * package.name, ex. for the package a.b and interface named C, the + * fully-qualified name will be a.b.C. + */ + name: string; + /** + * description is a human-readable description of the interface and its + * purpose. + */ + description: string; +} + +/** + * ScalarDescriptor describes an scalar type to be used with + * the scalar field option and declared by declare_scalar. + * Scalars extend simple protobuf built-in types with additional + * syntax and semantics, for instance to represent big integers. + * Scalars should ideally define an encoding such that there is only one + * valid syntactical representation for a given semantic meaning, + * i.e. the encoding should be deterministic. + */ +export interface ScalarDescriptor { + $type: "cosmos_proto.ScalarDescriptor"; + /** + * name is the name of the scalar. It should be a short-name (without + * a period) such that the fully qualified name of the scalar will be + * package.name, ex. for the package a.b and scalar named C, the + * fully-qualified name will be a.b.C. + */ + name: string; + /** + * description is a human-readable description of the scalar and its + * encoding format. For instance a big integer or decimal scalar should + * specify precisely the expected encoding format. + */ + description: string; + /** + * field_type is the type of field with which this scalar can be used. + * Scalars can be used with one and only one type of field so that + * encoding standards and simple and clear. Currently only string and + * bytes fields are supported for scalars. 
+ */ + fieldType: ScalarType[]; +} + +function createBaseInterfaceDescriptor(): InterfaceDescriptor { + return { + $type: "cosmos_proto.InterfaceDescriptor", + name: "", + description: "", + }; +} + +export const InterfaceDescriptor: MessageFns< + InterfaceDescriptor, + "cosmos_proto.InterfaceDescriptor" +> = { + $type: "cosmos_proto.InterfaceDescriptor" as const, + + encode( + message: InterfaceDescriptor, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.description !== "") { + writer.uint32(18).string(message.description); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): InterfaceDescriptor { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseInterfaceDescriptor(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.description = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): InterfaceDescriptor { + return { + $type: InterfaceDescriptor.$type, + name: isSet(object.name) ? globalThis.String(object.name) : "", + description: isSet(object.description) + ? globalThis.String(object.description) + : "", + }; + }, + + toJSON(message: InterfaceDescriptor): unknown { + const obj: any = {}; + if (message.name !== "") { + obj.name = message.name; + } + if (message.description !== "") { + obj.description = message.description; + } + return obj; + }, + + create(base?: DeepPartial): InterfaceDescriptor { + return InterfaceDescriptor.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): InterfaceDescriptor { + const message = createBaseInterfaceDescriptor(); + message.name = object.name ?? ""; + message.description = object.description ?? ""; + return message; + }, +}; + +messageTypeRegistry.set(InterfaceDescriptor.$type, InterfaceDescriptor); + +function createBaseScalarDescriptor(): ScalarDescriptor { + return { + $type: "cosmos_proto.ScalarDescriptor", + name: "", + description: "", + fieldType: [], + }; +} + +export const ScalarDescriptor: MessageFns< + ScalarDescriptor, + "cosmos_proto.ScalarDescriptor" +> = { + $type: "cosmos_proto.ScalarDescriptor" as const, + + encode( + message: ScalarDescriptor, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.name !== "") { + writer.uint32(10).string(message.name); + } + if (message.description !== "") { + writer.uint32(18).string(message.description); + } + writer.uint32(26).fork(); + for (const v of message.fieldType) { + writer.int32(v); + } + writer.join(); + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): ScalarDescriptor { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseScalarDescriptor(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.description = reader.string(); + continue; + case 3: + if (tag === 24) { + message.fieldType.push(reader.int32() as any); + + continue; + } + + if (tag === 26) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.fieldType.push(reader.int32() as any); + } + + continue; + } + + break; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): ScalarDescriptor { + return { + $type: ScalarDescriptor.$type, + name: isSet(object.name) ? globalThis.String(object.name) : "", + description: isSet(object.description) + ? globalThis.String(object.description) + : "", + fieldType: globalThis.Array.isArray(object?.fieldType) + ? object.fieldType.map((e: any) => scalarTypeFromJSON(e)) + : [], + }; + }, + + toJSON(message: ScalarDescriptor): unknown { + const obj: any = {}; + if (message.name !== "") { + obj.name = message.name; + } + if (message.description !== "") { + obj.description = message.description; + } + if (message.fieldType?.length) { + obj.fieldType = message.fieldType.map((e) => scalarTypeToJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): ScalarDescriptor { + return ScalarDescriptor.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): ScalarDescriptor { + const message = createBaseScalarDescriptor(); + message.name = object.name ?? ""; + message.description = object.description ?? ""; + message.fieldType = object.fieldType?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(ScalarDescriptor.$type, ScalarDescriptor); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} -export {}; +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/gogoproto/gogo.ts b/ts/src/generated/gogoproto/gogo.ts index c71cf731..8861e44f 100644 --- a/ts/src/generated/gogoproto/gogo.ts +++ b/ts/src/generated/gogoproto/gogo.ts @@ -1,3 +1,9 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: gogoproto/gogo.proto + /* eslint-disable */ export {}; diff --git a/ts/src/generated/google/api/annotations.ts b/ts/src/generated/google/api/annotations.ts index c71cf731..5ac777f6 100644 --- a/ts/src/generated/google/api/annotations.ts +++ b/ts/src/generated/google/api/annotations.ts @@ -1,3 +1,9 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
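// --- editorial usage sketch (not part of the generated output) ---------------
// Shows the MessageFns shape introduced above in practice: each generated
// message exposes encode/decode/fromJSON/toJSON/create/fromPartial behind a
// common interface and registers itself in messageTypeRegistry. Import paths
// and the Map-like behaviour of the registry are assumptions here.
import { ScalarDescriptor, ScalarType } from "./cosmos_proto/cosmos";
import { messageTypeRegistry } from "./typeRegistry";

const desc = ScalarDescriptor.fromPartial({
  name: "cosmos.Dec",
  description: "fixed-point decimal encoded as a string",
  fieldType: [ScalarType.SCALAR_TYPE_STRING],
});

// Round trip through the packed repeated-enum encoding (fork()/join()).
const bytes = ScalarDescriptor.encode(desc).finish();
const back = ScalarDescriptor.decode(bytes);
console.log(ScalarDescriptor.toJSON(back).fieldType); // ["SCALAR_TYPE_STRING"]

// The registry maps $type strings back to the codec registered above, which
// is useful for generic, Any-style decoding.
const codec = messageTypeRegistry.get("cosmos_proto.ScalarDescriptor");
console.log(codec === ScalarDescriptor); // true, assuming a Map-like registry
// -----------------------------------------------------------------------------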
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: google/api/annotations.proto + /* eslint-disable */ export {}; diff --git a/ts/src/generated/google/api/http.ts b/ts/src/generated/google/api/http.ts index 61c762f9..93764a9c 100644 --- a/ts/src/generated/google/api/http.ts +++ b/ts/src/generated/google/api/http.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: google/api/http.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../typeRegistry"; /** @@ -17,7 +23,7 @@ export interface Http { */ rules: HttpRule[]; /** - * When set to true, URL path parmeters will be fully URI-decoded except in + * When set to true, URL path parameters will be fully URI-decoded except in * cases of single segment matches in reserved expansion, where "%2F" will be * left encoded. * @@ -28,90 +34,94 @@ export interface Http { } /** - * `HttpRule` defines the mapping of an RPC method to one or more HTTP - * REST API methods. The mapping specifies how different portions of the RPC - * request message are mapped to URL path, URL query parameters, and - * HTTP request body. The mapping is typically specified as an - * `google.api.http` annotation on the RPC method, - * see "google/api/annotations.proto" for details. - * - * The mapping consists of a field specifying the path template and - * method kind. The path template can refer to fields in the request - * message, as in the example below which describes a REST GET - * operation on a resource collection of messages: + * # gRPC Transcoding + * + * gRPC Transcoding is a feature for mapping between a gRPC method and one or + * more HTTP REST endpoints. It allows developers to build a single API service + * that supports both gRPC APIs and REST APIs. Many systems, including [Google + * APIs](https://github.com/googleapis/googleapis), + * [Cloud Endpoints](https://cloud.google.com/endpoints), [gRPC + * Gateway](https://github.com/grpc-ecosystem/grpc-gateway), + * and [Envoy](https://github.com/envoyproxy/envoy) proxy support this feature + * and use it for large scale production services. + * + * `HttpRule` defines the schema of the gRPC/REST mapping. The mapping specifies + * how different portions of the gRPC request message are mapped to the URL + * path, URL query parameters, and HTTP request body. It also controls how the + * gRPC response message is mapped to the HTTP response body. `HttpRule` is + * typically specified as an `google.api.http` annotation on the gRPC method. + * + * Each mapping specifies a URL path template and an HTTP method. The path + * template may refer to one or more fields in the gRPC request message, as long + * as each field is a non-repeated field with a primitive (non-message) type. + * The path template controls how fields of the request message are mapped to + * the URL path. + * + * Example: * * service Messaging { * rpc GetMessage(GetMessageRequest) returns (Message) { - * option (google.api.http).get = "/v1/messages/{message_id}/{sub.subfield}"; + * option (google.api.http) = { + * get: "/v1/{name=messages/*}" + * }; * } * } * message GetMessageRequest { - * message SubMessage { - * string subfield = 1; - * } - * string message_id = 1; // mapped to the URL - * SubMessage sub = 2; // `sub.subfield` is url-mapped + * string name = 1; // Mapped to URL path. 
* } * message Message { - * string text = 1; // content of the resource + * string text = 1; // The resource content. * } * - * The same http annotation can alternatively be expressed inside the - * `GRPC API Configuration` YAML file. - * - * http: - * rules: - * - selector: .Messaging.GetMessage - * get: /v1/messages/{message_id}/{sub.subfield} + * This enables an HTTP REST to gRPC mapping as below: * - * This definition enables an automatic, bidrectional mapping of HTTP - * JSON to RPC. Example: - * - * HTTP | RPC + * HTTP | gRPC * -----|----- - * `GET /v1/messages/123456/foo` | `GetMessage(message_id: "123456" sub: SubMessage(subfield: "foo"))` - * - * In general, not only fields but also field paths can be referenced - * from a path pattern. Fields mapped to the path pattern cannot be - * repeated and must have a primitive (non-message) type. + * `GET /v1/messages/123456` | `GetMessage(name: "messages/123456")` * - * Any fields in the request message which are not bound by the path - * pattern automatically become (optional) HTTP query - * parameters. Assume the following definition of the request message: + * Any fields in the request message which are not bound by the path template + * automatically become HTTP query parameters if there is no HTTP request body. + * For example: * * service Messaging { * rpc GetMessage(GetMessageRequest) returns (Message) { - * option (google.api.http).get = "/v1/messages/{message_id}"; + * option (google.api.http) = { + * get:"/v1/messages/{message_id}" + * }; * } * } * message GetMessageRequest { * message SubMessage { * string subfield = 1; * } - * string message_id = 1; // mapped to the URL - * int64 revision = 2; // becomes a parameter - * SubMessage sub = 3; // `sub.subfield` becomes a parameter + * string message_id = 1; // Mapped to URL path. + * int64 revision = 2; // Mapped to URL query parameter `revision`. + * SubMessage sub = 3; // Mapped to URL query parameter `sub.subfield`. * } * * This enables a HTTP JSON to RPC mapping as below: * - * HTTP | RPC + * HTTP | gRPC * -----|----- - * `GET /v1/messages/123456?revision=2&sub.subfield=foo` | `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: "foo"))` - * - * Note that fields which are mapped to HTTP parameters must have a - * primitive type or a repeated primitive type. Message types are not - * allowed. In the case of a repeated type, the parameter can be - * repeated in the URL, as in `...?param=A¶m=B`. - * - * For HTTP method kinds which allow a request body, the `body` field + * `GET /v1/messages/123456?revision=2&sub.subfield=foo` | + * `GetMessage(message_id: "123456" revision: 2 sub: SubMessage(subfield: + * "foo"))` + * + * Note that fields which are mapped to URL query parameters must have a + * primitive type or a repeated primitive type or a non-repeated message type. + * In the case of a repeated type, the parameter can be repeated in the URL + * as `...?param=A¶m=B`. In the case of a message type, each field of the + * message is mapped to a separate parameter, such as + * `...?foo.a=A&foo.b=B&foo.c=C`. + * + * For HTTP methods that allow a request body, the `body` field * specifies the mapping. 
Consider a REST update method on the * message resource collection: * * service Messaging { * rpc UpdateMessage(UpdateMessageRequest) returns (Message) { * option (google.api.http) = { - * put: "/v1/messages/{message_id}" + * patch: "/v1/messages/{message_id}" * body: "message" * }; * } @@ -125,9 +135,10 @@ export interface Http { * representation of the JSON in the request body is determined by * protos JSON encoding: * - * HTTP | RPC + * HTTP | gRPC * -----|----- - * `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" message { text: "Hi!" })` + * `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: + * "123456" message { text: "Hi!" })` * * The special name `*` can be used in the body mapping to define that * every field not bound by the path template should be mapped to the @@ -137,7 +148,7 @@ export interface Http { * service Messaging { * rpc UpdateMessage(Message) returns (Message) { * option (google.api.http) = { - * put: "/v1/messages/{message_id}" + * patch: "/v1/messages/{message_id}" * body: "*" * }; * } @@ -149,13 +160,14 @@ export interface Http { * * The following HTTP JSON to RPC mapping is enabled: * - * HTTP | RPC + * HTTP | gRPC * -----|----- - * `PUT /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: "123456" text: "Hi!")` + * `PATCH /v1/messages/123456 { "text": "Hi!" }` | `UpdateMessage(message_id: + * "123456" text: "Hi!")` * * Note that when using `*` in the body mapping, it is not possible to * have HTTP parameters, as all fields not bound by the path end in - * the body. This makes this option more rarely used in practice of + * the body. This makes this option more rarely used in practice when * defining REST APIs. The common usage of `*` is in custom methods * which don't use the URL at all for transferring data. * @@ -177,31 +189,34 @@ export interface Http { * string user_id = 2; * } * - * This enables the following two alternative HTTP JSON to RPC - * mappings: + * This enables the following two alternative HTTP JSON to RPC mappings: * - * HTTP | RPC + * HTTP | gRPC * -----|----- * `GET /v1/messages/123456` | `GetMessage(message_id: "123456")` - * `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: "123456")` - * - * # Rules for HTTP mapping - * - * The rules for mapping HTTP path, query parameters, and body fields - * to the request message are as follows: - * - * 1. The `body` field specifies either `*` or a field path, or is - * omitted. If omitted, it indicates there is no HTTP request body. - * 2. Leaf fields (recursive expansion of nested messages in the - * request) can be classified into three types: - * (a) Matched in the URL template. - * (b) Covered by body (if body is `*`, everything except (a) fields; - * else everything under the body field) - * (c) All other fields. - * 3. URL query parameters found in the HTTP request are mapped to (c) fields. - * 4. Any body sent with an HTTP request can contain only (b) fields. - * - * The syntax of the path template is as follows: + * `GET /v1/users/me/messages/123456` | `GetMessage(user_id: "me" message_id: + * "123456")` + * + * ## Rules for HTTP mapping + * + * 1. Leaf request fields (recursive expansion nested messages in the request + * message) are classified into three categories: + * - Fields referred by the path template. They are passed via the URL path. + * - Fields referred by the [HttpRule.body][google.api.HttpRule.body]. They + * are passed via the HTTP + * request body. 
+ * - All other fields are passed via the URL query parameters, and the + * parameter name is the field path in the request message. A repeated + * field can be represented as multiple query parameters under the same + * name. + * 2. If [HttpRule.body][google.api.HttpRule.body] is "*", there is no URL + * query parameter, all fields + * are passed via URL path and HTTP request body. + * 3. If [HttpRule.body][google.api.HttpRule.body] is omitted, there is no HTTP + * request body, all + * fields are passed via URL path and URL query parameters. + * + * ### Path template syntax * * Template = "/" Segments [ Verb ] ; * Segments = Segment { "/" Segment } ; @@ -210,52 +225,108 @@ export interface Http { * FieldPath = IDENT { "." IDENT } ; * Verb = ":" LITERAL ; * - * The syntax `*` matches a single path segment. The syntax `**` matches zero - * or more path segments, which must be the last part of the path except the - * `Verb`. The syntax `LITERAL` matches literal text in the path. + * The syntax `*` matches a single URL path segment. The syntax `**` matches + * zero or more URL path segments, which must be the last part of the URL path + * except the `Verb`. * * The syntax `Variable` matches part of the URL path as specified by its * template. A variable template must not contain other variables. If a variable * matches a single path segment, its template may be omitted, e.g. `{var}` * is equivalent to `{var=*}`. * + * The syntax `LITERAL` matches literal text in the URL path. If the `LITERAL` + * contains any reserved character, such characters should be percent-encoded + * before the matching. + * * If a variable contains exactly one path segment, such as `"{var}"` or - * `"{var=*}"`, when such a variable is expanded into a URL path, all characters - * except `[-_.~0-9a-zA-Z]` are percent-encoded. Such variables show up in the - * Discovery Document as `{var}`. - * - * If a variable contains one or more path segments, such as `"{var=foo/*}"` - * or `"{var=**}"`, when such a variable is expanded into a URL path, all - * characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. Such variables - * show up in the Discovery Document as `{+var}`. - * - * NOTE: While the single segment variable matches the semantics of - * [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 - * Simple String Expansion, the multi segment variable **does not** match - * RFC 6570 Reserved Expansion. The reason is that the Reserved Expansion + * `"{var=*}"`, when such a variable is expanded into a URL path on the client + * side, all characters except `[-_.~0-9a-zA-Z]` are percent-encoded. The + * server side does the reverse decoding. Such variables show up in the + * [Discovery + * Document](https://developers.google.com/discovery/v1/reference/apis) as + * `{var}`. + * + * If a variable contains multiple path segments, such as `"{var=foo/*}"` + * or `"{var=**}"`, when such a variable is expanded into a URL path on the + * client side, all characters except `[-_.~/0-9a-zA-Z]` are percent-encoded. + * The server side does the reverse decoding, except "%2F" and "%2f" are left + * unchanged. Such variables show up in the + * [Discovery + * Document](https://developers.google.com/discovery/v1/reference/apis) as + * `{+var}`. + * + * ## Using gRPC API Service Configuration + * + * gRPC API Service Configuration (service config) is a configuration language + * for configuring a gRPC service to become a user-facing product. 
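// --- editorial usage sketch (not part of the generated output) ---------------
// A minimal HttpRule value mirroring the transcoding example in the comment
// above: GET /v1/{name=messages/*} bound to a gRPC GetMessage method. The
// selector string is illustrative and the import path is assumed.
import { HttpRule } from "./google/api/http";

const rule = HttpRule.fromPartial({
  selector: "example.v1.Messaging.GetMessage",
  get: "/v1/{name=messages/*}",
});

// The generated codec round-trips the rule like any other message.
const ruleBytes = HttpRule.encode(rule).finish();
console.log(HttpRule.toJSON(HttpRule.decode(ruleBytes)));
// -> { selector: "example.v1.Messaging.GetMessage", get: "/v1/{name=messages/*}" }
// -----------------------------------------------------------------------------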
The + * service config is simply the YAML representation of the `google.api.Service` + * proto message. + * + * As an alternative to annotating your proto file, you can configure gRPC + * transcoding in your service config YAML files. You do this by specifying a + * `HttpRule` that maps the gRPC method to a REST endpoint, achieving the same + * effect as the proto annotation. This can be particularly useful if you + * have a proto that is reused in multiple services. Note that any transcoding + * specified in the service config will override any matching transcoding + * configuration in the proto. + * + * Example: + * + * http: + * rules: + * # Selects a gRPC method and applies HttpRule to it. + * - selector: example.v1.Messaging.GetMessage + * get: /v1/messages/{message_id}/{sub.subfield} + * + * ## Special notes + * + * When gRPC Transcoding is used to map a gRPC to JSON REST endpoints, the + * proto to JSON conversion must follow the [proto3 + * specification](https://developers.google.com/protocol-buffers/docs/proto3#json). + * + * While the single segment variable follows the semantics of + * [RFC 6570](https://tools.ietf.org/html/rfc6570) Section 3.2.2 Simple String + * Expansion, the multi segment variable **does not** follow RFC 6570 Section + * 3.2.3 Reserved Expansion. The reason is that the Reserved Expansion * does not expand special characters like `?` and `#`, which would lead - * to invalid URLs. + * to invalid URLs. As the result, gRPC Transcoding uses a custom encoding + * for multi segment variables. + * + * The path variables **must not** refer to any repeated or mapped field, + * because client libraries are not capable of handling such variable expansion. + * + * The path variables **must not** capture the leading "/" character. The reason + * is that the most common use case "{var}" does not capture the leading "/" + * character. For consistency, all path variables must share the same behavior. * - * NOTE: the field paths in variables and in the `body` must not refer to - * repeated fields or map fields. + * Repeated message fields must not be mapped to URL query parameters, because + * no client library can support such complicated mapping. + * + * If an API needs to use a JSON array for request or response body, it can map + * the request or response body to a repeated field. However, some gRPC + * Transcoding implementations may not support this feature. */ export interface HttpRule { $type: "google.api.HttpRule"; /** - * Selects methods to which this rule applies. + * Selects a method to which this rule applies. * - * Refer to [selector][google.api.DocumentationRule.selector] for syntax details. + * Refer to [selector][google.api.DocumentationRule.selector] for syntax + * details. */ selector: string; - /** Used for listing and getting information about resources. */ + /** + * Maps to HTTP GET. Used for listing and getting information about + * resources. + */ get?: string | undefined; - /** Used for updating a resource. */ + /** Maps to HTTP PUT. Used for replacing a resource. */ put?: string | undefined; - /** Used for creating a resource. */ + /** Maps to HTTP POST. Used for creating a resource or performing an action. */ post?: string | undefined; - /** Used for deleting a resource. */ + /** Maps to HTTP DELETE. Used for deleting a resource. */ delete?: string | undefined; - /** Used for updating a resource. */ + /** Maps to HTTP PATCH. Used for updating a resource. 
*/ patch?: string | undefined; /** * The custom pattern is used for specifying an HTTP method that is not @@ -265,16 +336,21 @@ export interface HttpRule { */ custom?: CustomHttpPattern | undefined; /** - * The name of the request field whose value is mapped to the HTTP body, or - * `*` for mapping all fields not captured by the path pattern to the HTTP - * body. NOTE: the referred field must not be a repeated field and must be - * present at the top-level of request message type. + * The name of the request field whose value is mapped to the HTTP request + * body, or `*` for mapping all request fields not captured by the path + * pattern to the HTTP body, or omitted for not having any HTTP request body. + * + * NOTE: the referred field must be present at the top-level of the request + * message type. */ body: string; /** * Optional. The name of the response field whose value is mapped to the HTTP - * body of response. Other response fields are ignored. When - * not set, the response message will be used as HTTP body of response. + * response body. When omitted, the entire response message will be used + * as the HTTP response body. + * + * NOTE: The referred field must be present at the top-level of the response + * message type. */ responseBody: string; /** @@ -302,12 +378,15 @@ function createBaseHttp(): Http { }; } -export const Http = { +export const Http: MessageFns = { $type: "google.api.Http" as const, - encode(message: Http, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + encode( + message: Http, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { for (const v of message.rules) { - HttpRule.encode(v!, writer.uint32(10).fork()).ldelim(); + HttpRule.encode(v!, writer.uint32(10).fork()).join(); } if (message.fullyDecodeReservedExpansion !== false) { writer.uint32(16).bool(message.fullyDecodeReservedExpansion); @@ -315,9 +394,9 @@ export const Http = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Http { + decode(input: BinaryReader | Uint8Array, length?: number): Http { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseHttp(); while (reader.pos < end) { @@ -341,7 +420,7 @@ export const Http = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -399,13 +478,13 @@ function createBaseHttpRule(): HttpRule { }; } -export const HttpRule = { +export const HttpRule: MessageFns = { $type: "google.api.HttpRule" as const, encode( message: HttpRule, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.selector !== "") { writer.uint32(10).string(message.selector); } @@ -425,10 +504,7 @@ export const HttpRule = { writer.uint32(50).string(message.patch); } if (message.custom !== undefined) { - CustomHttpPattern.encode( - message.custom, - writer.uint32(66).fork(), - ).ldelim(); + CustomHttpPattern.encode(message.custom, writer.uint32(66).fork()).join(); } if (message.body !== "") { writer.uint32(58).string(message.body); @@ -437,14 +513,14 @@ export const HttpRule = { writer.uint32(98).string(message.responseBody); } for (const v of message.additionalBindings) { - HttpRule.encode(v!, writer.uint32(90).fork()).ldelim(); + HttpRule.encode(v!, writer.uint32(90).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): HttpRule { + decode(input: BinaryReader | Uint8Array, length?: number): HttpRule { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseHttpRule(); while (reader.pos < end) { @@ -526,7 +602,7 @@ export const HttpRule = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -623,13 +699,16 @@ function createBaseCustomHttpPattern(): CustomHttpPattern { return { $type: "google.api.CustomHttpPattern", kind: "", path: "" }; } -export const CustomHttpPattern = { +export const CustomHttpPattern: MessageFns< + CustomHttpPattern, + "google.api.CustomHttpPattern" +> = { $type: "google.api.CustomHttpPattern" as const, encode( message: CustomHttpPattern, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.kind !== "") { writer.uint32(10).string(message.kind); } @@ -639,9 +718,9 @@ export const CustomHttpPattern = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): CustomHttpPattern { + decode(input: BinaryReader | Uint8Array, length?: number): CustomHttpPattern { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseCustomHttpPattern(); while (reader.pos < end) { @@ -665,7 +744,7 @@ export const CustomHttpPattern = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -723,11 +802,16 @@ type DeepPartial = T extends Builtin ? 
{ [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/google/protobuf/descriptor.ts b/ts/src/generated/google/protobuf/descriptor.ts index 2b7c4513..1a5d5631 100644 --- a/ts/src/generated/google/protobuf/descriptor.ts +++ b/ts/src/generated/google/protobuf/descriptor.ts @@ -1,8 +1,123 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: google/protobuf/descriptor.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../typeRegistry"; +/** The full set of known editions. */ +export enum Edition { + /** EDITION_UNKNOWN - A placeholder for an unknown edition value. */ + EDITION_UNKNOWN = 0, + /** + * EDITION_PROTO2 - Legacy syntax "editions". These pre-date editions, but behave much like + * distinct editions. These can't be used to specify the edition of proto + * files, but feature definitions must supply proto2/proto3 defaults for + * backwards compatibility. + */ + EDITION_PROTO2 = 998, + EDITION_PROTO3 = 999, + /** + * EDITION_2023 - Editions that have been released. The specific values are arbitrary and + * should not be depended on, but they will always be time-ordered for easy + * comparison. + */ + EDITION_2023 = 1000, + EDITION_2024 = 1001, + /** + * EDITION_1_TEST_ONLY - Placeholder editions for testing feature resolution. These should not be + * used or relyed on outside of tests. + */ + EDITION_1_TEST_ONLY = 1, + EDITION_2_TEST_ONLY = 2, + EDITION_99997_TEST_ONLY = 99997, + EDITION_99998_TEST_ONLY = 99998, + EDITION_99999_TEST_ONLY = 99999, + /** + * EDITION_MAX - Placeholder for specifying unbounded edition support. This should only + * ever be used by plugins that can expect to never require any changes to + * support a new edition. 
+ */ + EDITION_MAX = 2147483647, + UNRECOGNIZED = -1, +} + +export function editionFromJSON(object: any): Edition { + switch (object) { + case 0: + case "EDITION_UNKNOWN": + return Edition.EDITION_UNKNOWN; + case 998: + case "EDITION_PROTO2": + return Edition.EDITION_PROTO2; + case 999: + case "EDITION_PROTO3": + return Edition.EDITION_PROTO3; + case 1000: + case "EDITION_2023": + return Edition.EDITION_2023; + case 1001: + case "EDITION_2024": + return Edition.EDITION_2024; + case 1: + case "EDITION_1_TEST_ONLY": + return Edition.EDITION_1_TEST_ONLY; + case 2: + case "EDITION_2_TEST_ONLY": + return Edition.EDITION_2_TEST_ONLY; + case 99997: + case "EDITION_99997_TEST_ONLY": + return Edition.EDITION_99997_TEST_ONLY; + case 99998: + case "EDITION_99998_TEST_ONLY": + return Edition.EDITION_99998_TEST_ONLY; + case 99999: + case "EDITION_99999_TEST_ONLY": + return Edition.EDITION_99999_TEST_ONLY; + case 2147483647: + case "EDITION_MAX": + return Edition.EDITION_MAX; + case -1: + case "UNRECOGNIZED": + default: + return Edition.UNRECOGNIZED; + } +} + +export function editionToJSON(object: Edition): string { + switch (object) { + case Edition.EDITION_UNKNOWN: + return "EDITION_UNKNOWN"; + case Edition.EDITION_PROTO2: + return "EDITION_PROTO2"; + case Edition.EDITION_PROTO3: + return "EDITION_PROTO3"; + case Edition.EDITION_2023: + return "EDITION_2023"; + case Edition.EDITION_2024: + return "EDITION_2024"; + case Edition.EDITION_1_TEST_ONLY: + return "EDITION_1_TEST_ONLY"; + case Edition.EDITION_2_TEST_ONLY: + return "EDITION_2_TEST_ONLY"; + case Edition.EDITION_99997_TEST_ONLY: + return "EDITION_99997_TEST_ONLY"; + case Edition.EDITION_99998_TEST_ONLY: + return "EDITION_99998_TEST_ONLY"; + case Edition.EDITION_99999_TEST_ONLY: + return "EDITION_99999_TEST_ONLY"; + case Edition.EDITION_MAX: + return "EDITION_MAX"; + case Edition.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + /** * The protocol compiler can output a FileDescriptorSet containing the .proto * files it parses. @@ -43,9 +158,13 @@ export interface FileDescriptorProto { sourceCodeInfo?: SourceCodeInfo | undefined; /** * The syntax of the proto file. - * The supported values are "proto2" and "proto3". + * The supported values are "proto2", "proto3", and "editions". + * + * If `edition` is present, this value must be "editions". */ syntax?: string | undefined; + /** The edition of the proto file. */ + edition?: Edition | undefined; } /** Describes a message type. */ @@ -93,6 +212,87 @@ export interface ExtensionRangeOptions { $type: "google.protobuf.ExtensionRangeOptions"; /** The parser stores options it doesn't recognize here. See above. */ uninterpretedOption: UninterpretedOption[]; + /** + * For external users: DO NOT USE. We are in the process of open sourcing + * extension declaration and executing internal cleanups before it can be + * used externally. + */ + declaration: ExtensionRangeOptions_Declaration[]; + /** Any features defined in the specific edition. */ + features?: FeatureSet | undefined; + /** + * The verification state of the range. + * TODO: flip the default to DECLARATION once all empty ranges + * are marked as UNVERIFIED. + */ + verification?: ExtensionRangeOptions_VerificationState | undefined; +} + +/** The verification state of the extension range. */ +export enum ExtensionRangeOptions_VerificationState { + /** DECLARATION - All the extensions of the range must be declared. 
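// --- editorial usage sketch (not part of the generated output) ---------------
// The Edition enum and its editionFromJSON/editionToJSON helpers added above
// follow the same convention as every other generated enum: both the numeric
// wire value and the symbolic JSON name resolve to the enum member, and any
// unknown input falls back to UNRECOGNIZED. Import path is assumed.
import { Edition, editionFromJSON, editionToJSON } from "./google/protobuf/descriptor";

console.log(editionFromJSON("EDITION_2023") === Edition.EDITION_2023); // true
console.log(editionFromJSON(999) === Edition.EDITION_PROTO3);          // true
console.log(editionFromJSON("EDITION_2199") === Edition.UNRECOGNIZED); // true
console.log(editionToJSON(Edition.EDITION_PROTO2));                    // "EDITION_PROTO2"
// -----------------------------------------------------------------------------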
*/ + DECLARATION = 0, + UNVERIFIED = 1, + UNRECOGNIZED = -1, +} + +export function extensionRangeOptions_VerificationStateFromJSON( + object: any, +): ExtensionRangeOptions_VerificationState { + switch (object) { + case 0: + case "DECLARATION": + return ExtensionRangeOptions_VerificationState.DECLARATION; + case 1: + case "UNVERIFIED": + return ExtensionRangeOptions_VerificationState.UNVERIFIED; + case -1: + case "UNRECOGNIZED": + default: + return ExtensionRangeOptions_VerificationState.UNRECOGNIZED; + } +} + +export function extensionRangeOptions_VerificationStateToJSON( + object: ExtensionRangeOptions_VerificationState, +): string { + switch (object) { + case ExtensionRangeOptions_VerificationState.DECLARATION: + return "DECLARATION"; + case ExtensionRangeOptions_VerificationState.UNVERIFIED: + return "UNVERIFIED"; + case ExtensionRangeOptions_VerificationState.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +export interface ExtensionRangeOptions_Declaration { + $type: "google.protobuf.ExtensionRangeOptions.Declaration"; + /** The extension number declared within the extension range. */ + number?: number | undefined; + /** + * The fully-qualified name of the extension field. There must be a leading + * dot in front of the full name. + */ + fullName?: string | undefined; + /** + * The fully-qualified type name of the extension field. Unlike + * Metadata.type, Declaration.type must have a leading dot for messages + * and enums. + */ + type?: string | undefined; + /** + * If true, indicates that the number is reserved in the extension range, + * and any extension field with the number will fail to compile. Set this + * when a declared extension field is deleted. + */ + reserved?: boolean | undefined; + /** + * If true, indicates that the extension must be defined as repeated. + * Otherwise the extension must be defined as optional. + */ + repeated?: boolean | undefined; } /** Describes a field within a message. */ @@ -143,12 +343,12 @@ export interface FieldDescriptorProto { * If true, this is a proto3 "optional". When a proto3 field is optional, it * tracks presence regardless of field type. * - * When proto3_optional is true, this field must be belong to a oneof to - * signal to old proto3 clients that presence is tracked for this field. This - * oneof is known as a "synthetic" oneof, and this field must be its sole - * member (each proto3 optional field gets its own synthetic oneof). Synthetic - * oneofs exist in the descriptor only, and do not generate any API. Synthetic - * oneofs must be ordered after all "real" oneofs. + * When proto3_optional is true, this field must belong to a oneof to signal + * to old proto3 clients that presence is tracked for this field. This oneof + * is known as a "synthetic" oneof, and this field must be its sole member + * (each proto3 optional field gets its own synthetic oneof). Synthetic oneofs + * exist in the descriptor only, and do not generate any API. Synthetic oneofs + * must be ordered after all "real" oneofs. * * For message fields, proto3_optional doesn't create any semantic change, * since non-repeated message fields always track presence. However it still @@ -189,9 +389,10 @@ export enum FieldDescriptorProto_Type { TYPE_STRING = 9, /** * TYPE_GROUP - Tag-delimited aggregate. - * Group type is deprecated and not supported in proto3. However, Proto3 + * Group type is deprecated and not supported after google.protobuf. 
However, Proto3 * implementations should still be able to parse the group wire format and - * treat group fields as unknown fields. + * treat group fields as unknown fields. In Editions, the group wire format + * can be enabled via the `message_encoding` feature. */ TYPE_GROUP = 10, /** TYPE_MESSAGE - Length-delimited aggregate. */ @@ -323,8 +524,13 @@ export function fieldDescriptorProto_TypeToJSON( export enum FieldDescriptorProto_Label { /** LABEL_OPTIONAL - 0 is reserved for errors */ LABEL_OPTIONAL = 1, - LABEL_REQUIRED = 2, LABEL_REPEATED = 3, + /** + * LABEL_REQUIRED - The required label is only allowed in google.protobuf. In proto3 and Editions + * it's explicitly prohibited. In Editions, the `field_presence` feature + * can be used to get this behavior. + */ + LABEL_REQUIRED = 2, UNRECOGNIZED = -1, } @@ -335,12 +541,12 @@ export function fieldDescriptorProto_LabelFromJSON( case 1: case "LABEL_OPTIONAL": return FieldDescriptorProto_Label.LABEL_OPTIONAL; - case 2: - case "LABEL_REQUIRED": - return FieldDescriptorProto_Label.LABEL_REQUIRED; case 3: case "LABEL_REPEATED": return FieldDescriptorProto_Label.LABEL_REPEATED; + case 2: + case "LABEL_REQUIRED": + return FieldDescriptorProto_Label.LABEL_REQUIRED; case -1: case "UNRECOGNIZED": default: @@ -354,10 +560,10 @@ export function fieldDescriptorProto_LabelToJSON( switch (object) { case FieldDescriptorProto_Label.LABEL_OPTIONAL: return "LABEL_OPTIONAL"; - case FieldDescriptorProto_Label.LABEL_REQUIRED: - return "LABEL_REQUIRED"; case FieldDescriptorProto_Label.LABEL_REPEATED: return "LABEL_REPEATED"; + case FieldDescriptorProto_Label.LABEL_REQUIRED: + return "LABEL_REQUIRED"; case FieldDescriptorProto_Label.UNRECOGNIZED: default: return "UNRECOGNIZED"; @@ -504,7 +710,6 @@ export interface FileOptions { ccGenericServices?: boolean | undefined; javaGenericServices?: boolean | undefined; pyGenericServices?: boolean | undefined; - phpGenericServices?: boolean | undefined; /** * Is this file deprecated? * Depending on the target platform, this can emit Deprecated annotations @@ -554,6 +759,8 @@ export interface FileOptions { * determining the ruby package. */ rubyPackage?: string | undefined; + /** Any features defined in the specific edition. */ + features?: FeatureSet | undefined; /** * The parser stores options it doesn't recognize here. * See the documentation for the "Options" section above. @@ -668,6 +875,23 @@ export interface MessageOptions { * parser. */ mapEntry?: boolean | undefined; + /** + * Enable the legacy handling of JSON field name conflicts. This lowercases + * and strips underscored from the fields before comparison in proto3 only. + * The new behavior takes `json_name` into account and applies to proto2 as + * well. + * + * This should only be used as a temporary measure against broken builds due + * to the change in behavior for JSON field name conflicts. + * + * TODO This is legacy behavior we plan to remove once downstream + * teams have had time to migrate. + * + * @deprecated + */ + deprecatedLegacyJsonFieldConflicts?: boolean | undefined; + /** Any features defined in the specific edition. */ + features?: FeatureSet | undefined; /** The parser stores options it doesn't recognize here. See above. */ uninterpretedOption: UninterpretedOption[]; } @@ -677,8 +901,10 @@ export interface FieldOptions { /** * The ctype option instructs the C++ code generator to use a different * representation of the field than it normally would. See the specific - * options below. 
This option is not yet implemented in the open source - * release -- sorry, we'll try to include it in a future version! + * options below. This option is only implemented to support use of + * [ctype=CORD] and [ctype=STRING] (the default) on non-repeated fields of + * type "bytes" in the open source release -- sorry, we'll try to include + * other types in a future version! */ ctype?: FieldOptions_CType | undefined; /** @@ -686,7 +912,9 @@ export interface FieldOptions { * a more efficient representation on the wire. Rather than repeatedly * writing the tag and type for each element, the entire array is encoded as * a single length-delimited blob. In proto3, only explicit setting it to - * false will avoid using packed encoding. + * false will avoid using packed encoding. This option is prohibited in + * Editions, but the `repeated_field_encoding` feature can be used to control + * the behavior. */ packed?: boolean | undefined; /** @@ -721,22 +949,11 @@ export interface FieldOptions { * call from multiple threads concurrently, while non-const methods continue * to require exclusive access. * - * Note that implementations may choose not to check required fields within - * a lazy sub-message. That is, calling IsInitialized() on the outer message - * may return true even if the inner message has missing required fields. - * This is necessary because otherwise the inner message would have to be - * parsed in order to perform the check, defeating the purpose of lazy - * parsing. An implementation which chooses not to check required fields - * must be consistent about it. That is, for any particular sub-message, the - * implementation must either *always* check its required fields, or *never* - * check its required fields, regardless of whether or not the message has - * been parsed. - * - * As of 2021, lazy does no correctness checks on the byte stream during - * parsing. This may lead to crashes if and when an invalid byte stream is - * finally parsed upon access. - * - * TODO(b/211906113): Enable validation on lazy fields. + * Note that lazy message fields are still eagerly verified to check + * ill-formed wireformat or missing required fields. Calling IsInitialized() + * on the outer message would fail if the inner message has missing required + * fields. Failed verification would result in parsing failure (except when + * uninitialized messages are acceptable). */ lazy?: boolean | undefined; /** @@ -754,6 +971,16 @@ export interface FieldOptions { deprecated?: boolean | undefined; /** For Google-internal migration only. Do not use. */ weak?: boolean | undefined; + /** + * Indicate that the field value should not be printed out when using debug + * formats, e.g. when the field contains sensitive credentials. + */ + debugRedact?: boolean | undefined; + retention?: FieldOptions_OptionRetention | undefined; + targets: FieldOptions_OptionTargetType[]; + editionDefaults: FieldOptions_EditionDefault[]; + /** Any features defined in the specific edition. */ + features?: FeatureSet | undefined; /** The parser stores options it doesn't recognize here. See above. */ uninterpretedOption: UninterpretedOption[]; } @@ -761,6 +988,14 @@ export interface FieldOptions { export enum FieldOptions_CType { /** STRING - Default mode. */ STRING = 0, + /** + * CORD - The option [ctype=CORD] may be applied to a non-repeated field of type + * "bytes". It indicates that in C++, the data should be stored in a Cord + * instead of a string. For very large strings, this may reduce memory + * fragmentation. 
It may also allow better performance when parsing from a + * Cord, or when parsing with aliasing enabled, as the parsed Cord may then + * alias the original buffer. + */ CORD = 1, STRING_PIECE = 2, UNRECOGNIZED = -1, @@ -840,8 +1075,156 @@ export function fieldOptions_JSTypeToJSON(object: FieldOptions_JSType): string { } } +/** + * If set to RETENTION_SOURCE, the option will be omitted from the binary. + * Note: as of January 2023, support for this is in progress and does not yet + * have an effect (b/264593489). + */ +export enum FieldOptions_OptionRetention { + RETENTION_UNKNOWN = 0, + RETENTION_RUNTIME = 1, + RETENTION_SOURCE = 2, + UNRECOGNIZED = -1, +} + +export function fieldOptions_OptionRetentionFromJSON( + object: any, +): FieldOptions_OptionRetention { + switch (object) { + case 0: + case "RETENTION_UNKNOWN": + return FieldOptions_OptionRetention.RETENTION_UNKNOWN; + case 1: + case "RETENTION_RUNTIME": + return FieldOptions_OptionRetention.RETENTION_RUNTIME; + case 2: + case "RETENTION_SOURCE": + return FieldOptions_OptionRetention.RETENTION_SOURCE; + case -1: + case "UNRECOGNIZED": + default: + return FieldOptions_OptionRetention.UNRECOGNIZED; + } +} + +export function fieldOptions_OptionRetentionToJSON( + object: FieldOptions_OptionRetention, +): string { + switch (object) { + case FieldOptions_OptionRetention.RETENTION_UNKNOWN: + return "RETENTION_UNKNOWN"; + case FieldOptions_OptionRetention.RETENTION_RUNTIME: + return "RETENTION_RUNTIME"; + case FieldOptions_OptionRetention.RETENTION_SOURCE: + return "RETENTION_SOURCE"; + case FieldOptions_OptionRetention.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +/** + * This indicates the types of entities that the field may apply to when used + * as an option. If it is unset, then the field may be freely used as an + * option on any kind of entity. Note: as of January 2023, support for this is + * in progress and does not yet have an effect (b/264593489). 
+ */ +export enum FieldOptions_OptionTargetType { + TARGET_TYPE_UNKNOWN = 0, + TARGET_TYPE_FILE = 1, + TARGET_TYPE_EXTENSION_RANGE = 2, + TARGET_TYPE_MESSAGE = 3, + TARGET_TYPE_FIELD = 4, + TARGET_TYPE_ONEOF = 5, + TARGET_TYPE_ENUM = 6, + TARGET_TYPE_ENUM_ENTRY = 7, + TARGET_TYPE_SERVICE = 8, + TARGET_TYPE_METHOD = 9, + UNRECOGNIZED = -1, +} + +export function fieldOptions_OptionTargetTypeFromJSON( + object: any, +): FieldOptions_OptionTargetType { + switch (object) { + case 0: + case "TARGET_TYPE_UNKNOWN": + return FieldOptions_OptionTargetType.TARGET_TYPE_UNKNOWN; + case 1: + case "TARGET_TYPE_FILE": + return FieldOptions_OptionTargetType.TARGET_TYPE_FILE; + case 2: + case "TARGET_TYPE_EXTENSION_RANGE": + return FieldOptions_OptionTargetType.TARGET_TYPE_EXTENSION_RANGE; + case 3: + case "TARGET_TYPE_MESSAGE": + return FieldOptions_OptionTargetType.TARGET_TYPE_MESSAGE; + case 4: + case "TARGET_TYPE_FIELD": + return FieldOptions_OptionTargetType.TARGET_TYPE_FIELD; + case 5: + case "TARGET_TYPE_ONEOF": + return FieldOptions_OptionTargetType.TARGET_TYPE_ONEOF; + case 6: + case "TARGET_TYPE_ENUM": + return FieldOptions_OptionTargetType.TARGET_TYPE_ENUM; + case 7: + case "TARGET_TYPE_ENUM_ENTRY": + return FieldOptions_OptionTargetType.TARGET_TYPE_ENUM_ENTRY; + case 8: + case "TARGET_TYPE_SERVICE": + return FieldOptions_OptionTargetType.TARGET_TYPE_SERVICE; + case 9: + case "TARGET_TYPE_METHOD": + return FieldOptions_OptionTargetType.TARGET_TYPE_METHOD; + case -1: + case "UNRECOGNIZED": + default: + return FieldOptions_OptionTargetType.UNRECOGNIZED; + } +} + +export function fieldOptions_OptionTargetTypeToJSON( + object: FieldOptions_OptionTargetType, +): string { + switch (object) { + case FieldOptions_OptionTargetType.TARGET_TYPE_UNKNOWN: + return "TARGET_TYPE_UNKNOWN"; + case FieldOptions_OptionTargetType.TARGET_TYPE_FILE: + return "TARGET_TYPE_FILE"; + case FieldOptions_OptionTargetType.TARGET_TYPE_EXTENSION_RANGE: + return "TARGET_TYPE_EXTENSION_RANGE"; + case FieldOptions_OptionTargetType.TARGET_TYPE_MESSAGE: + return "TARGET_TYPE_MESSAGE"; + case FieldOptions_OptionTargetType.TARGET_TYPE_FIELD: + return "TARGET_TYPE_FIELD"; + case FieldOptions_OptionTargetType.TARGET_TYPE_ONEOF: + return "TARGET_TYPE_ONEOF"; + case FieldOptions_OptionTargetType.TARGET_TYPE_ENUM: + return "TARGET_TYPE_ENUM"; + case FieldOptions_OptionTargetType.TARGET_TYPE_ENUM_ENTRY: + return "TARGET_TYPE_ENUM_ENTRY"; + case FieldOptions_OptionTargetType.TARGET_TYPE_SERVICE: + return "TARGET_TYPE_SERVICE"; + case FieldOptions_OptionTargetType.TARGET_TYPE_METHOD: + return "TARGET_TYPE_METHOD"; + case FieldOptions_OptionTargetType.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +export interface FieldOptions_EditionDefault { + $type: "google.protobuf.FieldOptions.EditionDefault"; + edition?: Edition | undefined; + /** Textproto value. */ + value?: string | undefined; +} + export interface OneofOptions { $type: "google.protobuf.OneofOptions"; + /** Any features defined in the specific edition. */ + features?: FeatureSet | undefined; /** The parser stores options it doesn't recognize here. See above. */ uninterpretedOption: UninterpretedOption[]; } @@ -860,6 +1243,19 @@ export interface EnumOptions { * is a formalization for deprecating enums. */ deprecated?: boolean | undefined; + /** + * Enable the legacy handling of JSON field name conflicts. This lowercases + * and strips underscored from the fields before comparison in proto3 only. 
+ * The new behavior takes `json_name` into account and applies to proto2 as + * well. + * TODO Remove this legacy behavior once downstream teams have + * had time to migrate. + * + * @deprecated + */ + deprecatedLegacyJsonFieldConflicts?: boolean | undefined; + /** Any features defined in the specific edition. */ + features?: FeatureSet | undefined; /** The parser stores options it doesn't recognize here. See above. */ uninterpretedOption: UninterpretedOption[]; } @@ -873,12 +1269,22 @@ export interface EnumValueOptions { * this is a formalization for deprecating enum values. */ deprecated?: boolean | undefined; + /** Any features defined in the specific edition. */ + features?: FeatureSet | undefined; + /** + * Indicate that fields annotated with this enum value should not be printed + * out when using debug formats, e.g. when the field contains sensitive + * credentials. + */ + debugRedact?: boolean | undefined; /** The parser stores options it doesn't recognize here. See above. */ uninterpretedOption: UninterpretedOption[]; } export interface ServiceOptions { $type: "google.protobuf.ServiceOptions"; + /** Any features defined in the specific edition. */ + features?: FeatureSet | undefined; /** * Is this service deprecated? * Depending on the target platform, this can emit Deprecated annotations @@ -900,6 +1306,8 @@ export interface MethodOptions { */ deprecated?: boolean | undefined; idempotencyLevel?: MethodOptions_IdempotencyLevel | undefined; + /** Any features defined in the specific edition. */ + features?: FeatureSet | undefined; /** The parser stores options it doesn't recognize here. See above. */ uninterpretedOption: UninterpretedOption[]; } @@ -990,6 +1398,317 @@ export interface UninterpretedOption_NamePart { isExtension: boolean; } +/** + * TODO Enums in C++ gencode (and potentially other languages) are + * not well scoped. This means that each of the feature enums below can clash + * with each other. The short names we've chosen maximize call-site + * readability, but leave us very open to this scenario. A future feature will + * be designed and implemented to handle this, hopefully before we ever hit a + * conflict here. 
+ */ +export interface FeatureSet { + $type: "google.protobuf.FeatureSet"; + fieldPresence?: FeatureSet_FieldPresence | undefined; + enumType?: FeatureSet_EnumType | undefined; + repeatedFieldEncoding?: FeatureSet_RepeatedFieldEncoding | undefined; + utf8Validation?: FeatureSet_Utf8Validation | undefined; + messageEncoding?: FeatureSet_MessageEncoding | undefined; + jsonFormat?: FeatureSet_JsonFormat | undefined; +} + +export enum FeatureSet_FieldPresence { + FIELD_PRESENCE_UNKNOWN = 0, + EXPLICIT = 1, + IMPLICIT = 2, + LEGACY_REQUIRED = 3, + UNRECOGNIZED = -1, +} + +export function featureSet_FieldPresenceFromJSON( + object: any, +): FeatureSet_FieldPresence { + switch (object) { + case 0: + case "FIELD_PRESENCE_UNKNOWN": + return FeatureSet_FieldPresence.FIELD_PRESENCE_UNKNOWN; + case 1: + case "EXPLICIT": + return FeatureSet_FieldPresence.EXPLICIT; + case 2: + case "IMPLICIT": + return FeatureSet_FieldPresence.IMPLICIT; + case 3: + case "LEGACY_REQUIRED": + return FeatureSet_FieldPresence.LEGACY_REQUIRED; + case -1: + case "UNRECOGNIZED": + default: + return FeatureSet_FieldPresence.UNRECOGNIZED; + } +} + +export function featureSet_FieldPresenceToJSON( + object: FeatureSet_FieldPresence, +): string { + switch (object) { + case FeatureSet_FieldPresence.FIELD_PRESENCE_UNKNOWN: + return "FIELD_PRESENCE_UNKNOWN"; + case FeatureSet_FieldPresence.EXPLICIT: + return "EXPLICIT"; + case FeatureSet_FieldPresence.IMPLICIT: + return "IMPLICIT"; + case FeatureSet_FieldPresence.LEGACY_REQUIRED: + return "LEGACY_REQUIRED"; + case FeatureSet_FieldPresence.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +export enum FeatureSet_EnumType { + ENUM_TYPE_UNKNOWN = 0, + OPEN = 1, + CLOSED = 2, + UNRECOGNIZED = -1, +} + +export function featureSet_EnumTypeFromJSON(object: any): FeatureSet_EnumType { + switch (object) { + case 0: + case "ENUM_TYPE_UNKNOWN": + return FeatureSet_EnumType.ENUM_TYPE_UNKNOWN; + case 1: + case "OPEN": + return FeatureSet_EnumType.OPEN; + case 2: + case "CLOSED": + return FeatureSet_EnumType.CLOSED; + case -1: + case "UNRECOGNIZED": + default: + return FeatureSet_EnumType.UNRECOGNIZED; + } +} + +export function featureSet_EnumTypeToJSON(object: FeatureSet_EnumType): string { + switch (object) { + case FeatureSet_EnumType.ENUM_TYPE_UNKNOWN: + return "ENUM_TYPE_UNKNOWN"; + case FeatureSet_EnumType.OPEN: + return "OPEN"; + case FeatureSet_EnumType.CLOSED: + return "CLOSED"; + case FeatureSet_EnumType.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +export enum FeatureSet_RepeatedFieldEncoding { + REPEATED_FIELD_ENCODING_UNKNOWN = 0, + PACKED = 1, + EXPANDED = 2, + UNRECOGNIZED = -1, +} + +export function featureSet_RepeatedFieldEncodingFromJSON( + object: any, +): FeatureSet_RepeatedFieldEncoding { + switch (object) { + case 0: + case "REPEATED_FIELD_ENCODING_UNKNOWN": + return FeatureSet_RepeatedFieldEncoding.REPEATED_FIELD_ENCODING_UNKNOWN; + case 1: + case "PACKED": + return FeatureSet_RepeatedFieldEncoding.PACKED; + case 2: + case "EXPANDED": + return FeatureSet_RepeatedFieldEncoding.EXPANDED; + case -1: + case "UNRECOGNIZED": + default: + return FeatureSet_RepeatedFieldEncoding.UNRECOGNIZED; + } +} + +export function featureSet_RepeatedFieldEncodingToJSON( + object: FeatureSet_RepeatedFieldEncoding, +): string { + switch (object) { + case FeatureSet_RepeatedFieldEncoding.REPEATED_FIELD_ENCODING_UNKNOWN: + return "REPEATED_FIELD_ENCODING_UNKNOWN"; + case FeatureSet_RepeatedFieldEncoding.PACKED: + return "PACKED"; + case 
FeatureSet_RepeatedFieldEncoding.EXPANDED: + return "EXPANDED"; + case FeatureSet_RepeatedFieldEncoding.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +export enum FeatureSet_Utf8Validation { + UTF8_VALIDATION_UNKNOWN = 0, + VERIFY = 2, + NONE = 3, + UNRECOGNIZED = -1, +} + +export function featureSet_Utf8ValidationFromJSON( + object: any, +): FeatureSet_Utf8Validation { + switch (object) { + case 0: + case "UTF8_VALIDATION_UNKNOWN": + return FeatureSet_Utf8Validation.UTF8_VALIDATION_UNKNOWN; + case 2: + case "VERIFY": + return FeatureSet_Utf8Validation.VERIFY; + case 3: + case "NONE": + return FeatureSet_Utf8Validation.NONE; + case -1: + case "UNRECOGNIZED": + default: + return FeatureSet_Utf8Validation.UNRECOGNIZED; + } +} + +export function featureSet_Utf8ValidationToJSON( + object: FeatureSet_Utf8Validation, +): string { + switch (object) { + case FeatureSet_Utf8Validation.UTF8_VALIDATION_UNKNOWN: + return "UTF8_VALIDATION_UNKNOWN"; + case FeatureSet_Utf8Validation.VERIFY: + return "VERIFY"; + case FeatureSet_Utf8Validation.NONE: + return "NONE"; + case FeatureSet_Utf8Validation.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +export enum FeatureSet_MessageEncoding { + MESSAGE_ENCODING_UNKNOWN = 0, + LENGTH_PREFIXED = 1, + DELIMITED = 2, + UNRECOGNIZED = -1, +} + +export function featureSet_MessageEncodingFromJSON( + object: any, +): FeatureSet_MessageEncoding { + switch (object) { + case 0: + case "MESSAGE_ENCODING_UNKNOWN": + return FeatureSet_MessageEncoding.MESSAGE_ENCODING_UNKNOWN; + case 1: + case "LENGTH_PREFIXED": + return FeatureSet_MessageEncoding.LENGTH_PREFIXED; + case 2: + case "DELIMITED": + return FeatureSet_MessageEncoding.DELIMITED; + case -1: + case "UNRECOGNIZED": + default: + return FeatureSet_MessageEncoding.UNRECOGNIZED; + } +} + +export function featureSet_MessageEncodingToJSON( + object: FeatureSet_MessageEncoding, +): string { + switch (object) { + case FeatureSet_MessageEncoding.MESSAGE_ENCODING_UNKNOWN: + return "MESSAGE_ENCODING_UNKNOWN"; + case FeatureSet_MessageEncoding.LENGTH_PREFIXED: + return "LENGTH_PREFIXED"; + case FeatureSet_MessageEncoding.DELIMITED: + return "DELIMITED"; + case FeatureSet_MessageEncoding.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +export enum FeatureSet_JsonFormat { + JSON_FORMAT_UNKNOWN = 0, + ALLOW = 1, + LEGACY_BEST_EFFORT = 2, + UNRECOGNIZED = -1, +} + +export function featureSet_JsonFormatFromJSON( + object: any, +): FeatureSet_JsonFormat { + switch (object) { + case 0: + case "JSON_FORMAT_UNKNOWN": + return FeatureSet_JsonFormat.JSON_FORMAT_UNKNOWN; + case 1: + case "ALLOW": + return FeatureSet_JsonFormat.ALLOW; + case 2: + case "LEGACY_BEST_EFFORT": + return FeatureSet_JsonFormat.LEGACY_BEST_EFFORT; + case -1: + case "UNRECOGNIZED": + default: + return FeatureSet_JsonFormat.UNRECOGNIZED; + } +} + +export function featureSet_JsonFormatToJSON( + object: FeatureSet_JsonFormat, +): string { + switch (object) { + case FeatureSet_JsonFormat.JSON_FORMAT_UNKNOWN: + return "JSON_FORMAT_UNKNOWN"; + case FeatureSet_JsonFormat.ALLOW: + return "ALLOW"; + case FeatureSet_JsonFormat.LEGACY_BEST_EFFORT: + return "LEGACY_BEST_EFFORT"; + case FeatureSet_JsonFormat.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } +} + +/** + * A compiled specification for the defaults of a set of features. These + * messages are generated from FeatureSet extensions and can be used to seed + * feature resolution. 
The resolution with this object becomes a simple search + * for the closest matching edition, followed by proto merges. + */ +export interface FeatureSetDefaults { + $type: "google.protobuf.FeatureSetDefaults"; + defaults: FeatureSetDefaults_FeatureSetEditionDefault[]; + /** + * The minimum supported edition (inclusive) when this was constructed. + * Editions before this will not have defaults. + */ + minimumEdition?: Edition | undefined; + /** + * The maximum known edition (inclusive) when this was constructed. Editions + * after this will not have reliable defaults. + */ + maximumEdition?: Edition | undefined; +} + +/** + * A map from every known edition with a unique set of defaults to its + * defaults. Not all editions may be contained here. For a given edition, + * the defaults at the closest matching edition ordered at or before it should + * be used. This field must be in strict ascending order by edition. + */ +export interface FeatureSetDefaults_FeatureSetEditionDefault { + $type: "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault"; + edition?: Edition | undefined; + features?: FeatureSet | undefined; +} + /** * Encapsulates information about the original source file from which a * FileDescriptorProto was generated. @@ -1051,7 +1770,7 @@ export interface SourceCodeInfo_Location { * location. * * Each element is a field number or an index. They form a path from - * the root FileDescriptorProto to the place where the definition occurs. + * the root FileDescriptorProto to the place where the definition appears. * For example, this path: * [ 4, 3, 2, 7, 1 ] * refers to: @@ -1164,32 +1883,86 @@ export interface GeneratedCodeInfo_Annotation { begin?: number | undefined; /** * Identifies the ending offset in bytes in the generated code that - * relates to the identified offset. The end offset should be one past + * relates to the identified object. The end offset should be one past * the last relevant byte (so the length of the text = end - begin). */ end?: number | undefined; + semantic?: GeneratedCodeInfo_Annotation_Semantic | undefined; +} + +/** + * Represents the identified object's effect on the element in the original + * .proto file. + */ +export enum GeneratedCodeInfo_Annotation_Semantic { + /** NONE - There is no effect or the effect is indescribable. */ + NONE = 0, + /** SET - The element is set or otherwise mutated. */ + SET = 1, + /** ALIAS - An alias to the element is returned. 
*/ + ALIAS = 2, + UNRECOGNIZED = -1, +} + +export function generatedCodeInfo_Annotation_SemanticFromJSON( + object: any, +): GeneratedCodeInfo_Annotation_Semantic { + switch (object) { + case 0: + case "NONE": + return GeneratedCodeInfo_Annotation_Semantic.NONE; + case 1: + case "SET": + return GeneratedCodeInfo_Annotation_Semantic.SET; + case 2: + case "ALIAS": + return GeneratedCodeInfo_Annotation_Semantic.ALIAS; + case -1: + case "UNRECOGNIZED": + default: + return GeneratedCodeInfo_Annotation_Semantic.UNRECOGNIZED; + } +} + +export function generatedCodeInfo_Annotation_SemanticToJSON( + object: GeneratedCodeInfo_Annotation_Semantic, +): string { + switch (object) { + case GeneratedCodeInfo_Annotation_Semantic.NONE: + return "NONE"; + case GeneratedCodeInfo_Annotation_Semantic.SET: + return "SET"; + case GeneratedCodeInfo_Annotation_Semantic.ALIAS: + return "ALIAS"; + case GeneratedCodeInfo_Annotation_Semantic.UNRECOGNIZED: + default: + return "UNRECOGNIZED"; + } } function createBaseFileDescriptorSet(): FileDescriptorSet { return { $type: "google.protobuf.FileDescriptorSet", file: [] }; } -export const FileDescriptorSet = { +export const FileDescriptorSet: MessageFns< + FileDescriptorSet, + "google.protobuf.FileDescriptorSet" +> = { $type: "google.protobuf.FileDescriptorSet" as const, encode( message: FileDescriptorSet, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { for (const v of message.file) { - FileDescriptorProto.encode(v!, writer.uint32(10).fork()).ldelim(); + FileDescriptorProto.encode(v!, writer.uint32(10).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): FileDescriptorSet { + decode(input: BinaryReader | Uint8Array, length?: number): FileDescriptorSet { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseFileDescriptorSet(); while (reader.pos < end) { @@ -1208,7 +1981,7 @@ export const FileDescriptorSet = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -1258,16 +2031,20 @@ function createBaseFileDescriptorProto(): FileDescriptorProto { options: undefined, sourceCodeInfo: undefined, syntax: "", + edition: 0, }; } -export const FileDescriptorProto = { +export const FileDescriptorProto: MessageFns< + FileDescriptorProto, + "google.protobuf.FileDescriptorProto" +> = { $type: "google.protobuf.FileDescriptorProto" as const, encode( message: FileDescriptorProto, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.name !== undefined && message.name !== "") { writer.uint32(10).string(message.name); } @@ -1281,42 +2058,48 @@ export const FileDescriptorProto = { for (const v of message.publicDependency) { writer.int32(v); } - writer.ldelim(); + writer.join(); writer.uint32(90).fork(); for (const v of message.weakDependency) { writer.int32(v); } - writer.ldelim(); + writer.join(); for (const v of message.messageType) { - DescriptorProto.encode(v!, writer.uint32(34).fork()).ldelim(); + DescriptorProto.encode(v!, writer.uint32(34).fork()).join(); } for (const v of message.enumType) { - EnumDescriptorProto.encode(v!, writer.uint32(42).fork()).ldelim(); + EnumDescriptorProto.encode(v!, writer.uint32(42).fork()).join(); } for (const v of message.service) { - ServiceDescriptorProto.encode(v!, writer.uint32(50).fork()).ldelim(); + ServiceDescriptorProto.encode(v!, writer.uint32(50).fork()).join(); } for (const v of message.extension) { - FieldDescriptorProto.encode(v!, writer.uint32(58).fork()).ldelim(); + FieldDescriptorProto.encode(v!, writer.uint32(58).fork()).join(); } if (message.options !== undefined) { - FileOptions.encode(message.options, writer.uint32(66).fork()).ldelim(); + FileOptions.encode(message.options, writer.uint32(66).fork()).join(); } if (message.sourceCodeInfo !== undefined) { SourceCodeInfo.encode( message.sourceCodeInfo, writer.uint32(74).fork(), - ).ldelim(); + ).join(); } if (message.syntax !== undefined && message.syntax !== "") { writer.uint32(98).string(message.syntax); } + if (message.edition !== undefined && message.edition !== 0) { + writer.uint32(112).int32(message.edition); + } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): FileDescriptorProto { + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): FileDescriptorProto { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseFileDescriptorProto(); while (reader.pos < end) { @@ -1437,11 +2220,18 @@ export const FileDescriptorProto = { message.syntax = reader.string(); continue; + case 14: + if (tag !== 112) { + break; + } + + message.edition = reader.int32() as any; + continue; } if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -1479,6 +2269,7 @@ export const FileDescriptorProto = { ? SourceCodeInfo.fromJSON(object.sourceCodeInfo) : undefined, syntax: isSet(object.syntax) ? globalThis.String(object.syntax) : "", + edition: isSet(object.edition) ? 
editionFromJSON(object.edition) : 0, }; }, @@ -1526,6 +2317,9 @@ export const FileDescriptorProto = { if (message.syntax !== undefined && message.syntax !== "") { obj.syntax = message.syntax; } + if (message.edition !== undefined && message.edition !== 0) { + obj.edition = editionToJSON(message.edition); + } return obj; }, @@ -1556,6 +2350,7 @@ export const FileDescriptorProto = { ? SourceCodeInfo.fromPartial(object.sourceCodeInfo) : undefined; message.syntax = object.syntax ?? ""; + message.edition = object.edition ?? 0; return message; }, }; @@ -1578,45 +2373,45 @@ function createBaseDescriptorProto(): DescriptorProto { }; } -export const DescriptorProto = { +export const DescriptorProto: MessageFns< + DescriptorProto, + "google.protobuf.DescriptorProto" +> = { $type: "google.protobuf.DescriptorProto" as const, encode( message: DescriptorProto, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.name !== undefined && message.name !== "") { writer.uint32(10).string(message.name); } for (const v of message.field) { - FieldDescriptorProto.encode(v!, writer.uint32(18).fork()).ldelim(); + FieldDescriptorProto.encode(v!, writer.uint32(18).fork()).join(); } for (const v of message.extension) { - FieldDescriptorProto.encode(v!, writer.uint32(50).fork()).ldelim(); + FieldDescriptorProto.encode(v!, writer.uint32(50).fork()).join(); } for (const v of message.nestedType) { - DescriptorProto.encode(v!, writer.uint32(26).fork()).ldelim(); + DescriptorProto.encode(v!, writer.uint32(26).fork()).join(); } for (const v of message.enumType) { - EnumDescriptorProto.encode(v!, writer.uint32(34).fork()).ldelim(); + EnumDescriptorProto.encode(v!, writer.uint32(34).fork()).join(); } for (const v of message.extensionRange) { DescriptorProto_ExtensionRange.encode( v!, writer.uint32(42).fork(), - ).ldelim(); + ).join(); } for (const v of message.oneofDecl) { - OneofDescriptorProto.encode(v!, writer.uint32(66).fork()).ldelim(); + OneofDescriptorProto.encode(v!, writer.uint32(66).fork()).join(); } if (message.options !== undefined) { - MessageOptions.encode(message.options, writer.uint32(58).fork()).ldelim(); + MessageOptions.encode(message.options, writer.uint32(58).fork()).join(); } for (const v of message.reservedRange) { - DescriptorProto_ReservedRange.encode( - v!, - writer.uint32(74).fork(), - ).ldelim(); + DescriptorProto_ReservedRange.encode(v!, writer.uint32(74).fork()).join(); } for (const v of message.reservedName) { writer.uint32(82).string(v!); @@ -1624,9 +2419,9 @@ export const DescriptorProto = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): DescriptorProto { + decode(input: BinaryReader | Uint8Array, length?: number): DescriptorProto { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseDescriptorProto(); while (reader.pos < end) { @@ -1720,7 +2515,7 @@ export const DescriptorProto = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -1850,13 +2645,16 @@ function createBaseDescriptorProto_ExtensionRange(): DescriptorProto_ExtensionRa }; } -export const DescriptorProto_ExtensionRange = { +export const DescriptorProto_ExtensionRange: MessageFns< + DescriptorProto_ExtensionRange, + "google.protobuf.DescriptorProto.ExtensionRange" +> = { $type: "google.protobuf.DescriptorProto.ExtensionRange" as const, encode( message: DescriptorProto_ExtensionRange, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.start !== undefined && message.start !== 0) { writer.uint32(8).int32(message.start); } @@ -1867,17 +2665,17 @@ export const DescriptorProto_ExtensionRange = { ExtensionRangeOptions.encode( message.options, writer.uint32(26).fork(), - ).ldelim(); + ).join(); } return writer; }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): DescriptorProto_ExtensionRange { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseDescriptorProto_ExtensionRange(); while (reader.pos < end) { @@ -1911,7 +2709,7 @@ export const DescriptorProto_ExtensionRange = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -1973,13 +2771,16 @@ function createBaseDescriptorProto_ReservedRange(): DescriptorProto_ReservedRang }; } -export const DescriptorProto_ReservedRange = { +export const DescriptorProto_ReservedRange: MessageFns< + DescriptorProto_ReservedRange, + "google.protobuf.DescriptorProto.ReservedRange" +> = { $type: "google.protobuf.DescriptorProto.ReservedRange" as const, encode( message: DescriptorProto_ReservedRange, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.start !== undefined && message.start !== 0) { writer.uint32(8).int32(message.start); } @@ -1990,11 +2791,11 @@ export const DescriptorProto_ReservedRange = { }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): DescriptorProto_ReservedRange { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseDescriptorProto_ReservedRange(); while (reader.pos < end) { @@ -2018,7 +2819,7 @@ export const DescriptorProto_ReservedRange = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -2066,88 +2867,316 @@ function createBaseExtensionRangeOptions(): ExtensionRangeOptions { return { $type: "google.protobuf.ExtensionRangeOptions", uninterpretedOption: [], + declaration: [], + features: undefined, + verification: 1, }; } -export const ExtensionRangeOptions = { +export const ExtensionRangeOptions: MessageFns< + ExtensionRangeOptions, + "google.protobuf.ExtensionRangeOptions" +> = { $type: "google.protobuf.ExtensionRangeOptions" as const, encode( message: ExtensionRangeOptions, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).join(); + } + for (const v of message.declaration) { + ExtensionRangeOptions_Declaration.encode( + v!, + writer.uint32(18).fork(), + ).join(); + } + if (message.features !== undefined) { + FeatureSet.encode(message.features, writer.uint32(402).fork()).join(); + } + if (message.verification !== undefined && message.verification !== 1) { + writer.uint32(24).int32(message.verification); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): ExtensionRangeOptions { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseExtensionRangeOptions(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 999: + if (tag !== 7994) { + break; + } + + message.uninterpretedOption.push( + UninterpretedOption.decode(reader, reader.uint32()), + ); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.declaration.push( + ExtensionRangeOptions_Declaration.decode(reader, reader.uint32()), + ); + continue; + case 50: + if (tag !== 402) { + break; + } + + message.features = FeatureSet.decode(reader, reader.uint32()); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.verification = reader.int32() as any; + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): ExtensionRangeOptions { + return { + $type: ExtensionRangeOptions.$type, + uninterpretedOption: globalThis.Array.isArray(object?.uninterpretedOption) + ? object.uninterpretedOption.map((e: any) => + UninterpretedOption.fromJSON(e), + ) + : [], + declaration: globalThis.Array.isArray(object?.declaration) + ? object.declaration.map((e: any) => + ExtensionRangeOptions_Declaration.fromJSON(e), + ) + : [], + features: isSet(object.features) + ? FeatureSet.fromJSON(object.features) + : undefined, + verification: isSet(object.verification) + ? 
extensionRangeOptions_VerificationStateFromJSON(object.verification) + : 1, + }; + }, + + toJSON(message: ExtensionRangeOptions): unknown { + const obj: any = {}; + if (message.uninterpretedOption?.length) { + obj.uninterpretedOption = message.uninterpretedOption.map((e) => + UninterpretedOption.toJSON(e), + ); + } + if (message.declaration?.length) { + obj.declaration = message.declaration.map((e) => + ExtensionRangeOptions_Declaration.toJSON(e), + ); + } + if (message.features !== undefined) { + obj.features = FeatureSet.toJSON(message.features); + } + if (message.verification !== undefined && message.verification !== 1) { + obj.verification = extensionRangeOptions_VerificationStateToJSON( + message.verification, + ); + } + return obj; + }, + + create(base?: DeepPartial): ExtensionRangeOptions { + return ExtensionRangeOptions.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): ExtensionRangeOptions { + const message = createBaseExtensionRangeOptions(); + message.uninterpretedOption = + object.uninterpretedOption?.map((e) => + UninterpretedOption.fromPartial(e), + ) || []; + message.declaration = + object.declaration?.map((e) => + ExtensionRangeOptions_Declaration.fromPartial(e), + ) || []; + message.features = + object.features !== undefined && object.features !== null + ? FeatureSet.fromPartial(object.features) + : undefined; + message.verification = object.verification ?? 1; + return message; + }, +}; + +messageTypeRegistry.set(ExtensionRangeOptions.$type, ExtensionRangeOptions); + +function createBaseExtensionRangeOptions_Declaration(): ExtensionRangeOptions_Declaration { + return { + $type: "google.protobuf.ExtensionRangeOptions.Declaration", + number: 0, + fullName: "", + type: "", + reserved: false, + repeated: false, + }; +} + +export const ExtensionRangeOptions_Declaration: MessageFns< + ExtensionRangeOptions_Declaration, + "google.protobuf.ExtensionRangeOptions.Declaration" +> = { + $type: "google.protobuf.ExtensionRangeOptions.Declaration" as const, + + encode( + message: ExtensionRangeOptions_Declaration, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.number !== undefined && message.number !== 0) { + writer.uint32(8).int32(message.number); + } + if (message.fullName !== undefined && message.fullName !== "") { + writer.uint32(18).string(message.fullName); + } + if (message.type !== undefined && message.type !== "") { + writer.uint32(26).string(message.type); + } + if (message.reserved !== undefined && message.reserved !== false) { + writer.uint32(40).bool(message.reserved); + } + if (message.repeated !== undefined && message.repeated !== false) { + writer.uint32(48).bool(message.repeated); } return writer; }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, - ): ExtensionRangeOptions { + ): ExtensionRangeOptions_Declaration { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; - const message = createBaseExtensionRangeOptions(); + const message = createBaseExtensionRangeOptions_Declaration(); while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { - case 999: - if (tag !== 7994) { + case 1: + if (tag !== 8) { break; } - message.uninterpretedOption.push( - UninterpretedOption.decode(reader, reader.uint32()), - ); + message.number = reader.int32(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.fullName = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.type = reader.string(); + continue; + case 5: + if (tag !== 40) { + break; + } + + message.reserved = reader.bool(); + continue; + case 6: + if (tag !== 48) { + break; + } + + message.repeated = reader.bool(); continue; } if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, - fromJSON(object: any): ExtensionRangeOptions { + fromJSON(object: any): ExtensionRangeOptions_Declaration { return { - $type: ExtensionRangeOptions.$type, - uninterpretedOption: globalThis.Array.isArray(object?.uninterpretedOption) - ? object.uninterpretedOption.map((e: any) => - UninterpretedOption.fromJSON(e), - ) - : [], + $type: ExtensionRangeOptions_Declaration.$type, + number: isSet(object.number) ? globalThis.Number(object.number) : 0, + fullName: isSet(object.fullName) + ? globalThis.String(object.fullName) + : "", + type: isSet(object.type) ? globalThis.String(object.type) : "", + reserved: isSet(object.reserved) + ? globalThis.Boolean(object.reserved) + : false, + repeated: isSet(object.repeated) + ? globalThis.Boolean(object.repeated) + : false, }; }, - toJSON(message: ExtensionRangeOptions): unknown { + toJSON(message: ExtensionRangeOptions_Declaration): unknown { const obj: any = {}; - if (message.uninterpretedOption?.length) { - obj.uninterpretedOption = message.uninterpretedOption.map((e) => - UninterpretedOption.toJSON(e), - ); + if (message.number !== undefined && message.number !== 0) { + obj.number = Math.round(message.number); + } + if (message.fullName !== undefined && message.fullName !== "") { + obj.fullName = message.fullName; + } + if (message.type !== undefined && message.type !== "") { + obj.type = message.type; + } + if (message.reserved !== undefined && message.reserved !== false) { + obj.reserved = message.reserved; + } + if (message.repeated !== undefined && message.repeated !== false) { + obj.repeated = message.repeated; } return obj; }, - create(base?: DeepPartial): ExtensionRangeOptions { - return ExtensionRangeOptions.fromPartial(base ?? {}); + create( + base?: DeepPartial, + ): ExtensionRangeOptions_Declaration { + return ExtensionRangeOptions_Declaration.fromPartial(base ?? {}); }, fromPartial( - object: DeepPartial, - ): ExtensionRangeOptions { - const message = createBaseExtensionRangeOptions(); - message.uninterpretedOption = - object.uninterpretedOption?.map((e) => - UninterpretedOption.fromPartial(e), - ) || []; + object: DeepPartial, + ): ExtensionRangeOptions_Declaration { + const message = createBaseExtensionRangeOptions_Declaration(); + message.number = object.number ?? 0; + message.fullName = object.fullName ?? ""; + message.type = object.type ?? ""; + message.reserved = object.reserved ?? false; + message.repeated = object.repeated ?? 
false; return message; }, }; -messageTypeRegistry.set(ExtensionRangeOptions.$type, ExtensionRangeOptions); +messageTypeRegistry.set( + ExtensionRangeOptions_Declaration.$type, + ExtensionRangeOptions_Declaration, +); function createBaseFieldDescriptorProto(): FieldDescriptorProto { return { @@ -2166,13 +3195,16 @@ function createBaseFieldDescriptorProto(): FieldDescriptorProto { }; } -export const FieldDescriptorProto = { +export const FieldDescriptorProto: MessageFns< + FieldDescriptorProto, + "google.protobuf.FieldDescriptorProto" +> = { $type: "google.protobuf.FieldDescriptorProto" as const, encode( message: FieldDescriptorProto, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.name !== undefined && message.name !== "") { writer.uint32(10).string(message.name); } @@ -2201,7 +3233,7 @@ export const FieldDescriptorProto = { writer.uint32(82).string(message.jsonName); } if (message.options !== undefined) { - FieldOptions.encode(message.options, writer.uint32(66).fork()).ldelim(); + FieldOptions.encode(message.options, writer.uint32(66).fork()).join(); } if ( message.proto3Optional !== undefined && @@ -2213,11 +3245,11 @@ export const FieldDescriptorProto = { }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): FieldDescriptorProto { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseFieldDescriptorProto(); while (reader.pos < end) { @@ -2304,7 +3336,7 @@ export const FieldDescriptorProto = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -2418,28 +3450,31 @@ function createBaseOneofDescriptorProto(): OneofDescriptorProto { }; } -export const OneofDescriptorProto = { +export const OneofDescriptorProto: MessageFns< + OneofDescriptorProto, + "google.protobuf.OneofDescriptorProto" +> = { $type: "google.protobuf.OneofDescriptorProto" as const, encode( message: OneofDescriptorProto, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.name !== undefined && message.name !== "") { writer.uint32(10).string(message.name); } if (message.options !== undefined) { - OneofOptions.encode(message.options, writer.uint32(18).fork()).ldelim(); + OneofOptions.encode(message.options, writer.uint32(18).fork()).join(); } return writer; }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): OneofDescriptorProto { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseOneofDescriptorProto(); while (reader.pos < end) { @@ -2463,7 +3498,7 @@ export const OneofDescriptorProto = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -2516,27 +3551,30 @@ function createBaseEnumDescriptorProto(): EnumDescriptorProto { }; } -export const EnumDescriptorProto = { +export const EnumDescriptorProto: MessageFns< + EnumDescriptorProto, + "google.protobuf.EnumDescriptorProto" +> = { $type: "google.protobuf.EnumDescriptorProto" as const, encode( message: EnumDescriptorProto, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.name !== undefined && message.name !== "") { writer.uint32(10).string(message.name); } for (const v of message.value) { - EnumValueDescriptorProto.encode(v!, writer.uint32(18).fork()).ldelim(); + EnumValueDescriptorProto.encode(v!, writer.uint32(18).fork()).join(); } if (message.options !== undefined) { - EnumOptions.encode(message.options, writer.uint32(26).fork()).ldelim(); + EnumOptions.encode(message.options, writer.uint32(26).fork()).join(); } for (const v of message.reservedRange) { EnumDescriptorProto_EnumReservedRange.encode( v!, writer.uint32(34).fork(), - ).ldelim(); + ).join(); } for (const v of message.reservedName) { writer.uint32(42).string(v!); @@ -2544,9 +3582,12 @@ export const EnumDescriptorProto = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): EnumDescriptorProto { + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): EnumDescriptorProto { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseEnumDescriptorProto(); while (reader.pos < end) { @@ -2598,7 +3639,7 @@ export const EnumDescriptorProto = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -2677,13 +3718,16 @@ function createBaseEnumDescriptorProto_EnumReservedRange(): EnumDescriptorProto_ }; } -export const EnumDescriptorProto_EnumReservedRange = { +export const EnumDescriptorProto_EnumReservedRange: MessageFns< + EnumDescriptorProto_EnumReservedRange, + "google.protobuf.EnumDescriptorProto.EnumReservedRange" +> = { $type: "google.protobuf.EnumDescriptorProto.EnumReservedRange" as const, encode( message: EnumDescriptorProto_EnumReservedRange, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.start !== undefined && message.start !== 0) { writer.uint32(8).int32(message.start); } @@ -2694,11 +3738,11 @@ export const EnumDescriptorProto_EnumReservedRange = { }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): EnumDescriptorProto_EnumReservedRange { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseEnumDescriptorProto_EnumReservedRange(); while (reader.pos < end) { @@ -2722,7 +3766,7 @@ export const EnumDescriptorProto_EnumReservedRange = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -2775,13 +3819,16 @@ function createBaseEnumValueDescriptorProto(): EnumValueDescriptorProto { }; } -export const EnumValueDescriptorProto = { +export const EnumValueDescriptorProto: MessageFns< + EnumValueDescriptorProto, + "google.protobuf.EnumValueDescriptorProto" +> = { $type: "google.protobuf.EnumValueDescriptorProto" as const, encode( message: EnumValueDescriptorProto, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.name !== undefined && message.name !== "") { writer.uint32(10).string(message.name); } @@ -2789,20 +3836,17 @@ export const EnumValueDescriptorProto = { writer.uint32(16).int32(message.number); } if (message.options !== undefined) { - EnumValueOptions.encode( - message.options, - writer.uint32(26).fork(), - ).ldelim(); + EnumValueOptions.encode(message.options, writer.uint32(26).fork()).join(); } return writer; }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): EnumValueDescriptorProto { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseEnumValueDescriptorProto(); while (reader.pos < end) { @@ -2833,7 +3877,7 @@ export const EnumValueDescriptorProto = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -2896,31 +3940,34 @@ function createBaseServiceDescriptorProto(): ServiceDescriptorProto { }; } -export const ServiceDescriptorProto = { +export const ServiceDescriptorProto: MessageFns< + ServiceDescriptorProto, + "google.protobuf.ServiceDescriptorProto" +> = { $type: "google.protobuf.ServiceDescriptorProto" as const, encode( message: ServiceDescriptorProto, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.name !== undefined && message.name !== "") { writer.uint32(10).string(message.name); } for (const v of message.method) { - MethodDescriptorProto.encode(v!, writer.uint32(18).fork()).ldelim(); + MethodDescriptorProto.encode(v!, writer.uint32(18).fork()).join(); } if (message.options !== undefined) { - ServiceOptions.encode(message.options, writer.uint32(26).fork()).ldelim(); + ServiceOptions.encode(message.options, writer.uint32(26).fork()).join(); } return writer; }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): ServiceDescriptorProto { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseServiceDescriptorProto(); while (reader.pos < end) { @@ -2953,7 +4000,7 @@ export const ServiceDescriptorProto = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -3017,13 +4064,16 @@ function createBaseMethodDescriptorProto(): MethodDescriptorProto { }; } -export const MethodDescriptorProto = { +export const MethodDescriptorProto: MessageFns< + MethodDescriptorProto, + "google.protobuf.MethodDescriptorProto" +> = { $type: "google.protobuf.MethodDescriptorProto" as const, encode( message: MethodDescriptorProto, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.name !== undefined && message.name !== "") { writer.uint32(10).string(message.name); } @@ -3034,7 +4084,7 @@ export const MethodDescriptorProto = { writer.uint32(26).string(message.outputType); } if (message.options !== undefined) { - MethodOptions.encode(message.options, writer.uint32(34).fork()).ldelim(); + MethodOptions.encode(message.options, writer.uint32(34).fork()).join(); } if ( message.clientStreaming !== undefined && @@ -3052,11 +4102,11 @@ export const MethodDescriptorProto = { }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): MethodDescriptorProto { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseMethodDescriptorProto(); while (reader.pos < end) { @@ -3108,7 +4158,7 @@ export const MethodDescriptorProto = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -3199,7 +4249,6 @@ function createBaseFileOptions(): FileOptions { ccGenericServices: false, javaGenericServices: false, pyGenericServices: false, - phpGenericServices: false, deprecated: false, ccEnableArenas: true, objcClassPrefix: "", @@ -3209,17 +4258,21 @@ function createBaseFileOptions(): FileOptions { phpNamespace: "", phpMetadataNamespace: "", rubyPackage: "", + features: undefined, uninterpretedOption: [], }; } -export const FileOptions = { +export const FileOptions: MessageFns< + FileOptions, + "google.protobuf.FileOptions" +> = { $type: "google.protobuf.FileOptions" as const, encode( message: FileOptions, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.javaPackage !== undefined && message.javaPackage !== "") { writer.uint32(10).string(message.javaPackage); } @@ -3271,12 +4324,6 @@ export const FileOptions = { ) { writer.uint32(144).bool(message.pyGenericServices); } - if ( - message.phpGenericServices !== undefined && - message.phpGenericServices !== false - ) { - writer.uint32(336).bool(message.phpGenericServices); - } if (message.deprecated !== undefined && message.deprecated !== false) { writer.uint32(184).bool(message.deprecated); } @@ -3316,15 +4363,18 @@ export const FileOptions = { if (message.rubyPackage !== undefined && message.rubyPackage !== "") { writer.uint32(362).string(message.rubyPackage); } + if (message.features !== undefined) { + FeatureSet.encode(message.features, writer.uint32(402).fork()).join(); + } for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + 
UninterpretedOption.encode(v!, writer.uint32(7994).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): FileOptions { + decode(input: BinaryReader | Uint8Array, length?: number): FileOptions { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseFileOptions(); while (reader.pos < end) { @@ -3400,13 +4450,6 @@ export const FileOptions = { message.pyGenericServices = reader.bool(); continue; - case 42: - if (tag !== 336) { - break; - } - - message.phpGenericServices = reader.bool(); - continue; case 23: if (tag !== 184) { break; @@ -3470,6 +4513,13 @@ export const FileOptions = { message.rubyPackage = reader.string(); continue; + case 50: + if (tag !== 402) { + break; + } + + message.features = FeatureSet.decode(reader, reader.uint32()); + continue; case 999: if (tag !== 7994) { break; @@ -3483,7 +4533,7 @@ export const FileOptions = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -3521,9 +4571,6 @@ export const FileOptions = { pyGenericServices: isSet(object.pyGenericServices) ? globalThis.Boolean(object.pyGenericServices) : false, - phpGenericServices: isSet(object.phpGenericServices) - ? globalThis.Boolean(object.phpGenericServices) - : false, deprecated: isSet(object.deprecated) ? globalThis.Boolean(object.deprecated) : false, @@ -3551,6 +4598,9 @@ export const FileOptions = { rubyPackage: isSet(object.rubyPackage) ? globalThis.String(object.rubyPackage) : "", + features: isSet(object.features) + ? FeatureSet.fromJSON(object.features) + : undefined, uninterpretedOption: globalThis.Array.isArray(object?.uninterpretedOption) ? object.uninterpretedOption.map((e: any) => UninterpretedOption.fromJSON(e), @@ -3612,12 +4662,6 @@ export const FileOptions = { ) { obj.pyGenericServices = message.pyGenericServices; } - if ( - message.phpGenericServices !== undefined && - message.phpGenericServices !== false - ) { - obj.phpGenericServices = message.phpGenericServices; - } if (message.deprecated !== undefined && message.deprecated !== false) { obj.deprecated = message.deprecated; } @@ -3657,6 +4701,9 @@ export const FileOptions = { if (message.rubyPackage !== undefined && message.rubyPackage !== "") { obj.rubyPackage = message.rubyPackage; } + if (message.features !== undefined) { + obj.features = FeatureSet.toJSON(message.features); + } if (message.uninterpretedOption?.length) { obj.uninterpretedOption = message.uninterpretedOption.map((e) => UninterpretedOption.toJSON(e), @@ -3681,7 +4728,6 @@ export const FileOptions = { message.ccGenericServices = object.ccGenericServices ?? false; message.javaGenericServices = object.javaGenericServices ?? false; message.pyGenericServices = object.pyGenericServices ?? false; - message.phpGenericServices = object.phpGenericServices ?? false; message.deprecated = object.deprecated ?? false; message.ccEnableArenas = object.ccEnableArenas ?? true; message.objcClassPrefix = object.objcClassPrefix ?? ""; @@ -3691,6 +4737,10 @@ export const FileOptions = { message.phpNamespace = object.phpNamespace ?? ""; message.phpMetadataNamespace = object.phpMetadataNamespace ?? ""; message.rubyPackage = object.rubyPackage ?? ""; + message.features = + object.features !== undefined && object.features !== null + ? 
FeatureSet.fromPartial(object.features) + : undefined; message.uninterpretedOption = object.uninterpretedOption?.map((e) => UninterpretedOption.fromPartial(e), @@ -3708,17 +4758,22 @@ function createBaseMessageOptions(): MessageOptions { noStandardDescriptorAccessor: false, deprecated: false, mapEntry: false, + deprecatedLegacyJsonFieldConflicts: false, + features: undefined, uninterpretedOption: [], }; } -export const MessageOptions = { +export const MessageOptions: MessageFns< + MessageOptions, + "google.protobuf.MessageOptions" +> = { $type: "google.protobuf.MessageOptions" as const, encode( message: MessageOptions, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if ( message.messageSetWireFormat !== undefined && message.messageSetWireFormat !== false @@ -3737,15 +4792,24 @@ export const MessageOptions = { if (message.mapEntry !== undefined && message.mapEntry !== false) { writer.uint32(56).bool(message.mapEntry); } + if ( + message.deprecatedLegacyJsonFieldConflicts !== undefined && + message.deprecatedLegacyJsonFieldConflicts !== false + ) { + writer.uint32(88).bool(message.deprecatedLegacyJsonFieldConflicts); + } + if (message.features !== undefined) { + FeatureSet.encode(message.features, writer.uint32(98).fork()).join(); + } for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): MessageOptions { + decode(input: BinaryReader | Uint8Array, length?: number): MessageOptions { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseMessageOptions(); while (reader.pos < end) { @@ -3779,6 +4843,20 @@ export const MessageOptions = { message.mapEntry = reader.bool(); continue; + case 11: + if (tag !== 88) { + break; + } + + message.deprecatedLegacyJsonFieldConflicts = reader.bool(); + continue; + case 12: + if (tag !== 98) { + break; + } + + message.features = FeatureSet.decode(reader, reader.uint32()); + continue; case 999: if (tag !== 7994) { break; @@ -3792,7 +4870,7 @@ export const MessageOptions = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -3812,6 +4890,14 @@ export const MessageOptions = { mapEntry: isSet(object.mapEntry) ? globalThis.Boolean(object.mapEntry) : false, + deprecatedLegacyJsonFieldConflicts: isSet( + object.deprecatedLegacyJsonFieldConflicts, + ) + ? globalThis.Boolean(object.deprecatedLegacyJsonFieldConflicts) + : false, + features: isSet(object.features) + ? FeatureSet.fromJSON(object.features) + : undefined, uninterpretedOption: globalThis.Array.isArray(object?.uninterpretedOption) ? 
object.uninterpretedOption.map((e: any) => UninterpretedOption.fromJSON(e), @@ -3840,6 +4926,16 @@ export const MessageOptions = { if (message.mapEntry !== undefined && message.mapEntry !== false) { obj.mapEntry = message.mapEntry; } + if ( + message.deprecatedLegacyJsonFieldConflicts !== undefined && + message.deprecatedLegacyJsonFieldConflicts !== false + ) { + obj.deprecatedLegacyJsonFieldConflicts = + message.deprecatedLegacyJsonFieldConflicts; + } + if (message.features !== undefined) { + obj.features = FeatureSet.toJSON(message.features); + } if (message.uninterpretedOption?.length) { obj.uninterpretedOption = message.uninterpretedOption.map((e) => UninterpretedOption.toJSON(e), @@ -3858,6 +4954,12 @@ export const MessageOptions = { object.noStandardDescriptorAccessor ?? false; message.deprecated = object.deprecated ?? false; message.mapEntry = object.mapEntry ?? false; + message.deprecatedLegacyJsonFieldConflicts = + object.deprecatedLegacyJsonFieldConflicts ?? false; + message.features = + object.features !== undefined && object.features !== null + ? FeatureSet.fromPartial(object.features) + : undefined; message.uninterpretedOption = object.uninterpretedOption?.map((e) => UninterpretedOption.fromPartial(e), @@ -3878,17 +4980,25 @@ function createBaseFieldOptions(): FieldOptions { unverifiedLazy: false, deprecated: false, weak: false, + debugRedact: false, + retention: 0, + targets: [], + editionDefaults: [], + features: undefined, uninterpretedOption: [], }; } -export const FieldOptions = { +export const FieldOptions: MessageFns< + FieldOptions, + "google.protobuf.FieldOptions" +> = { $type: "google.protobuf.FieldOptions" as const, encode( message: FieldOptions, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.ctype !== undefined && message.ctype !== 0) { writer.uint32(8).int32(message.ctype); } @@ -3913,15 +5023,32 @@ export const FieldOptions = { if (message.weak !== undefined && message.weak !== false) { writer.uint32(80).bool(message.weak); } + if (message.debugRedact !== undefined && message.debugRedact !== false) { + writer.uint32(128).bool(message.debugRedact); + } + if (message.retention !== undefined && message.retention !== 0) { + writer.uint32(136).int32(message.retention); + } + writer.uint32(154).fork(); + for (const v of message.targets) { + writer.int32(v); + } + writer.join(); + for (const v of message.editionDefaults) { + FieldOptions_EditionDefault.encode(v!, writer.uint32(162).fork()).join(); + } + if (message.features !== undefined) { + FeatureSet.encode(message.features, writer.uint32(170).fork()).join(); + } for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): FieldOptions { + decode(input: BinaryReader | Uint8Array, length?: number): FieldOptions { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseFieldOptions(); while (reader.pos < end) { @@ -3976,6 +5103,53 @@ export const FieldOptions = { message.weak = reader.bool(); continue; + case 16: + if (tag !== 128) { + break; + } + + message.debugRedact = reader.bool(); + continue; + case 17: + if (tag !== 136) { + break; + } + + message.retention = reader.int32() as any; + continue; + case 19: + if (tag === 152) { + message.targets.push(reader.int32() as any); + + continue; + } + + if (tag === 154) { + const end2 = reader.uint32() + reader.pos; + while (reader.pos < end2) { + message.targets.push(reader.int32() as any); + } + + continue; + } + + break; + case 20: + if (tag !== 162) { + break; + } + + message.editionDefaults.push( + FieldOptions_EditionDefault.decode(reader, reader.uint32()), + ); + continue; + case 21: + if (tag !== 170) { + break; + } + + message.features = FeatureSet.decode(reader, reader.uint32()); + continue; case 999: if (tag !== 7994) { break; @@ -3989,7 +5163,7 @@ export const FieldOptions = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -4010,6 +5184,25 @@ export const FieldOptions = { ? globalThis.Boolean(object.deprecated) : false, weak: isSet(object.weak) ? globalThis.Boolean(object.weak) : false, + debugRedact: isSet(object.debugRedact) + ? globalThis.Boolean(object.debugRedact) + : false, + retention: isSet(object.retention) + ? fieldOptions_OptionRetentionFromJSON(object.retention) + : 0, + targets: globalThis.Array.isArray(object?.targets) + ? object.targets.map((e: any) => + fieldOptions_OptionTargetTypeFromJSON(e), + ) + : [], + editionDefaults: globalThis.Array.isArray(object?.editionDefaults) + ? object.editionDefaults.map((e: any) => + FieldOptions_EditionDefault.fromJSON(e), + ) + : [], + features: isSet(object.features) + ? FeatureSet.fromJSON(object.features) + : undefined, uninterpretedOption: globalThis.Array.isArray(object?.uninterpretedOption) ? object.uninterpretedOption.map((e: any) => UninterpretedOption.fromJSON(e), @@ -4044,6 +5237,25 @@ export const FieldOptions = { if (message.weak !== undefined && message.weak !== false) { obj.weak = message.weak; } + if (message.debugRedact !== undefined && message.debugRedact !== false) { + obj.debugRedact = message.debugRedact; + } + if (message.retention !== undefined && message.retention !== 0) { + obj.retention = fieldOptions_OptionRetentionToJSON(message.retention); + } + if (message.targets?.length) { + obj.targets = message.targets.map((e) => + fieldOptions_OptionTargetTypeToJSON(e), + ); + } + if (message.editionDefaults?.length) { + obj.editionDefaults = message.editionDefaults.map((e) => + FieldOptions_EditionDefault.toJSON(e), + ); + } + if (message.features !== undefined) { + obj.features = FeatureSet.toJSON(message.features); + } if (message.uninterpretedOption?.length) { obj.uninterpretedOption = message.uninterpretedOption.map((e) => UninterpretedOption.toJSON(e), @@ -4064,6 +5276,17 @@ export const FieldOptions = { message.unverifiedLazy = object.unverifiedLazy ?? false; message.deprecated = object.deprecated ?? false; message.weak = object.weak ?? false; + message.debugRedact = object.debugRedact ?? false; + message.retention = object.retention ?? 
0; + message.targets = object.targets?.map((e) => e) || []; + message.editionDefaults = + object.editionDefaults?.map((e) => + FieldOptions_EditionDefault.fromPartial(e), + ) || []; + message.features = + object.features !== undefined && object.features !== null + ? FeatureSet.fromPartial(object.features) + : undefined; message.uninterpretedOption = object.uninterpretedOption?.map((e) => UninterpretedOption.fromPartial(e), @@ -4074,31 +5297,148 @@ export const FieldOptions = { messageTypeRegistry.set(FieldOptions.$type, FieldOptions); +function createBaseFieldOptions_EditionDefault(): FieldOptions_EditionDefault { + return { + $type: "google.protobuf.FieldOptions.EditionDefault", + edition: 0, + value: "", + }; +} + +export const FieldOptions_EditionDefault: MessageFns< + FieldOptions_EditionDefault, + "google.protobuf.FieldOptions.EditionDefault" +> = { + $type: "google.protobuf.FieldOptions.EditionDefault" as const, + + encode( + message: FieldOptions_EditionDefault, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.edition !== undefined && message.edition !== 0) { + writer.uint32(24).int32(message.edition); + } + if (message.value !== undefined && message.value !== "") { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): FieldOptions_EditionDefault { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseFieldOptions_EditionDefault(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 3: + if (tag !== 24) { + break; + } + + message.edition = reader.int32() as any; + continue; + case 2: + if (tag !== 18) { + break; + } + + message.value = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): FieldOptions_EditionDefault { + return { + $type: FieldOptions_EditionDefault.$type, + edition: isSet(object.edition) ? editionFromJSON(object.edition) : 0, + value: isSet(object.value) ? globalThis.String(object.value) : "", + }; + }, + + toJSON(message: FieldOptions_EditionDefault): unknown { + const obj: any = {}; + if (message.edition !== undefined && message.edition !== 0) { + obj.edition = editionToJSON(message.edition); + } + if (message.value !== undefined && message.value !== "") { + obj.value = message.value; + } + return obj; + }, + + create( + base?: DeepPartial, + ): FieldOptions_EditionDefault { + return FieldOptions_EditionDefault.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): FieldOptions_EditionDefault { + const message = createBaseFieldOptions_EditionDefault(); + message.edition = object.edition ?? 0; + message.value = object.value ?? 
""; + return message; + }, +}; + +messageTypeRegistry.set( + FieldOptions_EditionDefault.$type, + FieldOptions_EditionDefault, +); + function createBaseOneofOptions(): OneofOptions { - return { $type: "google.protobuf.OneofOptions", uninterpretedOption: [] }; + return { + $type: "google.protobuf.OneofOptions", + features: undefined, + uninterpretedOption: [], + }; } -export const OneofOptions = { +export const OneofOptions: MessageFns< + OneofOptions, + "google.protobuf.OneofOptions" +> = { $type: "google.protobuf.OneofOptions" as const, encode( message: OneofOptions, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.features !== undefined) { + FeatureSet.encode(message.features, writer.uint32(10).fork()).join(); + } for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): OneofOptions { + decode(input: BinaryReader | Uint8Array, length?: number): OneofOptions { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseOneofOptions(); while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.features = FeatureSet.decode(reader, reader.uint32()); + continue; case 999: if (tag !== 7994) { break; @@ -4112,7 +5452,7 @@ export const OneofOptions = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -4120,6 +5460,9 @@ export const OneofOptions = { fromJSON(object: any): OneofOptions { return { $type: OneofOptions.$type, + features: isSet(object.features) + ? FeatureSet.fromJSON(object.features) + : undefined, uninterpretedOption: globalThis.Array.isArray(object?.uninterpretedOption) ? object.uninterpretedOption.map((e: any) => UninterpretedOption.fromJSON(e), @@ -4130,6 +5473,9 @@ export const OneofOptions = { toJSON(message: OneofOptions): unknown { const obj: any = {}; + if (message.features !== undefined) { + obj.features = FeatureSet.toJSON(message.features); + } if (message.uninterpretedOption?.length) { obj.uninterpretedOption = message.uninterpretedOption.map((e) => UninterpretedOption.toJSON(e), @@ -4143,6 +5489,10 @@ export const OneofOptions = { }, fromPartial(object: DeepPartial): OneofOptions { const message = createBaseOneofOptions(); + message.features = + object.features !== undefined && object.features !== null + ? 
FeatureSet.fromPartial(object.features) + : undefined; message.uninterpretedOption = object.uninterpretedOption?.map((e) => UninterpretedOption.fromPartial(e), @@ -4158,32 +5508,46 @@ function createBaseEnumOptions(): EnumOptions { $type: "google.protobuf.EnumOptions", allowAlias: false, deprecated: false, + deprecatedLegacyJsonFieldConflicts: false, + features: undefined, uninterpretedOption: [], }; } -export const EnumOptions = { +export const EnumOptions: MessageFns< + EnumOptions, + "google.protobuf.EnumOptions" +> = { $type: "google.protobuf.EnumOptions" as const, encode( message: EnumOptions, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.allowAlias !== undefined && message.allowAlias !== false) { writer.uint32(16).bool(message.allowAlias); } - if (message.deprecated !== undefined && message.deprecated !== false) { - writer.uint32(24).bool(message.deprecated); + if (message.deprecated !== undefined && message.deprecated !== false) { + writer.uint32(24).bool(message.deprecated); + } + if ( + message.deprecatedLegacyJsonFieldConflicts !== undefined && + message.deprecatedLegacyJsonFieldConflicts !== false + ) { + writer.uint32(48).bool(message.deprecatedLegacyJsonFieldConflicts); + } + if (message.features !== undefined) { + FeatureSet.encode(message.features, writer.uint32(58).fork()).join(); } for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): EnumOptions { + decode(input: BinaryReader | Uint8Array, length?: number): EnumOptions { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseEnumOptions(); while (reader.pos < end) { @@ -4203,6 +5567,20 @@ export const EnumOptions = { message.deprecated = reader.bool(); continue; + case 6: + if (tag !== 48) { + break; + } + + message.deprecatedLegacyJsonFieldConflicts = reader.bool(); + continue; + case 7: + if (tag !== 58) { + break; + } + + message.features = FeatureSet.decode(reader, reader.uint32()); + continue; case 999: if (tag !== 7994) { break; @@ -4216,7 +5594,7 @@ export const EnumOptions = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -4230,6 +5608,14 @@ export const EnumOptions = { deprecated: isSet(object.deprecated) ? globalThis.Boolean(object.deprecated) : false, + deprecatedLegacyJsonFieldConflicts: isSet( + object.deprecatedLegacyJsonFieldConflicts, + ) + ? globalThis.Boolean(object.deprecatedLegacyJsonFieldConflicts) + : false, + features: isSet(object.features) + ? FeatureSet.fromJSON(object.features) + : undefined, uninterpretedOption: globalThis.Array.isArray(object?.uninterpretedOption) ? 
object.uninterpretedOption.map((e: any) => UninterpretedOption.fromJSON(e), @@ -4246,6 +5632,16 @@ export const EnumOptions = { if (message.deprecated !== undefined && message.deprecated !== false) { obj.deprecated = message.deprecated; } + if ( + message.deprecatedLegacyJsonFieldConflicts !== undefined && + message.deprecatedLegacyJsonFieldConflicts !== false + ) { + obj.deprecatedLegacyJsonFieldConflicts = + message.deprecatedLegacyJsonFieldConflicts; + } + if (message.features !== undefined) { + obj.features = FeatureSet.toJSON(message.features); + } if (message.uninterpretedOption?.length) { obj.uninterpretedOption = message.uninterpretedOption.map((e) => UninterpretedOption.toJSON(e), @@ -4261,6 +5657,12 @@ export const EnumOptions = { const message = createBaseEnumOptions(); message.allowAlias = object.allowAlias ?? false; message.deprecated = object.deprecated ?? false; + message.deprecatedLegacyJsonFieldConflicts = + object.deprecatedLegacyJsonFieldConflicts ?? false; + message.features = + object.features !== undefined && object.features !== null + ? FeatureSet.fromPartial(object.features) + : undefined; message.uninterpretedOption = object.uninterpretedOption?.map((e) => UninterpretedOption.fromPartial(e), @@ -4275,29 +5677,40 @@ function createBaseEnumValueOptions(): EnumValueOptions { return { $type: "google.protobuf.EnumValueOptions", deprecated: false, + features: undefined, + debugRedact: false, uninterpretedOption: [], }; } -export const EnumValueOptions = { +export const EnumValueOptions: MessageFns< + EnumValueOptions, + "google.protobuf.EnumValueOptions" +> = { $type: "google.protobuf.EnumValueOptions" as const, encode( message: EnumValueOptions, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.deprecated !== undefined && message.deprecated !== false) { writer.uint32(8).bool(message.deprecated); } + if (message.features !== undefined) { + FeatureSet.encode(message.features, writer.uint32(18).fork()).join(); + } + if (message.debugRedact !== undefined && message.debugRedact !== false) { + writer.uint32(24).bool(message.debugRedact); + } for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): EnumValueOptions { + decode(input: BinaryReader | Uint8Array, length?: number): EnumValueOptions { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseEnumValueOptions(); while (reader.pos < end) { @@ -4310,6 +5723,20 @@ export const EnumValueOptions = { message.deprecated = reader.bool(); continue; + case 2: + if (tag !== 18) { + break; + } + + message.features = FeatureSet.decode(reader, reader.uint32()); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.debugRedact = reader.bool(); + continue; case 999: if (tag !== 7994) { break; @@ -4323,7 +5750,7 @@ export const EnumValueOptions = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -4334,6 +5761,12 @@ export const EnumValueOptions = { deprecated: isSet(object.deprecated) ? globalThis.Boolean(object.deprecated) : false, + features: isSet(object.features) + ? 
FeatureSet.fromJSON(object.features) + : undefined, + debugRedact: isSet(object.debugRedact) + ? globalThis.Boolean(object.debugRedact) + : false, uninterpretedOption: globalThis.Array.isArray(object?.uninterpretedOption) ? object.uninterpretedOption.map((e: any) => UninterpretedOption.fromJSON(e), @@ -4347,6 +5780,12 @@ export const EnumValueOptions = { if (message.deprecated !== undefined && message.deprecated !== false) { obj.deprecated = message.deprecated; } + if (message.features !== undefined) { + obj.features = FeatureSet.toJSON(message.features); + } + if (message.debugRedact !== undefined && message.debugRedact !== false) { + obj.debugRedact = message.debugRedact; + } if (message.uninterpretedOption?.length) { obj.uninterpretedOption = message.uninterpretedOption.map((e) => UninterpretedOption.toJSON(e), @@ -4361,6 +5800,11 @@ export const EnumValueOptions = { fromPartial(object: DeepPartial): EnumValueOptions { const message = createBaseEnumValueOptions(); message.deprecated = object.deprecated ?? false; + message.features = + object.features !== undefined && object.features !== null + ? FeatureSet.fromPartial(object.features) + : undefined; + message.debugRedact = object.debugRedact ?? false; message.uninterpretedOption = object.uninterpretedOption?.map((e) => UninterpretedOption.fromPartial(e), @@ -4374,35 +5818,49 @@ messageTypeRegistry.set(EnumValueOptions.$type, EnumValueOptions); function createBaseServiceOptions(): ServiceOptions { return { $type: "google.protobuf.ServiceOptions", + features: undefined, deprecated: false, uninterpretedOption: [], }; } -export const ServiceOptions = { +export const ServiceOptions: MessageFns< + ServiceOptions, + "google.protobuf.ServiceOptions" +> = { $type: "google.protobuf.ServiceOptions" as const, encode( message: ServiceOptions, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.features !== undefined) { + FeatureSet.encode(message.features, writer.uint32(274).fork()).join(); + } if (message.deprecated !== undefined && message.deprecated !== false) { writer.uint32(264).bool(message.deprecated); } for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): ServiceOptions { + decode(input: BinaryReader | Uint8Array, length?: number): ServiceOptions { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseServiceOptions(); while (reader.pos < end) { const tag = reader.uint32(); switch (tag >>> 3) { + case 34: + if (tag !== 274) { + break; + } + + message.features = FeatureSet.decode(reader, reader.uint32()); + continue; case 33: if (tag !== 264) { break; @@ -4423,7 +5881,7 @@ export const ServiceOptions = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -4431,6 +5889,9 @@ export const ServiceOptions = { fromJSON(object: any): ServiceOptions { return { $type: ServiceOptions.$type, + features: isSet(object.features) + ? FeatureSet.fromJSON(object.features) + : undefined, deprecated: isSet(object.deprecated) ? 
globalThis.Boolean(object.deprecated) : false, @@ -4444,6 +5905,9 @@ export const ServiceOptions = { toJSON(message: ServiceOptions): unknown { const obj: any = {}; + if (message.features !== undefined) { + obj.features = FeatureSet.toJSON(message.features); + } if (message.deprecated !== undefined && message.deprecated !== false) { obj.deprecated = message.deprecated; } @@ -4460,6 +5924,10 @@ export const ServiceOptions = { }, fromPartial(object: DeepPartial): ServiceOptions { const message = createBaseServiceOptions(); + message.features = + object.features !== undefined && object.features !== null + ? FeatureSet.fromPartial(object.features) + : undefined; message.deprecated = object.deprecated ?? false; message.uninterpretedOption = object.uninterpretedOption?.map((e) => @@ -4476,17 +5944,21 @@ function createBaseMethodOptions(): MethodOptions { $type: "google.protobuf.MethodOptions", deprecated: false, idempotencyLevel: 0, + features: undefined, uninterpretedOption: [], }; } -export const MethodOptions = { +export const MethodOptions: MessageFns< + MethodOptions, + "google.protobuf.MethodOptions" +> = { $type: "google.protobuf.MethodOptions" as const, encode( message: MethodOptions, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.deprecated !== undefined && message.deprecated !== false) { writer.uint32(264).bool(message.deprecated); } @@ -4496,15 +5968,18 @@ export const MethodOptions = { ) { writer.uint32(272).int32(message.idempotencyLevel); } + if (message.features !== undefined) { + FeatureSet.encode(message.features, writer.uint32(282).fork()).join(); + } for (const v of message.uninterpretedOption) { - UninterpretedOption.encode(v!, writer.uint32(7994).fork()).ldelim(); + UninterpretedOption.encode(v!, writer.uint32(7994).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): MethodOptions { + decode(input: BinaryReader | Uint8Array, length?: number): MethodOptions { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseMethodOptions(); while (reader.pos < end) { @@ -4524,6 +5999,13 @@ export const MethodOptions = { message.idempotencyLevel = reader.int32() as any; continue; + case 35: + if (tag !== 282) { + break; + } + + message.features = FeatureSet.decode(reader, reader.uint32()); + continue; case 999: if (tag !== 7994) { break; @@ -4537,7 +6019,7 @@ export const MethodOptions = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -4551,6 +6033,9 @@ export const MethodOptions = { idempotencyLevel: isSet(object.idempotencyLevel) ? methodOptions_IdempotencyLevelFromJSON(object.idempotencyLevel) : 0, + features: isSet(object.features) + ? FeatureSet.fromJSON(object.features) + : undefined, uninterpretedOption: globalThis.Array.isArray(object?.uninterpretedOption) ? 
object.uninterpretedOption.map((e: any) => UninterpretedOption.fromJSON(e), @@ -4572,6 +6057,9 @@ export const MethodOptions = { message.idempotencyLevel, ); } + if (message.features !== undefined) { + obj.features = FeatureSet.toJSON(message.features); + } if (message.uninterpretedOption?.length) { obj.uninterpretedOption = message.uninterpretedOption.map((e) => UninterpretedOption.toJSON(e), @@ -4587,6 +6075,10 @@ export const MethodOptions = { const message = createBaseMethodOptions(); message.deprecated = object.deprecated ?? false; message.idempotencyLevel = object.idempotencyLevel ?? 0; + message.features = + object.features !== undefined && object.features !== null + ? FeatureSet.fromPartial(object.features) + : undefined; message.uninterpretedOption = object.uninterpretedOption?.map((e) => UninterpretedOption.fromPartial(e), @@ -4610,18 +6102,18 @@ function createBaseUninterpretedOption(): UninterpretedOption { }; } -export const UninterpretedOption = { +export const UninterpretedOption: MessageFns< + UninterpretedOption, + "google.protobuf.UninterpretedOption" +> = { $type: "google.protobuf.UninterpretedOption" as const, encode( message: UninterpretedOption, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { for (const v of message.name) { - UninterpretedOption_NamePart.encode( - v!, - writer.uint32(18).fork(), - ).ldelim(); + UninterpretedOption_NamePart.encode(v!, writer.uint32(18).fork()).join(); } if ( message.identifierValue !== undefined && @@ -4633,13 +6125,13 @@ export const UninterpretedOption = { message.positiveIntValue !== undefined && !message.positiveIntValue.equals(Long.UZERO) ) { - writer.uint32(32).uint64(message.positiveIntValue); + writer.uint32(32).uint64(message.positiveIntValue.toString()); } if ( message.negativeIntValue !== undefined && !message.negativeIntValue.equals(Long.ZERO) ) { - writer.uint32(40).int64(message.negativeIntValue); + writer.uint32(40).int64(message.negativeIntValue.toString()); } if (message.doubleValue !== undefined && message.doubleValue !== 0) { writer.uint32(49).double(message.doubleValue); @@ -4653,9 +6145,12 @@ export const UninterpretedOption = { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): UninterpretedOption { + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): UninterpretedOption { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseUninterpretedOption(); while (reader.pos < end) { @@ -4682,14 +6177,17 @@ export const UninterpretedOption = { break; } - message.positiveIntValue = reader.uint64() as Long; + message.positiveIntValue = Long.fromString( + reader.uint64().toString(), + true, + ); continue; case 5: if (tag !== 40) { break; } - message.negativeIntValue = reader.int64() as Long; + message.negativeIntValue = Long.fromString(reader.int64().toString()); continue; case 6: if (tag !== 49) { @@ -4716,7 +6214,7 @@ export const UninterpretedOption = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -4821,13 +6319,16 @@ function createBaseUninterpretedOption_NamePart(): UninterpretedOption_NamePart }; } -export const UninterpretedOption_NamePart = { +export const UninterpretedOption_NamePart: MessageFns< + UninterpretedOption_NamePart, + "google.protobuf.UninterpretedOption.NamePart" +> = { $type: "google.protobuf.UninterpretedOption.NamePart" as const, encode( message: UninterpretedOption_NamePart, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.namePart !== "") { writer.uint32(10).string(message.namePart); } @@ -4838,11 +6339,11 @@ export const UninterpretedOption_NamePart = { }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): UninterpretedOption_NamePart { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseUninterpretedOption_NamePart(); while (reader.pos < end) { @@ -4866,7 +6367,7 @@ export const UninterpretedOption_NamePart = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -4914,26 +6415,456 @@ messageTypeRegistry.set( UninterpretedOption_NamePart, ); +function createBaseFeatureSet(): FeatureSet { + return { + $type: "google.protobuf.FeatureSet", + fieldPresence: 0, + enumType: 0, + repeatedFieldEncoding: 0, + utf8Validation: 0, + messageEncoding: 0, + jsonFormat: 0, + }; +} + +export const FeatureSet: MessageFns = + { + $type: "google.protobuf.FeatureSet" as const, + + encode( + message: FeatureSet, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.fieldPresence !== undefined && message.fieldPresence !== 0) { + writer.uint32(8).int32(message.fieldPresence); + } + if (message.enumType !== undefined && message.enumType !== 0) { + writer.uint32(16).int32(message.enumType); + } + if ( + message.repeatedFieldEncoding !== undefined && + message.repeatedFieldEncoding !== 0 + ) { + writer.uint32(24).int32(message.repeatedFieldEncoding); + } + if ( + message.utf8Validation !== undefined && + message.utf8Validation !== 0 + ) { + writer.uint32(32).int32(message.utf8Validation); + } + if ( + message.messageEncoding !== undefined && + message.messageEncoding !== 0 + ) { + writer.uint32(40).int32(message.messageEncoding); + } + if (message.jsonFormat !== undefined && message.jsonFormat !== 0) { + writer.uint32(48).int32(message.jsonFormat); + } + return writer; + }, + + decode(input: BinaryReader | Uint8Array, length?: number): FeatureSet { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseFeatureSet(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 8) { + break; + } + + message.fieldPresence = reader.int32() as any; + continue; + case 2: + if (tag !== 16) { + break; + } + + message.enumType = reader.int32() as any; + continue; + case 3: + if (tag !== 24) { + break; + } + + message.repeatedFieldEncoding = reader.int32() as any; + continue; + case 4: + if (tag !== 32) { + break; + } + + message.utf8Validation = reader.int32() as any; + continue; + case 5: + if (tag !== 40) { + break; + } + + message.messageEncoding = reader.int32() as any; + continue; + case 6: + if (tag !== 48) { + break; + } + + message.jsonFormat = reader.int32() as any; + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): FeatureSet { + return { + $type: FeatureSet.$type, + fieldPresence: isSet(object.fieldPresence) + ? featureSet_FieldPresenceFromJSON(object.fieldPresence) + : 0, + enumType: isSet(object.enumType) + ? featureSet_EnumTypeFromJSON(object.enumType) + : 0, + repeatedFieldEncoding: isSet(object.repeatedFieldEncoding) + ? featureSet_RepeatedFieldEncodingFromJSON( + object.repeatedFieldEncoding, + ) + : 0, + utf8Validation: isSet(object.utf8Validation) + ? featureSet_Utf8ValidationFromJSON(object.utf8Validation) + : 0, + messageEncoding: isSet(object.messageEncoding) + ? featureSet_MessageEncodingFromJSON(object.messageEncoding) + : 0, + jsonFormat: isSet(object.jsonFormat) + ? featureSet_JsonFormatFromJSON(object.jsonFormat) + : 0, + }; + }, + + toJSON(message: FeatureSet): unknown { + const obj: any = {}; + if (message.fieldPresence !== undefined && message.fieldPresence !== 0) { + obj.fieldPresence = featureSet_FieldPresenceToJSON( + message.fieldPresence, + ); + } + if (message.enumType !== undefined && message.enumType !== 0) { + obj.enumType = featureSet_EnumTypeToJSON(message.enumType); + } + if ( + message.repeatedFieldEncoding !== undefined && + message.repeatedFieldEncoding !== 0 + ) { + obj.repeatedFieldEncoding = featureSet_RepeatedFieldEncodingToJSON( + message.repeatedFieldEncoding, + ); + } + if ( + message.utf8Validation !== undefined && + message.utf8Validation !== 0 + ) { + obj.utf8Validation = featureSet_Utf8ValidationToJSON( + message.utf8Validation, + ); + } + if ( + message.messageEncoding !== undefined && + message.messageEncoding !== 0 + ) { + obj.messageEncoding = featureSet_MessageEncodingToJSON( + message.messageEncoding, + ); + } + if (message.jsonFormat !== undefined && message.jsonFormat !== 0) { + obj.jsonFormat = featureSet_JsonFormatToJSON(message.jsonFormat); + } + return obj; + }, + + create(base?: DeepPartial): FeatureSet { + return FeatureSet.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): FeatureSet { + const message = createBaseFeatureSet(); + message.fieldPresence = object.fieldPresence ?? 0; + message.enumType = object.enumType ?? 0; + message.repeatedFieldEncoding = object.repeatedFieldEncoding ?? 0; + message.utf8Validation = object.utf8Validation ?? 0; + message.messageEncoding = object.messageEncoding ?? 0; + message.jsonFormat = object.jsonFormat ?? 
0; + return message; + }, + }; + +messageTypeRegistry.set(FeatureSet.$type, FeatureSet); + +function createBaseFeatureSetDefaults(): FeatureSetDefaults { + return { + $type: "google.protobuf.FeatureSetDefaults", + defaults: [], + minimumEdition: 0, + maximumEdition: 0, + }; +} + +export const FeatureSetDefaults: MessageFns< + FeatureSetDefaults, + "google.protobuf.FeatureSetDefaults" +> = { + $type: "google.protobuf.FeatureSetDefaults" as const, + + encode( + message: FeatureSetDefaults, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + for (const v of message.defaults) { + FeatureSetDefaults_FeatureSetEditionDefault.encode( + v!, + writer.uint32(10).fork(), + ).join(); + } + if (message.minimumEdition !== undefined && message.minimumEdition !== 0) { + writer.uint32(32).int32(message.minimumEdition); + } + if (message.maximumEdition !== undefined && message.maximumEdition !== 0) { + writer.uint32(40).int32(message.maximumEdition); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): FeatureSetDefaults { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseFeatureSetDefaults(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.defaults.push( + FeatureSetDefaults_FeatureSetEditionDefault.decode( + reader, + reader.uint32(), + ), + ); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.minimumEdition = reader.int32() as any; + continue; + case 5: + if (tag !== 40) { + break; + } + + message.maximumEdition = reader.int32() as any; + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): FeatureSetDefaults { + return { + $type: FeatureSetDefaults.$type, + defaults: globalThis.Array.isArray(object?.defaults) + ? object.defaults.map((e: any) => + FeatureSetDefaults_FeatureSetEditionDefault.fromJSON(e), + ) + : [], + minimumEdition: isSet(object.minimumEdition) + ? editionFromJSON(object.minimumEdition) + : 0, + maximumEdition: isSet(object.maximumEdition) + ? editionFromJSON(object.maximumEdition) + : 0, + }; + }, + + toJSON(message: FeatureSetDefaults): unknown { + const obj: any = {}; + if (message.defaults?.length) { + obj.defaults = message.defaults.map((e) => + FeatureSetDefaults_FeatureSetEditionDefault.toJSON(e), + ); + } + if (message.minimumEdition !== undefined && message.minimumEdition !== 0) { + obj.minimumEdition = editionToJSON(message.minimumEdition); + } + if (message.maximumEdition !== undefined && message.maximumEdition !== 0) { + obj.maximumEdition = editionToJSON(message.maximumEdition); + } + return obj; + }, + + create(base?: DeepPartial): FeatureSetDefaults { + return FeatureSetDefaults.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): FeatureSetDefaults { + const message = createBaseFeatureSetDefaults(); + message.defaults = + object.defaults?.map((e) => + FeatureSetDefaults_FeatureSetEditionDefault.fromPartial(e), + ) || []; + message.minimumEdition = object.minimumEdition ?? 0; + message.maximumEdition = object.maximumEdition ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(FeatureSetDefaults.$type, FeatureSetDefaults); + +function createBaseFeatureSetDefaults_FeatureSetEditionDefault(): FeatureSetDefaults_FeatureSetEditionDefault { + return { + $type: "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault", + edition: 0, + features: undefined, + }; +} + +export const FeatureSetDefaults_FeatureSetEditionDefault: MessageFns< + FeatureSetDefaults_FeatureSetEditionDefault, + "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault" +> = { + $type: "google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault" as const, + + encode( + message: FeatureSetDefaults_FeatureSetEditionDefault, + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { + if (message.edition !== undefined && message.edition !== 0) { + writer.uint32(24).int32(message.edition); + } + if (message.features !== undefined) { + FeatureSet.encode(message.features, writer.uint32(18).fork()).join(); + } + return writer; + }, + + decode( + input: BinaryReader | Uint8Array, + length?: number, + ): FeatureSetDefaults_FeatureSetEditionDefault { + const reader = + input instanceof BinaryReader ? input : new BinaryReader(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseFeatureSetDefaults_FeatureSetEditionDefault(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 3: + if (tag !== 24) { + break; + } + + message.edition = reader.int32() as any; + continue; + case 2: + if (tag !== 18) { + break; + } + + message.features = FeatureSet.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skip(tag & 7); + } + return message; + }, + + fromJSON(object: any): FeatureSetDefaults_FeatureSetEditionDefault { + return { + $type: FeatureSetDefaults_FeatureSetEditionDefault.$type, + edition: isSet(object.edition) ? editionFromJSON(object.edition) : 0, + features: isSet(object.features) + ? FeatureSet.fromJSON(object.features) + : undefined, + }; + }, + + toJSON(message: FeatureSetDefaults_FeatureSetEditionDefault): unknown { + const obj: any = {}; + if (message.edition !== undefined && message.edition !== 0) { + obj.edition = editionToJSON(message.edition); + } + if (message.features !== undefined) { + obj.features = FeatureSet.toJSON(message.features); + } + return obj; + }, + + create( + base?: DeepPartial, + ): FeatureSetDefaults_FeatureSetEditionDefault { + return FeatureSetDefaults_FeatureSetEditionDefault.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): FeatureSetDefaults_FeatureSetEditionDefault { + const message = createBaseFeatureSetDefaults_FeatureSetEditionDefault(); + message.edition = object.edition ?? 0; + message.features = + object.features !== undefined && object.features !== null + ? 
FeatureSet.fromPartial(object.features) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + FeatureSetDefaults_FeatureSetEditionDefault.$type, + FeatureSetDefaults_FeatureSetEditionDefault, +); + function createBaseSourceCodeInfo(): SourceCodeInfo { return { $type: "google.protobuf.SourceCodeInfo", location: [] }; } -export const SourceCodeInfo = { +export const SourceCodeInfo: MessageFns< + SourceCodeInfo, + "google.protobuf.SourceCodeInfo" +> = { $type: "google.protobuf.SourceCodeInfo" as const, encode( message: SourceCodeInfo, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { for (const v of message.location) { - SourceCodeInfo_Location.encode(v!, writer.uint32(10).fork()).ldelim(); + SourceCodeInfo_Location.encode(v!, writer.uint32(10).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): SourceCodeInfo { + decode(input: BinaryReader | Uint8Array, length?: number): SourceCodeInfo { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseSourceCodeInfo(); while (reader.pos < end) { @@ -4952,7 +6883,7 @@ export const SourceCodeInfo = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -5000,23 +6931,26 @@ function createBaseSourceCodeInfo_Location(): SourceCodeInfo_Location { }; } -export const SourceCodeInfo_Location = { +export const SourceCodeInfo_Location: MessageFns< + SourceCodeInfo_Location, + "google.protobuf.SourceCodeInfo.Location" +> = { $type: "google.protobuf.SourceCodeInfo.Location" as const, encode( message: SourceCodeInfo_Location, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { writer.uint32(10).fork(); for (const v of message.path) { writer.int32(v); } - writer.ldelim(); + writer.join(); writer.uint32(18).fork(); for (const v of message.span) { writer.int32(v); } - writer.ldelim(); + writer.join(); if ( message.leadingComments !== undefined && message.leadingComments !== "" @@ -5036,11 +6970,11 @@ export const SourceCodeInfo_Location = { }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): SourceCodeInfo_Location { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseSourceCodeInfo_Location(); while (reader.pos < end) { @@ -5105,7 +7039,7 @@ export const SourceCodeInfo_Location = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -5182,25 +7116,25 @@ function createBaseGeneratedCodeInfo(): GeneratedCodeInfo { return { $type: "google.protobuf.GeneratedCodeInfo", annotation: [] }; } -export const GeneratedCodeInfo = { +export const GeneratedCodeInfo: MessageFns< + GeneratedCodeInfo, + "google.protobuf.GeneratedCodeInfo" +> = { $type: "google.protobuf.GeneratedCodeInfo" as const, encode( message: GeneratedCodeInfo, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { for (const v of message.annotation) { - GeneratedCodeInfo_Annotation.encode( - v!, - writer.uint32(10).fork(), - ).ldelim(); + GeneratedCodeInfo_Annotation.encode(v!, writer.uint32(10).fork()).join(); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): GeneratedCodeInfo { + decode(input: BinaryReader | Uint8Array, length?: number): GeneratedCodeInfo { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseGeneratedCodeInfo(); while (reader.pos < end) { @@ -5219,7 +7153,7 @@ export const GeneratedCodeInfo = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -5267,21 +7201,25 @@ function createBaseGeneratedCodeInfo_Annotation(): GeneratedCodeInfo_Annotation sourceFile: "", begin: 0, end: 0, + semantic: 0, }; } -export const GeneratedCodeInfo_Annotation = { +export const GeneratedCodeInfo_Annotation: MessageFns< + GeneratedCodeInfo_Annotation, + "google.protobuf.GeneratedCodeInfo.Annotation" +> = { $type: "google.protobuf.GeneratedCodeInfo.Annotation" as const, encode( message: GeneratedCodeInfo_Annotation, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { writer.uint32(10).fork(); for (const v of message.path) { writer.int32(v); } - writer.ldelim(); + writer.join(); if (message.sourceFile !== undefined && message.sourceFile !== "") { writer.uint32(18).string(message.sourceFile); } @@ -5291,15 +7229,18 @@ export const GeneratedCodeInfo_Annotation = { if (message.end !== undefined && message.end !== 0) { writer.uint32(32).int32(message.end); } + if (message.semantic !== undefined && message.semantic !== 0) { + writer.uint32(40).int32(message.semantic); + } return writer; }, decode( - input: _m0.Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): GeneratedCodeInfo_Annotation { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseGeneratedCodeInfo_Annotation(); while (reader.pos < end) { @@ -5343,11 +7284,18 @@ export const GeneratedCodeInfo_Annotation = { message.end = reader.int32(); continue; + case 5: + if (tag !== 40) { + break; + } + + message.semantic = reader.int32() as any; + continue; } if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -5363,6 +7311,9 @@ export const GeneratedCodeInfo_Annotation = { : "", begin: isSet(object.begin) ? globalThis.Number(object.begin) : 0, end: isSet(object.end) ? globalThis.Number(object.end) : 0, + semantic: isSet(object.semantic) + ? generatedCodeInfo_Annotation_SemanticFromJSON(object.semantic) + : 0, }; }, @@ -5380,6 +7331,11 @@ export const GeneratedCodeInfo_Annotation = { if (message.end !== undefined && message.end !== 0) { obj.end = Math.round(message.end); } + if (message.semantic !== undefined && message.semantic !== 0) { + obj.semantic = generatedCodeInfo_Annotation_SemanticToJSON( + message.semantic, + ); + } return obj; }, @@ -5396,6 +7352,7 @@ export const GeneratedCodeInfo_Annotation = { message.sourceFile = object.sourceFile ?? ""; message.begin = object.begin ?? 0; message.end = object.end ?? 0; + message.semantic = object.semantic ?? 0; return message; }, }; @@ -5451,11 +7408,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/google/protobuf/empty.ts b/ts/src/generated/google/protobuf/empty.ts index 2a977331..ed3bda6d 100644 --- a/ts/src/generated/google/protobuf/empty.ts +++ b/ts/src/generated/google/protobuf/empty.ts @@ -1,6 +1,12 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: google/protobuf/empty.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; import { messageTypeRegistry } from "../../typeRegistry"; /** @@ -20,16 +26,16 @@ function createBaseEmpty(): Empty { return { $type: "google.protobuf.Empty" }; } -export const Empty = { +export const Empty: MessageFns = { $type: "google.protobuf.Empty" as const, - encode(_: Empty, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + encode(_: Empty, writer: BinaryWriter = new BinaryWriter()): BinaryWriter { return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Empty { + decode(input: BinaryReader | Uint8Array, length?: number): Empty { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseEmpty(); while (reader.pos < end) { @@ -39,7 +45,7 @@ export const Empty = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -85,7 +91,12 @@ type DeepPartial = T extends Builtin ? 
{ [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; } diff --git a/ts/src/generated/google/protobuf/timestamp.ts b/ts/src/generated/google/protobuf/timestamp.ts deleted file mode 100644 index 2b32178c..00000000 --- a/ts/src/generated/google/protobuf/timestamp.ts +++ /dev/null @@ -1,230 +0,0 @@ -/* eslint-disable */ -import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../typeRegistry"; - -/** - * A Timestamp represents a point in time independent of any time zone or local - * calendar, encoded as a count of seconds and fractions of seconds at - * nanosecond resolution. The count is relative to an epoch at UTC midnight on - * January 1, 1970, in the proleptic Gregorian calendar which extends the - * Gregorian calendar backwards to year one. - * - * All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap - * second table is needed for interpretation, using a [24-hour linear - * smear](https://developers.google.com/time/smear). - * - * The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By - * restricting to that range, we ensure that we can convert to and from [RFC - * 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. - * - * # Examples - * - * Example 1: Compute Timestamp from POSIX `time()`. - * - * Timestamp timestamp; - * timestamp.set_seconds(time(NULL)); - * timestamp.set_nanos(0); - * - * Example 2: Compute Timestamp from POSIX `gettimeofday()`. - * - * struct timeval tv; - * gettimeofday(&tv, NULL); - * - * Timestamp timestamp; - * timestamp.set_seconds(tv.tv_sec); - * timestamp.set_nanos(tv.tv_usec * 1000); - * - * Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. - * - * FILETIME ft; - * GetSystemTimeAsFileTime(&ft); - * UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; - * - * // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z - * // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. - * Timestamp timestamp; - * timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); - * timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); - * - * Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. - * - * long millis = System.currentTimeMillis(); - * - * Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) - * .setNanos((int) ((millis % 1000) * 1000000)).build(); - * - * Example 5: Compute Timestamp from Java `Instant.now()`. - * - * Instant now = Instant.now(); - * - * Timestamp timestamp = - * Timestamp.newBuilder().setSeconds(now.getEpochSecond()) - * .setNanos(now.getNano()).build(); - * - * Example 6: Compute Timestamp from current time in Python. - * - * timestamp = Timestamp() - * timestamp.GetCurrentTime() - * - * # JSON Mapping - * - * In JSON format, the Timestamp type is encoded as a string in the - * [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the - * format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" - * where {year} is always expressed using four digits while {month}, {day}, - * {hour}, {min}, and {sec} are zero-padded to two digits each. 
The fractional - * seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), - * are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone - * is required. A proto3 JSON serializer should always use UTC (as indicated by - * "Z") when printing the Timestamp type and a proto3 JSON parser should be - * able to accept both UTC and other timezones (as indicated by an offset). - * - * For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past - * 01:30 UTC on January 15, 2017. - * - * In JavaScript, one can convert a Date object to this format using the - * standard - * [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) - * method. In Python, a standard `datetime.datetime` object can be converted - * to this format using - * [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with - * the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use - * the Joda Time's [`ISODateTimeFormat.dateTime()`]( - * http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D - * ) to obtain a formatter capable of generating timestamps in this format. - */ -export interface Timestamp { - $type: "google.protobuf.Timestamp"; - /** - * Represents seconds of UTC time since Unix epoch - * 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - * 9999-12-31T23:59:59Z inclusive. - */ - seconds: Long; - /** - * Non-negative fractions of a second at nanosecond resolution. Negative - * second values with fractions must still have non-negative nanos values - * that count forward in time. Must be from 0 to 999,999,999 - * inclusive. - */ - nanos: number; -} - -function createBaseTimestamp(): Timestamp { - return { $type: "google.protobuf.Timestamp", seconds: Long.ZERO, nanos: 0 }; -} - -export const Timestamp = { - $type: "google.protobuf.Timestamp" as const, - - encode( - message: Timestamp, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { - if (!message.seconds.equals(Long.ZERO)) { - writer.uint32(8).int64(message.seconds); - } - if (message.nanos !== 0) { - writer.uint32(16).int32(message.nanos); - } - return writer; - }, - - decode(input: _m0.Reader | Uint8Array, length?: number): Timestamp { - const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); - let end = length === undefined ? reader.len : reader.pos + length; - const message = createBaseTimestamp(); - while (reader.pos < end) { - const tag = reader.uint32(); - switch (tag >>> 3) { - case 1: - if (tag !== 8) { - break; - } - - message.seconds = reader.int64() as Long; - continue; - case 2: - if (tag !== 16) { - break; - } - - message.nanos = reader.int32(); - continue; - } - if ((tag & 7) === 4 || tag === 0) { - break; - } - reader.skipType(tag & 7); - } - return message; - }, - - fromJSON(object: any): Timestamp { - return { - $type: Timestamp.$type, - seconds: isSet(object.seconds) - ? Long.fromValue(object.seconds) - : Long.ZERO, - nanos: isSet(object.nanos) ? globalThis.Number(object.nanos) : 0, - }; - }, - - toJSON(message: Timestamp): unknown { - const obj: any = {}; - if (!message.seconds.equals(Long.ZERO)) { - obj.seconds = (message.seconds || Long.ZERO).toString(); - } - if (message.nanos !== 0) { - obj.nanos = Math.round(message.nanos); - } - return obj; - }, - - create(base?: DeepPartial): Timestamp { - return Timestamp.fromPartial(base ?? 
{}); - }, - fromPartial(object: DeepPartial): Timestamp { - const message = createBaseTimestamp(); - message.seconds = - object.seconds !== undefined && object.seconds !== null - ? Long.fromValue(object.seconds) - : Long.ZERO; - message.nanos = object.nanos ?? 0; - return message; - }, -}; - -messageTypeRegistry.set(Timestamp.$type, Timestamp); - -type Builtin = - | Date - | Function - | Uint8Array - | string - | number - | boolean - | undefined; - -type DeepPartial = T extends Builtin - ? T - : T extends Long - ? string | number | Long - : T extends globalThis.Array - ? globalThis.Array> - : T extends ReadonlyArray - ? ReadonlyArray> - : T extends {} - ? { [K in Exclude]?: DeepPartial } - : Partial; - -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - -function isSet(value: any): boolean { - return value !== null && value !== undefined; -} diff --git a/ts/src/generated/index.akash.audit.ts b/ts/src/generated/index.akash.audit.ts index 141b22fb..a5c1f8b1 100644 --- a/ts/src/generated/index.akash.audit.ts +++ b/ts/src/generated/index.akash.audit.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ -export * as v1beta3 from "./index.akash.audit.v1beta3"; +export * as v1 from "./index.akash.audit.v1"; diff --git a/ts/src/generated/index.akash.audit.v1.grpc-js.ts b/ts/src/generated/index.akash.audit.v1.grpc-js.ts new file mode 100644 index 00000000..ef9cea83 --- /dev/null +++ b/ts/src/generated/index.akash.audit.v1.grpc-js.ts @@ -0,0 +1 @@ +export * from "./akash/audit/v1/service.grpc-js"; diff --git a/ts/src/generated/index.akash.audit.v1.ts b/ts/src/generated/index.akash.audit.v1.ts new file mode 100644 index 00000000..d212c6d7 --- /dev/null +++ b/ts/src/generated/index.akash.audit.v1.ts @@ -0,0 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * from "./akash/audit/v1/msg"; diff --git a/ts/src/generated/index.akash.audit.v1beta1.ts b/ts/src/generated/index.akash.audit.v1beta1.ts deleted file mode 100644 index 0ef95141..00000000 --- a/ts/src/generated/index.akash.audit.v1beta1.ts +++ /dev/null @@ -1,3 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/audit/v1beta1/audit"; diff --git a/ts/src/generated/index.akash.audit.v1beta2.ts b/ts/src/generated/index.akash.audit.v1beta2.ts deleted file mode 100644 index 3eb52871..00000000 --- a/ts/src/generated/index.akash.audit.v1beta2.ts +++ /dev/null @@ -1,5 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/audit/v1beta2/audit"; -export * from "./akash/audit/v1beta2/query"; -export * from "./akash/audit/v1beta2/genesis"; diff --git a/ts/src/generated/index.akash.audit.v1beta3.ts b/ts/src/generated/index.akash.audit.v1beta3.ts deleted file mode 100644 index 96329323..00000000 --- a/ts/src/generated/index.akash.audit.v1beta3.ts +++ /dev/null @@ -1,5 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/audit/v1beta3/audit"; -export * from "./akash/audit/v1beta3/query"; -export * from "./akash/audit/v1beta3/genesis"; diff --git a/ts/src/generated/index.akash.base.attributes.ts b/ts/src/generated/index.akash.base.attributes.ts new file mode 100644 index 00000000..f21f2712 --- /dev/null +++ b/ts/src/generated/index.akash.base.attributes.ts @@ -0,0 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * as v1 from "./index.akash.base.attributes.v1"; diff --git a/ts/src/generated/index.akash.base.attributes.v1.ts b/ts/src/generated/index.akash.base.attributes.v1.ts new file mode 100644 index 00000000..3d4a12fd --- /dev/null +++ b/ts/src/generated/index.akash.base.attributes.v1.ts @@ -0,0 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * from "./akash/base/attributes/v1/attribute"; diff --git a/ts/src/generated/index.akash.base.resources.ts b/ts/src/generated/index.akash.base.resources.ts new file mode 100644 index 00000000..4d6affe6 --- /dev/null +++ b/ts/src/generated/index.akash.base.resources.ts @@ -0,0 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * as v1beta4 from "./index.akash.base.resources.v1beta4"; diff --git a/ts/src/generated/index.akash.base.resources.v1beta4.ts b/ts/src/generated/index.akash.base.resources.v1beta4.ts new file mode 100644 index 00000000..b17efc55 --- /dev/null +++ b/ts/src/generated/index.akash.base.resources.v1beta4.ts @@ -0,0 +1,14 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * from "./akash/base/resources/v1beta4/resourcevalue"; +export * from "./akash/base/resources/v1beta4/cpu"; +export * from "./akash/base/resources/v1beta4/gpu"; +export * from "./akash/base/resources/v1beta4/memory"; +export * from "./akash/base/resources/v1beta4/storage"; +export * from "./akash/base/resources/v1beta4/endpoint"; +export * from "./akash/base/resources/v1beta4/resources"; diff --git a/ts/src/generated/index.akash.base.ts b/ts/src/generated/index.akash.base.ts index b44aa19f..2f2a9821 100644 --- a/ts/src/generated/index.akash.base.ts +++ b/ts/src/generated/index.akash.base.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ -export * as v1beta3 from "./index.akash.base.v1beta3"; +export * as attributes from "./index.akash.base.attributes"; diff --git a/ts/src/generated/index.akash.base.v1beta1.ts b/ts/src/generated/index.akash.base.v1beta1.ts deleted file mode 100644 index 8d32351b..00000000 --- a/ts/src/generated/index.akash.base.v1beta1.ts +++ /dev/null @@ -1,3 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/base/v1beta1/attribute"; diff --git a/ts/src/generated/index.akash.base.v1beta2.ts b/ts/src/generated/index.akash.base.v1beta2.ts deleted file mode 100644 index 19a4fcc4..00000000 --- a/ts/src/generated/index.akash.base.v1beta2.ts +++ /dev/null @@ -1,7 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/base/v1beta2/attribute"; -export * from "./akash/base/v1beta2/resourcevalue"; -export * from "./akash/base/v1beta2/resource"; -export * from "./akash/base/v1beta2/endpoint"; -export * from "./akash/base/v1beta2/resourceunits"; diff --git a/ts/src/generated/index.akash.base.v1beta3.ts b/ts/src/generated/index.akash.base.v1beta3.ts deleted file mode 100644 index a9744583..00000000 --- a/ts/src/generated/index.akash.base.v1beta3.ts +++ /dev/null @@ -1,3 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/base/v1beta3/attribute"; diff --git a/ts/src/generated/index.akash.cert.ts b/ts/src/generated/index.akash.cert.ts index 61bf67af..959b170b 100644 --- a/ts/src/generated/index.akash.cert.ts +++ b/ts/src/generated/index.akash.cert.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ -export * as v1beta3 from "./index.akash.cert.v1beta3"; +export * as v1 from "./index.akash.cert.v1"; diff --git a/ts/src/generated/index.akash.cert.v1.grpc-js.ts b/ts/src/generated/index.akash.cert.v1.grpc-js.ts new file mode 100644 index 00000000..9bfb7686 --- /dev/null +++ b/ts/src/generated/index.akash.cert.v1.grpc-js.ts @@ -0,0 +1 @@ +export * from "./akash/cert/v1/service.grpc-js"; diff --git a/ts/src/generated/index.akash.cert.v1.ts b/ts/src/generated/index.akash.cert.v1.ts new file mode 100644 index 00000000..fa2f7914 --- /dev/null +++ b/ts/src/generated/index.akash.cert.v1.ts @@ -0,0 +1,9 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * from "./akash/cert/v1/cert"; +export * from "./akash/cert/v1/msg"; diff --git a/ts/src/generated/index.akash.cert.v1beta2.ts b/ts/src/generated/index.akash.cert.v1beta2.ts deleted file mode 100644 index 6893caa9..00000000 --- a/ts/src/generated/index.akash.cert.v1beta2.ts +++ /dev/null @@ -1,5 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/cert/v1beta2/cert"; -export * from "./akash/cert/v1beta2/query"; -export * from "./akash/cert/v1beta2/genesis"; diff --git a/ts/src/generated/index.akash.cert.v1beta3.ts b/ts/src/generated/index.akash.cert.v1beta3.ts deleted file mode 100644 index d83c7e9d..00000000 --- a/ts/src/generated/index.akash.cert.v1beta3.ts +++ /dev/null @@ -1,5 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/cert/v1beta3/cert"; -export * from "./akash/cert/v1beta3/query"; -export * from "./akash/cert/v1beta3/genesis"; diff --git a/ts/src/generated/index.akash.deployment.ts b/ts/src/generated/index.akash.deployment.ts index f465693a..a564b97d 100644 --- a/ts/src/generated/index.akash.deployment.ts +++ b/ts/src/generated/index.akash.deployment.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ -export * as v1beta3 from "./index.akash.deployment.v1beta3"; +export * as v1beta4 from "./index.akash.deployment.v1beta4"; diff --git a/ts/src/generated/index.akash.deployment.v1.ts b/ts/src/generated/index.akash.deployment.v1.ts new file mode 100644 index 00000000..a4ac9c3e --- /dev/null +++ b/ts/src/generated/index.akash.deployment.v1.ts @@ -0,0 +1,9 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * from "./akash/deployment/v1/deployment"; +export * from "./akash/deployment/v1/group"; diff --git a/ts/src/generated/index.akash.deployment.v1beta1.ts b/ts/src/generated/index.akash.deployment.v1beta1.ts deleted file mode 100644 index 83fc7f47..00000000 --- a/ts/src/generated/index.akash.deployment.v1beta1.ts +++ /dev/null @@ -1,8 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/deployment/v1beta1/group"; -export * from "./akash/deployment/v1beta1/deployment"; -export * from "./akash/deployment/v1beta1/query"; -export * from "./akash/deployment/v1beta1/authz"; -export * from "./akash/deployment/v1beta1/params"; -export * from "./akash/deployment/v1beta1/genesis"; diff --git a/ts/src/generated/index.akash.deployment.v1beta2.grpc-js.ts b/ts/src/generated/index.akash.deployment.v1beta2.grpc-js.ts deleted file mode 100644 index 11822aa1..00000000 --- a/ts/src/generated/index.akash.deployment.v1beta2.grpc-js.ts +++ /dev/null @@ -1 +0,0 @@ -export * from "./akash/deployment/v1beta2/service.grpc-js"; diff --git a/ts/src/generated/index.akash.deployment.v1beta2.ts b/ts/src/generated/index.akash.deployment.v1beta2.ts deleted file mode 100644 index 56efdd96..00000000 --- a/ts/src/generated/index.akash.deployment.v1beta2.ts +++ /dev/null @@ -1,4 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/deployment/v1beta2/resource"; -export * from "./akash/deployment/v1beta2/groupspec"; diff --git a/ts/src/generated/index.akash.deployment.v1beta3.grpc-js.ts b/ts/src/generated/index.akash.deployment.v1beta3.grpc-js.ts deleted file mode 100644 index f79defd7..00000000 --- a/ts/src/generated/index.akash.deployment.v1beta3.grpc-js.ts +++ /dev/null @@ -1 +0,0 @@ -export * 
from "./akash/deployment/v1beta3/service.grpc-js"; diff --git a/ts/src/generated/index.akash.deployment.v1beta3.ts b/ts/src/generated/index.akash.deployment.v1beta3.ts deleted file mode 100644 index 27a505ec..00000000 --- a/ts/src/generated/index.akash.deployment.v1beta3.ts +++ /dev/null @@ -1,4 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/deployment/v1beta3/resourceunit"; -export * from "./akash/deployment/v1beta3/groupspec"; diff --git a/ts/src/generated/index.akash.deployment.v1beta4.grpc-js.ts b/ts/src/generated/index.akash.deployment.v1beta4.grpc-js.ts new file mode 100644 index 00000000..5a6886bb --- /dev/null +++ b/ts/src/generated/index.akash.deployment.v1beta4.grpc-js.ts @@ -0,0 +1 @@ +export * from "./akash/deployment/v1beta4/service.grpc-js"; diff --git a/ts/src/generated/index.akash.deployment.v1beta4.ts b/ts/src/generated/index.akash.deployment.v1beta4.ts new file mode 100644 index 00000000..34730f1d --- /dev/null +++ b/ts/src/generated/index.akash.deployment.v1beta4.ts @@ -0,0 +1,9 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * from "./akash/deployment/v1beta4/resourceunit"; +export * from "./akash/deployment/v1beta4/groupspec"; diff --git a/ts/src/generated/index.akash.discovery.ts b/ts/src/generated/index.akash.discovery.ts index 8c69353e..12e9cd44 100644 --- a/ts/src/generated/index.akash.discovery.ts +++ b/ts/src/generated/index.akash.discovery.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as v1 from "./index.akash.discovery.v1"; diff --git a/ts/src/generated/index.akash.discovery.v1.ts b/ts/src/generated/index.akash.discovery.v1.ts index 87078ad7..a9516c78 100644 --- a/ts/src/generated/index.akash.discovery.v1.ts +++ b/ts/src/generated/index.akash.discovery.v1.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * from "./akash/discovery/v1/client_info"; diff --git a/ts/src/generated/index.akash.escrow.ts b/ts/src/generated/index.akash.escrow.ts index f7417f29..68a6bc40 100644 --- a/ts/src/generated/index.akash.escrow.ts +++ b/ts/src/generated/index.akash.escrow.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ -export * as v1beta3 from "./index.akash.escrow.v1beta3"; +export * as v1 from "./index.akash.escrow.v1"; diff --git a/ts/src/generated/index.akash.escrow.v1.ts b/ts/src/generated/index.akash.escrow.v1.ts new file mode 100644 index 00000000..5e7d041a --- /dev/null +++ b/ts/src/generated/index.akash.escrow.v1.ts @@ -0,0 +1,10 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * from "./akash/escrow/v1/accountid"; +export * from "./akash/escrow/v1/account"; +export * from "./akash/escrow/v1/fractional_payment"; diff --git a/ts/src/generated/index.akash.escrow.v1beta1.ts b/ts/src/generated/index.akash.escrow.v1beta1.ts deleted file mode 100644 index 210052b4..00000000 --- a/ts/src/generated/index.akash.escrow.v1beta1.ts +++ /dev/null @@ -1,5 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/escrow/v1beta1/types"; -export * from "./akash/escrow/v1beta1/query"; -export * from "./akash/escrow/v1beta1/genesis"; diff --git a/ts/src/generated/index.akash.escrow.v1beta2.ts b/ts/src/generated/index.akash.escrow.v1beta2.ts deleted file mode 100644 index 62742fb3..00000000 --- a/ts/src/generated/index.akash.escrow.v1beta2.ts +++ /dev/null @@ -1,3 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/escrow/v1beta2/types"; diff --git a/ts/src/generated/index.akash.escrow.v1beta3.ts b/ts/src/generated/index.akash.escrow.v1beta3.ts deleted file mode 100644 index cc6790cb..00000000 --- a/ts/src/generated/index.akash.escrow.v1beta3.ts +++ /dev/null @@ -1,3 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/escrow/v1beta3/types"; diff --git a/ts/src/generated/index.akash.gov.ts b/ts/src/generated/index.akash.gov.ts index 37710fa3..a5a7a695 100644 --- a/ts/src/generated/index.akash.gov.ts +++ b/ts/src/generated/index.akash.gov.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as v1beta3 from "./index.akash.gov.v1beta3"; diff --git a/ts/src/generated/index.akash.gov.v1beta3.ts b/ts/src/generated/index.akash.gov.v1beta3.ts index 1cd9917e..21e8260e 100644 --- a/ts/src/generated/index.akash.gov.v1beta3.ts +++ b/ts/src/generated/index.akash.gov.v1beta3.ts @@ -1,4 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * from "./akash/gov/v1beta3/params"; -export * from "./akash/gov/v1beta3/genesis"; diff --git a/ts/src/generated/index.akash.inflation.ts b/ts/src/generated/index.akash.inflation.ts index 2915a4da..467ff6e8 100644 --- a/ts/src/generated/index.akash.inflation.ts +++ b/ts/src/generated/index.akash.inflation.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as v1beta3 from "./index.akash.inflation.v1beta3"; diff --git a/ts/src/generated/index.akash.inflation.v1beta2.ts b/ts/src/generated/index.akash.inflation.v1beta2.ts index 5677f2ef..eaa21440 100644 --- a/ts/src/generated/index.akash.inflation.v1beta2.ts +++ b/ts/src/generated/index.akash.inflation.v1beta2.ts @@ -1,4 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * from "./akash/inflation/v1beta2/params"; -export * from "./akash/inflation/v1beta2/genesis"; diff --git a/ts/src/generated/index.akash.inflation.v1beta3.ts b/ts/src/generated/index.akash.inflation.v1beta3.ts index e0db2869..b3dce010 100644 --- a/ts/src/generated/index.akash.inflation.v1beta3.ts +++ b/ts/src/generated/index.akash.inflation.v1beta3.ts @@ -1,4 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * from "./akash/inflation/v1beta3/params"; -export * from "./akash/inflation/v1beta3/genesis"; diff --git a/ts/src/generated/index.akash.inventory.ts b/ts/src/generated/index.akash.inventory.ts index 750ed3b8..20fa8dab 100644 --- a/ts/src/generated/index.akash.inventory.ts +++ b/ts/src/generated/index.akash.inventory.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as v1 from "./index.akash.inventory.v1"; diff --git a/ts/src/generated/index.akash.inventory.v1.ts b/ts/src/generated/index.akash.inventory.v1.ts index ca2226ce..83330ea2 100644 --- a/ts/src/generated/index.akash.inventory.v1.ts +++ b/ts/src/generated/index.akash.inventory.v1.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * from "./akash/inventory/v1/resourcepair"; diff --git a/ts/src/generated/index.akash.manifest.ts b/ts/src/generated/index.akash.manifest.ts index 05e87bdb..bb2a95dd 100644 --- a/ts/src/generated/index.akash.manifest.ts +++ b/ts/src/generated/index.akash.manifest.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ -export * as v2beta2 from "./index.akash.manifest.v2beta2"; +export * as v2beta3 from "./index.akash.manifest.v2beta3"; diff --git a/ts/src/generated/index.akash.manifest.v2beta1.grpc-js.ts b/ts/src/generated/index.akash.manifest.v2beta1.grpc-js.ts deleted file mode 100644 index 3430ffb2..00000000 --- a/ts/src/generated/index.akash.manifest.v2beta1.grpc-js.ts +++ /dev/null @@ -1 +0,0 @@ -export * from "./akash/manifest/v2beta1/service.grpc-js"; diff --git a/ts/src/generated/index.akash.manifest.v2beta1.ts b/ts/src/generated/index.akash.manifest.v2beta1.ts deleted file mode 100644 index 2871a316..00000000 --- a/ts/src/generated/index.akash.manifest.v2beta1.ts +++ /dev/null @@ -1,6 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/manifest/v2beta1/httpoptions"; -export * from "./akash/manifest/v2beta1/serviceexpose"; -export * from "./akash/manifest/v2beta1/service"; -export * from "./akash/manifest/v2beta1/group"; diff --git a/ts/src/generated/index.akash.manifest.v2beta2.grpc-js.ts b/ts/src/generated/index.akash.manifest.v2beta2.grpc-js.ts deleted file mode 100644 index f0e71b12..00000000 --- a/ts/src/generated/index.akash.manifest.v2beta2.grpc-js.ts +++ /dev/null @@ -1 +0,0 @@ -export * from "./akash/manifest/v2beta2/service.grpc-js"; diff --git a/ts/src/generated/index.akash.manifest.v2beta2.ts b/ts/src/generated/index.akash.manifest.v2beta2.ts deleted file mode 100644 index 3c915f3c..00000000 --- a/ts/src/generated/index.akash.manifest.v2beta2.ts +++ /dev/null @@ -1,6 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/manifest/v2beta2/httpoptions"; -export * from "./akash/manifest/v2beta2/serviceexpose"; -export * from "./akash/manifest/v2beta2/service"; -export * from "./akash/manifest/v2beta2/group"; diff --git a/ts/src/generated/index.akash.manifest.v2beta3.grpc-js.ts b/ts/src/generated/index.akash.manifest.v2beta3.grpc-js.ts new file mode 100644 index 00000000..b35b71ba --- /dev/null +++ b/ts/src/generated/index.akash.manifest.v2beta3.grpc-js.ts @@ -0,0 +1 @@ +export * from "./akash/manifest/v2beta3/service.grpc-js"; diff --git 
a/ts/src/generated/index.akash.manifest.v2beta3.ts b/ts/src/generated/index.akash.manifest.v2beta3.ts new file mode 100644 index 00000000..e022d2a6 --- /dev/null +++ b/ts/src/generated/index.akash.manifest.v2beta3.ts @@ -0,0 +1,11 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * from "./akash/manifest/v2beta3/httpoptions"; +export * from "./akash/manifest/v2beta3/serviceexpose"; +export * from "./akash/manifest/v2beta3/service"; +export * from "./akash/manifest/v2beta3/group"; diff --git a/ts/src/generated/index.akash.market.ts b/ts/src/generated/index.akash.market.ts index d700a988..f7a810cd 100644 --- a/ts/src/generated/index.akash.market.ts +++ b/ts/src/generated/index.akash.market.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ -export * as v1beta4 from "./index.akash.market.v1beta4"; +export * as v1 from "./index.akash.market.v1"; diff --git a/ts/src/generated/index.akash.market.v1.ts b/ts/src/generated/index.akash.market.v1.ts new file mode 100644 index 00000000..fe8072a6 --- /dev/null +++ b/ts/src/generated/index.akash.market.v1.ts @@ -0,0 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * from "./akash/market/v1/lease"; diff --git a/ts/src/generated/index.akash.market.v1beta2.grpc-js.ts b/ts/src/generated/index.akash.market.v1beta2.grpc-js.ts deleted file mode 100644 index a96efc87..00000000 --- a/ts/src/generated/index.akash.market.v1beta2.grpc-js.ts +++ /dev/null @@ -1 +0,0 @@ -export * from "./akash/market/v1beta2/service.grpc-js"; diff --git a/ts/src/generated/index.akash.market.v1beta2.ts b/ts/src/generated/index.akash.market.v1beta2.ts deleted file mode 100644 index 58a2bee2..00000000 --- a/ts/src/generated/index.akash.market.v1beta2.ts +++ /dev/null @@ -1,9 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/market/v1beta2/order"; -export * from "./akash/market/v1beta2/bid"; -export * from "./akash/market/v1beta2/lease"; -export * from "./akash/market/v1beta2/query"; -export * from "./akash/market/v1beta2/service"; -export * from "./akash/market/v1beta2/params"; -export * from "./akash/market/v1beta2/genesis"; diff --git a/ts/src/generated/index.akash.market.v1beta3.grpc-js.ts b/ts/src/generated/index.akash.market.v1beta3.grpc-js.ts deleted file mode 100644 index cbb2d07c..00000000 --- a/ts/src/generated/index.akash.market.v1beta3.grpc-js.ts +++ /dev/null @@ -1 +0,0 @@ -export * from "./akash/market/v1beta3/service.grpc-js"; diff --git a/ts/src/generated/index.akash.market.v1beta3.ts b/ts/src/generated/index.akash.market.v1beta3.ts deleted file mode 100644 index aafe5c7f..00000000 --- a/ts/src/generated/index.akash.market.v1beta3.ts +++ /dev/null @@ -1,9 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/market/v1beta3/order"; -export * from "./akash/market/v1beta3/bid"; -export * from "./akash/market/v1beta3/lease"; -export * from "./akash/market/v1beta3/query"; -export * from "./akash/market/v1beta3/service"; -export * from "./akash/market/v1beta3/params"; -export * from "./akash/market/v1beta3/genesis"; diff --git a/ts/src/generated/index.akash.market.v1beta4.grpc-js.ts b/ts/src/generated/index.akash.market.v1beta4.grpc-js.ts deleted file mode 100644 index 9e0ff265..00000000 --- 
a/ts/src/generated/index.akash.market.v1beta4.grpc-js.ts +++ /dev/null @@ -1 +0,0 @@ -export * from "./akash/market/v1beta4/service.grpc-js"; diff --git a/ts/src/generated/index.akash.market.v1beta4.ts b/ts/src/generated/index.akash.market.v1beta4.ts deleted file mode 100644 index 08971ecc..00000000 --- a/ts/src/generated/index.akash.market.v1beta4.ts +++ /dev/null @@ -1,5 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/market/v1beta4/order"; -export * from "./akash/market/v1beta4/bid"; -export * from "./akash/market/v1beta4/lease"; diff --git a/ts/src/generated/index.akash.market.v1beta5.grpc-js.ts b/ts/src/generated/index.akash.market.v1beta5.grpc-js.ts new file mode 100644 index 00000000..373c7595 --- /dev/null +++ b/ts/src/generated/index.akash.market.v1beta5.grpc-js.ts @@ -0,0 +1 @@ +export * from "./akash/market/v1beta5/service.grpc-js"; diff --git a/ts/src/generated/index.akash.market.v1beta5.ts b/ts/src/generated/index.akash.market.v1beta5.ts new file mode 100644 index 00000000..a08afb85 --- /dev/null +++ b/ts/src/generated/index.akash.market.v1beta5.ts @@ -0,0 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * from "./akash/market/v1beta5/params"; diff --git a/ts/src/generated/index.akash.provider.lease.ts b/ts/src/generated/index.akash.provider.lease.ts index b24a6a2a..1ae52070 100644 --- a/ts/src/generated/index.akash.provider.lease.ts +++ b/ts/src/generated/index.akash.provider.lease.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as v1 from "./index.akash.provider.lease.v1"; diff --git a/ts/src/generated/index.akash.provider.lease.v1.ts b/ts/src/generated/index.akash.provider.lease.v1.ts index 2346ab99..5d68a263 100644 --- a/ts/src/generated/index.akash.provider.lease.v1.ts +++ b/ts/src/generated/index.akash.provider.lease.v1.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * from "./akash/provider/lease/v1/service"; diff --git a/ts/src/generated/index.akash.provider.ts b/ts/src/generated/index.akash.provider.ts index 1e96ea30..4c3eb585 100644 --- a/ts/src/generated/index.akash.provider.ts +++ b/ts/src/generated/index.akash.provider.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as v1 from "./index.akash.provider.v1"; diff --git a/ts/src/generated/index.akash.provider.v1.ts b/ts/src/generated/index.akash.provider.v1.ts index 3e17ac05..7698c551 100644 --- a/ts/src/generated/index.akash.provider.v1.ts +++ b/ts/src/generated/index.akash.provider.v1.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * from "./akash/provider/v1/status"; diff --git a/ts/src/generated/index.akash.provider.v1beta1.ts b/ts/src/generated/index.akash.provider.v1beta1.ts deleted file mode 100644 index 75847877..00000000 --- a/ts/src/generated/index.akash.provider.v1beta1.ts +++ /dev/null @@ -1,3 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/provider/v1beta1/provider"; diff --git a/ts/src/generated/index.akash.provider.v1beta2.ts b/ts/src/generated/index.akash.provider.v1beta2.ts deleted file mode 100644 index 4e755b71..00000000 --- a/ts/src/generated/index.akash.provider.v1beta2.ts +++ /dev/null @@ -1,5 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/provider/v1beta2/provider"; -export * from "./akash/provider/v1beta2/query"; -export * from "./akash/provider/v1beta2/genesis"; diff --git a/ts/src/generated/index.akash.provider.v1beta3.ts b/ts/src/generated/index.akash.provider.v1beta3.ts deleted file mode 100644 index 1b43afa9..00000000 --- a/ts/src/generated/index.akash.provider.v1beta3.ts +++ /dev/null @@ -1,5 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/provider/v1beta3/provider"; -export * from "./akash/provider/v1beta3/query"; -export * from "./akash/provider/v1beta3/genesis"; diff --git a/ts/src/generated/index.akash.provider.v1beta4.grpc-js.ts b/ts/src/generated/index.akash.provider.v1beta4.grpc-js.ts new file mode 100644 index 00000000..ed5daa07 --- /dev/null +++ b/ts/src/generated/index.akash.provider.v1beta4.grpc-js.ts @@ -0,0 +1 @@ +export * from "./akash/provider/v1beta4/service.grpc-js"; diff --git a/ts/src/generated/index.akash.provider.v1beta4.ts b/ts/src/generated/index.akash.provider.v1beta4.ts new file mode 100644 index 00000000..1c05173e --- /dev/null +++ b/ts/src/generated/index.akash.provider.v1beta4.ts @@ -0,0 +1,9 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * from "./akash/provider/v1beta4/provider"; +export * from "./akash/provider/v1beta4/msg"; diff --git a/ts/src/generated/index.akash.staking.ts b/ts/src/generated/index.akash.staking.ts index 4117c58f..738981f6 100644 --- a/ts/src/generated/index.akash.staking.ts +++ b/ts/src/generated/index.akash.staking.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as v1beta3 from "./index.akash.staking.v1beta3"; diff --git a/ts/src/generated/index.akash.staking.v1beta3.grpc-js.ts b/ts/src/generated/index.akash.staking.v1beta3.grpc-js.ts new file mode 100644 index 00000000..27eb5c1c --- /dev/null +++ b/ts/src/generated/index.akash.staking.v1beta3.grpc-js.ts @@ -0,0 +1 @@ +export * from "./akash/staking/v1beta3/service.grpc-js"; diff --git a/ts/src/generated/index.akash.staking.v1beta3.ts b/ts/src/generated/index.akash.staking.v1beta3.ts index 30f96858..9f92d6e2 100644 --- a/ts/src/generated/index.akash.staking.v1beta3.ts +++ b/ts/src/generated/index.akash.staking.v1beta3.ts @@ -1,4 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * from "./akash/staking/v1beta3/params"; -export * from "./akash/staking/v1beta3/genesis"; diff --git a/ts/src/generated/index.akash.take.ts b/ts/src/generated/index.akash.take.ts index 672b6a9a..7163edd1 100644 --- a/ts/src/generated/index.akash.take.ts +++ b/ts/src/generated/index.akash.take.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ -export * as v1beta3 from "./index.akash.take.v1beta3"; +export * as v1 from "./index.akash.take.v1"; diff --git a/ts/src/generated/index.akash.take.v1.grpc-js.ts b/ts/src/generated/index.akash.take.v1.grpc-js.ts new file mode 100644 index 00000000..6dc001cb --- /dev/null +++ b/ts/src/generated/index.akash.take.v1.grpc-js.ts @@ -0,0 +1 @@ +export * from "./akash/take/v1/service.grpc-js"; diff --git a/ts/src/generated/index.akash.take.v1.ts b/ts/src/generated/index.akash.take.v1.ts new file mode 100644 index 00000000..03dcc69a --- /dev/null +++ b/ts/src/generated/index.akash.take.v1.ts @@ -0,0 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * from "./akash/take/v1/params"; diff --git a/ts/src/generated/index.akash.take.v1beta3.ts b/ts/src/generated/index.akash.take.v1beta3.ts deleted file mode 100644 index 9143e17e..00000000 --- a/ts/src/generated/index.akash.take.v1beta3.ts +++ /dev/null @@ -1,5 +0,0 @@ -/* eslint-disable */ - -export * from "./akash/take/v1beta3/query"; -export * from "./akash/take/v1beta3/params"; -export * from "./akash/take/v1beta3/genesis"; diff --git a/ts/src/generated/index.akash.ts b/ts/src/generated/index.akash.ts index f680aefc..461b3554 100644 --- a/ts/src/generated/index.akash.ts +++ b/ts/src/generated/index.akash.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as base from "./index.akash.base"; diff --git a/ts/src/generated/index.amino.ts b/ts/src/generated/index.amino.ts new file mode 100644 index 00000000..e31961d0 --- /dev/null +++ b/ts/src/generated/index.amino.ts @@ -0,0 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * from "./amino/amino"; diff --git a/ts/src/generated/index.cosmos.base.query.ts b/ts/src/generated/index.cosmos.base.query.ts index e68b936a..0463e256 100644 --- a/ts/src/generated/index.cosmos.base.query.ts +++ b/ts/src/generated/index.cosmos.base.query.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as v1beta1 from "./index.cosmos.base.query.v1beta1"; diff --git a/ts/src/generated/index.cosmos.base.query.v1beta1.ts b/ts/src/generated/index.cosmos.base.query.v1beta1.ts index e4d63a35..aca6f79a 100644 --- a/ts/src/generated/index.cosmos.base.query.v1beta1.ts +++ b/ts/src/generated/index.cosmos.base.query.v1beta1.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * from "./cosmos/base/query/v1beta1/pagination"; diff --git a/ts/src/generated/index.cosmos.base.ts b/ts/src/generated/index.cosmos.base.ts index 897dbd1c..96eb8442 100644 --- a/ts/src/generated/index.cosmos.base.ts +++ b/ts/src/generated/index.cosmos.base.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as v1beta1 from "./index.cosmos.base.v1beta1"; diff --git a/ts/src/generated/index.cosmos.base.v1beta1.ts b/ts/src/generated/index.cosmos.base.v1beta1.ts index ea5040f3..b4a2faa1 100644 --- a/ts/src/generated/index.cosmos.base.v1beta1.ts +++ b/ts/src/generated/index.cosmos.base.v1beta1.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * from "./cosmos/base/v1beta1/coin"; diff --git a/ts/src/generated/index.cosmos.msg.ts b/ts/src/generated/index.cosmos.msg.ts new file mode 100644 index 00000000..b474a9fc --- /dev/null +++ b/ts/src/generated/index.cosmos.msg.ts @@ -0,0 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * as v1 from "./index.cosmos.msg.v1"; diff --git a/ts/src/generated/index.cosmos.msg.v1.ts b/ts/src/generated/index.cosmos.msg.v1.ts new file mode 100644 index 00000000..fe926337 --- /dev/null +++ b/ts/src/generated/index.cosmos.msg.v1.ts @@ -0,0 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + +/* eslint-disable */ + +export * from "./cosmos/msg/v1/msg"; diff --git a/ts/src/generated/index.cosmos.ts b/ts/src/generated/index.cosmos.ts index 23e88326..ea6d1f25 100644 --- a/ts/src/generated/index.cosmos.ts +++ b/ts/src/generated/index.cosmos.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as base from "./index.cosmos.base"; diff --git a/ts/src/generated/index.cosmos_proto.ts b/ts/src/generated/index.cosmos_proto.ts index 74998ca7..713bfce4 100644 --- a/ts/src/generated/index.cosmos_proto.ts +++ b/ts/src/generated/index.cosmos_proto.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * from "./cosmos_proto/cosmos"; diff --git a/ts/src/generated/index.gogoproto.ts b/ts/src/generated/index.gogoproto.ts index 87f6077e..16813d26 100644 --- a/ts/src/generated/index.gogoproto.ts +++ b/ts/src/generated/index.gogoproto.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * from "./gogoproto/gogo"; diff --git a/ts/src/generated/index.google.api.ts b/ts/src/generated/index.google.api.ts index b1551d1a..dad9e389 100644 --- a/ts/src/generated/index.google.api.ts +++ b/ts/src/generated/index.google.api.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * from "./google/api/http"; diff --git a/ts/src/generated/index.google.protobuf.ts b/ts/src/generated/index.google.protobuf.ts index 6a64e501..89eb17fb 100644 --- a/ts/src/generated/index.google.protobuf.ts +++ b/ts/src/generated/index.google.protobuf.ts @@ -1,5 +1,9 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ -export * from "./google/protobuf/timestamp"; -export * from "./google/protobuf/descriptor"; export * from "./google/protobuf/empty"; +export * from "./google/protobuf/descriptor"; diff --git a/ts/src/generated/index.google.ts b/ts/src/generated/index.google.ts index 074cb468..4b5a6d1e 100644 --- a/ts/src/generated/index.google.ts +++ b/ts/src/generated/index.google.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as protobuf from "./index.google.protobuf"; diff --git a/ts/src/generated/index.k8s.io.apimachinery.pkg.api.resource.ts b/ts/src/generated/index.k8s.io.apimachinery.pkg.api.resource.ts index 80d7b3c8..1b71c0a2 100644 --- a/ts/src/generated/index.k8s.io.apimachinery.pkg.api.resource.ts +++ b/ts/src/generated/index.k8s.io.apimachinery.pkg.api.resource.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ -export * from "./k8s.io/apimachinery/pkg/api/resource/generated"; +export * from "./k8s/io/apimachinery/pkg/api/resource/generated"; diff --git a/ts/src/generated/index.k8s.io.apimachinery.pkg.api.ts b/ts/src/generated/index.k8s.io.apimachinery.pkg.api.ts index 7ca1ac32..01cc6d84 100644 --- a/ts/src/generated/index.k8s.io.apimachinery.pkg.api.ts +++ b/ts/src/generated/index.k8s.io.apimachinery.pkg.api.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as resource from "./index.k8s.io.apimachinery.pkg.api.resource"; diff --git a/ts/src/generated/index.k8s.io.apimachinery.pkg.ts b/ts/src/generated/index.k8s.io.apimachinery.pkg.ts index 132a6a60..47652c1c 100644 --- a/ts/src/generated/index.k8s.io.apimachinery.pkg.ts +++ b/ts/src/generated/index.k8s.io.apimachinery.pkg.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as api from "./index.k8s.io.apimachinery.pkg.api"; diff --git a/ts/src/generated/index.k8s.io.apimachinery.ts b/ts/src/generated/index.k8s.io.apimachinery.ts index 123796f7..a16c8918 100644 --- a/ts/src/generated/index.k8s.io.apimachinery.ts +++ b/ts/src/generated/index.k8s.io.apimachinery.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as pkg from "./index.k8s.io.apimachinery.pkg"; diff --git a/ts/src/generated/index.k8s.io.ts b/ts/src/generated/index.k8s.io.ts index 3b8034f0..4a6a498a 100644 --- a/ts/src/generated/index.k8s.io.ts +++ b/ts/src/generated/index.k8s.io.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as apimachinery from "./index.k8s.io.apimachinery"; diff --git a/ts/src/generated/index.k8s.ts b/ts/src/generated/index.k8s.ts index 64fa0f3c..fe7709d4 100644 --- a/ts/src/generated/index.k8s.ts +++ b/ts/src/generated/index.k8s.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as io from "./index.k8s.io"; diff --git a/ts/src/generated/index.ts b/ts/src/generated/index.ts index 30153d27..65249e11 100644 --- a/ts/src/generated/index.ts +++ b/ts/src/generated/index.ts @@ -1,3 +1,8 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ export * as google from "./index.google"; diff --git a/ts/src/generated/k8s.io/apimachinery/pkg/api/resource/generated.ts b/ts/src/generated/k8s/io/apimachinery/pkg/api/resource/generated.ts similarity index 83% rename from ts/src/generated/k8s.io/apimachinery/pkg/api/resource/generated.ts rename to ts/src/generated/k8s/io/apimachinery/pkg/api/resource/generated.ts index 75c284b0..969f2582 100644 --- a/ts/src/generated/k8s.io/apimachinery/pkg/api/resource/generated.ts +++ b/ts/src/generated/k8s/io/apimachinery/pkg/api/resource/generated.ts @@ -1,7 +1,13 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. +// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown +// source: k8s/io/apimachinery/pkg/api/resource/generated.proto + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; -import { messageTypeRegistry } from "../../../../../typeRegistry"; +import { messageTypeRegistry } from "../../../../../../typeRegistry"; /** * Quantity is a fixed-point representation of a number. @@ -98,22 +104,25 @@ function createBaseQuantity(): Quantity { return { $type: "k8s.io.apimachinery.pkg.api.resource.Quantity", string: "" }; } -export const Quantity = { +export const Quantity: MessageFns< + Quantity, + "k8s.io.apimachinery.pkg.api.resource.Quantity" +> = { $type: "k8s.io.apimachinery.pkg.api.resource.Quantity" as const, encode( message: Quantity, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.string !== undefined && message.string !== "") { writer.uint32(10).string(message.string); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): Quantity { + decode(input: BinaryReader | Uint8Array, length?: number): Quantity { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? 
reader.len : reader.pos + length; const message = createBaseQuantity(); while (reader.pos < end) { @@ -130,7 +139,7 @@ export const Quantity = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -169,22 +178,25 @@ function createBaseQuantityValue(): QuantityValue { }; } -export const QuantityValue = { +export const QuantityValue: MessageFns< + QuantityValue, + "k8s.io.apimachinery.pkg.api.resource.QuantityValue" +> = { $type: "k8s.io.apimachinery.pkg.api.resource.QuantityValue" as const, encode( message: QuantityValue, - writer: _m0.Writer = _m0.Writer.create(), - ): _m0.Writer { + writer: BinaryWriter = new BinaryWriter(), + ): BinaryWriter { if (message.string !== undefined && message.string !== "") { writer.uint32(10).string(message.string); } return writer; }, - decode(input: _m0.Reader | Uint8Array, length?: number): QuantityValue { + decode(input: BinaryReader | Uint8Array, length?: number): QuantityValue { const reader = - input instanceof _m0.Reader ? input : _m0.Reader.create(input); + input instanceof BinaryReader ? input : new BinaryReader(input); let end = length === undefined ? reader.len : reader.pos + length; const message = createBaseQuantityValue(); while (reader.pos < end) { @@ -201,7 +213,7 @@ export const QuantityValue = { if ((tag & 7) === 4 || tag === 0) { break; } - reader.skipType(tag & 7); + reader.skip(tag & 7); } return message; }, @@ -254,11 +266,16 @@ type DeepPartial = T extends Builtin ? { [K in Exclude]?: DeepPartial } : Partial; -if (_m0.util.Long !== Long) { - _m0.util.Long = Long as any; - _m0.configure(); -} - function isSet(value: any): boolean { return value !== null && value !== undefined; } + +interface MessageFns { + readonly $type: V; + encode(message: T, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): T; + fromJSON(object: any): T; + toJSON(message: T): unknown; + create(base?: DeepPartial): T; + fromPartial(object: DeepPartial): T; +} diff --git a/ts/src/generated/typeRegistry.ts b/ts/src/generated/typeRegistry.ts index 38158926..12671718 100644 --- a/ts/src/generated/typeRegistry.ts +++ b/ts/src/generated/typeRegistry.ts @@ -1,11 +1,16 @@ +// Code generated by protoc-gen-ts_proto. DO NOT EDIT. 
+// versions: +// protoc-gen-ts_proto v2.2.0 +// protoc unknown + /* eslint-disable */ +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import Long from "long"; -import _m0 from "protobufjs/minimal"; export interface MessageType { $type: Message["$type"]; - encode(message: Message, writer?: _m0.Writer): _m0.Writer; - decode(input: _m0.Reader | Uint8Array, length?: number): Message; + encode(message: Message, writer?: BinaryWriter): BinaryWriter; + decode(input: BinaryReader | Uint8Array, length?: number): Message; fromJSON(object: any): Message; toJSON(message: Message): unknown; fromPartial(object: DeepPartial): Message; diff --git a/ts/src/index.v1beta1.ts b/ts/src/index.v1beta1.ts deleted file mode 100644 index 6c13446c..00000000 --- a/ts/src/index.v1beta1.ts +++ /dev/null @@ -1,29 +0,0 @@ -export { - MsgSignProviderAttributes, - MsgDeleteProviderAttributes, -} from "./generated/index.akash.audit.v1beta1"; -export { - MsgCloseGroup, - MsgPauseGroup, - MsgStartGroup, - MsgCreateDeployment, - MsgDepositDeployment, - MsgUpdateDeployment, - MsgCloseDeployment, -} from "./generated/index.akash.deployment.v1beta1"; -export { - MsgCreateProvider, - MsgUpdateProvider, - MsgDeleteProvider, -} from "./generated/index.akash.provider.v1beta1"; -export { - MsgCreateCertificate, - MsgRevokeCertificate, -} from "./deprecated/index.akash.cert.v1beta1"; -export { - MsgCreateBid, - MsgCloseBid, - MsgCreateLease, - MsgWithdrawLease, - MsgCloseLease, -} from "./deprecated/index.akash.market.v1beta1"; diff --git a/ts/src/index.v1beta2.ts b/ts/src/index.v1beta2.ts deleted file mode 100644 index f3deeefc..00000000 --- a/ts/src/index.v1beta2.ts +++ /dev/null @@ -1,32 +0,0 @@ -export { - MsgSignProviderAttributes, - MsgDeleteProviderAttributes, -} from "./generated/index.akash.audit.v1beta2"; -export { - MsgCreateCertificate, - MsgRevokeCertificate, -} from "./generated/index.akash.cert.v1beta2"; -export { - MsgCloseGroup, - MsgPauseGroup, - MsgStartGroup, - MsgCreateDeployment, - MsgDepositDeployment, - MsgUpdateDeployment, - MsgCloseDeployment, -} from "./patch/index.akash.deployment.v1beta2"; -export { - MsgCreateBid, - MsgCloseBid, -} from "./generated/index.akash.market.v1beta2"; -export { - MsgCreateLease, - MsgWithdrawLease, - MsgCloseLease, -} from "./generated/index.akash.market.v1beta2"; -export { - MsgCreateProvider, - MsgUpdateProvider, - MsgDeleteProvider, -} from "./generated/index.akash.provider.v1beta2"; -export { Storage } from "./generated/index.akash.base.v1beta2"; diff --git a/ts/src/index.v1beta3.ts b/ts/src/index.v1beta3.ts deleted file mode 100644 index 8f8dcc7f..00000000 --- a/ts/src/index.v1beta3.ts +++ /dev/null @@ -1,33 +0,0 @@ -export { - MsgSignProviderAttributes, - MsgDeleteProviderAttributes, -} from "./generated/index.akash.audit.v1beta3"; -export { - MsgCreateCertificate, - MsgRevokeCertificate, -} from "./generated/index.akash.cert.v1beta3"; -export { - MsgCloseGroup, - MsgPauseGroup, - MsgStartGroup, - DepositDeploymentAuthorization, - MsgCreateDeployment, - MsgDepositDeployment, - MsgUpdateDeployment, - MsgCloseDeployment, -} from "./patch/index.akash.deployment.v1beta3"; -export { - MsgCreateBid, - MsgCloseBid, -} from "./generated/index.akash.market.v1beta3"; -export { - MsgCreateLease, - MsgWithdrawLease, - MsgCloseLease, -} from "./generated/index.akash.market.v1beta3"; -export { - MsgCreateProvider, - MsgUpdateProvider, - MsgDeleteProvider, -} from "./generated/index.akash.provider.v1beta3"; -export { Storage, GPU } from 
"./patch/index.akash.base.v1beta3"; diff --git a/ts/src/index.v1beta4.ts b/ts/src/index.v1beta4.ts deleted file mode 100644 index 733b948c..00000000 --- a/ts/src/index.v1beta4.ts +++ /dev/null @@ -1,7 +0,0 @@ -export { - MsgCreateBid, - MsgCloseBid, - MsgCreateLease, - MsgWithdrawLease, - MsgCloseLease, -} from "./generated/index.akash.market.v1beta4"; diff --git a/ts/src/patch/cosmos/base/v1beta1/coin.spec.ts b/ts/src/patch/cosmos/base/v1beta1/coin.spec.ts index 7de851e7..39a1bcf9 100644 --- a/ts/src/patch/cosmos/base/v1beta1/coin.spec.ts +++ b/ts/src/patch/cosmos/base/v1beta1/coin.spec.ts @@ -1,4 +1,4 @@ -import { Reader } from "protobufjs/minimal"; +import { BinaryReader } from "@bufbuild/protobuf/wire"; import * as coin from "./coin"; @@ -10,7 +10,7 @@ describe("DecCoin", () => { denom: "", amount: "1000", }).finish(); - const reader = new Reader(encodedCoin); + const reader = new BinaryReader(encodedCoin); const result = coin.DecCoin.decode(reader); expect(result.amount).toEqual("1000.00000000000000"); @@ -22,7 +22,7 @@ describe("DecCoin", () => { denom: "", amount: "1000.5", }).finish(); - const reader = new Reader(encodedCoin); + const reader = new BinaryReader(encodedCoin); const result = coin.DecCoin.decode(reader); expect(result.amount).toEqual("1000.50000000000000"); diff --git a/ts/src/patch/cosmos/base/v1beta1/coin.ts b/ts/src/patch/cosmos/base/v1beta1/coin.ts index edf3cca6..85bc8025 100644 --- a/ts/src/patch/cosmos/base/v1beta1/coin.ts +++ b/ts/src/patch/cosmos/base/v1beta1/coin.ts @@ -1,5 +1,4 @@ -import * as minimal from "protobufjs/minimal"; -import { Reader } from "protobufjs/minimal"; +import { BinaryReader, BinaryWriter } from "@bufbuild/protobuf/wire"; import * as coin from "../../../../generated/cosmos/base/v1beta1/coin.original"; import { DecCoin } from "../../../../generated/cosmos/base/v1beta1/coin.original"; @@ -8,8 +7,8 @@ const originalEncode = coin.DecCoin.encode; coin.DecCoin.encode = function encode( message: DecCoin, - writer: minimal.Writer = minimal.Writer.create(), -): minimal.Writer { + writer: BinaryWriter = new BinaryWriter(), +): BinaryWriter { const { amount } = message; const parts = amount.includes(".") ? 
message.amount.split(".") @@ -22,7 +21,7 @@ coin.DecCoin.encode = function encode( const originalDecode = coin.DecCoin.decode; coin.DecCoin.decode = function decode( - input: Reader | Uint8Array, + input: BinaryReader | Uint8Array, length?: number, ): coin.DecCoin { const message = originalDecode.apply(this, [input, length]); diff --git a/ts/src/patch/index.akash.base.v1beta3.ts b/ts/src/patch/index.akash.base.v1beta3.ts deleted file mode 100644 index 31559d97..00000000 --- a/ts/src/patch/index.akash.base.v1beta3.ts +++ /dev/null @@ -1,3 +0,0 @@ -export * from "../generated/index.akash.base.v1beta3"; -export * from "../generated/akash/base/v1beta3/storage"; -export * from "../generated/akash/base/v1beta3/gpu"; diff --git a/ts/src/patch/index.akash.base.v1beta4.ts b/ts/src/patch/index.akash.base.v1beta4.ts deleted file mode 100644 index 31559d97..00000000 --- a/ts/src/patch/index.akash.base.v1beta4.ts +++ /dev/null @@ -1,3 +0,0 @@ -export * from "../generated/index.akash.base.v1beta3"; -export * from "../generated/akash/base/v1beta3/storage"; -export * from "../generated/akash/base/v1beta3/gpu"; diff --git a/ts/src/patch/index.akash.deployment.v1beta2.ts b/ts/src/patch/index.akash.deployment.v1beta2.ts deleted file mode 100644 index 274ec064..00000000 --- a/ts/src/patch/index.akash.deployment.v1beta2.ts +++ /dev/null @@ -1,3 +0,0 @@ -export * from "../generated/index.akash.deployment.v1beta2"; -export * from "../generated/akash/deployment/v1beta2/groupmsg"; -export * from "../generated/akash/deployment/v1beta2/deploymentmsg"; diff --git a/ts/src/patch/index.akash.deployment.v1beta3.ts b/ts/src/patch/index.akash.deployment.v1beta3.ts deleted file mode 100644 index 1fc10f19..00000000 --- a/ts/src/patch/index.akash.deployment.v1beta3.ts +++ /dev/null @@ -1,5 +0,0 @@ -export * from "../generated/index.akash.deployment.v1beta3"; -export * from "../generated/akash/deployment/v1beta3/groupmsg"; -export * from "../generated/akash/deployment/v1beta3/deploymentmsg"; -export * from "../generated/akash/deployment/v1beta3/authz"; -export * from "../generated/akash/deployment/v1beta3/query"; diff --git a/ts/src/patch/index.akash.market.v1beta4.ts b/ts/src/patch/index.akash.market.v1beta4.ts deleted file mode 100644 index c95eba13..00000000 --- a/ts/src/patch/index.akash.market.v1beta4.ts +++ /dev/null @@ -1,5 +0,0 @@ -/* eslint-disable */ - -export * from "./../generated/index.akash.market.v1beta4"; -export * from "./../generated/akash/market/v1beta4/query"; -export * from "./../generated/akash/market/v1beta4/service";